[Dart-dev] [4403] DART/trunk/models/POP/shell_scripts: Made both scripts look as similar as possible, particularly with respect to the batch system directives
nancy at ucar.edu
Wed Jun 30 16:48:15 MDT 2010
Revision: 4403
Author: thoar
Date: 2010-06-30 16:48:15 -0600 (Wed, 30 Jun 2010)
Log Message:
-----------
Made both scripts look as similar as possible, particularly
with respect to the batch system directives.
The queueing system block also sets the value of the
MPI command to be used by advance_model.csh.
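A minimal sketch of that mechanism, using the MPI variable name and the
${MPI} ../pop call from the diffs below; the per-queue launcher values
are illustrative and site-specific:

    # in the batch script: pick the launcher once, per queueing system
    if ($?LSB_QUEUE) then
        setenv MPI mpirun.lsf     # LSF
    else if ($?PBS_QUEUE) then
        setenv MPI mpirun         # PBS (illustrative path)
    else
        setenv MPI csh            # interactive / serial test runs
    endif

    # in advance_model.csh: inherit whatever was exported above
    ${MPI} ../pop || exit 3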
Modified Paths:
--------------
DART/trunk/models/POP/shell_scripts/advance_model.csh
DART/trunk/models/POP/shell_scripts/run_filter.batch
DART/trunk/models/POP/shell_scripts/run_perfect_model_obs.batch
Property Changed:
----------------
DART/trunk/models/POP/shell_scripts/run_filter.batch
Modified: DART/trunk/models/POP/shell_scripts/advance_model.csh
===================================================================
--- DART/trunk/models/POP/shell_scripts/advance_model.csh 2010-06-30 20:30:13 UTC (rev 4402)
+++ DART/trunk/models/POP/shell_scripts/advance_model.csh 2010-06-30 22:48:15 UTC (rev 4403)
@@ -128,10 +128,11 @@
# last restart. The LANL version has no such mechanism, but the
# filename can be predicted from the pop_in namelist information.
#----------------------------------------------------------------------
+ # the value of MPI is inherited
rm -f ocn.log.*
- mpirun.lsf ../pop || exit 3
+ ${MPI} ../pop || exit 3
grep "Successful completion of POP run" ocn.log.*
set popstatus = $status
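This one-line substitution works because advance_model.csh runs as a child
process of the batch script: csh environment variables created with setenv
are inherited across that boundary, while shell-local set variables are not.
A two-line illustration with a hypothetical launcher string:

    setenv MPI "mpirun -np 16"      # exported: visible inside advance_model.csh
    set MPICMD = "mpirun -np 16"    # shell-local: invisible to child scripts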
Modified: DART/trunk/models/POP/shell_scripts/run_filter.batch
===================================================================
--- DART/trunk/models/POP/shell_scripts/run_filter.batch 2010-06-30 20:30:13 UTC (rev 4402)
+++ DART/trunk/models/POP/shell_scripts/run_filter.batch 2010-06-30 22:48:15 UTC (rev 4403)
@@ -4,7 +4,7 @@
# provided by UCAR, "as is", without charge, subject to all terms of use at
# http://www.image.ucar.edu/DAReS/DART/DART_download
#
-# $Id: $
+# $Id$
#
# Script to assimilate observations using DART and the POP ocean model.
# This presumes two directories exist that contain all the required bits
@@ -25,6 +25,7 @@
# -q queue cheapest == [standby, economy, (regular,debug), premium] == $$$$
# -n number of processors (really)
##=============================================================================
+#
#BSUB -J filter
#BSUB -o filter.%J.log
#BSUB -q regular
@@ -51,12 +52,13 @@
## on the node with another job, so you might as well use
## them both. (ppn == Processors Per Node)
##=============================================================================
+#
#PBS -N filter
#PBS -r n
#PBS -e filter.err
#PBS -o filter.log
-#PBS -q dedicated
-#PBS -l nodes=10:ppn=2
+#PBS -q medium
+#PBS -l nodes=8:ppn=2
#----------------------------------------------------------------------
# Turns out the scripts are a lot more flexible if you don't rely on
@@ -93,7 +95,7 @@
else
#-------------------------------------------------------------------
- # You can run this interactively too ... in certain circumstances
+ # You can run this interactively to check syntax, file motion, etc.
#-------------------------------------------------------------------
setenv ORIGINALDIR `pwd`
@@ -113,7 +115,7 @@
echo "${JOBNAME} ($JOBID) submitted from $MYHOST"
echo "${JOBNAME} ($JOBID) running in queue $MYQUEUE"
echo "${JOBNAME} ($JOBID) running on $HOST"
-echo "${JOBNAME} ($JOBID) started at "`date`
+echo "${JOBNAME} ($JOBID) started at "`date`
echo
#----------------------------------------------------------------------
@@ -391,7 +393,7 @@
./advance_model.csh 0 ${ensemble_size} filter_control00000 || exit 9
echo "restarting filter."
- mpirun.lsf ./wakeup_filter
+ ${MPI} ./wakeup_filter
else
@@ -431,18 +433,18 @@
# for compas
setenv NUM_PROCS `cat nodelist-pgi | wc -l`
set MPIRUN = /opt/mpich/myrinet/pgi/bin/mpirun
- set MPICMD = $MPIRUN -np $NUM_PROCS -nolocal -machinefile nodelist-pgi
+ setenv MPI "$MPIRUN -np $NUM_PROCS -nolocal -machinefile nodelist-pgi"
# for atlas-pgi
setenv NUM_PROCS `cat nodelist-pgi | wc -l`
set MPIRUN = /share/apps/mpich1/pgi/bin/mpirun
- set MPICMD = $MPIRUN -np $NUM_PROCS -nolocal -machinefile nodelist-pgi
+ setenv MPI "$MPIRUN -np $NUM_PROCS -nolocal -machinefile nodelist-pgi"
# for atlas-gfortran
set MPIRUN = /share/apps/openmpi/gfortran/bin/mpirun
- set MPICMD = $MPIRUN --hostfile nodelist-gfortran --mca mtl mx --mca pml cm -np 72
+ setenv MPI "$MPIRUN --hostfile nodelist-gfortran --mca mtl mx --mca pml cm -np 72"
- echo "MPICMD = ${MPICMD}"
+ echo "MPI = ${MPI}"
# filter runs in parallel until time to do a model advance,
# and then this script starts up the POP jobs, each one
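Besides the rename from MPICMD to MPI, the quoting and the switch to setenv
each fix a real csh pitfall: an unquoted multi-word set assignment does not
store the whole command line (csh parses the words after the first value as
further set arguments), and a plain set variable would not be inherited by
advance_model.csh anyway. A minimal demonstration, with a hypothetical
mpirun path:

    # broken: csh rejects '-np' as a variable name after the first word
    set MPICMD = /usr/bin/mpirun -np 16 -machinefile nodelist-pgi

    # fixed: one quoted word, exported so child scripts see ${MPI}
    setenv MPI "/usr/bin/mpirun -np 16 -machinefile nodelist-pgi"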
@@ -458,7 +460,7 @@
# this starts filter but also returns control back to
# this script immediately.
- (setenv HOME $filterhome; ${MPICMD} ./filter) &
+ (setenv HOME $filterhome; ${MPI} ./filter) &
while ( -e filter_to_model.lock )
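For context, the surrounding loop is a lock-file handshake: filter runs in
the background and blocks itself whenever the ensemble must be advanced,
signalling through filter_to_model.lock; this script then advances the
members and runs wakeup_filter to release it. A condensed sketch of the
protocol (the full script reads the lock file's contents to decide between
advancing and finishing):

    # start filter; it blocks itself when a model advance is needed
    (setenv HOME $filterhome; ${MPI} ./filter) &

    while ( -e filter_to_model.lock )
        set todo = `cat < filter_to_model.lock`
        if ( "${todo}" == "advance" ) then
            ./advance_model.csh 0 ${ensemble_size} filter_control00000 || exit 9
            ${MPI} ./wakeup_filter      # unblock filter for the next cycle
        else if ( "${todo}" == "finished" ) then
            break
        else
            sleep 5                     # lock present but not yet ready
        endif
    end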
@@ -481,7 +483,7 @@
./advance_model.csh 0 ${ensemble_size} filter_control00000 || exit 9
echo "restarting filter."
- ${MPICMD} ./wakeup_filter
+ ${MPI} ./wakeup_filter
else
@@ -531,12 +533,13 @@
${COPY} input.nml ${experiment}/DART
${COPY} *.csh ${experiment}/DART
${COPY} $myname ${experiment}/DART
+
ls -lrt
-exit $status
+exit 0
# <next few lines under version control, do not edit>
-# $URL:$
-# $Revision: $
-# $Date: $
+# $URL$
+# $Revision$
+# $Date$
Property changes on: DART/trunk/models/POP/shell_scripts/run_filter.batch
___________________________________________________________________
Added: svn:keywords
+ Date Revision Author HeadURL Id
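For reference, the keyword anchors fixed above ($Id$, $URL$, $Revision$,
$Date$) only expand when the svn:keywords property is set on the file; this
property change corresponds to a command of the form:

    svn propset svn:keywords "Date Revision Author HeadURL Id" run_filter.batch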
Modified: DART/trunk/models/POP/shell_scripts/run_perfect_model_obs.batch
===================================================================
--- DART/trunk/models/POP/shell_scripts/run_perfect_model_obs.batch 2010-06-30 20:30:13 UTC (rev 4402)
+++ DART/trunk/models/POP/shell_scripts/run_perfect_model_obs.batch 2010-06-30 22:48:15 UTC (rev 4403)
@@ -40,6 +40,32 @@
#BSUB -R "span[ptile=2]"
#BSUB -W 2:00
#BSUB -m "cr0128en cr0129en cr0130en cr0131en cr0132en cr0133en cr0134en cr0135en cr0136en cr0137en cr0138en cr0139en cr0140en cr0141en cr0202en cr0201en"
+#
+##=============================================================================
+## This block of directives constitutes the preamble for the PBS queuing system
+## PBS is used on the CGD Linux cluster 'bangkok'
+## PBS is used on the CGD Linux cluster 'calgary'
+##
+## the normal way to submit to the queue is: qsub run_perfect_model_obs
+##
+## an explanation of the most common directives follows:
+## -N Job name
+## -r n Declare job non-rerunable
+## -e <arg> filename for standard error
+## -o <arg> filename for standard out
+## -q <arg> Queue name (small, medium, long, verylong)
+## -l nodes=xx:ppn=2 requests BOTH processors on the node. On both bangkok
+## and calgary, there is no way to 'share' the processors
+## on the node with another job, so you might as well use
+## them both. (ppn == Processors Per Node)
+##=============================================================================
+#
+#PBS -N POP_OSSE
+#PBS -r n
+#PBS -e POP_OSSE.err
+#PBS -o POP_OSSE.log
+#PBS -q medium
+#PBS -l nodes=8:ppn=2
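A quick usage sketch for the new PBS block, using the names set by the
directives above:

    qsub run_perfect_model_obs.batch   # submit to the 'medium' queue
    qstat -u $USER                     # the job appears as POP_OSSE
    # stdout/stderr land in POP_OSSE.log / POP_OSSE.err per -o and -e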
#----------------------------------------------------------------------
# Turns out the scripts are a lot more flexible if you don't rely on
@@ -47,14 +73,32 @@
# 'generic' names and using the generics throughout the remainder.
#----------------------------------------------------------------------
-if ($?LSB_HOSTS) then
+if ($?LSB_QUEUE) then
+ #-------------------------------------------------------------------
+ # This is used by LSF
+ #-------------------------------------------------------------------
+
setenv ORIGINALDIR $LS_SUBCWD
- setenv JOBNAME $LSB_OUTPUTFILE:ar
+ setenv JOBNAME $LSB_JOBNAME
setenv JOBID $LSB_JOBID
setenv MYQUEUE $LSB_QUEUE
setenv MYHOST $LSB_SUB_HOST
+ setenv MPI csh
+else if ($?PBS_QUEUE) then
+
+ #-------------------------------------------------------------------
+ # This is used by PBS
+ #-------------------------------------------------------------------
+
+ setenv ORIGINALDIR $PBS_O_WORKDIR
+ setenv JOBNAME $PBS_JOBNAME
+ setenv JOBID $PBS_JOBID
+ setenv MYQUEUE $PBS_QUEUE
+ setenv MYHOST $PBS_O_HOST
+ setenv MPI csh
+
else
#-------------------------------------------------------------------
@@ -65,19 +109,20 @@
setenv JOBNAME POP
setenv JOBID $$
setenv MYQUEUE Interactive
- setenv MYHOST $host
+ setenv MYHOST $HOST
+ setenv MPI csh
endif
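All three branches now establish the same generic names (ORIGINALDIR,
JOBNAME, JOBID, MYQUEUE, MYHOST, MPI), so everything after the endif is
scheduler-agnostic. Because the interactive branch needs nothing from a
queueing system, the detection logic can be exercised from a login shell;
a minimal sketch:

    # neither LSB_QUEUE nor PBS_QUEUE is set in a login shell, so the
    # interactive branch fires: MYQUEUE becomes 'Interactive', MPI 'csh'
    csh run_perfect_model_obs.batch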
#----------------------------------------------------------------------
-# Just an echo of job attributes
+# Just an echo of the job attributes
#----------------------------------------------------------------------
echo
echo "${JOBNAME} ($JOBID) submitted from $ORIGINALDIR"
echo "${JOBNAME} ($JOBID) submitted from $MYHOST"
echo "${JOBNAME} ($JOBID) running in queue $MYQUEUE"
-echo "${JOBNAME} ($JOBID) running on $host"
+echo "${JOBNAME} ($JOBID) running on $HOST"
echo "${JOBNAME} ($JOBID) started at "`date`
echo
@@ -137,8 +182,8 @@
${COPY} ${DARTDIR}/shell_scripts/advance_model.csh .
# data files
+ ${COPY} ${DARTDIR}/work/input.nml .
${COPY} ${DARTDIR}/work/obs_seq.in .
- ${COPY} ${DARTDIR}/work/input.nml .
#-----------------------------------------------------------------------------
# Get the POP executable, control files, and data files.