[Dart-dev] [4362] DART/trunk/models/lorenz_96/shell_scripts: Rename this script to be consistent with other models and

nancy at ucar.edu
Thu May 13 12:56:06 MDT 2010


Revision: 4362
Author:   nancy
Date:     2010-05-13 12:56:06 -0600 (Thu, 13 May 2010)
Log Message:
-----------
Rename this script to be consistent with other models and
our convention of using a .csh extension on scripts.  Update
the script to add comments noting that async 4 is not supported
here (I added a pointer to where an example exists), and clean up
the section for batch execution without a queue system.  No
functional changes.
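
For quick reference, the submission commands mentioned in the script's own
comments are collected below.  This is just an illustrative summary; the
commands and machine names come straight from the script header in the diff
that follows.

   # LSF (lightning, coral, bluevista):
   bsub < run_filter.csh

   # PBS (bangkok, calgary):
   qsub run_filter.csh

   # no batch system, interactive run (e.g. LAM/MPI after 'lamboot'):
   mpirun -np 4 ./filter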

Added Paths:
-----------
    DART/trunk/models/lorenz_96/shell_scripts/run_filter.csh

Removed Paths:
-------------
    DART/trunk/models/lorenz_96/shell_scripts/runme_filter

-------------- next part --------------
Copied: DART/trunk/models/lorenz_96/shell_scripts/run_filter.csh (from rev 4300, DART/trunk/models/lorenz_96/shell_scripts/runme_filter)
===================================================================
--- DART/trunk/models/lorenz_96/shell_scripts/run_filter.csh	                        (rev 0)
+++ DART/trunk/models/lorenz_96/shell_scripts/run_filter.csh	2010-05-13 18:56:06 UTC (rev 4362)
@@ -0,0 +1,117 @@
+#!/bin/csh
+#
+# DART software - Copyright © 2004 - 2010 UCAR. This open source software is
+# provided by UCAR, "as is", without charge, subject to all terms of use at
+# http://www.image.ucar.edu/DAReS/DART/DART_download
+#
+# $Id$
+#
+# This is an example script for how to run the filter program
+# in parallel by submitting it to a batch system.  Note that
+# this version does NOT have an async 4 option because the
+# Lorenz 96 model is a serial-only program and does not use MPI.
+#
+# If you are looking for an example script for how to run async 4
+# (parallel/mpi filter AND parallel/mpi model) check the 
+# DART/models/template/shell_scripts directory.
+#
+#=============================================================================
+# This block of directives constitutes the preamble for the LSF queuing system
+# LSF is used on the IBM   Linux cluster 'lightning'
+# LSF is used on the IMAGe Linux cluster 'coral'
+# LSF is used on the IBM   'bluevista'
+# The queues on lightning and bluevista are supposed to be similar.
+#
+# the normal way to submit to the queue is:    bsub < run_filter.csh
+#
+# an explanation of the most common directives follows:
+# -J Job name (master script job.csh presumes filter_server.xxxx.log)
+# -o STDOUT filename
+# -e STDERR filename
+# -P      account
+# -q queue    cheapest == [standby, economy, (regular,debug), premium] == $$$$
+# -n number of processors  (really)
+# -W hh:mm  max execution time (required on some platforms)
+##=============================================================================
+#BSUB -J filter
+#BSUB -o filter.%J.log
+#BSUB -q economy
+#BSUB -n 6
+#BSUB -W 0:30
+#
+#=============================================================================
+# This block of directives constitutes the preamble for the PBS queuing system
+# PBS is used on the CGD   Linux cluster 'bangkok'
+# PBS is used on the CGD   Linux cluster 'calgary'
+# 
+# the normal way to submit to the queue is:    qsub run_filter.csh
+# 
+# an explanation of the most common directives follows:
+# -N     Job name
+# -r n   Declare job non-rerunable
+# -e <arg>  filename for standard error
+# -o <arg>  filename for standard out 
+# -q <arg>   Queue name (small, medium, long, verylong)
+# -l nodes=xx:ppn=2   requests BOTH processors on the node. On both bangkok 
+#                     and calgary, there is no way to 'share' the processors
+#                     on the node with another job, so you might as well use
+#                     them both.  (ppn == Processors Per Node)
+##=============================================================================
+#PBS -N filter
+#PBS -r n
+#PBS -e filter.err
+#PBS -o filter.log
+#PBS -q medium
+#PBS -l nodes=4:ppn=2
+
+# Check for the existence of variables that are set by different 
+# queuing mechanisms.  This way, we can make a single script which
+# works for any queuing system.
+
+if ($?LS_SUBCWD) then
+
+   # LSF has a list of processors already in a variable (LSB_HOSTS)
+
+   mpirun.lsf ./filter
+   
+
+else if ($?PBS_O_WORKDIR) then
+
+   # PBS has a list of processors in a file whose name is (PBS_NODEFILE)
+
+   mpirun ./filter
+
+else if ($?NODEFILE) then
+
+   # a linux cluster with mpich or lam or openmpi and no formal
+   # queueing system. alter this to match the required nodes and
+   # to construct a simple launch script.
+
+   setenv MY_NODEFILE  ~/nodelist
+   echo "node7:2" >  $MY_NODEFILE
+   echo "node5:2" >> $MY_NODEFILE
+   echo "node3:2" >> $MY_NODEFILE
+   echo "node1:2" >> $MY_NODEFILE
+
+cat > ./filterscript <<EOF
+ cd `pwd`
+ ./filter
+EOF
+   mpirun -np 4 -nolocal -machinefile $MY_NODEFILE ./filterscript
+
+else
+
+   # interactive - e.g. you are using 'lam-mpi' and you have
+   # already run 'lamboot' once to start the lam server.
+
+   mpirun -np 4 ./filter
+
+endif
+
+exit 0
+
+# <next few lines under version control, do not edit>
+# $URL$
+# $Revision$
+# $Date$
+
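
The "no formal queueing system" branch in the new script writes a host list
and a small wrapper script by hand.  Below is a minimal sketch of adapting
that branch to another cluster.  The host names are placeholders, and the
chmod step is an assumption on my part (some MPI launchers will not exec a
wrapper that is not marked executable); it is not part of the committed
script.

setenv MY_NODEFILE  ~/nodelist          # hypothetical host list; edit to match your cluster
echo "hostA:2" >  $MY_NODEFILE
echo "hostB:2" >> $MY_NODEFILE

cat > ./filterscript <<EOF
cd `pwd`
./filter
EOF

chmod +x ./filterscript                 # assumption: not in the committed script
mpirun -np 4 -nolocal -machinefile $MY_NODEFILE ./filterscript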

Deleted: DART/trunk/models/lorenz_96/shell_scripts/runme_filter
===================================================================
--- DART/trunk/models/lorenz_96/shell_scripts/runme_filter	2010-05-13 18:51:22 UTC (rev 4361)
+++ DART/trunk/models/lorenz_96/shell_scripts/runme_filter	2010-05-13 18:56:06 UTC (rev 4362)
@@ -1,117 +0,0 @@
-#!/bin/tcsh 
-#
-# DART software - Copyright © 2004 - 2010 UCAR. This open source software is
-# provided by UCAR, "as is", without charge, subject to all terms of use at
-# http://www.image.ucar.edu/DAReS/DART/DART_download
-#
-# $Id$
-#
-# start at a generic run script for the mpi version
-#
-#=============================================================================
-# This block of directives constitutes the preamble for the LSF queuing system
-# LSF is used on the IBM   Linux cluster 'lightning'
-# LSF is used on the IMAGe Linux cluster 'coral'
-# LSF is used on the IBM   'bluevista'
-# The queues on lightning and bluevista are supposed to be similar.
-#
-# the normal way to submit to the queue is:    bsub < runme_filter
-#
-# an explanation of the most common directives follows:
-# -J Job name (master script job.csh presumes filter_server.xxxx.log)
-# -o STDOUT filename
-# -e STDERR filename
-# -P      account
-# -q queue    cheapest == [standby, economy, (regular,debug), premium] == $$$$
-# -n number of processors  (really)
-# -W hh:mm  execution time (must be specified on some hosts)
-##=============================================================================
-#BSUB -J filter
-#BSUB -o filter.%J.log
-#BSUB -q regular
-#BSUB -n 4
-#BSUB -W 00:10
-#
-#
-##=============================================================================
-## This block of directives constitutes the preamble for the PBS queuing system
-## PBS is used on the CGD   Linux cluster 'bangkok'
-## PBS is used on the CGD   Linux cluster 'calgary'
-## 
-## the normal way to submit to the queue is:    qsub runme_filter
-## 
-## an explanation of the most common directives follows:
-## -N     Job name
-## -r n   Declare job non-rerunable
-## -e <arg>  filename for standard error
-## -o <arg>  filename for standard out 
-## -q <arg>   Queue name (small, medium, long, verylong)
-## -l nodes=xx:ppn=2   requests BOTH processors on the node. On both bangkok 
-##                     and calgary, there is no way to 'share' the processors
-##                     on the node with another job, so you might as well use
-##                     them both.  (ppn == Processors Per Node)
-##=============================================================================
-#PBS -N filter
-#PBS -r n
-#PBS -e filter.err
-#PBS -o filter.log
-#PBS -q medium
-#PBS -l nodes=4:ppn=2
-
-# A common strategy for the beginning is to check for the existence of
-# some variables that get set by the different queuing mechanisms.
-# This way, we know which queuing mechanism we are working with,
-# and can set 'queue-independent' variables for use for the remainder
-# of the script.
-
-if ($?LS_SUBCWD) then
-
-   # LSF has a list of processors already in a variable (LSB_HOSTS)
-   #  alias submit 'bsub < \!*'
-
-   mpirun.lsf ./filter
-   
-
-else if ($?PBS_O_WORKDIR) then
-
-   # PBS has a list of processors in a file whose name is (PBS_NODEFILE)
-   #  alias submit 'qsub \!*'
-
-   mpirun ./filter
-
-else if ($?OCOTILLO_NODEFILE) then
-
-   # ocotillo is a 'special case'. It is the only cluster I know of with
-   # no queueing system.  You must generate a list of processors in a
-   # file whose name is given to the mpirun command, and the executable
-   # needs to be wrapped with a script that cds to the right directory.
-   setenv OCOTILLO_NODEFILE  ~/nodelist
-   echo "node7:2" > $OCOTILLO_NODEFILE
-   echo "node5:2" >> $OCOTILLO_NODEFILE
-   echo "node3:2" >> $OCOTILLO_NODEFILE
-   echo "node1:2" >> $OCOTILLO_NODEFILE
-
-   cat > ./filterscript <<EOF
-cd `pwd`
-./filter
-EOF
-
-   mpirun -np 4 -nolocal -machinefile $OCOTILLO_NODEFILE ./filterscript
-
-else
-
-   # interactive - assume you are using 'lam-mpi' and that you have
-   # already run 'lamboot' once to start the lam server.
-   # alias submit 'mpirun \!*'
-
-   mpirun -np 4 ./filter
-
-endif
-
-exit 0
-
-# <next few lines under version control, do not edit>
-# $URL$
-# $Revision$
-# $Date$
-

