[Dart-dev] [3908] DART/trunk/models/lorenz_96: Moving a shell script to the proper directory.

nancy at ucar.edu
Thu Jun 4 15:53:17 MDT 2009


Copied: DART/trunk/models/lorenz_96/shell_scripts/runme_filter (from rev 3907, DART/trunk/models/lorenz_96/work/runme_filter)
===================================================================
--- DART/trunk/models/lorenz_96/shell_scripts/runme_filter	                        (rev 0)
+++ DART/trunk/models/lorenz_96/shell_scripts/runme_filter	2009-06-04 21:53:17 UTC (rev 3908)
@@ -0,0 +1,115 @@
+#!/bin/tcsh 
+#
+# Data Assimilation Research Testbed -- DART
+# Copyright 2004-2007, Data Assimilation Research Section
+# University Corporation for Atmospheric Research
+# Licensed under the GPL -- www.gpl.org/licenses/gpl.html
+#
+# <next few lines under version control, do not edit>
+# $URL$
+# $Id$
+# $Revision$
+# $Date$
+#
+# start of a generic run script for the mpi version
+#
+#=============================================================================
+# This block of directives constitutes the preamble for the LSF queuing system
+# LSF is used on the IBM   Linux cluster 'lightning'
+# LSF is used on the IMAGe Linux cluster 'coral'
+# LSF is used on the IBM   'bluevista'
+# The queues on lightning and bluevista are supposed to be similar.
+#
+# the normal way to submit to the queue is:    bsub < runme_filter
+#
+# an explanation of the most common directives follows:
+# -J Job name (master script job.csh presumes filter_server.xxxx.log)
+# -o STDOUT filename
+# -e STDERR filename
+# -P      account
+# -q queue    cheapest == [standby, economy, (regular,debug), premium] == $$$$
+# -n number of processors  (really)
+# -W hh:mm  execution time (must be specified on some hosts)
+##=============================================================================
+#BSUB -J filter
+#BSUB -o filter.%J.log
+#BSUB -q regular
+#BSUB -n 4
+#BSUB -W 00:10
+#
+#
+##=============================================================================
+## This block of directives constitutes the preamble for the PBS queuing system
+## PBS is used on the CGD   Linux cluster 'bangkok'
+## PBS is used on the CGD   Linux cluster 'calgary'
+## 
+## the normal way to submit to the queue is:    qsub runme_filter
+## 
+## an explanation of the most common directives follows:
+## -N     Job name
+## -r n   Declare job non-rerunnable
+## -e <arg>  filename for standard error
+## -o <arg>  filename for standard out 
+## -q <arg>   Queue name (small, medium, long, verylong)
+## -l nodes=xx:ppn=2   requests BOTH processors on the node. On both bangkok 
+##                     and calgary, there is no way to 'share' the processors
+##                     on the node with another job, so you might as well use
+##                     them both.  (ppn == Processors Per Node)
+##=============================================================================
+#PBS -N filter
+#PBS -r n
+#PBS -e filter.err
+#PBS -o filter.log
+#PBS -q medium
+#PBS -l nodes=4:ppn=2
+
+# A common strategy for the beginning is to check for the existence of
+# some variables that get set by the different queuing mechanisms.
+# This way, we know which queuing mechanism we are working with,
+# and can set 'queue-independent' variables for use for the remainder
+# of the script.
+
+if ($?LS_SUBCWD) then
+
+   # LSF has a list of processors already in a variable (LSB_HOSTS)
+   #  alias submit 'bsub < \!*'
+
+   mpirun.lsf ./filter
+   
+
+else if ($?PBS_O_WORKDIR) then
+
+   # PBS has a list of processors in a file whose name is in a variable (PBS_NODEFILE)
+   #  alias submit 'qsub \!*'
+
+   mpirun ./filter
+
+else if ($?OCOTILLO_NODEFILE) then
+
+   # ocotillo is a 'special case'. It is the only cluster I know of with
+   # no queueing system.  You must generate a list of processors in a
+   # file whose name is given to the mpirun command, and the executable
+   # needs to be wrapped with a script that cds to the right directory.
+   setenv OCOTILLO_NODEFILE  ~/nodelist
+   echo "node7:2" > $OCOTILLO_NODEFILE
+   echo "node5:2" >> $OCOTILLO_NODEFILE
+   echo "node3:2" >> $OCOTILLO_NODEFILE
+   echo "node1:2" >> $OCOTILLO_NODEFILE
+
+   cat > ./filterscript <<EOF
+cd `pwd`
+./filter
+EOF
+
+   mpirun -np 4 -nolocal -machinefile $OCOTILLO_NODEFILE ./filterscript
+
+else
+
+   # interactive - assume you are using 'lam-mpi' and that you have
+   # already run 'lamboot' once to start the lam server.
+   # alias submit 'mpirun \!*'
+
+   mpirun -np 4 ./filter
+
+endif
+
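
A quick usage note on the script above: it is written to be handed to whichever
queuing system is present, and the submission and monitoring commands differ
slightly between the two.  The sketch below is illustrative only; bsub/bjobs/bkill
and qsub/qstat/qdel are standard LSF and PBS utilities, the job name "filter" and
queue names simply mirror the #BSUB/#PBS directives above, and <jobid> is a
placeholder for the id printed at submission time.

   # LSF (lightning, coral, bluevista) -- redirect on stdin so bsub sees the #BSUB lines
   bsub < runme_filter      # submit
   bjobs -J filter          # check status by job name
   bkill -J filter          # remove the job if needed

   # PBS (bangkok, calgary) -- qsub reads the #PBS lines directly from the script
   qsub runme_filter        # submit
   qstat -u $USER           # list your queued and running jobs
   qdel <jobid>             # remove the job if needed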

Deleted: DART/trunk/models/lorenz_96/work/runme_filter
===================================================================
--- DART/trunk/models/lorenz_96/work/runme_filter	2009-06-04 21:52:14 UTC (rev 3907)
+++ DART/trunk/models/lorenz_96/work/runme_filter	2009-06-04 21:53:17 UTC (rev 3908)
@@ -1,115 +0,0 @@
-#!/bin/tcsh 
-#
-# Data Assimilation Research Testbed -- DART
-# Copyright 2004-2007, Data Assimilation Research Section
-# University Corporation for Atmospheric Research
-# Licensed under the GPL -- www.gpl.org/licenses/gpl.html
-#
-# <next few lines under version control, do not edit>
-# $URL$
-# $Id$
-# $Revision$
-# $Date$
-#
-# start at a generic run script for the mpi version
-#
-#=============================================================================
-# This block of directives constitutes the preamble for the LSF queuing system
-# LSF is used on the IBM   Linux cluster 'lightning'
-# LSF is used on the IMAGe Linux cluster 'coral'
-# LSF is used on the IBM   'bluevista'
-# The queues on lightning and bluevista are supposed to be similar.
-#
-# the normal way to submit to the queue is:    bsub < runme_filter
-#
-# an explanation of the most common directives follows:
-# -J Job name (master script job.csh presumes filter_server.xxxx.log)
-# -o STDOUT filename
-# -e STDERR filename
-# -P      account
-# -q queue    cheapest == [standby, economy, (regular,debug), premium] == $$$$
-# -n number of processors  (really)
-# -W hh:mm  execution time (must be specified on some hosts)
-##=============================================================================
-#BSUB -J filter
-#BSUB -o filter.%J.log
-#BSUB -q regular
-#BSUB -n 4
-#BSUB -W 00:10
-#
-#
-##=============================================================================
-## This block of directives constitutes the preamble for the PBS queuing system
-## PBS is used on the CGD   Linux cluster 'bangkok'
-## PBS is used on the CGD   Linux cluster 'calgary'
-## 
-## the normal way to submit to the queue is:    qsub runme_filter
-## 
-## an explanation of the most common directives follows:
-## -N     Job name
-## -r n   Declare job non-rerunable
-## -e <arg>  filename for standard error
-## -o <arg>  filename for standard out 
-## -q <arg>   Queue name (small, medium, long, verylong)
-## -l nodes=xx:ppn=2   requests BOTH processors on the node. On both bangkok 
-##                     and calgary, there is no way to 'share' the processors
-##                     on the node with another job, so you might as well use
-##                     them both.  (ppn == Processors Per Node)
-##=============================================================================
-#PBS -N filter
-#PBS -r n
-#PBS -e filter.err
-#PBS -o filter.log
-#PBS -q medium
-#PBS -l nodes=4:ppn=2
-
-# A common strategy for the beginning is to check for the existence of
-# some variables that get set by the different queuing mechanisms.
-# This way, we know which queuing mechanism we are working with,
-# and can set 'queue-independent' variables for use for the remainder
-# of the script.
-
-if ($?LS_SUBCWD) then
-
-   # LSF has a list of processors already in a variable (LSB_HOSTS)
-   #  alias submit 'bsub < \!*'
-
-   mpirun.lsf ./filter
-   
-
-else if ($?PBS_O_WORKDIR) then
-
-   # PBS has a list of processors in a file whose name is (PBS_NODEFILE)
-   #  alias submit 'qsub \!*'
-
-   mpirun ./filter
-
-else if ($?OCOTILLO_NODEFILE) then
-
-   # ocotillo is a 'special case'. It is the only cluster I know of with
-   # no queueing system.  You must generate a list of processors in a
-   # file whose name is given to the mpirun command, and the executable
-   # needs to be wrapped with a script that cds to the right directory.
-   setenv OCOTILLO_NODEFILE  ~/nodelist
-   echo "node7:2" > $OCOTILLO_NODEFILE
-   echo "node5:2" >> $OCOTILLO_NODEFILE
-   echo "node3:2" >> $OCOTILLO_NODEFILE
-   echo "node1:2" >> $OCOTILLO_NODEFILE
-
-   cat > ./filterscript <<EOF
-cd `pwd`
-./filter
-EOF
-
-   mpirun -np 4 -nolocal -machinefile $OCOTILLO_NODEFILE ./filterscript
-
-else
-
-   # interactive - assume you are using 'lam-mpi' and that you have
-   # already run 'lamboot' once to start the lam server.
-   # alias submit 'mpirun \!*'
-
-   mpirun -np 4 ./filter
-
-endif
-
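
Two small notes on the last branches of the script, for anyone running it outside
a batch system.  The wrapper created in the ocotillo branch is written without a
#!/bin/tcsh line and is never marked executable, so depending on the mpirun in use
a "chmod +x ./filterscript" may be needed before it can be launched.  For the final
interactive branch, a minimal LAM/MPI session looks roughly like the sketch below;
lamboot and lamhalt are standard LAM/MPI commands, and ~/lamhosts is a hypothetical
boot-schema file listing one host per line.

   lamboot -v ~/lamhosts    # start the LAM daemons once per login session
   mpirun -np 4 ./filter    # what the script's final 'else' branch runs
   lamhalt                  # shut the LAM daemons down when finished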

