[Dart-dev] [4398] DART/trunk/models/POP: This advances the LANL/ POP 2_0_1 version of POP ... but still
nancy at ucar.edu
nancy at ucar.edu
Mon Jun 28 16:53:23 MDT 2010
Revision: 4398
Author: thoar
Date: 2010-06-28 16:53:23 -0600 (Mon, 28 Jun 2010)
Log Message:
-----------
This advances the LANL/POP 2_0_1 version of POP ... but still cannot be used
for assimilation until the POP code is modified to do a forward Euler
timestep. This compiles and runs on coral (SLES10) with the
ifort (IFORT) 10.1 20090203 compiler with the following flags:
FFLAGS = -O0 -fpe0 -vec-report0 -assume byterecl
Coral is an Intel-based machine - so all binary files were little-endian.
I like to append a ".le" suffix on those files.
It was checked in the gx3v5 configuration.
For coral with the openmpi framework, it is necessary to
specify input.nml:&mpi_utilities_nml:reverse_task_layout = .true.,
For bluefire ... it must be .false. (the default).
Modified Paths:
--------------
DART/trunk/models/POP/shell_scripts/advance_model.csh
DART/trunk/models/POP/shell_scripts/run_perfect_model_obs.batch
DART/trunk/models/POP/work/input.nml
Added Paths:
-----------
DART/trunk/models/POP/shell_scripts/run_filter.batch
-------------- next part --------------
Modified: DART/trunk/models/POP/shell_scripts/advance_model.csh
===================================================================
--- DART/trunk/models/POP/shell_scripts/advance_model.csh 2010-06-28 20:48:10 UTC (rev 4397)
+++ DART/trunk/models/POP/shell_scripts/advance_model.csh 2010-06-28 22:53:23 UTC (rev 4398)
@@ -103,7 +103,7 @@
cp -pv ../${RESTARTFILE} pop.r.nc || exit 2
else
- echo "Pointer file for ensemble member $ensemble_member is missing."
+ echo "ERROR: Pointer file for ensemble member $ensemble_member is missing."
echo "Looking for "`pwd`" ../rpointer.ocn.${ensemble_member}.restart"
echo "Exiting ... (pointer file not found in CENTRALDIR)"
exit 2
@@ -129,7 +129,17 @@
# filename can be predicted from the pop_in namelist information.
#----------------------------------------------------------------------
+ rm -f ocn.log.*
+
mpirun.lsf ../pop || exit 3
+
+ grep "Successful completion of POP run" ocn.log.*
+ set popstatus = $status
+ if ( $popstatus != 0 ) then
+ echo "ERROR - POP ensemble member $ensemble_member did not complete successfully"
+ echo "ERROR - POP ensemble member $ensemble_member did not complete successfully"
+ exit 3
+ endif
#----------------------------------------------------------------------
# Block 4: Convert the ocean model output to form needed by DART
@@ -138,9 +148,18 @@
ls -lrt
# POP makes a new restart file and updates the pointer file
+ # Rename the POP pointer file contents to contain the ensemble member info
+
set RESTARTFILE = `head -1 rpointer.ocn.restart`
- echo "POP member $ensemble_member made restart file $RESTARTFILE"
- ln -svf ${RESTARTFILE} pop.r.nc || exit 2
+ set NEWFILE = `echo $RESTARTFILE | sed -e "s/pop/pop.$ensemble_member/"`
+ echo "POP member $ensemble_member made restart file $NEWFILE"
+
+ mv -v $RESTARTFILE $NEWFILE
+
+ echo $NEWFILE >! rpointer.ocn.restart
+ echo "RESTART_FMT=nc" >> rpointer.ocn.restart
+
+ ln -svf ${NEWFILE} pop.r.nc || exit 4
# pop_to_dart reads the restart file after the model advance and writes
# out an updated DART 'initial conditions' file. This initial conditions
@@ -153,7 +172,7 @@
# Move the updated files back to 'centraldir'
mv -v dart.ud ../$output_file || exit 4
mv -v rpointer.ocn.restart ../rpointer.ocn.${ensemble_member}.restart || exit 4
- mv -v ${RESTARTFILE} ../${RESTARTFILE} || exit 4
+ mv -v ${NEWFILE} ../${NEWFILE} || exit 4
# bookkeeping
Added: DART/trunk/models/POP/shell_scripts/run_filter.batch
===================================================================
--- DART/trunk/models/POP/shell_scripts/run_filter.batch (rev 0)
+++ DART/trunk/models/POP/shell_scripts/run_filter.batch 2010-06-28 22:53:23 UTC (rev 4398)
@@ -0,0 +1,542 @@
+#!/bin/csh
+#
+# DART software - Copyright (C) 2004 - 2010 UCAR. This open source software is
+# provided by UCAR, "as is", without charge, subject to all terms of use at
+# http://www.image.ucar.edu/DAReS/DART/DART_download
+#
+# $Id: $
+#
+# Script to assimilate observations using DART and the POP ocean model.
+# This presumes two directories exist that contain all the required bits
+# for POP and for DART.
+#
+#=============================================================================
+# This block of directives constitutes the preamble for the LSF queuing system
+# LSF is used on the IMAGe Linux cluster 'coral'
+# LSF is used on the IBM 'bluefire'
+#
+# the normal way to submit to the queue is: bsub < run_filter
+#
+# an explanation of the most common directives follows:
+# -J Job name (master script job.csh presumes filter_server.xxxx.log)
+# -o STDOUT filename
+# -e STDERR filename
+# -P account
+# -q queue cheapest == [standby, economy, (regular,debug), premium] == $$$$
+# -n number of processors (really)
+##=============================================================================
+#BSUB -J filter
+#BSUB -o filter.%J.log
+#BSUB -q regular
+#BSUB -n 16
+#BSUB -P 86850054
+#BSUB -W 2:00
+#BSUB -N -u ${USER}@ucar.edu
+#
+##=============================================================================
+## This block of directives constitutes the preamble for the PBS queuing system
+## PBS is used on the CGD Linux cluster 'bangkok'
+## PBS is used on the CGD Linux cluster 'calgary'
+##
+## the normal way to submit to the queue is: qsub run_filter
+##
+## an explanation of the most common directives follows:
+## -N Job name
+## -r n Declare job non-rerunable
+## -e <arg> filename for standard error
+## -o <arg> filename for standard out
+## -q <arg> Queue name (small, medium, long, verylong)
+## -l nodes=xx:ppn=2 requests BOTH processors on the node. On both bangkok
+## and calgary, there is no way to 'share' the processors
+## on the node with another job, so you might as well use
+## them both. (ppn == Processors Per Node)
+##=============================================================================
+#PBS -N filter
+#PBS -r n
+#PBS -e filter.err
+#PBS -o filter.log
+#PBS -q dedicated
+#PBS -l nodes=10:ppn=2
+
+#----------------------------------------------------------------------
+# Turns out the scripts are a lot more flexible if you don't rely on
+# the queuing-system-specific variables -- so I am converting them to
+# 'generic' names and using the generics throughout the remainder.
+#----------------------------------------------------------------------
+
+if ($?LSB_QUEUE) then
+
+ #-------------------------------------------------------------------
+ # This is used by LSF
+ #-------------------------------------------------------------------
+
+ setenv ORIGINALDIR $LS_SUBCWD
+ setenv JOBNAME $LSB_JOBNAME
+ setenv JOBID $LSB_JOBID
+ setenv MYQUEUE $LSB_QUEUE
+ setenv MYHOST $LSB_SUB_HOST
+ setenv MPI mpirun.lsf
+
+else if ($?PBS_QUEUE) then
+
+ #-------------------------------------------------------------------
+ # This is used by PBS
+ #-------------------------------------------------------------------
+
+ setenv ORIGINALDIR $PBS_O_WORKDIR
+ setenv JOBNAME $PBS_JOBNAME
+ setenv JOBID $PBS_JOBID
+ setenv MYQUEUE $PBS_QUEUE
+ setenv MYHOST $PBS_O_HOST
+ setenv MPI mpirun
+
+else
+
+ #-------------------------------------------------------------------
+ # You can run this interactively too ... in certain circumstances
+ #-------------------------------------------------------------------
+
+ setenv ORIGINALDIR `pwd`
+ setenv JOBNAME POP
+ setenv JOBID $$
+ setenv MYQUEUE Interactive
+ setenv MYHOST $HOST
+
+endif
+
+#----------------------------------------------------------------------
+# Just an echo of the job attributes
+#----------------------------------------------------------------------
+
+echo
+echo "${JOBNAME} ($JOBID) submitted from $ORIGINALDIR"
+echo "${JOBNAME} ($JOBID) submitted from $MYHOST"
+echo "${JOBNAME} ($JOBID) running in queue $MYQUEUE"
+echo "${JOBNAME} ($JOBID) running on $HOST"
+echo "${JOBNAME} ($JOBID) started at "`date`
+echo
+
+#----------------------------------------------------------------------
+# Make a unique, (empty, clean) temporary directory.
+#----------------------------------------------------------------------
+
+setenv TMPDIR /ptmp/${user}/${JOBNAME}/job_${JOBID}
+
+mkdir -p ${TMPDIR}
+cd ${TMPDIR}
+
+set CENTRALDIR = `pwd`
+set myname = $0 # this is the name of this script
+
+# some systems don't like the -v option to any of the following
+
+set OSTYPE = `uname -s`
+switch ( ${OSTYPE} )
+ case IRIX64:
+ setenv REMOVE 'rm -rf'
+ setenv COPY 'cp -p'
+ setenv MOVE 'mv -f'
+ breaksw
+ case AIX:
+ setenv REMOVE 'rm -rf'
+ setenv COPY 'cp -p'
+ setenv MOVE 'mv -f'
+ breaksw
+ default:
+ setenv REMOVE 'rm -rvf'
+ setenv COPY 'cp -vp'
+ setenv MOVE 'mv -fv'
+ breaksw
+endsw
+
+echo "${JOBNAME} ($JOBID) CENTRALDIR == $CENTRALDIR"
+
+#-----------------------------------------------------------------------------
+# ensure namelists have desired values ...
+#-----------------------------------------------------------------------------
+# We need to run the editor in batch mode. If you have 'vim' it needs
+# one flag; if you have only the older vanilla 'vi' you need another.
+# On several systems 'vi' is a link to 'vim' and uses the newer syntax
+# so you cannot distinguish which flag will be needed based only on name.
+# First try to run 'vim' by full name and then back off to plain 'vi'
+# if it is not found. Punt if neither is found.
+#-------------------------------------------------------------------------
+set VI_EXE = `which vim`
+if ( -x "${VI_EXE}" ) then
+ setenv VI 'vim -e'
+else
+ set VI_EXE = `which vi`
+ if ( -x "${VI_EXE}" ) then
+ setenv VI 'vi -s'
+ else
+ echo ""
+ echo "Neither the vim nor the vi editor were found. This script"
+ echo "cannot continue unless it can use one of them to update"
+ echo "the test input namelist files."
+ echo ""
+ exit 2
+ endif
+endif
+
+#-----------------------------------------------------------------------------
+# Set variables containing various directory names where we will GET things
+#-----------------------------------------------------------------------------
+
+set DARTDIR = /fs/image/home/${user}/SVN/DART/models/POP
+set POPDIR = /ptmp/${user}/POP/osse
+set OBSERVATIONDIR = /ptmp/${user}/POP_OSSE/1_7_Jan2000
+
+#-----------------------------------------------------------------------------
+# Get the DART executables, scripts, and input files
+#-----------------------------------------------------------------------------
+
+# executables
+ ${COPY} ${DARTDIR}/work/filter .
+ ${COPY} ${DARTDIR}/work/wakeup_filter .
+ ${COPY} ${DARTDIR}/work/dart_to_pop .
+ ${COPY} ${DARTDIR}/work/pop_to_dart .
+ ${COPY} ${DARTDIR}/work/restart_file_tool .
+
+# shell scripts
+ ${COPY} ${DARTDIR}/shell_scripts/advance_model.csh .
+
+# data files
+ ${COPY} ${DARTDIR}/work/input.nml .
+ ${COPY} ${OBSERVATIONDIR}/obs_seq.out .
+
+#-----------------------------------------------------------------------------
+# Get the POP executable, control files, and data files.
+# trying to use the CCSM naming conventions
+#-----------------------------------------------------------------------------
+
+ ${COPY} ${POPDIR}/pop .
+ ${COPY} ${POPDIR}/pop_in.part1 .
+ ${COPY} ${POPDIR}/pop_in.part2 .
+
+ ${COPY} ${POPDIR}/gx3v5_tavg_contents .
+ ${COPY} ${POPDIR}/gx3v5_movie_contents .
+ ${COPY} ${POPDIR}/gx3v5_history_contents .
+ ${COPY} ${POPDIR}/gx3v5_transport_contents .
+
+ ${COPY} ${POPDIR}/vert_grid.gx3v5 .
+ ${COPY} ${POPDIR}/horiz_grid.gx3v5.r8ieee.le .
+ ${COPY} ${POPDIR}/topography.gx3v5.i4ieee.le .
+
+#-----------------------------------------------------------------------------
+# Determine the number of ensemble members from input.nml,
+# It may exist in more than one place - we will use the first instance.
+# Parse out the filter_nml string and use the next hunk of lines.
+# ditto for the advance command
+#-----------------------------------------------------------------------------
+
+set ENSEMBLESTRING = `grep -A 42 filter_nml input.nml | grep ens_size`
+set ADVANCESTRING = `grep -A 42 filter_nml input.nml | grep adv_ens_command`
+set ensemble_size = `echo $ENSEMBLESTRING[3] | sed -e "s#,##"`
+set ADV_CMD = `echo $ADVANCESTRING[3] | sed -e 's#,##' -e 's#"##g'`
+
+echo "The model advance command is ${ADV_CMD}"
+
+#-----------------------------------------------------------------------------
+# detect whether the model is supposed to run as an MPI job or not
+# by reading the "async = " from the &filter_nml namelist in input.nml.
+#-----------------------------------------------------------------------------
+
+set ASYNCSTRING = `grep -A 42 filter_nml input.nml | grep async`
+set ASYNC_TYPE = `echo $ASYNCSTRING[3] | sed -e 's#,##'`
+
+if ( "${ASYNC_TYPE}" == "0" || "${ASYNC_TYPE}" == "2") then
+ set parallel_model = "false"
+ echo "The model is believed to be single-threaded."
+else if ( "${ASYNC_TYPE}" == "4") then
+ set parallel_model = "true"
+ echo "The model is believed to be MPI-aware."
+else
+ echo 'ERROR - Cannot autodetect async value in the filter_nml namelist in input.nml.'
+ echo 'ERROR - hardcode the parallel_model shell variable and comment out these lines.'
+ exit -1
+ set parallel_model = "false"
+endif
+
+#-----------------------------------------------------------------------------
+# Block 1: convert N POP restart files to DART initial conditions file(s).
+# Since the initial ensemble may not all have the desired timestamp, we
+# will use restart_file_tool to use a consistent date in the header of
+# all the DART initial conditions files. At the end of this block,
+# we have DART restart files filter_ics.[1-N] that
+# came from pointer files rpointer.ocn.[1-N].restart
+#
+# DART requires that POP uses pointer files and that the POP restart files
+# are netCDF format. The experiment should be initialized such that there
+# are "ensemble_size" number of POP restart files and matching pointer files.
+# The pointer files should have the absolute path to the restart file.
+#
+# DART namelist settings appropriate/required:
+# &filter_nml: restart_in_file_name = 'filter_ics'
+# &ensemble_manager_nml: single_restart_file_in = '.false.'
+# &pop_to_dart_nml: pop_to_dart_output_file = 'dart.ud',
+#
+# &restart_file_tool_nml: <see list that follows>
+# input_file_name = "filter_restart",
+# output_file_name = "filter_updated_restart",
+# ens_size = 1,
+# single_restart_file_in = .true.,
+# single_restart_file_out = .true.,
+# overwrite_data_time = .true.,
+# overwrite_advance_time = .true.,
+# new_data_days = 145731, [1 january 2000]
+# new_data_secs = 0, [midnight]
+# input_is_model_advance_file = .false.,
+# output_is_model_advance_file = .false.,
+# gregorian_cal = .true.
+# new_advance_days = -1,
+# new_advance_secs = -1
+#-----------------------------------------------------------------------------
+
+# Gregorian 1 Jan 2000 <==> DART 145731
+
+echo ':0' >! vi_script
+echo '/restart_file_tool_nml' >> vi_script
+echo '/write_binary_restart_files' >> vi_script
+echo ':s/.false./.true./' >> vi_script
+echo '/overwrite_data_time' >> vi_script
+echo ':s/.false./.true./' >> vi_script
+echo '/new_data_days' >> vi_script
+echo ':s/-1/145731/' >> vi_script
+echo '/new_data_secs' >> vi_script
+echo ':s/-1/0/' >> vi_script
+echo ':wq' >> vi_script
+
+( ${VI} input.nml < vi_script )
+
+\rm -f vi_script
+
+cat pop_in.part1 pop_in.part2 >! pop_in
+
+set member = 1
+while ($member <= $ensemble_size)
+
+ # grab the POP pointer file and dereference it
+ # Copy the POP restart file ... we will be updating it.
+ # then link the POP restart file to the name for 'pop_to_dart'
+ ${COPY} ${POPDIR}/rpointer.ocn.$member.restart .
+ set OCN_RESTART_FILENAME = `head -1 rpointer.ocn.$member.restart`
+ ${COPY} ${POPDIR}/${OCN_RESTART_FILENAME} .
+ ln -sf ${OCN_RESTART_FILENAME} pop.r.nc
+
+# echo "Changing iyear of ${OCN_RESTART_FILENAME} to 2000"
+# ncatted -O -h -a iyear,global,o,l,2000 ${OCN_RESTART_FILENAME}
+
+ ./pop_to_dart || exit 1
+
+ ${MOVE} dart.ud filter_restart
+
+ ./restart_file_tool || exit 1
+
+ # set the filename expected by DART for the initial conditions
+ set DART_IC_FILE = `printf filter_ics.%04d $member`
+
+ ${MOVE} filter_updated_restart ${DART_IC_FILE}
+
+ @ member++
+end
+
+#-----------------------------------------------------------------------------
+# A common strategy for the beginning is to check for the existence of
+# some variables that get set by the different queuing mechanisms.
+# This way, we know which queuing mechanism we are working with,
+# and can set 'queue-independent' variables for use for the remainder
+# of the script.
+
+if ($?LSB_QUEUE || $?PBS_QUEUE) then
+
+ # Must be using LSF or PBS as the queueing system.
+ echo "Using ${MPI} for execution"
+
+ # each filter task advances the ensembles, each running on 1 proc.
+ if ( "$parallel_model" == "false" ) then
+
+ ${MPI} ./filter
+
+ else
+
+ # 1) filter runs in parallel until time to do a model advance.
+ # 2) advance_model.csh successively runs N POP instances,
+ # each using the entire processor set.
+ # 3) wakeup_filter wakes up filter so it can continue.
+
+ \rm -f model_to_filter.lock filter_to_model.lock
+ mkfifo model_to_filter.lock filter_to_model.lock
+
+ set filterhome = ~/.filter$$
+ if ( ! -e $filterhome) mkdir $filterhome
+
+ # this starts filter but also returns control back to
+ # this script immediately.
+
+ ( setenv HOME $filterhome; ${MPI} ./filter ) &
+
+ while ( -e filter_to_model.lock )
+
+ set todo=`cat < filter_to_model.lock`
+ echo "todo received, value = ${todo}"
+
+ if ( "${todo}" == "finished" ) then
+ echo "main script: filter done."
+ wait
+ break
+
+ else if ( "${todo}" == "advance" ) then
+
+ echo "calling model advance now:"
+ ./advance_model.csh 0 ${ensemble_size} filter_control00000 || exit 9
+
+ echo "restarting filter."
+ mpirun.lsf ./wakeup_filter
+
+ else
+
+ echo "main script: unexpected value received."
+ break
+
+ endif
+
+ end
+
+ echo "filter finished, removing pipes."
+ \rm -f model_to_filter.lock filter_to_model.lock
+
+ if ( -d $filterhome) rmdir $filterhome
+ endif
+
+else
+
+ # If you have a linux cluster with no queuing software, use this
+ # section. The list of computational nodes is given to the mpirun
+ # command and it assigns them as they appear in the file. In some
+ # cases it seems to be necessary to wrap the command in a small
+ # script that changes to the current directory before running.
+
+ echo "running with no queueing system"
+ echo "This is untested for POP -- ending now."
+ exit
+
+ # before running this script, do this once. the syntax is
+ # node name : how many tasks you can run on it
+ #setenv MYNODEFILE ~/nodelist
+ #echo "node7:2" >! $MYNODEFILE
+ #echo "node5:2" >> $MYNODEFILE
+ #echo "node3:2" >> $MYNODEFILE
+ #echo "node1:2" >> $MYNODEFILE
+
+# for compas
+ setenv NUM_PROCS `cat nodelist-pgi | wc -l`
+ set MPIRUN = /opt/mpich/myrinet/pgi/bin/mpirun
+ set MPICMD = $MPIRUN -np $NUM_PROCS -nolocal -machinefile nodelist-pgi
+
+# for atlas-pgi
+ setenv NUM_PROCS `cat nodelist-pgi | wc -l`
+ set MPIRUN = /share/apps/mpich1/pgi/bin/mpirun
+ set MPICMD = $MPIRUN -np $NUM_PROCS -nolocal -machinefile nodelist-pgi
+
+# for atlas-gfortran
+ set MPIRUN = /share/apps/openmpi/gfortran/bin/mpirun
+ set MPICMD = $MPIRUN --hostfile nodelist-gfortran --mca mtl mx --mca pml cm -np 72
+
+ echo "MPICMD = ${MPICMD}"
+
+ # filter runs in parallel until time to do a model advance,
+ # and then this script starts up the POP jobs, each one
+ # running in parallel. then it runs wakeup_filter to wake
+ # up filter so it can continue.
+
+ \rm -f model_to_filter.lock filter_to_model.lock
+ mkfifo model_to_filter.lock filter_to_model.lock
+
+ set filterhome = ~/.filter$$
+ if ( ! -e $filterhome) mkdir $filterhome
+
+ # this starts filter but also returns control back to
+ # this script immediately.
+
+ (setenv HOME $filterhome; ${MPICMD} ./filter) &
+
+ while ( -e filter_to_model.lock )
+
+ set todo=`cat < filter_to_model.lock`
+ echo "todo received, value = ${todo}"
+
+ if ( "${todo}" == "finished" ) then
+ echo "main script: filter done."
+ wait
+ break
+
+ else if ( "${todo}" == "advance" ) then
+
+ # the second number below must match the number
+ # of ensembles. Also, in input.nml, the advance model
+ # command must have -np N with N equal to the number
+ # of processors this job is using.
+
+ echo "calling model advance now:"
+ ./advance_model.csh 0 ${ensemble_size} filter_control00000 || exit 9
+
+ echo "restarting filter."
+ ${MPICMD} ./wakeup_filter
+
+ else
+
+ echo "main script: unexpected value received."
+ break
+
+ endif
+
+ end
+
+ echo "filter finished, removing pipes."
+ \rm -f model_to_filter.lock filter_to_model.lock
+
+ if ( -d $filterhome) rmdir $filterhome
+
+endif
+
+#-----------------------------------------------------------------------------
+# Move the output to storage after filter completes.
+# At this point, all the restart,diagnostic files are in the CENTRALDIR
+# and need to be moved to the 'experiment permanent' directory.
+# We have had problems with some, but not all, files being moved
+# correctly, so we are adding bulletproofing to check to ensure the filesystem
+# has completed writing the files, etc. Sometimes we get here before
+# all the files have finished being written.
+#-----------------------------------------------------------------------------
+
+echo "Listing contents of CENTRALDIR before archiving"
+ls -l
+
+exit
+
+${MOVE} *.data *.meta ${experiment}/POP
+${MOVE} data data.cal ${experiment}/POP
+${MOVE} STD* ${experiment}/POP
+
+${MOVE} filter_restart* ${experiment}/DART
+${MOVE} assim_model_state_ud[1-9]* ${experiment}/DART
+${MOVE} assim_model_state_ic[1-9]* ${experiment}/DART
+${MOVE} Posterior_Diag.nc ${experiment}/DART
+${MOVE} Prior_Diag.nc ${experiment}/DART
+${MOVE} obs_seq.final ${experiment}/DART
+${MOVE} dart_log.out ${experiment}/DART
+
+# Good style dictates that you save the scripts so you can see what worked.
+
+${COPY} input.nml ${experiment}/DART
+${COPY} *.csh ${experiment}/DART
+${COPY} $myname ${experiment}/DART
+ls -lrt
+
+exit $status
+
+# <next few lines under version control, do not edit>
+# $URL:$
+# $Revision: $
+# $Date: $
+
Property changes on: DART/trunk/models/POP/shell_scripts/run_filter.batch
___________________________________________________________________
Added: svn:executable
+ *
Modified: DART/trunk/models/POP/shell_scripts/run_perfect_model_obs.batch
===================================================================
--- DART/trunk/models/POP/shell_scripts/run_perfect_model_obs.batch 2010-06-28 20:48:10 UTC (rev 4397)
+++ DART/trunk/models/POP/shell_scripts/run_perfect_model_obs.batch 2010-06-28 22:53:23 UTC (rev 4398)
@@ -78,7 +78,7 @@
echo "${JOBNAME} ($JOBID) submitted from $MYHOST"
echo "${JOBNAME} ($JOBID) running in queue $MYQUEUE"
echo "${JOBNAME} ($JOBID) running on $host"
-echo "${JOBNAME} ($JOBID) started at "`date`
+echo "${JOBNAME} ($JOBID) started at "`date`
echo
#----------------------------------------------------------------------
Modified: DART/trunk/models/POP/work/input.nml
===================================================================
--- DART/trunk/models/POP/work/input.nml 2010-06-28 20:48:10 UTC (rev 4397)
+++ DART/trunk/models/POP/work/input.nml 2010-06-28 22:53:23 UTC (rev 4398)
@@ -179,6 +179,10 @@
nmlfilename = 'dart_log.nml'
/
+&mpi_utilities_nml
+ reverse_task_layout = .false.,
+ /
+
# Gregorian: 12 Z 1 January 1996 <==> DART: 144270 days 43200 seconds
# Gregorian: 12 Z 1 January 2000 <==> DART: 145731 days 43200 seconds
More information about the Dart-dev
mailing list