[Dart-dev] [7733] DART/trunk/models/cam: remove scripts no longer needed; those possibly still useful

nancy at ucar.edu nancy at ucar.edu
Mon Mar 16 11:34:58 MDT 2015


Revision: 7733
Author:   nancy
Date:     2015-03-16 11:34:58 -0600 (Mon, 16 Mar 2015)
Log Message:
-----------
remove scripts no longer needed; those possibly still useful
moved to the deprecated directory.

Added Paths:
-----------
    DART/trunk/models/cam/deprecated/input.nml.diag.template
    DART/trunk/models/cam/deprecated/monthlydiags.csh
    DART/trunk/models/cam/doc/map_karspeck_pop_batch

Removed Paths:
-------------
    DART/trunk/models/cam/shell_scripts/advance_model.csh
    DART/trunk/models/cam/shell_scripts/advance_model.retry.csh
    DART/trunk/models/cam/shell_scripts/check_model.csh
    DART/trunk/models/cam/shell_scripts/input.nml.diag.template
    DART/trunk/models/cam/shell_scripts/job.simple.csh
    DART/trunk/models/cam/shell_scripts/monthlydiags.csh
    DART/trunk/models/cam/shell_scripts/run-cam.csh
    DART/trunk/models/cam/shell_scripts/st_archive_hybrid.sh

-------------- next part --------------
Copied: DART/trunk/models/cam/deprecated/input.nml.diag.template (from rev 7729, DART/trunk/models/cam/shell_scripts/input.nml.diag.template)
===================================================================
--- DART/trunk/models/cam/deprecated/input.nml.diag.template	                        (rev 0)
+++ DART/trunk/models/cam/deprecated/input.nml.diag.template	2015-03-16 17:34:58 UTC (rev 7733)
@@ -0,0 +1,115 @@
+# The times in the namelist for the obs_diag program are vectors
+# that follow the following sequence:
+# year   month   day   hour   minute   second
+# max_num_bins can be used to specify a fixed number of bins,
+# in which case last_bin_center should be safely in the future.
+#
+# Acceptable latitudes range from  [-90,  90]
+# Acceptable longitudes range from [  0, 360]
+
+&obs_diag_nml
+   obs_sequence_name = 'obs_NNNN/obs_seq.final',
+   first_bin_center =  20YY,MM, 1, 6, 0, 0 ,
+   last_bin_center  =  20YY,MM,DD, 0, 0, 0 ,
+   bin_separation   =     0, 0, 0, 6, 0, 0 ,
+   bin_width        =     0, 0, 0, 6, 0, 0 ,
+   time_to_skip     =     0, 0, 0, 0, 0, 0 ,
+   max_num_bins     = 1000,
+   trusted_obs      = 'null',
+   Nregions   = 4,
+   lonlim1    =   0.0,   0.0,   0.0, 235.0,
+   lonlim2    = 360.0, 360.0, 360.0, 295.0,
+   latlim1    =  20.0, -80.0, -20.0,  25.0,
+   latlim2    =  80.0, -20.0,  20.0,  55.0,
+   reg_names  = 'Northern Hemisphere', 'Southern Hemisphere', 'Tropics', 'North America',
+   print_mismatched_locs = .false.,
+   create_rank_histogram = .true.,
+   outliers_in_histogram = .true.,
+   use_zero_error_obs    = .false.,
+   verbose               = .false.
+   /
+
+&ensemble_manager_nml
+   single_restart_file_in = .false.,
+   single_restart_file_out = .false.,
+   perturbation_amplitude  = 0.2  /
+
+&assim_tools_nml
+   filter_kind                     = 1,
+   cutoff                          = 0.2,
+   sort_obs_inc                    = .false.,
+   spread_restoration              = .false.,
+   sampling_error_correction       = .false.,
+   print_every_nth_obs             = 10000,
+   adaptive_localization_threshold = -1/
+
+&cov_cutoff_nml
+   select_localization = 1  /
+
+&reg_factor_nml
+   select_regression = 1,
+   input_reg_file = "time_mean_reg" 
+   save_reg_diagnostics = .false.,
+   reg_diagnostics_file = 'reg_diagnostics' /
+
+&obs_sequence_nml
+   write_binary_obs_sequence = .true.  /
+
+&obs_kind_nml
+  /
+
+&assim_model_nml
+   write_binary_restart_files = .true. /
+
+&model_nml
+   output_state_vector      = .false.,
+   model_version            = '4.0.1',
+   model_config_file        = 'caminput.nc',
+   state_num_0d             = 0,
+   state_num_1d             = 0,
+   state_num_2d             = 1,
+   state_num_3d             = 6,
+   state_names_2d = 'PS      '
+   state_names_3d = 'T       ','US      ','VS      ','Q       ','CLDLIQ  ','CLDICE  '
+   which_vert_1d            = 0,
+   which_vert_2d            = -1,
+   which_vert_3d            = 6*1,
+   pert_names         = '        ',
+   pert_sd           = -888888.0d0,
+   pert_base_vals    = -888888.0d0,
+   highest_obs_pressure_mb   = 100.0,
+   highest_state_pressure_mb = 150.0,
+   max_obs_lat_degree        = 89.0,
+   Time_step_seconds = 21600,
+   Time_step_days = 0    /
+
+! pert_sd < 0. signals pert_base_vals to be used as the values 
+!   for each ensemble member (for 1 field), instead of the value for each field.
+!   DART special value -888888.0d0 can be used for that.
+! pert_sd > 0 allows each point of the pert_names fields of each ens member 
+!   to be randomly perturbed with a standard deviation of pert_sd.  
+!   Used by filter's call to pert_model_state.
+! pert_base_vals /= -888888.0d0 means that the values of the fields in pert_names
+!   will be reset to the values in pert_base_vals.  
+
+
+&location_nml
+   horiz_dist_only = .false.,
+   vert_normalization_pressure = 100000.0, 
+   vert_normalization_height = 10000.0,
+   vert_normalization_level = 20.0,
+   approximate_distance = .true.,
+   nlon = 141, 
+   nlat = 72,
+   output_box_info  = .false.  /
+
+&utilities_nml
+   TERMLEVEL = 1,
+   logfilename = 'dart_log.out'  /
+
+&mpi_utilities_nml
+   /
+
+
+&obs_def_gps_nml
+ /

Copied: DART/trunk/models/cam/deprecated/monthlydiags.csh (from rev 7729, DART/trunk/models/cam/shell_scripts/monthlydiags.csh)
===================================================================
--- DART/trunk/models/cam/deprecated/monthlydiags.csh	                        (rev 0)
+++ DART/trunk/models/cam/deprecated/monthlydiags.csh	2015-03-16 17:34:58 UTC (rev 7733)
@@ -0,0 +1,104 @@
+#!/bin/csh 
+#
+# DART software - Copyright 2004 - 2013 UCAR. This open source software is
+# provided by UCAR, "as is", without charge, subject to all terms of use at
+# http://www.image.ucar.edu/DAReS/DART/DART_download
+#
+# DART $Id$
+#
+# run obs_diag on the mainframe a month at a time.  produce
+# obs_diag_output.nc file(s) which need to be copied to 
+# another machine which has matlab to finish generating 
+# the plots that are normally looked at.
+
+# this program is expected to be started from the directory 
+# which has a collection of obs_NNNN dirs, each of which 
+# contains an obs_seq.final file.
+
+#-----------------------------------------------------------------------------
+# run in share queue on bluefire.
+#-----------------------------------------------------------------------------
+
+#BSUB -J obs_diags
+#BSUB -o obs_diags.%J.log
+#BSUB -P xxxxxxxx
+#BSUB -q share
+#BSUB -W 2:00
+#BSUB -n 1
+
+
+# general settings - check these to see they are appropriate
+
+# where to find the DART obs_diag program, and a template for
+# the input.nml file.   obs_diag should exist in this dir.
+set DIAG = $DART/models/cam/work
+
+
+# time period our standard experiments have covered.  if you
+# move to another time, you'll need to regenerate an appropriate
+# table.  you can set the start and stop month number below.
+# remember month 1 is the spinup month, so generally our
+# experiments run from month 2 to 13.
+
+set startmon = 2
+set endmon   = 3
+
+
+# tables intended to ease the calendar computations.
+# these are very date specific and MUST be changed for any other
+# time spans that are run.  day 1 = aug 1, 2006
+# should go one month past last day of simulation
+set months = (aug06 sep06 oct06 nov06 dec06 jan07 feb07 mar07 \
+              apr07 may07 jun07 jul07 aug07 sep07)
+
+#               1  2  3   4   5   6   7   8   9  10  11  12  13  14
+set startd  = ( 1 32 62  93 123 154 185 213 244 274 305 335 366 397)
+set endd    = (31 61 92 122 153 184 212 243 273 304 334 365 396 426)
+set year    = (06 06 06  06  06  07  07  07  07  07  07  07  07  07)
+set calmon  = ( 8  9 10  11  12   1   2   3   4   5   6   7   8   9)
+set daysmon = (31 30 31  30  31  31  28  31  30  31  30  31  31  30)
+
+@ monnum = $startmon
+while ( $monnum <= $endmon )
+  set diagname = $months[$monnum]
+
+  # assume there is an input.nml.diag.template here with the right 
+  # patterns for sed to search and replace below.
+  # should error check for this file here.
+
+  set sdy = `printf '%04d' $startd[$monnum]`
+  set smn = $calmon[$monnum]
+  set mdy = $daysmon[$monnum]
+  set syr = $year[$monnum]
+   
+  sed -e "s/NNNN/$sdy/" \
+      -e "s/YY/$syr/"    \
+      -e "s/MM/$smn/"    \
+      -e "s/DD/$mdy/"    \
+      input.nml.diag.template >! input.nml
+
+  echo updated input.nml here
+
+  echo running obs_diag here
+  $DIAG/obs_diag >&! obs_diag.out
+
+  set nextdir = ${startd[$monnum]}-${endd[$monnum]}s0_def_reg
+
+  mkdir -p $nextdir
+
+  touch $diagname
+  mv -f $diagname obs_diag_output.nc input.nml obs_diag.out \
+        LargeInnov.txt $nextdir
+
+  @ monnum++
+end
+
+echo all done
+
+exit 0
+
+# <next few lines under version control, do not edit>
+# $URL$
+# $Revision$
+# $Date$
+

Added: DART/trunk/models/cam/doc/map_karspeck_pop_batch
===================================================================
--- DART/trunk/models/cam/doc/map_karspeck_pop_batch	                        (rev 0)
+++ DART/trunk/models/cam/doc/map_karspeck_pop_batch	2015-03-16 17:34:58 UTC (rev 7733)
@@ -0,0 +1,35 @@
+Alicia developed these scripts to avoid having to reenter the queue after every assimilation,
+and to remove the st_archive.sh from the batch runs, run it single threaded instead, and 
+eliminate/reduce the copies that slow it down so much.
+The originals are in /glade/p/work/aliciak/cases/cesm_6hr_1970b/
+
+Copy these scripts into $CASE.
+
+Copy $CASE.run  into $CASE.run_batch       
+   modify wall clock to accommodate N forecasts+assims
+   change
+      #   $BATCHSUBMIT ./$CASE.run
+          ./$CASE.run_batch
+
+future; make a DART script update pre_run.csh and post_run.csh to have the correct 
+   'cd $CASE' 
+   account #
+   job name
+   RESUBMIT #
+
+-> submit_dependent_jobs.csh
+      has a loop over the number of batch jobs (each has N advances+assims), 
+      which can be run before the scratch space is filled, and [sl]t_archive must be run.
+   -> pre_run.csh
+      -> set RESUBMIT = N (she uses 10) in env_run.xml   
+         choose number based on how many can fit in a single wall clock limit,
+         or some other useful criterion.
+      -> call $CASE.run_batch
+         -> calls the next $CASE.run_batch, NOT as a batch job.
+            ...-> Repeat until N is exhausted
+         Unwind all of the .run_batches, each of which is waiting for its child to finish.
+
+   -> post_run.csh
+      -> archive_select_restarts.csh
+      -> archive_history_files.csh
+      -> lt_archive.sh -m copy_dirs_hsi

Deleted: DART/trunk/models/cam/shell_scripts/advance_model.csh
===================================================================
--- DART/trunk/models/cam/shell_scripts/advance_model.csh	2015-03-13 22:53:44 UTC (rev 7732)
+++ DART/trunk/models/cam/shell_scripts/advance_model.csh	2015-03-16 17:34:58 UTC (rev 7733)
@@ -1,214 +0,0 @@
-#!/bin/csh
-#
-# DART software - Copyright 2004 - 2013 UCAR. This open source software is
-# provided by UCAR, "as is", without charge, subject to all terms of use at
-# http://www.image.ucar.edu/DAReS/DART/DART_download
-#
-# DART $Id$
-#
-# Script to advance one ensemble member one filter "time step"
-# when the model advance is executed as a separate process.
-# Called by the filter executable (for async=2 or 4)
-# Calls run-cam.csh, the CAM execution script.
-# Calls translation program to translate time and model state.
-# Runs on one of the compute nodes allotted to the filter executable.
-#
-# Arguments are 
-# arg#1  the process number of caller
-# arg#2  the number of state copies belonging to that process
-# arg#3  the name of the filter_control_file for that process
-
-set process = $1
-set num_states = $2
-set control_file = $3
-
-# Get unique name for temporary working directory for this process's stuff
-set temp_dir = 'advance_temp'${process}
-
-# args to previous version of this script
-set     myname = $0
-set CENTRALDIR = `pwd`
-
-# Create a clean temporary directory and go there
-\rm -rf  $temp_dir
-mkdir -p $temp_dir
-cd       $temp_dir
-
-# People have the craziest aliases. These prevent the obsessive-compulsive
-# from causing themselves no end of angst.
-if ( ! $?REMOVE ) then
-  set REMOVE = 'rm -rf'
-endif
-if ( ! $?COPY ) then
-  set COPY = 'cp -fp'
-endif
-if ( ! $?MOVE ) then
-  set MOVE = 'mv -f'
-endif
-if ( ! $?LINK ) then
-  set LINK = 'ln -fs'
-endif
-
-echo "advance_model.csh args = $1 $2 $3"                    >  cam_out_temp
-echo "CENTRALDIR is ${CENTRALDIR}"                          >> cam_out_temp
-echo "temp_dir is $temp_dir"                                >> cam_out_temp
-
-# Get information about this experiment from file "casemodel", 
-# created by the main controlling script (job.csh)
-
-# set $case = the case and 
-# $model = the directory name (in the CAM source tree) 
-# where CAM executable will be found. 
-# set locations of the CAM and CLM input files
-set case        = `head -1 ${CENTRALDIR}/casemodel | tail -1`
-set model       = `head -2 ${CENTRALDIR}/casemodel | tail -1`
-set cam_init    = `head -3 ${CENTRALDIR}/casemodel | tail -1`
-set clm_init    = `head -4 ${CENTRALDIR}/casemodel | tail -1`
-set list        = `head -5 ${CENTRALDIR}/casemodel | tail -1`
-set ice_init    = $list[1]
-
-# output diagnostic information to the same file as the CAM list-directed output
-echo "case $case model $model"    >> cam_out_temp
-echo "cam init is $cam_init"      >> cam_out_temp
-echo "clm init is $clm_init"      >> cam_out_temp
-echo "ice init is $ice_init"      >> cam_out_temp
-
-# Loop through each ensemble this task is responsible for advancing.
-set ensemble_number_line = 1
-set input_file_line      = 2
-set output_file_line     = 3
-set state_copy = 1
-while($state_copy <= $num_states)
-
-   # loop through the control file, extracting lines in groups of 3.
-   set ensemble_number = `head -$ensemble_number_line ../$control_file | tail -1`
-   set input_file      = `head -$input_file_line      ../$control_file | tail -1`
-   set output_file     = `head -$output_file_line     ../$control_file | tail -1`
-
-   # the previous script used element instead of ensemble_number.  make them
-   # the same for now.
-   set element = $ensemble_number
-   echo "starting ${myname} for ens member $element at "`date` >> cam_out_temp
-
-   # get model state initial conditions for this ensemble member
-   ${LINK} ${CENTRALDIR}/$input_file dart_restart
-
-   # get filter namelists for use by cam
-   ${COPY} ${CENTRALDIR}/input.nml input.nml
-
-   # this just creates a file that helps you figure out which member is
-   # being advanced in this directory. FYI only, you don't need it.
-   echo $element >! element
-   cp element element$element
-
-   echo "ls $temp_dir for element $element" >> cam_out_temp
-   ls -lRt                                  >> cam_out_temp
-    
-   # Need a base CAM initial file into which to copy state vector from filter.
-   # c[al]minput_$element also carry along CAM/CLM fields which are not updated
-   #  by the filter (not part of the filter model state).
-   # Look for c[al]minput.nc resulting from the previous advance of this ensemble
-   #  member from within the same day/obs_seq.out time span (in CENTRALDIR)
-   # When starting an experiment which has no spun up set of members, you
-   #  should have already copied the single CAM initial file (e.g. caminput_0.nc)
-   #  from the CAM build directory into the CENTRALDIR and made N copies of it.
-   
-   if (-e     ${CENTRALDIR}/caminput_${element}.nc) then
-      ${COPY} ${CENTRALDIR}/caminput_${element}.nc caminput.nc
-      echo "caminput comes from ${CENTRALDIR}/caminput_${element}.nc" >> cam_out_temp
-   else
-      echo ERROR - need $CENTRALDIR/caminput_${element}.nc to exist
-      echo ERROR - need $CENTRALDIR/caminput_${element}.nc to exist >> cam_out_temp
-      exit -${element}
-   endif
-   
-   if ( -e     ${CENTRALDIR}/clminput_${element}.nc) then
-       ${COPY} ${CENTRALDIR}/clminput_${element}.nc clminput.nc
-   else
-      echo ERROR - need $CENTRALDIR/clminput_${element}.nc to exist
-      echo ERROR - need $CENTRALDIR/clminput_${element}.nc to exist >> cam_out_temp
-      exit -${element}
-   endif
-   
-   if ( -e     ${CENTRALDIR}/iceinput_${element}.nc) then
-       ${COPY} ${CENTRALDIR}/iceinput_${element}.nc iceinput.nc
-   else
-      echo ERROR - need $CENTRALDIR/iceinput_${element}.nc to exist
-      echo ERROR - need $CENTRALDIR/iceinput_${element}.nc to exist >> cam_out_temp
-      exit -${element}
-      # or if no ice restart file available; start it with ice_in = 'default' 
-      # via existence of iceinput in run-cam.csh
-   endif
-   
-   # topography information
-   ${LINK} ${CENTRALDIR}/cam_phis.nc .
-
-   # translate DART state vector into a CAM caminput.nc file, and create an
-   # ascii 'times' file, which will be used to set the namelist for cam to tell
-   # it how far to advance the model.
-   if (-e dart_restart && -e ${CENTRALDIR}/dart_to_cam) then
-      echo ' '                                           >> cam_out_temp
-      echo 'advance_model: executing dart_to_cam '`date` >> cam_out_temp
-      ${CENTRALDIR}/dart_to_cam                          >> cam_out_temp
-      ls -lt                                             >> cam_out_temp
-      ${COPY} times ${CENTRALDIR}
-   else
-      echo "ERROR: either dart_restart file for $element or dart_to_cam not available" >> cam_out_temp
-      exit -${element}
-   endif
-   
-   # advance cam 
-   echo executing: ${CENTRALDIR}/run-cam.csh ${case}-$element $model ${CENTRALDIR}  >> cam_out_temp
-   ${CENTRALDIR}/run-cam.csh ${case}-$element $model ${CENTRALDIR}  >>& cam_out_temp
-   
-   grep 'END OF MODEL RUN' cam_out_temp > /dev/null
-   if ($status == 0) then
-      # Extract the new state vector information from the new caminput.nc and
-      # put it in '$output_file' (time followed by state)
-      echo ' '                           >> cam_out_temp
-      echo 'Executing cam_to_dart'       >> cam_out_temp
-      ${CENTRALDIR}/cam_to_dart          >> cam_out_temp
-   
-      # Move updated state vector and new CAM/CLM initial files back to experiment
-      # directory for use by filter and the next advance.
-
-      ${MOVE} dart_ics        ${CENTRALDIR}/$output_file
-      ${MOVE} namelist        ${CENTRALDIR}
-      ${MOVE} caminput.nc     ${CENTRALDIR}/caminput_${element}.nc
-      ${MOVE} clminput.nc     ${CENTRALDIR}/clminput_${element}.nc
-      ${MOVE} iceinput.nc     ${CENTRALDIR}/iceinput_${element}.nc
-   
-      echo "finished ${myname} for ens member $element at "`date` >> cam_out_temp
-      ${COPY} cam_out_temp ${CENTRALDIR}/H${hour}/cam_out_temp$element
-      ${MOVE} cam_out_temp ${CENTRALDIR}/cam_out_temp$element
-   else
-      echo "WARNING - CAM $element stopped abnormally"
-      echo "WARNING - CAM $element stopped abnormally" >> cam_out_temp
-      echo "=========================================" >> cam_out_temp
-      exit -${element}
-   endif
-end
-
-   # if this process needs to advance more than one model, read the next set of
-   # filenames and ensemble number at the top of this loop.
-
-   @ state_copy++
-   @ ensemble_number_line = $ensemble_number_line + 3
-   @ input_file_line      = $input_file_line + 3
-   @ output_file_line     = $output_file_line + 3
-end
-
-cd ${CENTRALDIR}
-
-${REMOVE} $temp_dir/*
-
-# Remove the filter_control file to signal completion
-\rm -rf $control_file
-
-exit 0
-
-# <next few lines under version control, do not edit>
-# $URL$
-# $Revision$
-# $Date$
-

Deleted: DART/trunk/models/cam/shell_scripts/advance_model.retry.csh
===================================================================
--- DART/trunk/models/cam/shell_scripts/advance_model.retry.csh	2015-03-13 22:53:44 UTC (rev 7732)
+++ DART/trunk/models/cam/shell_scripts/advance_model.retry.csh	2015-03-16 17:34:58 UTC (rev 7733)
@@ -1,285 +0,0 @@
-#!/bin/csh
-#
-# DART software - Copyright 2004 - 2013 UCAR. This open source software is
-# provided by UCAR, "as is", without charge, subject to all terms of use at
-# http://www.image.ucar.edu/DAReS/DART/DART_download
-#
-# DART $Id$
-#
-#----------------------------------------------------------------------
-# advance_model.csh
-#
-# Script to advance one ensemble member one filter "time step"
-# when the model advance is executed as a separate process.
-# Called by the filter executable (for async=2 or 4)
-# Calls run-cam.csh, the CAM execution script.
-# Calls 3 translation routines to translate time and model state.
-# Runs on one of the compute nodes allotted to the filter executable.
-#
-# Arguments are 
-# arg#1  the process number of caller
-# arg#2  the number of state copies belonging to that process
-# arg#3  the name of the filter_control_file for that process
-
-#----------------------------------------------------------------------
-
-set process = $1
-set num_states = $2
-set control_file = $3
-
-set retry_max = 2
-
-# Get unique name for temporary working directory for this process's stuff
-set temp_dir = 'advance_temp'${process}
-
-# args to previous version of this script
-set     myname = $0
-set CENTRALDIR = `pwd`
-
-# Create a clean temporary directory and go there
-\rm -rf  $temp_dir
-mkdir -p $temp_dir
-cd       $temp_dir
-
-# People have the craziest aliases. These prevent the obsessive-compulsive
-# from causing themselves no end of angst.
-if ( ! $?REMOVE ) then
-  set REMOVE = 'rm -rf'
-endif
-if ( ! $?COPY ) then
-  set COPY = 'cp -fp'
-endif
-if ( ! $?MOVE ) then
-  set MOVE = 'mv -f'
-endif
-if ( ! $?LINK ) then
-  set LINK = 'ln -fs'
-endif
-
-echo "advance_model.csh args = $1 $2 $3"                    >  cam_out_temp
-echo "CENTRALDIR is ${CENTRALDIR}"                          >> cam_out_temp
-echo "temp_dir is $temp_dir"                                >> cam_out_temp
-
-# Get information about this experiment from file "casemodel", 
-# created by the main controlling script (job.csh)
-
-# set $case = the case and 
-# $model = the directory name (in the CAM source tree) 
-# where CAM executable will be found. 
-# set locations of the CAM and CLM input files
-set case        = `head -1 ${CENTRALDIR}/casemodel | tail -1`
-set model       = `head -2 ${CENTRALDIR}/casemodel | tail -1`
-set cam_init    = `head -3 ${CENTRALDIR}/casemodel | tail -1`
-set clm_init    = `head -4 ${CENTRALDIR}/casemodel | tail -1`
-set list        = `head -5 ${CENTRALDIR}/casemodel | tail -1`
-set ice_init    = $list[1]
-
-# output diagnostic information to the same file as the CAM list-directed output
-echo "case $case model $model"    >> cam_out_temp
-echo "cam init is $cam_init"      >> cam_out_temp
-echo "clm init is $clm_init"      >> cam_out_temp
-echo "ice init is $ice_init"      >> cam_out_temp
-
-# Loop through each ensemble this task is responsible for advancing.
-set ensemble_number_line = 1
-set input_file_line      = 2
-set output_file_line     = 3
-set state_copy = 1
-while($state_copy <= $num_states)
-
-   # loop through the control file, extracting lines in groups of 3.
-   set ensemble_number = `head -$ensemble_number_line ../$control_file | tail -1`
-   set input_file      = `head -$input_file_line      ../$control_file | tail -1`
-   set output_file     = `head -$output_file_line     ../$control_file | tail -1`
-
-   # the previous script used element instead of ensemble_number.  make them
-   # the same for now.
-   set element = $ensemble_number
-   touch cam_out_temp
-   echo "starting ${myname} for ens member $element at "`date` >> cam_out_temp
-
-   # get model state initial conditions for this ensemble member
-   ${LINK} ${CENTRALDIR}/$input_file dart_restart
-
-   # get filter namelists for use by cam
-   ${COPY} ${CENTRALDIR}/input.nml input.nml
-
-   # this just creates a file that helps you figure out which member is
-   # being advanced in this directory. FYI only, you don't need it.
-   echo $element >! element
-   cp element element$element
-
-   echo "ls $temp_dir for element $element" >> cam_out_temp
-   ls -lRt                                  >> cam_out_temp
-    
-   # Need a base CAM initial file into which to copy state vector from filter.
-   # c[al]minput_$element also carry along CAM/CLM fields which are not updated
-   #      by the filter (not part of the filter model state).
-   # First look for c[al]minput.nc resulting from the previous advance of this ensemble
-   #      member from within the same day/obs_seq.out time span (in CENTRALDIR)
-   # Failing that, look for the results of the last advance of this ensemble member
-   #      of the previous obs_seq.out (i.e. in CENTRALDIR/exp_name/day/CAM)
-   # Failing that (when starting an experiment which has no spun up set of members)
-   #      get a copy of a single CAM initial file (usually from somewhere independent
-   #      of this experiment, i.e. /scratch/.../New_state/T42_GWD/CAM/caminput_0.nc)
-   
-   if (-e     ${CENTRALDIR}/caminput_${element}.nc) then
-      ${COPY} ${CENTRALDIR}/caminput_${element}.nc caminput.nc
-      echo "CENTRALDIR caminput comes from ${CENTRALDIR}/caminput_${element}.nc" >> cam_out_temp
-   else if (-e ${cam_init}${element}.nc) then
-      ${COPY}  ${cam_init}${element}.nc caminput.nc
-      echo "cam_init caminput comes from ${cam_init}${element}.nc" >> cam_out_temp
-   else
-      ${COPY}  ${cam_init}0.nc caminput.nc
-      echo "DEFAULT caminput comes from ${cam_init}0.nc" >> cam_out_temp
-   endif
-   
-   if ( -e     ${CENTRALDIR}/clminput_${element}.nc) then
-       ${COPY} ${CENTRALDIR}/clminput_${element}.nc clminput.nc
-   else if (-e ${clm_init}${element}.nc) then
-       ${COPY} ${clm_init}${element}.nc clminput.nc
-   else
-       ${COPY} ${clm_init}0.nc clminput.nc
-   endif
-   
-# Pond restart files have unchangeable names like
-# FV_2deg-noleap-O2-Dev20-5-10.cice.r.volpn.2003-06-01-21600
-# Get iceinput and the related restart files for meltpond and aero
-# Early versions of CAM3.6 (<3.56.59)
-#   if ( -e     ${CENTRALDIR}/iceinput_${element}.tar) then
-#       tar -x -f ${CENTRALDIR}/iceinput_${element}.tar
-#   else if (-e ${ice_init}${element}.tar) then
-#       tar -x -f ${ice_init}${element}.tar 
-   if ( -e     ${CENTRALDIR}/iceinput_${element}.nc) then
-       ${COPY} ${CENTRALDIR}/iceinput_${element}.nc iceinput.nc
-   else if (-e ${ice_init}${element}.nc) then
-       ${COPY} ${ice_init}${element}.nc iceinput.nc
-   else
-       # no ice restart file available; start it with ice_in = 'default' via existence of iceinput
-       # in run-cam.csh
-   endif
-   
-   ${LINK} ${CENTRALDIR}/cam_phis.nc .
-
-   # Create 'times' file for CAM from DART times in assim_model_state_ic#
-   # This info is passed to CAM through the creation of its namelist(s)
-   # Also extract state variables from assim_model_state_ic# into the caminput.nc file
-   if (-e dart_restart && -e ${CENTRALDIR}/dart_to_cam) then
-      echo ' '                           >> cam_out_temp
-      echo 'Executing dart_to_cam'       >> cam_out_temp
-      ${CENTRALDIR}/dart_to_cam          >> cam_out_temp
-      ls -ltR                            >> cam_out_temp
-      ${COPY} times ${CENTRALDIR}
-   else
-      echo "ERROR: either ic file $element or dart_to_cam not available for dart_to_cam" >> cam_out_temp
-      exit 1
-   endif
-   
-   # advance cam 
-   #   echo executing: ${model:h}/run-cam.csh ${case}-$element $model ${CENTRALDIR} >> cam_out_temp
-      set retry = 0
-      while ($retry < $retry_max)
-         echo executing: ${CENTRALDIR}/run-cam.csh ${case}-$element $model ${CENTRALDIR} \
-              >> cam_out_temp
-         ${CENTRALDIR}/run-cam.csh ${case}-$element $model ${CENTRALDIR}  >>& cam_out_temp
-# kdr added 12/28/2010
-         set retry_status = $status
-         if ($retry_status == 0) then
-            grep 'END OF MODEL RUN' cam_out_temp > /dev/null
-            set retry_status = $status
-         else 
-            echo 'run-cam.csh DIED with status'  >> cam_out_temp
-            exit $retry_status
-         endif
-#         grep 'END OF MODEL RUN' cam_out_temp > /dev/null
-#         if ($status == 0) then
-         if ($retry_status == 0) then
-# kdr end
-            set retry = $retry_max
-      # Extract the new state vector information from the new caminput.nc and
-      # put it in temp_ud (time followed by state)
-            echo ' '                           >> cam_out_temp
-            echo 'Executing cam_to_dart'       >> cam_out_temp
-            ${CENTRALDIR}/cam_to_dart          >> cam_out_temp
-   
-      # Save CLM and CAM files for storage of analyses in CAM initial file format (analyses2initial)
-            # get the forecast time, which is the time of this CLM initial file
-            set seconds = (`head -1 times`)
-            if ($seconds[2] == 0) then
-               set hour = 24
-            else
-               @ hour = $seconds[2] / 3600
-            endif
-            if ($hour < 10) set hour = 0$hour
-            # Directory for storing CAM initial files until they can be averaged during
-            # archiving for analyses
-            if (! -d ${CENTRALDIR}/H${hour}) mkdir ${CENTRALDIR}/H${hour}
-      # Move updated state vector and new CAM/CLM initial files back to experiment
-      # directory for use by filter and the next advance.
-      # Store them in H## directories for later generation of  ensemble average CAM and CLM 
-      # initial file after all members are done.
-            ${MOVE} temp_ud         ${CENTRALDIR}/$output_file
-            ${MOVE} namelist        ${CENTRALDIR}
-            ${MOVE} caminput.nc     ${CENTRALDIR}/H${hour}/caminput_${element}.nc
-            ${MOVE} clminput.nc     ${CENTRALDIR}/H${hour}/clminput_${element}.nc
-            ${MOVE} iceinput.nc     ${CENTRALDIR}/H${hour}/iceinput_${element}.nc 
-            # Earlier versions      ${MOVE} iceinput.tar ${CENTRALDIR}/H${hour}/iceinput_${element}.tar 
-
-            # link the new initial files into the CENTRAL directory where filter will find them.
-            ${LINK} ${CENTRALDIR}/H${hour}/caminput_${element}.nc \
-                    ${CENTRALDIR}/caminput_${element}.nc
-            ${LINK} ${CENTRALDIR}/H${hour}/clminput_${element}.nc \
-                    ${CENTRALDIR}/clminput_${element}.nc
-            ${LINK} ${CENTRALDIR}/H${hour}/iceinput_${element}.nc  \
-                    ${CENTRALDIR}/iceinput_${element}.nc 
-            # Earlier versions
-            # ${LINK} ${CENTRALDIR}/H${hour}/iceinput_${element}.tar  \
-            #         ${CENTRALDIR}/iceinput_${element}.tar 
-   
-            echo "finished ${myname} for ens member $element at "`date` >> cam_out_temp
-               ${COPY} cam_out_temp ${CENTRALDIR}/H${hour}/cam_out_temp$element
-            ${MOVE} cam_out_temp ${CENTRALDIR}/cam_out_temp$element
-         else
-            @ retry++
-            if ($retry < $retry_max) then
-# Add section to make CAM write out something every time step during this retry.
-# Could be added to casemodel, but be careful of how run-cam.csh uses the number of
-# lines in casemodel.
-               echo "WARNING - CAM $element stopped abnormally; will be retried"
-               echo "WARNING - CAM $element stopped abnormally; will be retried" >> cam_out_temp
-               echo "===========================================================" >> cam_out_temp
-            else
-               set DEADDIR = ${temp_dir}_dead
-               echo "WARNING - CAM $element stopped abnormally; see $DEADDIR"
-               echo "WARNING - CAM $element stopped abnormally; see $DEADDIR" >> cam_out_temp
-               ${COPY} cam_out_temp ${CENTRALDIR}/cam_out_temp${element}_died
-               mkdir $DEADDIR
-               ${MOVE} * $DEADDIR
-               exit -${element}
-            endif
-         endif
-      end
-
-   # if this process needs to advance more than one model, read the next set of
-   # filenames and ensemble number at the top of this loop.
-
-   @ state_copy++
-   @ ensemble_number_line = $ensemble_number_line + 3
-   @ input_file_line      = $input_file_line + 3
-   @ output_file_line     = $output_file_line + 3
-end
-
-cd ${CENTRALDIR}
-
-${REMOVE} $temp_dir/*
-
-# Remove the filter_control file to signal completion
-\rm -rf $control_file
-
-exit 0
-
-# <next few lines under version control, do not edit>
-# $URL$
-# $Revision$
-# $Date$
-

Deleted: DART/trunk/models/cam/shell_scripts/check_model.csh
===================================================================
--- DART/trunk/models/cam/shell_scripts/check_model.csh	2015-03-13 22:53:44 UTC (rev 7732)
+++ DART/trunk/models/cam/shell_scripts/check_model.csh	2015-03-16 17:34:58 UTC (rev 7733)
@@ -1,27 +0,0 @@
-#!/bin/csh
-#
-# DART software - Copyright 2004 - 2013 UCAR. This open source software is
-# provided by UCAR, "as is", without charge, subject to all terms of use at
-# http://www.image.ucar.edu/DAReS/DART/DART_download
-#
-# DART $Id$
-
-if ($#argv < 1) then
-   echo "usage; check_cam num_ens_members"
-   exit
-endif 
-
-set n = 1
-while ($n <= $1)
-     tail -40 cam_out_temp$n | grep 'END OF MODEL RUN' > /dev/null
-     if ($status != 0) echo cam_out_temp$n finished abnormally
-     @ n++
-end
-
-exit 0
-
-# <next few lines under version control, do not edit>
-# $URL$
-# $Revision$
-# $Date$
-

Deleted: DART/trunk/models/cam/shell_scripts/input.nml.diag.template
===================================================================
--- DART/trunk/models/cam/shell_scripts/input.nml.diag.template	2015-03-13 22:53:44 UTC (rev 7732)
+++ DART/trunk/models/cam/shell_scripts/input.nml.diag.template	2015-03-16 17:34:58 UTC (rev 7733)
@@ -1,115 +0,0 @@
-# The times in the namelist for the obs_diag program are vectors
-# that follow the following sequence:
-# year   month   day   hour   minute   second
-# max_num_bins can be used to specify a fixed number of bins,
-# in which case last_bin_center should be safely in the future.
-#
-# Acceptable latitudes range from  [-90,  90]
-# Acceptable longitudes range from [  0, 360]
-
-&obs_diag_nml
-   obs_sequence_name = 'obs_NNNN/obs_seq.final',
-   first_bin_center =  20YY,MM, 1, 6, 0, 0 ,
-   last_bin_center  =  20YY,MM,DD, 0, 0, 0 ,
-   bin_separation   =     0, 0, 0, 6, 0, 0 ,
-   bin_width        =     0, 0, 0, 6, 0, 0 ,
-   time_to_skip     =     0, 0, 0, 0, 0, 0 ,
-   max_num_bins     = 1000,
-   trusted_obs      = 'null',
-   Nregions   = 4,
-   lonlim1    =   0.0,   0.0,   0.0, 235.0,
-   lonlim2    = 360.0, 360.0, 360.0, 295.0,
-   latlim1    =  20.0, -80.0, -20.0,  25.0,
-   latlim2    =  80.0, -20.0,  20.0,  55.0,
-   reg_names  = 'Northern Hemisphere', 'Southern Hemisphere', 'Tropics', 'North America',
-   print_mismatched_locs = .false.,
-   create_rank_histogram = .true.,
-   outliers_in_histogram = .true.,
-   use_zero_error_obs    = .false.,
-   verbose               = .false.
-   /
-
-&ensemble_manager_nml
-   single_restart_file_in = .false.,
-   single_restart_file_out = .false.,
-   perturbation_amplitude  = 0.2  /
-
-&assim_tools_nml
-   filter_kind                     = 1,
-   cutoff                          = 0.2,
-   sort_obs_inc                    = .false.,
-   spread_restoration              = .false.,
-   sampling_error_correction       = .false.,
-   print_every_nth_obs             = 10000,
-   adaptive_localization_threshold = -1/
-
-&cov_cutoff_nml
-   select_localization = 1  /
-
-&reg_factor_nml
-   select_regression = 1,
-   input_reg_file = "time_mean_reg" 
-   save_reg_diagnostics = .false.,
-   reg_diagnostics_file = 'reg_diagnostics' /
-
-&obs_sequence_nml
-   write_binary_obs_sequence = .true.  /
-
-&obs_kind_nml
-  /
-
-&assim_model_nml
-   write_binary_restart_files = .true. /
-
-&model_nml
-   output_state_vector      = .false.,
-   model_version            = '4.0.1',
-   model_config_file        = 'caminput.nc',
-   state_num_0d             = 0,
-   state_num_1d             = 0,
-   state_num_2d             = 1,
-   state_num_3d             = 6,
-   state_names_2d = 'PS      '
-   state_names_3d = 'T       ','US      ','VS      ','Q       ','CLDLIQ  ','CLDICE  '
-   which_vert_1d            = 0,
-   which_vert_2d            = -1,
-   which_vert_3d            = 6*1,
-   pert_names         = '        ',
-   pert_sd           = -888888.0d0,
-   pert_base_vals    = -888888.0d0,
-   highest_obs_pressure_mb   = 100.0,
-   highest_state_pressure_mb = 150.0,
-   max_obs_lat_degree        = 89.0,
-   Time_step_seconds = 21600,
-   Time_step_days = 0    /
-
-! pert_sd < 0. signals pert_base_vals to be used as the values 
-!   for each ensemble member (for 1 field), instead of the value for each field.
-!   DART special value -888888.0d0 can be used for that.
-! pert_sd > 0 allows each point of the pert_names fields of each ens member 
-!   to be randomly perturbed with a standard deviation of pert_sd.  
-!   Used by filter's call to pert_model_state.
-! pert_base_vals /= -888888.0d0 means that the values of the fields in pert_names
-!   will be reset to the values in pert_base_vals.  
-
-
-&location_nml
-   horiz_dist_only = .false.,
-   vert_normalization_pressure = 100000.0, 
-   vert_normalization_height = 10000.0,
-   vert_normalization_level = 20.0,
-   approximate_distance = .true.,
-   nlon = 141, 
-   nlat = 72,
-   output_box_info  = .false.  /
-
-&utilities_nml
-   TERMLEVEL = 1,
-   logfilename = 'dart_log.out'  /
-
-&mpi_utilities_nml
-   /
-
-
-&obs_def_gps_nml
- /

Deleted: DART/trunk/models/cam/shell_scripts/job.simple.csh
===================================================================
--- DART/trunk/models/cam/shell_scripts/job.simple.csh	2015-03-13 22:53:44 UTC (rev 7732)
+++ DART/trunk/models/cam/shell_scripts/job.simple.csh	2015-03-16 17:34:58 UTC (rev 7733)
@@ -1,373 +0,0 @@
-#!/bin/csh
-#
-# DART software - Copyright 2004 - 2013 UCAR. This open source software is
-# provided by UCAR, "as is", without charge, subject to all terms of use at
-# http://www.image.ucar.edu/DAReS/DART/DART_download
-#
-# DART $Id$
-#
-# Top level script to run a single assimilation experiment.
-#
-# Unlike the more complex job.csh, this script only processes a single 
-# observation file.  Still fairly complex; requires a raft of
-# data files and most of them are in hardcoded locations.
-#
-# You need to know which of several batch systems you are using.  The most
-# common one is LSF.   PBS is also common.  (POE is another but is
-# not supported directly by this script.  It is not recommended that you have a
-# parallel cluster without a batch system (it schedules which nodes are assigned
-# to which processes) but it is possible to run that way -- you have to do
-# more work to get the information about which nodes are involved to the 
-# parallel tasks -- but anyway, there is a section below that uses ssh and no
-# batch.
-#
-# How to submit this job:
-#  1. Look at the #BSUB or #PBS sections below and adjust any of the parameters
-#     on your cluster.  Queue names are very system specific; some systems 
-#     require wall-clock limits; some require an explicit charge code.
-#  2. Submit this script to the queue:
-#        LSF:   bsub < job.simple.csh
-#        PBS:   qsub job.simple.csh
-#       NONE:   job.simple.csh
-#
-# The script moves the necessary files to the current directory and then
-# starts 'filter' as a parallel job on all nodes; each of these tasks will 
-# call a separate model_advance.csh when necessary.
-#
-# The central directory is where the scripts reside and where script and 
-# program I/O are expected to happen.
-# 
-# 
-#=============================================================================
-# This block of directives constitutes the preamble for the LSF queuing system 
-# LSF is used on the IBM   Linux cluster 'lightning'
-# LSF is used on the IMAGe Linux cluster 'coral'
-# LSF is used on the IBM   'bluevista'
-# The queues on lightning and bluevista are supposed to be similar.
-#
-# the normal way to submit to the queue is:    bsub < job.simple.csh
-#
-# an explanation of the most common directives follows:
-# -J Job name
-# -o STDOUT filename
-# -e STDERR filename
-# -P      account
-# -q queue    cheapest == [standby, economy, (regular,debug), premium] == $$$$
-# -n number of processors  (really)
-# -W hr:mn   max wallclock time (required on some systems)
-##=============================================================================
-#BSUB -J DARTCAM
-#BSUB -o DARTCAM.%J.log
-#BSUB -q regular
-#BSUB -n 1
-#
-#
-##=============================================================================
-## This block of directives constitutes the preamble for the PBS queuing system 
-## PBS is used on the CGD   Linux cluster 'bangkok'
-## PBS is used on the CGD   Linux cluster 'calgary'
-##
-## the normal way to submit to the queue is:    qsub job.simple.csh
-##
-## an explanation of the most common directives follows:
-## -N     Job name
-## -r n   Declare job non-rerunable
-## -e <arg>  filename for standard error 
-## -o <arg>  filename for standard out 
-## -q <arg>   Queue name (small, medium, long, verylong)
-## -l nodes=xx:ppn=2   requests BOTH processors on the node. On both bangkok 
-##                     and calgary, there is no way to 'share' the processors 
-##                     on the node with another job, so you might as well use 
-##                     them both.  (ppn == Processors Per Node)
-##=============================================================================
-#PBS -N DARTCAM
-#PBS -r n
-#PBS -e DARTCAM.err
-#PBS -o DARTCAM.log
-#PBS -q medium
-#PBS -l nodes=2:ppn=2
-
-# A common strategy for the beginning is to check for the existence of
-# some variables that get set by the different queuing mechanisms.
-# This way, we know which queuing mechanism we are working with,
-# and can set 'queue-independent' variables for use for the remainder 
-# of the script.
-
-if ($?LS_SUBCWD) then
-
-   # LSF has a list of processors already in a variable (LSB_HOSTS)
-

@@ Diff output truncated at 40000 characters. @@


More information about the Dart-dev mailing list