<p><b>duda</b> 2012-05-03 15:04:07 -0600 (Thu, 03 May 2012)</p><p>BRANCH COMMIT<br>
<br>
Update atmos_physics branch to the top of the trunk, including PIO changes.<br>
</p><hr noshade><pre><font color="gray">
Property changes on: branches/atmos_physics
___________________________________________________________________
Modified: svn:mergeinfo
- /branches/cam_mpas_nh:1260-1270
/branches/ocean_projects/imp_vert_mix_mrp:754-986
/branches/ocean_projects/split_explicit_mrp:1134-1138
/branches/ocean_projects/split_explicit_timestepping:1044-1097
/branches/ocean_projects/vert_adv_mrp:704-745
/branches/source_renaming:1082-1113
/branches/time_manager:924-962
+ /branches/cam_mpas_nh:1260-1270
/branches/ocean_projects/ale_split_exp:1437-1483
/branches/ocean_projects/ale_vert_coord:1225-1383
/branches/ocean_projects/ale_vert_coord_new:1387-1428
/branches/ocean_projects/gmvar:1214-1514,1517-1738
/branches/ocean_projects/imp_vert_mix_mrp:754-986
/branches/ocean_projects/monotonic_advection:1499-1640
/branches/ocean_projects/split_explicit_mrp:1134-1138
/branches/ocean_projects/split_explicit_timestepping:1044-1097
/branches/ocean_projects/vert_adv_mrp:704-745
/branches/ocean_projects/zstar_restart_new:1762-1770
/branches/omp_blocks/block_decomp:1374-1569
/branches/omp_blocks/ddt_reorg:1301-1414
/branches/omp_blocks/halo:1570-1638
/branches/omp_blocks/io:1639-1787
/branches/source_renaming:1082-1113
/branches/time_manager:924-962
/trunk/mpas:1371-1863
Modified: branches/atmos_physics/Makefile
===================================================================
--- branches/atmos_physics/Makefile        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/Makefile        2012-05-03 21:04:07 UTC (rev 1864)
@@ -14,10 +14,10 @@
dummy:
-        ( make error )
+        ( $(MAKE) error )
xlf:
-        ( make all \
+        ( $(MAKE) all \
        "FC_PARALLEL = mpxlf90" \
        "CC_PARALLEL = mpcc" \
        "FC_SERIAL = xlf90" \
@@ -25,6 +25,9 @@
        "FFLAGS_OPT = -O3 -qrealsize=8" \
        "CFLAGS_OPT = -O3" \
        "LDFLAGS_OPT = -O3" \
+        "FFLAGS_DEBUG = -O0 -g -C -qrealsize=8" \
+        "CFLAGS_DEBUG = -O0 -g" \
+        "LDFLAGS_DEBUG = -O0 -g" \
        "CORE = $(CORE)" \
        "DEBUG = $(DEBUG)" \
        "SERIAL = $(SERIAL)" \
@@ -32,7 +35,7 @@
        "CPPFLAGS = $(MODEL_FORMULATION) $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
ftn:
-        ( make all \
+        ( $(MAKE) all \
        "FC_PARALLEL = ftn" \
        "CC_PARALLEL = cc" \
        "FC_SERIAL = ftn" \
@@ -47,7 +50,7 @@
        "CPPFLAGS = $(MODEL_FORMULATION) -DUNDERSCORE $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
pgi:
-        ( make all \
+        ( $(MAKE) all \
        "FC_PARALLEL = mpif90" \
        "CC_PARALLEL = mpicc" \
        "FC_SERIAL = pgf90" \
@@ -65,7 +68,7 @@
        "CPPFLAGS = $(MODEL_FORMULATION) -DUNDERSCORE $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
pgi-nersc:
-        ( make all \
+        ( $(MAKE) all \
        "FC_PARALLEL = ftn" \
        "CC_PARALLEL = cc" \
        "FC_SERIAL = ftn" \
@@ -80,7 +83,7 @@
        "CPPFLAGS = $(MODEL_FORMULATION) -DUNDERSCORE $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
pgi-llnl:
-        ( make all \
+        ( $(MAKE) all \
        "FC_PARALLEL = mpipgf90" \
        "CC_PARALLEL = pgcc" \
        "FC_SERIAL = pgf90" \
@@ -95,7 +98,7 @@
        "CPPFLAGS = $(MODEL_FORMULATION) -DUNDERSCORE $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
ifort:
-        ( make all \
+        ( $(MAKE) all \
        "FC_PARALLEL = mpif90" \
        "CC_PARALLEL = gcc" \
        "FC_SERIAL = ifort" \
@@ -113,7 +116,7 @@
        "CPPFLAGS = $(MODEL_FORMULATION) -DUNDERSCORE -m64 $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
gfortran:
-        ( make all \
+        ( $(MAKE) all \
        "FC_PARALLEL = mpif90" \
        "CC_PARALLEL = mpicc" \
        "FC_SERIAL = gfortran" \
@@ -131,7 +134,7 @@
        "CPPFLAGS = $(MODEL_FORMULATION) -DUNDERSCORE -m64 $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
g95:
-        ( make all \
+        ( $(MAKE) all \
        "FC_PARALLEL = mpif90" \
        "CC_PARALLEL = mpicc" \
        "FC_SERIAL = g95" \
@@ -146,7 +149,7 @@
        "CPPFLAGS = $(MODEL_FORMULATION) -DUNDERSCORE $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
pathscale-nersc:
-        ( make all \
+        ( $(MAKE) all \
        "FC_PARALLEL = ftn" \
        "CC_PARALLEL = cc" \
        "FC_SERIAL = ftn" \
@@ -161,7 +164,7 @@
        "CPPFLAGS = $(MODEL_FORMULATION) -DUNDERSCORE $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
cray-nersc:
-        ( make all \
+        ( $(MAKE) all \
        "FC_PARALLEL = ftn" \
        "CC_PARALLEL = cc" \
        "FC_SERIAL = ftn" \
@@ -176,7 +179,7 @@
        "CPPFLAGS = $(MODEL_FORMULATION) -DUNDERSCORE $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
intel-nersc:
-        ( make all \
+        ( $(MAKE) all \
        "FC_PARALLEL = ftn" \
        "CC_PARALLEL = cc" \
        "FC_SERIAL = ftn" \
@@ -190,9 +193,9 @@
        "USE_PAPI = $(USE_PAPI)" \
        "CPPFLAGS = $(MODEL_FORMULATION) -DUNDERSCORE $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
-CPPINCLUDES = -I../inc -I$(NETCDF)/include
-FCINCLUDES = -I../inc -I$(NETCDF)/include
-LIBS = -L$(NETCDF)/lib -lnetcdf
+CPPINCLUDES = -I../inc -I$(NETCDF)/include -I$(PIO) -I$(PNETCDF)/include
+FCINCLUDES = -I../inc -I$(NETCDF)/include -I$(PIO) -I$(PNETCDF)/include
+LIBS = -L$(PIO) -L$(PNETCDF)/lib -L$(NETCDF)/lib -lpio -lpnetcdf -lnetcdf
RM = rm -f
CPP = cpp -C -P -traditional
@@ -228,7 +231,7 @@
        DEBUG_MESSAGE="Debug flags are not defined for this compile group. Defaulting to Optimized flags"
else # FFLAGS_DEBUG IF
        FFLAGS=$(FFLAGS_DEBUG)
-        CFLAGS=$(CFLAGS_DEBUG)
+        CFLAGS=$(CFLAGS_DEBUG) -DMPAS_DEBUG
        LDFLAGS=$(LDFLAGS_DEBUG)
        DEBUG_MESSAGE="Debugging is on."
endif # FFLAGS_DEBUG IF
@@ -264,10 +267,14 @@
        PAPI_MESSAGE="Papi libraries are off."
endif # USE_PAPI IF
+ifneq ($(wildcard $(NETCDF)/lib/libnetcdff.*), ) # CHECK FOR NETCDF4
+        LIBS += -lnetcdff
+endif # CHECK FOR NETCDF4
+
all: mpas_main
mpas_main:
-        cd src; make FC="$(FC)" \
+        cd src; $(MAKE) -j1 FC="$(FC)" \
CC="$(CC)" \
SFC="$(SFC)" \
SCC="$(SCC)" \
@@ -287,7 +294,7 @@
        @echo $(SERIAL_MESSAGE)
        @echo $(PAPI_MESSAGE)
clean:
-        cd src; make clean RM="$(RM)" CORE="$(CORE)"
+        cd src; $(MAKE) clean RM="$(RM)" CORE="$(CORE)"
        $(RM) $(CORE)_model.exe
error: errmsg
@@ -305,7 +312,7 @@
errmsg:
        @echo ""
-        @echo "Usage: make target CORE=[core] [options]"
+        @echo "Usage: $(MAKE) target CORE=[core] [options]"
        @echo ""
        @echo "Example targets:"
        @echo " ifort"
Modified: branches/atmos_physics/namelist.input.hyd_atmos
===================================================================
--- branches/atmos_physics/namelist.input.hyd_atmos        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/namelist.input.hyd_atmos        2012-05-03 21:04:07 UTC (rev 1864)
@@ -26,8 +26,17 @@
config_restart_name = 'restart.nc'
config_output_interval = '1_00:00:00'
config_frames_per_outfile = 0
+ config_pio_num_iotasks = 0
+ config_pio_stride = 1
/
+&decomposition
+ config_number_of_blocks = 0
+ config_block_decomp_file_prefix = 'graph.info.part.'
+ config_explicit_proc_decomp = .false.
+ config_proc_decomp_file_prefix = 'graph.info.part.'
+/
+
&restart
config_restart_interval = '1000_00:00:00'
config_do_restart = .false.
Modified: branches/atmos_physics/namelist.input.init_nhyd_atmos
===================================================================
--- branches/atmos_physics/namelist.input.init_nhyd_atmos        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/namelist.input.init_nhyd_atmos        2012-05-03 21:04:07 UTC (rev 1864)
@@ -35,9 +35,17 @@
&io
config_input_name = 'x1.40962.geogrid.nc'
config_output_name = 'x1.40962.init.2010-10-23.nc'
- config_decomp_file_prefix = 'x1.40962.graph.info.part.'
+ config_pio_num_iotasks = 0
+ config_pio_stride = 1
/
+&decomposition
+ config_number_of_blocks = 0
+ config_block_decomp_file_prefix = 'x1.40962.graph.info.part.'
+ config_explicit_proc_decomp = .false.
+ config_proc_decomp_file_prefix = 'graph.info.part.'
+/
+
&restart
config_restart_interval = 3000
config_do_restart = .false.
Modified: branches/atmos_physics/namelist.input.nhyd_atmos
===================================================================
--- branches/atmos_physics/namelist.input.nhyd_atmos        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/namelist.input.nhyd_atmos        2012-05-03 21:04:07 UTC (rev 1864)
@@ -45,10 +45,18 @@
config_restart_name = 'restart.nc'
config_output_interval = '1_00:00:00'
config_frames_per_outfile = 1
- config_decomp_file_prefix = 'x1.40962.graph.info.part.'
+ config_pio_num_iotasks = 0
+ config_pio_stride = 1
/
config_sfc_update_name = 'sfc_update.nc'
+&decomposition
+ config_number_of_blocks = 0
+ config_block_decomp_file_prefix = 'x1.40962.graph.info.part.'
+ config_explicit_proc_decomp = .false.
+ config_proc_decomp_file_prefix = 'graph.info.part.'
+/
+
&restart
config_restart_interval = '1_00:00:00'
config_do_restart = .false.
Modified: branches/atmos_physics/namelist.input.nhyd_atmos_jw
===================================================================
--- branches/atmos_physics/namelist.input.nhyd_atmos_jw        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/namelist.input.nhyd_atmos_jw        2012-05-03 21:04:07 UTC (rev 1864)
@@ -42,8 +42,17 @@
config_input_name = 'grid.nc'
config_output_name = 'output.nc'
config_restart_name = 'restart.nc'
+ config_pio_num_iotasks = 0
+ config_pio_stride = 1
/
+&decomposition
+ config_number_of_blocks = 0
+ config_block_decomp_file_prefix = 'graph.info.part.'
+ config_explicit_proc_decomp = .false.
+ config_proc_decomp_file_prefix = 'graph.info.part.'
+/
+
&restart
config_restart_interval = 3000
config_do_restart = .false.
Modified: branches/atmos_physics/namelist.input.nhyd_atmos_mtn_wave
===================================================================
--- branches/atmos_physics/namelist.input.nhyd_atmos_mtn_wave        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/namelist.input.nhyd_atmos_mtn_wave        2012-05-03 21:04:07 UTC (rev 1864)
@@ -38,8 +38,17 @@
config_restart_name = 'restart.nc'
config_output_interval = '00:30:00'
config_frames_per_outfile = 0
+ config_pio_num_iotasks = 0
+ config_pio_stride = 1
/
+&decomposition
+ config_number_of_blocks = 0
+ config_block_decomp_file_prefix = 'graph.info.part.'
+ config_explicit_proc_decomp = .false.
+ config_proc_decomp_file_prefix = 'graph.info.part.'
+/
+
&restart
config_restart_interval = '1_00:00:00'
config_do_restart = .false.
Modified: branches/atmos_physics/namelist.input.nhyd_atmos_squall
===================================================================
--- branches/atmos_physics/namelist.input.nhyd_atmos_squall        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/namelist.input.nhyd_atmos_squall        2012-05-03 21:04:07 UTC (rev 1864)
@@ -38,8 +38,17 @@
config_restart_name = 'restart.nc'
config_output_interval = '00:30:00'
config_frames_per_outfile = 0
+ config_pio_num_iotasks = 0
+ config_pio_stride = 1
/
+&decomposition
+ config_number_of_blocks = 0
+ config_block_decomp_file_prefix = 'graph.info.part.'
+ config_explicit_proc_decomp = .false.
+ config_proc_decomp_file_prefix = 'graph.info.part.'
+/
+
&restart
config_restart_interval = '1_00:00:00'
config_do_restart = .false.
Modified: branches/atmos_physics/namelist.input.ocean
===================================================================
--- branches/atmos_physics/namelist.input.ocean        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/namelist.input.ocean        2012-05-03 21:04:07 UTC (rev 1864)
@@ -1,27 +1,35 @@
&sw_model
config_test_case = 0
- config_time_integration = 'RK4'
+ config_time_integration = 'split_explicit'
config_rk_filter_btr_mode = .false.
- config_dt = 100.0
+ config_dt = 180.0
config_start_time = '0000-01-01_00:00:00'
- config_run_duration = '2000_00:00:00'
- config_stats_interval = 1920
+ config_run_duration = '1_00:00:00'
+ config_stats_interval = 480
/
&io
config_input_name = 'grid.nc'
- config_output_name = 'output.nc'
+ config_output_name = 'output..nc'
config_restart_name = 'restart.nc'
- config_output_interval = '20_00:00:00'
+ config_output_interval = '1_00:00:00'
config_frames_per_outfile = 1000000
+ config_pio_num_iotasks = 0
+ config_pio_stride = 1
/
+&decomposition
+ config_number_of_blocks = 0
+ config_block_decomp_file_prefix = 'graph.info.part.'
+ config_explicit_proc_decomp = .false.
+ config_proc_decomp_file_prefix = 'graph.info.part.'
+/
&restart
config_do_restart = .false.
config_restart_interval = '120_00:00:00'
/
&grid
- config_vert_grid_type = 'zlevel'
+ config_vert_grid_type = 'isopycnal'
config_pressure_type = 'pressure'
- config_rho0 = 1000
+ config_rho0 = 1014.65
/
&split_explicit_ts
config_n_ts_iter = 2
@@ -39,15 +47,17 @@
config_btr_solve_SSH2 = .false.
/
&hmix
- config_h_mom_eddy_visc2 = 1.0e5
+ config_h_mom_eddy_visc2 = 100.0
config_h_mom_eddy_visc4 = 0.0
+ config_h_kappa = 0.0
+ config_h_kappa_q = 0.0
config_visc_vorticity_term = .true.
config_h_tracer_eddy_diff2 = 1.0e5
config_h_tracer_eddy_diff4 = 0.0
/
&vmix
- config_vert_visc_type = 'rich'
- config_vert_diff_type = 'rich'
+ config_vert_visc_type = 'const'
+ config_vert_diff_type = 'const'
config_implicit_vertical_mix = .true.
config_convective_visc = 1.0
config_convective_diff = 1.0
@@ -71,14 +81,13 @@
config_zWidth_tanh = 100
/
&eos
- config_eos_type = 'jm'
+ config_eos_type = 'linear'
/
&advection
config_vert_tracer_adv = 'stencil'
config_vert_tracer_adv_order = 2
- config_tracer_adv_order = 2
+ config_horiz_tracer_adv_order = 2
config_thickness_adv_order = 2
- config_positive_definite = .false.
config_monotonic = .false.
/
&restore
Modified: branches/atmos_physics/namelist.input.sw
===================================================================
--- branches/atmos_physics/namelist.input.sw        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/namelist.input.sw        2012-05-03 21:04:07 UTC (rev 1864)
@@ -25,8 +25,15 @@
config_restart_name = 'restart.nc'
config_output_interval = '1_00:00:00'
config_frames_per_outfile = 0
+ config_pio_num_iotasks = 0
+ config_pio_stride = 1
/
-
+&decomposition
+ config_number_of_blocks = 0
+ config_block_decomp_file_prefix = 'graph.info.part.'
+ config_explicit_proc_decomp = .false.
+ config_proc_decomp_file_prefix = 'graph.info.part.'
+/
&restart
config_restart_interval = '15_00:00:00'
config_do_restart = .false.
Property changes on: branches/atmos_physics/src
___________________________________________________________________
Deleted: svn:mergeinfo
- /branches/ocean_projects/rayleigh/src:1298-1311
Modified: branches/atmos_physics/src/Makefile
===================================================================
--- branches/atmos_physics/src/Makefile        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/Makefile        2012-05-03 21:04:07 UTC (rev 1864)
@@ -6,35 +6,35 @@
        $(FC) $(LDFLAGS) -o $(CORE)_model.exe driver/*.o -L. -ldycore -lops -lframework $(LIBS) -I./external/esmf_time_f90 -L./external/esmf_time_f90 -lesmf_time
reg_includes:
-        ( cd registry; make CC="$(SCC)" )
+        ( cd registry; $(MAKE) CC="$(SCC)" )
        ( cd inc; $(CPP) ../core_$(CORE)/Registry | ../registry/parse > Registry.processed)
-externals:
-        ( cd external; make FC="$(FC)" SFC="$(SFC)" CC="$(CC)" SCC="$(SCC)" FFLAGS="$(FFLAGS)" CFLAGS="$(CFLAGS)" CPP="$(CPP)" NETCDF="$(NETCDF)" CORE="$(CORE)" )
+externals: reg_includes
+        ( cd external; $(MAKE) FC="$(FC)" SFC="$(SFC)" CC="$(CC)" SCC="$(SCC)" FFLAGS="$(FFLAGS)" CFLAGS="$(CFLAGS)" CPP="$(CPP)" NETCDF="$(NETCDF)" CORE="$(CORE)" )
-frame:
-        ( cd framework; make all )
+frame: reg_includes externals
+        ( cd framework; $(MAKE) all )
        ln -sf framework/libframework.a libframework.a
-ops:
-        ( cd operators; make all )
+ops: reg_includes externals frame
+        ( cd operators; $(MAKE) all )
        ln -sf operators/libops.a libops.a
-dycore:
-        ( cd core_$(CORE); make all )
+dycore: reg_includes externals frame ops
+        ( cd core_$(CORE); $(MAKE) all )
        ln -sf core_$(CORE)/libdycore.a libdycore.a
-drver:
-        ( cd driver; make all )
+drver: reg_includes externals frame ops dycore
+        ( cd driver; $(MAKE) all )
clean:
        $(RM) $(CORE)_model.exe libframework.a libops.a libdycore.a
-        ( cd registry; make clean )
-        ( cd external; make clean )
-        ( cd framework; make clean )
-        ( cd operators; make clean )
+        ( cd registry; $(MAKE) clean )
+        ( cd external; $(MAKE) clean )
+        ( cd framework; $(MAKE) clean )
+        ( cd operators; $(MAKE) clean )
        ( cd inc; rm -f *.inc Registry.processed )
        if [ -d core_$(CORE) ] ; then \
-         ( cd core_$(CORE); make clean ) \
+         ( cd core_$(CORE); $(MAKE) clean ) \
        fi;
-        ( cd driver; make clean )
+        ( cd driver; $(MAKE) clean )
Modified: branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_camrad_init.F
===================================================================
--- branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_camrad_init.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_camrad_init.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -3,6 +3,7 @@
!=============================================================================================
module mpas_atmphys_camrad_init
use mpas_dmpar
+ use mpas_kind_types
use mpas_grid_types
use mpas_atmphys_constants,only: cp,degrad,ep_2,g,R_d,R_v,stbolt
@@ -236,7 +237,7 @@
integer:: ln_eh2owid ! var. id for line trans. for wndw ems.
!character*(NF_MAX_NAME) tmpname! dummy variable for var/dim names
- character(len=256):: locfn ! local filename
+ character(len=StrKIND):: locfn ! local filename
integer:: tmptype ! dummy variable for variable type
integer:: ndims ! number of dimensions
!integer dims(NF_MAX_VAR_DIMS) ! vector of dimension ids
@@ -251,7 +252,7 @@
integer:: i,istat,cam_abs_unit
logical:: opened
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: i_te,i_rh
@@ -503,7 +504,7 @@
integer:: i,istat,cam_aer_unit
logical:: opened
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
!---------------------------------------------------------------------------------------------
Modified: branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_date_time.F
===================================================================
--- branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_date_time.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_date_time.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -9,7 +9,7 @@
monthly_interp_to_date, &
monthly_min_max
- character(len=24),public:: current_date
+ character(len=StrKIND),public:: current_date
contains
@@ -18,7 +18,7 @@
!=============================================================================================
!input arguments:
- character(len=24),intent(in):: date_str
+ character(len=StrKIND),intent(in):: date_str
!output arguments:
integer,intent(out):: julyr
@@ -54,7 +54,7 @@
!=============================================================================================
!input arguments:
- character(len=24),intent(in):: date
+ character(len=StrKIND),intent(in):: date
!output arguments:
integer,intent(out):: century_year,month,day,hour,minute,second,ten_thousandth
@@ -76,7 +76,7 @@
!=============================================================================================
!input arguments:
- character(len=24),intent(in):: date_str
+ character(len=StrKIND),intent(in):: date_str
integer,intent(in):: npoints
real(kind=RKIND),intent(in) ,dimension(12,npoints):: field_in
@@ -84,8 +84,8 @@
real(kind=RKIND),intent(out),dimension(npoints):: field_out
!local variables:
- character(len=2):: day15,mon
- character(len=4):: yr
+ character(len=StrKIND):: day15,mon
+ character(len=StrKIND):: yr
integer:: l,n
integer:: julyr,julday,int_month,month1,month2
Modified: branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_initialize_real.F
===================================================================
--- branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_initialize_real.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_initialize_real.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -32,7 +32,7 @@
type(fg_type),intent(inout):: fg
!local variables:
- character(len=32):: timeString
+ character(len=StrKIND):: timeString
integer:: i,j,iCell,istatus
integer,dimension(5) :: interp_list
@@ -155,7 +155,7 @@
type(fg_type),intent(inout):: fg
!local variables:
- character(len=24):: initial_date
+ character(len=StrKIND):: initial_date
integer:: iCell,nCellsSolve
integer,dimension(:),pointer:: landmask
@@ -585,7 +585,7 @@
type(fg_type),intent(inout):: fg
!local variables:
- character(len=128):: mess
+ character(len=StrKIND):: mess
integer:: iCell,iSoil,nCellsSolve,nSoilLevels
integer:: num_seaice_changes
integer,dimension(:),pointer:: landmask,isltyp,ivgtyp
Modified: branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_landuse.F
===================================================================
--- branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_landuse.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_landuse.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -59,8 +59,8 @@
integer,intent(in):: julday
!local variables:
- character(len=35) :: lutype
- character(len=128):: mess
+ character(len=StrKIND) :: lutype
+ character(len=StrKIND):: mess
integer,parameter:: land_unit = 15
integer,parameter:: open_ok = 0
Modified: branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_manager.F
===================================================================
--- branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_manager.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_manager.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -1,6 +1,7 @@
!=============================================================================================
module mpas_atmphys_manager
use mpas_configure
+ use mpas_kind_types
use mpas_grid_types
use mpas_timekeeping
@@ -77,7 +78,7 @@
type(MPAS_Time_Type):: currTime
logical:: LeapYear
- character(len=32) :: timeStamp
+ character(len=StrKIND) :: timeStamp
integer:: ierr
real(kind=RKIND):: utc_s
real(kind=RKIND):: xtime_m
Modified: branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_rrtmg_lwinit.F
===================================================================
--- branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_rrtmg_lwinit.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_rrtmg_lwinit.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -3,6 +3,7 @@
!=============================================================================================
module mpas_atmphys_rrtmg_lwinit
use mpas_dmpar
+ use mpas_kind_types
use mpas_grid_types
use mpas_atmphys_constants
use mpas_atmphys_utilities
@@ -44,7 +45,7 @@
!local variables:
integer:: i,istat,rrtmg_unit
logical:: opened
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
!---------------------------------------------------------------------------------------------
!get a unit to open init file:
@@ -130,7 +131,7 @@
integer,intent(in):: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Arrays fracrefao and fracrefbo are the Planck fractions for the lower
@@ -213,7 +214,7 @@
integer,intent(in):: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Arrays fracrefao and fracrefbo are the Planck fractions for the lower
@@ -290,7 +291,7 @@
integer,intent(in):: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Arrays fracrefao and fracrefbo are the Planck fractions for the lower
@@ -407,7 +408,7 @@
integer,intent(in):: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Arrays fracrefao and fracrefbo are the Planck fractions for the lower
@@ -495,7 +496,7 @@
integer,intent(in):: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Arrays fracrefao and fracrefbo are the Planck fractions for the lower
@@ -603,7 +604,7 @@
integer,intent(in):: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Arrays fracrefao and fracrefbo are the Planck fractions for the lower
@@ -683,7 +684,7 @@
integer,intent(in):: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Arrays fracrefao and fracrefbo are the Planck fractions for the lower
@@ -788,7 +789,7 @@
integer,intent(in):: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Arrays fracrefao and fracrefbo are the Planck fractions for the lower
@@ -901,7 +902,7 @@
integer,intent(in):: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Arrays fracrefao and fracrefbo are the Planck fractions for the lower
@@ -1004,7 +1005,7 @@
integer,intent(in):: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Arrays fracrefao and fracrefbo are the Planck fractions for the lower
@@ -1081,7 +1082,7 @@
integer,intent(in):: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Arrays fracrefao and fracrefbo are the Planck fractions for the lower
@@ -1173,7 +1174,7 @@
integer,intent(in):: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Arrays fracrefao and fracrefbo are the Planck fractions for the lower
@@ -1241,7 +1242,7 @@
integer,intent(in):: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Arrays fracrefao and fracrefbo are the Planck fractions for the lower
@@ -1332,7 +1333,7 @@
integer,intent(in) :: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Arrays fracrefao and fracrefbo are the Planck fractions for the lower
@@ -1414,7 +1415,7 @@
integer, intent(in) :: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Arrays fracrefao and fracrefbo are the Planck fractions for the lower
@@ -1494,7 +1495,7 @@
integer,intent(in) :: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Arrays fracrefao and fracrefbo are the Planck fractions for the lower
Modified: branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_rrtmg_swinit.F
===================================================================
--- branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_rrtmg_swinit.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_rrtmg_swinit.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -5,6 +5,7 @@
!=============================================================================================
module mpas_atmphys_rrtmg_swinit
use mpas_dmpar
+ use mpas_kind_types
use mpas_grid_types
use mpas_atmphys_constants
use mpas_atmphys_utilities
@@ -45,7 +46,7 @@
!local variables:
integer:: i,istat,rrtmg_unit
logical:: opened
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
!---------------------------------------------------------------------------------------------
!get a unit to open init file:
@@ -129,7 +130,7 @@
integer,intent(in):: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Array sfluxrefo contains the Kurucz solar source function for this band.
@@ -211,7 +212,7 @@
integer,intent(in):: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Array sfluxrefo contains the Kurucz solar source function for this band.
@@ -294,7 +295,7 @@
integer,intent(in):: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Array sfluxrefo contains the Kurucz solar source function for this band.
@@ -376,7 +377,7 @@
integer,intent(in):: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Array sfluxrefo contains the Kurucz solar source function for this band.
@@ -458,7 +459,7 @@
integer,intent(in):: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Array sfluxrefo contains the Kurucz solar source function for this band.
@@ -542,7 +543,7 @@
integer,intent(in):: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Array sfluxrefo contains the Kurucz solar source function for this band.
@@ -624,7 +625,7 @@
integer,intent(in):: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Array sfluxrefo contains the Kurucz solar source function for this band.
@@ -706,7 +707,7 @@
integer,intent(in):: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Array sfluxrefo contains the Kurucz solar source function for this band.
@@ -777,7 +778,7 @@
integer,intent(in):: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Array sfluxrefo contains the Kurucz solar source function for this band.
@@ -867,7 +868,7 @@
integer,intent(in):: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Array sfluxrefo contains the Kurucz solar source function for this band.
@@ -925,7 +926,7 @@
integer,intent(in):: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Array sfluxrefo contains the Kurucz solar source function for this band.
@@ -961,7 +962,7 @@
integer,intent(in):: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Array sfluxrefo contains the Kurucz solar source function for this band.
@@ -1034,7 +1035,7 @@
integer,intent(in):: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Array sfluxrefo contains the Kurucz solar source function for this band.
@@ -1102,7 +1103,7 @@
integer,intent(in):: rrtmg_unit
!local variables:
- character(len=80):: errmess
+ character(len=StrKIND):: errmess
integer:: istat
! Array sfluxrefo contains the Kurucz solar source function for this band.
Modified: branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_todynamics.F
===================================================================
--- branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_todynamics.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_todynamics.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -12,21 +12,17 @@
contains
!=============================================================================================
- subroutine physics_addtend(dminfo,cellsToSend,cellsToRecv,mesh,state,diag,tend, &
- tend_physics,mass,mass_edge)
+subroutine physics_addtend(mesh, state, diag, tend, tend_physics, mass, mass_edge)
!=============================================================================================
!input variables:
!----------------
- type(dm_info), intent(in):: dminfo
- type(mesh_type),intent(in):: mesh
- type(exchange_list),pointer:: cellsToSend,cellsToRecv
-
+type(mesh_type),intent(in):: mesh
type(state_type),intent(in):: state
type(diag_type),intent(in):: diag
- type(tend_physics_type),intent(in):: tend_physics
- real(kind=RKIND),dimension(:,:):: mass
- real(kind=RKIND),dimension(:,:):: mass_edge
+ type(tend_physics_type),intent(inout):: tend_physics
+ real(kind=RKIND),dimension(:,:),intent(in):: mass
+ real(kind=RKIND),dimension(:,:),intent(in):: mass_edge
!inout variables:
!----------------
@@ -34,6 +30,9 @@
!local variables:
!----------------
+
+ type(block_type),pointer :: block
+
integer:: i,iCell,k,n,nCells,nCellsSolve,nEdges,nEdgesSolve,nVertLevels
real(kind=RKIND),dimension(:,:),pointer:: theta_m,qv
@@ -57,6 +56,8 @@
!=============================================================================================
!write(0,*)
!write(0,*) '--- enter subroutine physics_add_tend:'
+
+ block => mesh % block
nCells = mesh % nCells
nEdges = mesh % nEdges
@@ -102,7 +103,7 @@
if(config_pbl_scheme .ne. 'off') then
allocate(rublten_Edge(nVertLevels,nEdges))
rublten_Edge(:,:) = 0.
- call tend_toEdges(dminfo,CellsToSend,CellsToRecv,mesh,rublten,rvblten,rublten_Edge)
+ call tend_toEdges(mesh,rublten,rvblten,rublten_Edge)
do i = 1, nEdgesSolve
do k = 1, nVertLevels
tend_u(k,i)=tend_u(k,i)+rublten_Edge(k,i)*mass_edge(k,i)
@@ -133,7 +134,7 @@
if(config_conv_deep_scheme .eq. 'tiedtke') then
allocate(rucuten_Edge(nVertLevels,nEdges))
rucuten_Edge(:,:) = 0.
- call tend_toEdges(dminfo,CellsToSend,CellsToRecv,mesh,rucuten,rvcuten,rucuten_Edge)
+ call tend_toEdges(mesh,rucuten,rvcuten,rucuten_Edge)
do i = 1, nEdgesSolve
do k = 1, nVertLevels
tend_u(k,i)=tend_u(k,i)+rucuten_Edge(k,i)*mass_edge(k,i)
@@ -211,15 +212,12 @@
end subroutine physics_addtend
!=============================================================================================
- subroutine tend_toEdges(dminfo,cellsToSend,cellsToRecv,mesh,Ux_tend,Uy_tend,U_tend)
+ subroutine tend_toEdges(mesh,Ux_tend,Uy_tend,U_tend)
!=============================================================================================
!input arguments:
!----------------
- type(dm_info),intent(in):: dminfo
type(mesh_type),intent(in):: mesh
- type(exchange_list),intent(in),pointer:: cellsToSend,cellsToRecv
-
real(kind=RKIND),intent(in),dimension(:,:):: Ux_tend,Uy_tend
!output arguments:
@@ -227,16 +225,21 @@
real(kind=RKIND),intent(out),dimension(:,:):: U_tend
!local variables:
+!-----------------
+ type(block_type),pointer :: block
+ type (field2DReal):: tempField
integer:: iCell,iEdge,k,j,nCells,nCellsSolve,nVertLevels
integer,dimension(:),pointer :: nEdgesOnCell
integer,dimension(:,:),pointer:: edgesOnCell
real(kind=RKIND),dimension(:,:),pointer:: east,north,edge_normal
- real(kind=RKIND),dimension(:,:),allocatable:: Ux_tend_halo,Uy_tend_halo
+ real(kind=RKIND),dimension(:,:),allocatable,target:: Ux_tend_halo,Uy_tend_halo
!---------------------------------------------------------------------------------------------
- nCells = mesh % nCells
+ block => mesh % block
+
+ nCells = mesh % nCells
nCellsSolve = mesh % nCellsSolve
nVertLevels = mesh % nVertLevels
@@ -258,11 +261,18 @@
enddo
enddo
- call mpas_dmpar_exch_halo_field2d_real( &
- dminfo,Ux_tend_halo,nVertLevels,nCells,cellsToSend,cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real( &
- dminfo,Uy_tend_halo,nVertLevels,nCells,cellsToSend,cellsToRecv)
+ tempField % block => block
+ tempField % dimSizes(1) = nVertLevels
+ tempField % dimSizes(2) = nCellsSolve
+ tempField % sendList => block % parinfo % cellsToSend
+ tempField % recvList => block % parinfo % cellsToRecv
+ tempField % array => Ux_tend_halo
+ call mpas_dmpar_exch_halo_field(tempField)
+
+ tempField % array => Uy_tend_halo
+ call mpas_dmpar_exch_halo_field(tempField)
+
U_tend(:,:) = 0.0
do iCell = 1, nCells
do j = 1, nEdgesOnCell(iCell)
Modified: branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_utilities.F
===================================================================
--- branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_utilities.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_utilities.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -1,12 +1,13 @@
!=============================================================================================
module mpas_atmphys_utilities
+ use mpas_kind_types
implicit none
private
public:: physics_error_fatal, &
physics_message
- character(len=256),public:: mpas_err_message
+ character(len=StrKIND),public:: mpas_err_message
contains
Modified: branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_vars.F
===================================================================
--- branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_vars.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_vars.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -11,15 +11,15 @@
!list of physics parameterizations:
!=============================================================================================
- character(len=32),public:: microp_scheme
- character(len=32),public:: conv_deep_scheme
- character(len=32),public:: conv_shallow_scheme
- character(len=32),public:: lsm_scheme
- character(len=32),public:: pbl_scheme
- character(len=32),public:: radt_cld_scheme
- character(len=32),public:: radt_lw_scheme
- character(len=32),public:: radt_sw_scheme
- character(len=32),public:: sfclayer_scheme
+ character(len=StrKIND),public:: microp_scheme
+ character(len=StrKIND),public:: conv_deep_scheme
+ character(len=StrKIND),public:: conv_shallow_scheme
+ character(len=StrKIND),public:: lsm_scheme
+ character(len=StrKIND),public:: pbl_scheme
+ character(len=StrKIND),public:: radt_cld_scheme
+ character(len=StrKIND),public:: radt_lw_scheme
+ character(len=StrKIND),public:: radt_sw_scheme
+ character(len=StrKIND),public:: sfclayer_scheme
!=============================================================================================
!wrf-variables:these variables are needed to keep calls to different physics parameterizations
Modified: branches/atmos_physics/src/core_hyd_atmos/Registry
===================================================================
--- branches/atmos_physics/src/core_hyd_atmos/Registry        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_hyd_atmos/Registry        2012-05-03 21:04:07 UTC (rev 1864)
@@ -4,7 +4,7 @@
namelist integer sw_model config_test_case 5
namelist character sw_model config_time_integration SRK3
namelist real sw_model config_dt 172.8
-namelist integer sw_model config_calendar_type MPAS_360DAY
+namelist character sw_model config_calendar_type 360day
namelist character sw_model config_start_time 0000-01-01_00:00:00
namelist character sw_model config_stop_time none
namelist character sw_model config_run_duration none
@@ -27,7 +27,12 @@
namelist character io config_restart_name restart.nc
namelist character io config_output_interval 06:00:00
namelist integer io config_frames_per_outfile 0
-namelist character io config_decomp_file_prefix graph.info.part.
+namelist integer io config_pio_num_iotasks 0
+namelist integer io config_pio_stride 1
+namelist character decomposition config_block_decomp_file_prefix graph.info.part.
+namelist integer decomposition config_number_of_blocks 0
+namelist logical decomposition config_explicit_proc_decomp .false.
+namelist character decomposition config_proc_decomp_file_prefix graph.info.part.
namelist logical restart config_do_restart false
namelist character restart config_restart_interval none
Modified: branches/atmos_physics/src/core_hyd_atmos/mpas_atmh_mpas_core.F
===================================================================
--- branches/atmos_physics/src/core_hyd_atmos/mpas_atmh_mpas_core.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_hyd_atmos/mpas_atmh_mpas_core.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -3,7 +3,7 @@
use mpas_framework
use mpas_timekeeping
- type (io_output_object) :: restart_obj
+ type (io_output_object), save :: restart_obj
integer :: current_outfile_frames
@@ -54,6 +54,7 @@
subroutine mpas_core_run(domain, output_obj, output_frame)
+ use mpas_kind_types
use mpas_grid_types
use mpas_io_output
use mpas_timer
@@ -69,7 +70,7 @@
type (block_type), pointer :: block_ptr
type (MPAS_Time_Type) :: currTime
- character(len=32) :: timeStamp
+ character(len=StrKIND) :: timeStamp
integer :: ierr
! Eventually, dt should be domain specific
@@ -77,7 +78,7 @@
currTime = mpas_get_clock_time(clock, MPAS_NOW, ierr)
call mpas_get_time(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr)
- write(0,*) 'Initial time ', timeStamp
+ write(0,*) 'Initial time ', trim(timeStamp)
call atmh_write_output_frame(output_obj, output_frame, domain)
@@ -89,7 +90,7 @@
currTime = mpas_get_clock_time(clock, MPAS_NOW, ierr)
call mpas_get_time(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr)
- write(0,*) 'Doing timestep ', timeStamp
+ write(0,*) 'Doing timestep ', trim(timeStamp)
call mpas_timer_start("time integration")
call atmh_do_timestep(domain, dt, timeStamp)
Modified: branches/atmos_physics/src/core_hyd_atmos/mpas_atmh_time_integration.F
===================================================================
--- branches/atmos_physics/src/core_hyd_atmos/mpas_atmh_time_integration.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_hyd_atmos/mpas_atmh_time_integration.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -114,39 +114,17 @@
if(debug) write(0,*) ' rk substep ', rk_step
- block => domain % blocklist
- do while (associated(block))
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % mesh % qtot % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % mesh % cqu % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % h % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % pressure % array(:,:), &
- block % mesh % nVertLevels+1, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % geopotential % array(:,:), &
- block % mesh % nVertLevels+1, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % alpha % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % pv_edge % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
- if (config_h_mom_eddy_visc4 > 0.0) then
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % divergence % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % vorticity % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nVertices, &
- block % parinfo % verticesToSend, block % parinfo % verticesToRecv)
- end if
- block => block % next
- end do
+ call mpas_dmpar_exch_halo_field(domain % blocklist % mesh % qtot)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % mesh % cqu)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % h)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % pressure)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % geopotential)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % alpha)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % pv_edge)
+ if (config_h_mom_eddy_visc4 > 0.0) then
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % divergence)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % vorticity)
+ end if
if(debug) write(0,*) ' rk substep ', rk_step
@@ -161,16 +139,8 @@
!
! --- update halos for tendencies
!
- block => domain % blocklist
- do while (associated(block))
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % u % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % theta % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- block => block % next
- end do
+ call mpas_dmpar_exch_halo_field(domain % blocklist % tend % u)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % tend % theta)
! --- advance over sub_steps
@@ -208,52 +178,20 @@
!
! --- update halos for prognostic variables
!
- block => domain % blocklist
- do while (associated(block))
-!! call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % u % array(:,:), &
-!! block % mesh % nVertLevels, block % mesh % nEdges, &
-!! block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % h_edge % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
-!! call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % mesh % uhAvg % array(:,:), &
-!! block % mesh % nVertLevels, block % mesh % nEdges, &
-!! block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
-!! call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % mesh % wwAvg % array(:,:), &
-!! block % mesh % nVertLevels+1, block % mesh % nCells, &
-!! block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % theta % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
-!! call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % h % array(:,:), &
-!! block % mesh % nVertLevels, block % mesh % nCells, &
-!! block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
-!! call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % h % array(:,:), &
-!! block % mesh % nVertLevels, block % mesh % nCells, &
-!! block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, block % mesh % dpsdt % array(:), &
- block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, block % state % time_levs(2) % state % surface_pressure % array(:), &
- block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % alpha % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % ww % array(:,:), &
- block % mesh % nVertLevels+1, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % pressure % array(:,:), &
- block % mesh % nVertLevels+1, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
-!! call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % mesh % pressure_old % array(:,:), &
-!! block % mesh % nVertLevels+1, block % mesh % nCells, &
-!! block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % geopotential % array(:,:), &
- block % mesh % nVertLevels+1, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- block => block % next
- end do
+!! call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % u)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % h_edge)
+!! call mpas_dmpar_exch_halo_field(domain % blocklist % mesh % uhAvg)
+!! call mpas_dmpar_exch_halo_field(domain % blocklist % mesh % wwAvg)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % theta)
+!! call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % h)
+!! call mpas_dmpar_exch_halo_field(domain % blocklist % tend % h)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % mesh % dpsdt)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % surface_pressure)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % alpha)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % ww)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % pressure)
+!! call mpas_dmpar_exch_halo_field(domain % blocklist % mesh % pressure_old)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % geopotential)
end do
@@ -274,24 +212,15 @@
block % state % time_levs(1) % state, block % state % time_levs(2) % state, &
block % mesh, rk_timestep(rk_step) )
else
- call atmh_advance_scalars_mono( block % tend, &
- block % state % time_levs(1) % state, block % state % time_levs(2) % state, &
- block % mesh, rk_timestep(rk_step), rk_step, 3, &
- domain % dminfo, block % parinfo % cellsToSend, block % parinfo % cellsToRecv )
+ call atmh_advance_scalars_mono(block % tend, &
+ block % state % time_levs(1) % state, block % state % time_levs(2) % state, &
+ block % mesh, rk_timestep(rk_step), rk_step, 3)
end if
block => block % next
end do
- block => domain % blocklist
- do while (associated(block))
- call mpas_dmpar_exch_halo_field3d_real(domain % dminfo, block % tend % scalars % array(:,:,:), &
- block % tend % num_scalars, block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field3d_real(domain % dminfo, block % state % time_levs(2) % state % scalars % array(:,:,:), &
- block % tend % num_scalars, block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- block => block % next
- end do
+ call mpas_dmpar_exch_halo_field(domain % blocklist % tend % scalars)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % scalars)
if(debug) write(0,*) ' advance scalars complete '
@@ -1461,7 +1390,7 @@
end subroutine atmh_advance_scalars
- subroutine atmh_advance_scalars_mono( tend, s_old, s_new, grid, dt, rk_step, rk_order, dminfo, cellsToSend, cellsToRecv)
+ subroutine atmh_advance_scalars_mono(tend, s_old, s_new, grid, dt, rk_step, rk_order)
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!
! Input: s - current model state
@@ -1476,11 +1405,11 @@
type (state_type), intent(in) :: s_old
type (state_type), intent(inout) :: s_new
type (mesh_type), intent(in) :: grid
- integer, intent(in) :: rk_step, rk_order
real (kind=RKIND), intent(in) :: dt
- type (dm_info), intent(in) :: dminfo
- type (exchange_list), pointer :: cellsToSend, cellsToRecv
+ integer, intent(in) :: rk_step, rk_order
+ type (block_type), pointer :: block
+
integer :: i, iCell, iEdge, k, iScalar, cell_upwind, cell1, cell2, num_scalars
real (kind=RKIND) :: flux, scalar_edge, d2fdx2_cell1, d2fdx2_cell2
real (kind=RKIND) :: fdir, flux_upwind, h_flux_upwind, s_upwind
@@ -1491,9 +1420,11 @@
real (kind=RKIND), dimension(:), pointer :: dvEdge, dcEdge, areaCell
integer, dimension(:,:), pointer :: cellsOnEdge
+ type (field3DReal) :: tempField
+
real (kind=RKIND), dimension( s_old % num_scalars, grid % nEdges+1) :: h_flux
- real (kind=RKIND), dimension( s_old % num_scalars, grid % nCells+1, 2 ) :: v_flux, v_flux_upwind, s_update
- real (kind=RKIND), dimension( s_old % num_scalars, grid % nCells+1, 2 ) :: scale_out, scale_in
+ real (kind=RKIND), dimension(2, s_old % num_scalars, grid % nCells+1) :: v_flux, v_flux_upwind, s_update
+ real (kind=RKIND), dimension(2, s_old % num_scalars, grid % nCells+1), target :: scale_out, scale_in
real (kind=RKIND), dimension( s_old % num_scalars ) :: s_max, s_min, s_max_update, s_min_update
integer :: nVertLevels, km0, km1, ktmp, kcp1, kcm1
@@ -1502,6 +1433,8 @@
real (kind=RKIND), parameter :: eps=1.e-20
real (kind=RKIND) :: coef_3rd_order
+ block => grid % block
+
num_scalars = s_old % num_scalars
scalar_old => s_old % scalars % array
@@ -1531,8 +1464,8 @@
km1 = 1
km0 = 2
- v_flux(:,:,km1) = 0.
- v_flux_upwind(:,:,km1) = 0.
+ v_flux(km1,:,:) = 0.
+ v_flux_upwind(km1,:,:) = 0.
scale_out(:,:,:) = 1.
scale_in(:,:,:) = 1.
@@ -1552,20 +1485,20 @@
cell_upwind = k
if (wwAvg(k+1,iCell) >= 0) cell_upwind = k+1
do iScalar=1,num_scalars
- v_flux(iScalar,iCell,km0) = dt * wwAvg(k+1,iCell) * &
+ v_flux(km0,iScalar,iCell) = dt * wwAvg(k+1,iCell) * &
(fnm(k+1) * scalar_new(iScalar,k+1,iCell) + fnp(k+1) * scalar_new(iScalar,k,iCell))
- v_flux_upwind(iScalar,iCell,km0) = dt * wwAvg(k+1,iCell) * scalar_old(iScalar,cell_upwind,iCell)
- v_flux(iScalar,iCell,km0) = v_flux(iScalar,iCell,km0) - v_flux_upwind(iScalar,iCell,km0)
+ v_flux_upwind(km0,iScalar,iCell) = dt * wwAvg(k+1,iCell) * scalar_old(iScalar,cell_upwind,iCell)
+ v_flux(km0,iScalar,iCell) = v_flux(km0, iScalar,iCell) - v_flux_upwind(km0,iScalar,iCell)
! v_flux(iScalar,iCell,km0) = 0. ! use only upwind - for testing
- s_update(iScalar,iCell,km0) = scalar_old(iScalar,k,iCell) * h_old(k,iCell) &
- - rdnw(k) * (v_flux_upwind(iScalar,iCell,km0) - v_flux_upwind(iScalar,iCell,km1))
+ s_update(km0,iScalar,iCell) = scalar_old(iScalar,k,iCell) * h_old(k,iCell) &
+ - rdnw(k) * (v_flux_upwind(km0,iScalar,iCell) - v_flux_upwind(km1,iScalar,iCell))
end do
else
do iScalar=1,num_scalars
- v_flux(iScalar,iCell,km0) = 0.
- v_flux_upwind(iScalar,iCell,km0) = 0.
- s_update(iScalar,iCell,km0) = scalar_old(iScalar,k,iCell) * h_old(k,iCell) &
- - rdnw(k) * (v_flux_upwind(iScalar,iCell,km0) - v_flux_upwind(iScalar,iCell,km1))
+ v_flux(km0,iScalar,iCell) = 0.
+ v_flux_upwind(km0,iScalar,iCell) = 0.
+ s_update(km0,iScalar,iCell) = scalar_old(iScalar,k,iCell) * h_old(k,iCell) &
+ - rdnw(k) * (v_flux_upwind(km0,iScalar,iCell) - v_flux_upwind(km1,iScalar,iCell))
end do
end if
@@ -1586,8 +1519,8 @@
h_flux_upwind = dt * uhAvg(k,iEdge) * dvEdge(iEdge) * scalar_old(iScalar,k,cell_upwind)
h_flux(iScalar,iEdge) = h_flux(iScalar,iEdge) - h_flux_upwind
! h_flux(iScalar,iEdge) = 0. ! use only upwind - for testing
- s_update(iScalar,cell1,km0) = s_update(iScalar,cell1,km0) - h_flux_upwind / grid % areaCell % array(cell1)
- s_update(iScalar,cell2,km0) = s_update(iScalar,cell2,km0) + h_flux_upwind / grid % areaCell % array(cell2)
+ s_update(km0,iScalar,cell1) = s_update(km0,iScalar,cell1) - h_flux_upwind / grid % areaCell % array(cell1)
+ s_update(km0,iScalar,cell2) = s_update(km0,iScalar,cell2) + h_flux_upwind / grid % areaCell % array(cell2)
end do
end do
@@ -1627,8 +1560,8 @@
h_flux_upwind = dt * uhAvg(k,iEdge) * dvEdge(iEdge) * scalar_old(iScalar,k,cell_upwind)
h_flux(iScalar,iEdge) = h_flux(iScalar,iEdge) - h_flux_upwind
! h_flux(iScalar,iEdge) = 0. ! use only upwind - for testing
- s_update(iScalar,cell1,km0) = s_update(iScalar,cell1,km0) - h_flux_upwind / grid % areaCell % array(cell1)
- s_update(iScalar,cell2,km0) = s_update(iScalar,cell2,km0) + h_flux_upwind / grid % areaCell % array(cell2)
+ s_update(km0,iScalar,cell1) = s_update(km0,iScalar,cell1) - h_flux_upwind / grid % areaCell % array(cell1)
+ s_update(km0,iScalar,cell2) = s_update(km0,iScalar,cell2) + h_flux_upwind / grid % areaCell % array(cell2)
end do
end do
@@ -1647,14 +1580,14 @@
s_max(iScalar) = max(scalar_old(iScalar,k,iCell), scalar_old(iScalar,kcp1,iCell), scalar_old(iScalar,kcm1,iCell))
s_min(iScalar) = min(scalar_old(iScalar,k,iCell), scalar_old(iScalar,kcp1,iCell), scalar_old(iScalar,kcm1,iCell))
- s_max_update(iScalar) = s_update(iScalar,iCell,km0)
- s_min_update(iScalar) = s_update(iScalar,iCell,km0)
+ s_max_update(iScalar) = s_update(km0,iScalar,iCell)
+ s_min_update(iScalar) = s_update(km0,iScalar,iCell)
! add in vertical flux to get max and min estimate
s_max_update(iScalar) = s_max_update(iScalar) &
- - rdnw(k) * (max(0.0_RKIND,v_flux(iScalar,iCell,km0)) - min(0.0_RKIND,v_flux(iScalar,iCell,km1)))
+ - rdnw(k) * (max(0.0_RKIND,v_flux(km0,iScalar,iCell)) - min(0.0_RKIND,v_flux(km1,iScalar,iCell)))
s_min_update(iScalar) = s_min_update(iScalar) &
- - rdnw(k) * (min(0.0_RKIND,v_flux(iScalar,iCell,km0)) - max(0.0_RKIND,v_flux(iScalar,iCell,km1)))
+ - rdnw(k) * (min(0.0_RKIND,v_flux(km0,iScalar,iCell)) - max(0.0_RKIND,v_flux(km1,iScalar,iCell)))
end do
@@ -1681,33 +1614,33 @@
if( config_positive_definite ) s_min(:) = 0.
do iScalar=1,num_scalars
- scale_out (iScalar,iCell,km0) = 1.
- scale_in (iScalar,iCell,km0) = 1.
+ scale_out (km0,iScalar,iCell) = 1.
+ scale_in (km0,iScalar,iCell) = 1.
s_max_update (iScalar) = s_max_update (iScalar) / h_new (k,iCell)
s_min_update (iScalar) = s_min_update (iScalar) / h_new (k,iCell)
- s_upwind = s_update(iScalar,iCell,km0) / h_new(k,iCell)
+ s_upwind = s_update(km0,iScalar,iCell) / h_new(k,iCell)
if ( s_max_update(iScalar) > s_max(iScalar) .and. config_monotonic) &
- scale_in (iScalar,iCell,km0) = max(0.0_RKIND,(s_max(iScalar)-s_upwind)/(s_max_update(iScalar)-s_upwind+eps))
+ scale_in (km0,iScalar,iCell) = max(0.0_RKIND,(s_max(iScalar)-s_upwind)/(s_max_update(iScalar)-s_upwind+eps))
if ( s_min_update(iScalar) < s_min(iScalar) ) &
- scale_out (iScalar,iCell,km0) = max(0.0_RKIND,(s_upwind-s_min(iScalar))/(s_upwind-s_min_update(iScalar)+eps))
+ scale_out (km0,iScalar,iCell) = max(0.0_RKIND,(s_upwind-s_min(iScalar))/(s_upwind-s_min_update(iScalar)+eps))
end do
end do ! end loop over cells to compute scale factor
- call mpas_dmpar_exch_halo_field2d_real(dminfo, scale_out(:,:,1), &
- num_scalars, grid % nCells, &
- cellsToSend, cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(dminfo, scale_out(:,:,2), &
- num_scalars, grid % nCells, &
- cellsToSend, cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(dminfo, scale_in(:,:,1), &
- num_scalars, grid % nCells, &
- cellsToSend, cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(dminfo, scale_in(:,:,2), &
- num_scalars, grid % nCells, &
- cellsToSend, cellsToRecv)
+ tempField % block => block
+ tempField % dimSizes(1) = 2
+ tempField % dimSizes(2) = num_scalars
+ tempField % dimSizes(3) = grid % nCells
+ tempField % sendList => block % parinfo % cellsToSend
+ tempField % recvList => block % parinfo % cellsToRecv
+ tempField % array => scale_in
+ call mpas_dmpar_exch_halo_field(tempField)
+
+ tempField % array => scale_out
+ call mpas_dmpar_exch_halo_field(tempField)
+
! rescale the horizontal fluxes
do iEdge = 1, grid % nEdges
@@ -1716,9 +1649,9 @@
do iScalar=1,num_scalars
flux = h_flux(iScalar,iEdge)
if (flux > 0) then
- flux = flux * min(scale_out(iScalar,cell1,km0), scale_in(iScalar,cell2,km0))
+ flux = flux * min(scale_out(km0,iScalar,cell1), scale_in(km0,iScalar,cell2))
else
- flux = flux * min(scale_in(iScalar,cell1,km0), scale_out(iScalar,cell2,km0))
+ flux = flux * min(scale_in(km0,iScalar,cell1), scale_out(km0,iScalar,cell2))
end if
h_flux(iScalar,iEdge) = flux
end do
@@ -1728,13 +1661,13 @@
do iCell=1,grid % nCells
do iScalar=1,num_scalars
- flux = v_flux(iScalar,iCell,km1)
+ flux = v_flux(km1,iScalar,iCell)
if (flux > 0) then
- flux = flux * min(scale_out(iScalar,iCell,km0), scale_in(iScalar,iCell,km1))
+ flux = flux * min(scale_out(km0,iScalar,iCell), scale_in(km1,iScalar,iCell))
else
- flux = flux * min(scale_in(iScalar,iCell,km0), scale_out(iScalar,iCell,km1))
+ flux = flux * min(scale_in(km0,iScalar,iCell), scale_out(km1,iScalar,iCell))
end if
- v_flux(iScalar,iCell,km1) = flux
+ v_flux(km1,iScalar,iCell) = flux
end do
end do
@@ -1748,8 +1681,8 @@
do iCell=1,grid % nCells
! add in upper vertical flux that was just renormalized
do iScalar=1,num_scalars
- s_update(iScalar,iCell,km0) = s_update(iScalar,iCell,km0) + rdnw(k) * v_flux(iScalar,iCell,km1)
- if (k > 1) s_update(iScalar,iCell,km1) = s_update(iScalar,iCell,km1) - rdnw(k-1)*v_flux(iScalar,iCell,km1)
+ s_update(km0,iScalar,iCell) = s_update(km0,iScalar,iCell) + rdnw(k) * v_flux(km1,iScalar,iCell)
+ if (k > 1) s_update(km1,iScalar,iCell) = s_update(km1,iScalar,iCell) - rdnw(k-1)*v_flux(km1,iScalar,iCell)
end do
end do
@@ -1757,9 +1690,9 @@
cell1 = cellsOnEdge(1,iEdge)
cell2 = cellsOnEdge(2,iEdge)
do iScalar=1,num_scalars
- s_update(iScalar,cell1,km0) = s_update(iScalar,cell1,km0) - &
+ s_update(km0,iScalar,cell1) = s_update(km0,iScalar,cell1) - &
h_flux(iScalar,iEdge) / grid % areaCell % array(cell1)
- s_update(iScalar,cell2,km0) = s_update(iScalar,cell2,km0) + &
+ s_update(km0,iScalar,cell2) = s_update(km0,iScalar,cell2) + &
h_flux(iScalar,iEdge) / grid % areaCell % array(cell2)
end do
end do
@@ -1768,13 +1701,13 @@
if (k > 1) then
do iCell=1,grid % nCells
do iScalar=1,num_scalars
- s_update(iScalar,iCell,km1) = s_update(iScalar,iCell,km1) / h_new(k-1,iCell)
+ s_update(km1,iScalar,iCell) = s_update(km1,iScalar,iCell) / h_new(k-1,iCell)
end do
end do
do iCell=1,grid % nCells
do iScalar=1,num_scalars
- scalar_new(iScalar,k-1,iCell) = s_update(iScalar,iCell,km1)
+ scalar_new(iScalar,k-1,iCell) = s_update(km1,iScalar,iCell)
end do
end do
end if
@@ -1787,7 +1720,7 @@
do iCell=1,grid % nCells
do iScalar=1,num_scalars
- scalar_new(iScalar,grid % nVertLevels,iCell) = s_update(iScalar,iCell,km1) / h_new(grid%nVertLevels,iCell)
+ scalar_new(iScalar,grid % nVertLevels,iCell) = s_update(km1,iScalar,iCell) / h_new(grid%nVertLevels,iCell)
end do
end do
Modified: branches/atmos_physics/src/core_init_nhyd_atmos/Registry
===================================================================
--- branches/atmos_physics/src/core_init_nhyd_atmos/Registry        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_init_nhyd_atmos/Registry        2012-05-03 21:04:07 UTC (rev 1864)
@@ -2,7 +2,7 @@
% namelist type namelist_record name default_value
%
namelist integer nhyd_model config_test_case 7
-namelist integer nhyd_model config_calendar_type MPAS_GREGORIAN
+namelist character nhyd_model config_calendar_type gregorian
namelist character nhyd_model config_start_time none
namelist character nhyd_model config_stop_time none
namelist integer nhyd_model config_theta_adv_order 3
@@ -28,8 +28,13 @@
namelist character io config_sfc_update_name sfc_update.nc
namelist character io config_output_name init.nc
namelist character io config_restart_name restart.nc
-namelist character io config_decomp_file_prefix graph.info.part.
namelist integer io config_frames_per_outfile 0
+namelist integer io config_pio_num_iotasks 0
+namelist integer io config_pio_stride 1
+namelist character decomposition config_block_decomp_file_prefix graph.info.part.
+namelist integer decomposition config_number_of_blocks 0
+namelist logical decomposition config_explicit_proc_decomp .false.
+namelist character decomposition config_proc_decomp_file_prefix graph.info.part.
namelist integer restart config_restart_interval 0
namelist logical restart config_do_restart false
namelist real restart config_restart_time 172800.0
Modified: branches/atmos_physics/src/core_init_nhyd_atmos/mpas_init_atm_test_cases.F
===================================================================
--- branches/atmos_physics/src/core_init_nhyd_atmos/mpas_init_atm_test_cases.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_init_nhyd_atmos/mpas_init_atm_test_cases.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -1,5 +1,6 @@
module init_atm_test_cases
+ use mpas_kind_types
use mpas_grid_types
use mpas_configure
use mpas_constants
@@ -8,6 +9,7 @@
use mpas_atmphys_initialize_real
use mpas_RBF_interpolation
use mpas_vector_reconstruction
+ use mpas_timer
! Added only clause to keep xlf90 from getting confused from the overloaded abs intrinsic in mpas_timekeeping
use mpas_timekeeping !, only: MPAS_Time_type, MPAS_TimeInterval_type, MPAS_Clock_type, &
@@ -35,7 +37,6 @@
type (block_type), pointer :: block_ptr
-
!
! Do some quick checks to make sure compile options are compatible with the chosen test case
!
@@ -103,8 +104,9 @@
write(0,*) ' real-data GFS test case '
block_ptr => domain % blocklist
do while (associated(block_ptr))
- call init_atm_test_case_gfs(domain % dminfo, block_ptr % mesh, block_ptr % fg, block_ptr % state % time_levs(1) % state, &
- block_ptr % diag, config_test_case, block_ptr % parinfo)
+ call init_atm_test_case_gfs(block_ptr % mesh, block_ptr % fg, &
+ block_ptr % state % time_levs(1) % state, block_ptr % diag, &
+ config_test_case)
if (config_met_interp) call physics_initialize_real(block_ptr % mesh, block_ptr % fg)
block_ptr => block_ptr % next
end do
@@ -126,6 +128,7 @@
end if
+
block_ptr => domain % blocklist
do while (associated(block_ptr))
do i=2,nTimeLevs
@@ -144,7 +147,6 @@
end do
endif
-
end subroutine init_atm_setup_test_case
!----------------------------------------------------------------------------------------------------------
@@ -2153,7 +2155,7 @@
end subroutine init_atm_test_case_mtn_wave
- subroutine init_atm_test_case_gfs(dminfo, grid, fg, state, diag, test_case, parinfo)
+ subroutine init_atm_test_case_gfs(grid, fg, state, diag, test_case)
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! Real-data test case using GFS data
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -2165,13 +2167,15 @@
implicit none
- type (dm_info), intent(in) :: dminfo
type (mesh_type), intent(inout) :: grid
type (fg_type), intent(inout) :: fg
type (state_type), intent(inout) :: state
type (diag_type), intent(inout) :: diag
integer, intent(in) :: test_case
+
+ type (block_type), pointer :: block
type (parallel_info), pointer :: parinfo
+ type (dm_info), pointer :: dminfo
real (kind=RKIND), parameter :: u0 = 35.0
real (kind=RKIND), parameter :: alpha_grid = 0. ! no grid rotation
@@ -2215,6 +2219,8 @@
real (kind=RKIND), dimension(:,:), pointer :: v
real (kind=RKIND), dimension(:,:), pointer :: sorted_arr
+ type (field1DReal):: tempField
+
real(kind=RKIND), dimension(:), pointer :: hs, hs1
real(kind=RKIND) :: hm, zh, dzmin, dzmina, dzmina_global, dzminf, sm
integer :: nsmterrain, kz, sfc_k
@@ -2239,7 +2245,7 @@
real (kind=RKIND), allocatable, dimension(:,:,:) :: vegfra
integer, dimension(:), pointer :: mask_array
integer, dimension(grid % nEdges), target :: edge_mask
- character (len=1024) :: fname
+ character (len=StrKIND) :: fname
real (kind=RKIND) :: u, flux, fluxk, lat1, lat2, eta_v, r_pert, u_pert, lat_pert, lon_pert, r
real (kind=RKIND) :: lat, lon, x, y
@@ -2273,6 +2279,11 @@
real (kind=RKIND) :: dlat
real (kind=RKIND) :: z_edge, z_edge3, d2fdx2_cell1, d2fdx2_cell2
+
+ block => grid % block
+ parinfo => block % parinfo
+ dminfo => block % domain % dminfo
+
weightsOnEdge => grid % weightsOnEdge % array
nEdgesOnEdge => grid % nEdgesOnEdge % array
nEdgesOnCell => grid % nEdgesOnCell % array
@@ -2376,7 +2387,7 @@
allocate(rarray(nx,ny,nzz))
allocate(nhs(grid % nCells))
nhs(:) = 0
- grid % ter % array(:) = 0.0
+ ter(:) = 0.0
do jTileStart=1,20401,ny-6
! do jTileStart=1,961,ny-6
@@ -2409,7 +2420,7 @@
grid % nCells, grid % maxEdges, grid % nEdgesOnCell % array, grid % cellsOnCell % array, &
grid % latCell % array, grid % lonCell % array)
- grid % ter % array(iPoint) = grid % ter % array(iPoint) + rarray(i,j,1)
+ ter(iPoint) = ter(iPoint) + rarray(i,j,1)
nhs(iPoint) = nhs(iPoint) + 1
end do
@@ -2419,7 +2430,7 @@
end do
do iCell=1, grid % nCells
- grid % ter % array(iCell) = grid % ter % array(iCell) / real(nhs(iCell))
+ ter(iCell) = ter(iCell) / real(nhs(iCell))
end do
deallocate(rarray)
@@ -2980,7 +2991,7 @@
nInterpPoints = grid % nCells
latPoints => grid % latCell % array
lonPoints => grid % lonCell % array
- destField1d => grid % ter % array
+ destField1d => ter
ndims = 1
end if
@@ -3046,11 +3057,9 @@
ter(iCell) = hs(iCell) - 0.25*ter(iCell)
end do
- call mpas_dmpar_exch_halo_field1d_real(dminfo, ter(:), &
- grid % nCells, &
- parinfo % cellsToSend, parinfo % cellsToRecv)
+ ! note that the variable ter used throughout this section is a pointer to grid % ter % array, here we are passing ter's parent field
+ call mpas_dmpar_exch_halo_field(grid % ter)
-
end do
do iCell=1,grid % nCells
@@ -3164,7 +3173,7 @@
sm = .05*min(0.5_RKIND*zw(k)/hm,1.0_RKIND)
do i=1,50
- do iCell=1,grid %nCells
+ do iCell=1,grid % nCells
hs1(iCell) = 0.
do j = 1,nEdgesOnCell(iCell)
@@ -3184,10 +3193,16 @@
end do
- call mpas_dmpar_exch_halo_field1d_real(dminfo, hs(:), &
- grid % nCells, &
- parinfo % cellsToSend, parinfo % cellsToRecv)
+ tempField % block => block
+ tempField % dimSizes(1) = grid % nCells
+ tempField % sendList => parinfo % cellsToSend
+ tempField % recvList => parinfo % cellsToRecv
+ tempField % array => hs
+ call mpas_timer_start("EXCHANGE_1D_REAL")
+ call mpas_dmpar_exch_halo_field(tempField)
+ call mpas_timer_stop("EXCHANGE_1D_REAL")
+
! dzmina = minval(hs(:)-hx(k-1,:))
dzmina = minval(zw(k)+ah(k)*hs(1:grid%nCellsSolve)-zw(k-1)-ah(k-1)*hx(k-1,1:grid%nCellsSolve))
call mpas_dmpar_min_real(dminfo, dzmina, dzmina_global)
@@ -4321,7 +4336,7 @@
type (MPAS_Clock_type) :: fg_clock
type (MPAS_Time_type) :: start_time, stop_time, curr_time
type (MPAS_TimeInterval_type) :: fg_interval
- character (len=32) :: timeString
+ character (len=StrKIND) :: timeString
! Set interpolation sequence to be used for SST/SKINTEMP field
Modified: branches/atmos_physics/src/core_nhyd_atmos/Registry
===================================================================
--- branches/atmos_physics/src/core_nhyd_atmos/Registry        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_nhyd_atmos/Registry        2012-05-03 21:04:07 UTC (rev 1864)
@@ -4,7 +4,7 @@
namelist integer nhyd_model config_test_case 0
namelist character nhyd_model config_time_integration SRK3
namelist real nhyd_model config_dt 600.0
-namelist integer nhyd_model config_calendar_type MPAS_GREGORIAN
+namelist character nhyd_model config_calendar_type gregorian
namelist character nhyd_model config_start_time 0000-01-01_00:00:00
namelist character nhyd_model config_stop_time none
namelist character nhyd_model config_run_duration none
@@ -44,7 +44,12 @@
namelist character io config_restart_name restart.nc
namelist character io config_output_interval 06:00:00
namelist integer io config_frames_per_outfile 0
-namelist character io config_decomp_file_prefix graph.info.part.
+namelist integer io config_pio_num_iotasks 0
+namelist integer io config_pio_stride 1
+namelist character decomposition config_block_decomp_file_prefix graph.info.part.
+namelist integer decomposition config_number_of_blocks 0
+namelist logical decomposition config_explicit_proc_decomp .false.
+namelist character decomposition config_proc_decomp_file_prefix graph.info.part.
namelist logical restart config_do_restart false
namelist logical restart config_do_DAcycling false
namelist character restart config_restart_interval none
@@ -316,10 +321,10 @@
namelist integer physics noznlev 59
namelist integer physics naerlev 29
-namelist integer physics cam_dim1 4
+namelist integer physics camdim1 4
dim nOznLevels namelist:noznlev
dim nAerLevels namelist:naerlev
-dim cam_dim1 namelist:cam_dim1
+dim cam_dim1 namelist:camdim1
%... DIMENSION NEEDED FOR LONGWAVE AND SHORTWAVE RADIATION FLUXES TO INCLUDE AN ADDITIONAL LAYER
%... BETWEEN THE TOP OF THE MODEL AND THE TOP OF THE ATMOSPHERE
Modified: branches/atmos_physics/src/core_nhyd_atmos/mpas_atm_mpas_core.F
===================================================================
--- branches/atmos_physics/src/core_nhyd_atmos/mpas_atm_mpas_core.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_nhyd_atmos/mpas_atm_mpas_core.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -2,8 +2,8 @@
use mpas_framework
- type (io_output_object) :: restart_obj
- type (io_input_object) :: sfc_update_obj
+ type (io_output_object), save :: restart_obj
+ type (io_input_object), save :: sfc_update_obj
integer :: current_outfile_frames
type (MPAS_Clock_type) :: clock
@@ -18,6 +18,7 @@
subroutine mpas_core_init(domain, startTimeStamp)
use mpas_configure
+ use mpas_kind_types
use mpas_grid_types
use atm_test_cases
@@ -32,8 +33,9 @@
type (field1DChar) :: xtime
type (MPAS_Time_type) :: startTime, sliceTime
type (MPAS_TimeInterval_type) :: timeDiff, minTimeDiff
- character(len=32) :: timeStamp
+ character(len=StrKIND) :: timeStamp
integer :: i
+ integer :: ierr
if (.not. config_do_restart) call atm_setup_test_case(domain)
@@ -58,50 +60,19 @@
sfc_update_obj % filename = trim(config_sfc_update_name)
sfc_update_obj % stream = STREAM_SFC
- call mpas_io_input_init(sfc_update_obj, domain % dminfo)
+ call mpas_io_input_init(sfc_update_obj, domain % blocklist, domain % dminfo)
!
! We need to decide which time slice to read from the surface file - read the most recent time slice that falls before or on the start time
!
- sfc_update_obj % time = 1
-
- if (sfc_update_obj % rdLocalTime <= 0) then
- write(0,*) 'Error: Couldn''t find any times in surface update file.'
+ sfc_update_obj % time = MPAS_seekStream(sfc_update_obj % io_stream, trim(config_start_time), MPAS_STREAM_LATEST_BEFORE, timeStamp, ierr)
+ if (ierr == MPAS_IO_ERR) then
+ write(0,*) 'Error: surface update file '//trim(sfc_update_obj % filename)//' did not contain any times at or before '//trim(config_start_time)
call mpas_dmpar_abort(domain % dminfo)
end if
-
- if (domain % dminfo % my_proc_id == IO_NODE) then
- allocate(xtime % ioinfo)
- xtime % ioinfo % start(1) = 1
- xtime % ioinfo % count(1) = sfc_update_obj % rdLocalTime
- allocate(xtime % array(sfc_update_obj % rdLocalTime))
- xtime % ioinfo % fieldName = 'xtime'
- call mpas_io_input_field(sfc_update_obj, xtime)
+ write(0,*) 'Starting model with surface time ', trim(timeStamp)
- call mpas_set_timeInterval(interval=minTimeDiff, DD=10000)
- call mpas_set_time(curr_time=startTime, dateTimeString=config_start_time)
-
- do i=1,sfc_update_obj % rdLocalTime
- call mpas_set_time(curr_time=sliceTime, dateTimeString=xtime % array(i))
- timeDiff = abs(sliceTime - startTime)
- if (sliceTime <= startTime .and. timeDiff < minTimeDiff) then
- minTimeDiff = timeDiff
- sfc_update_obj % time = i
- end if
- end do
-
- timeStamp = xtime % array(sfc_update_obj % time)
-
- deallocate(xtime % ioinfo)
- deallocate(xtime % array)
- end if
-
- call mpas_dmpar_bcast_int(domain % dminfo, sfc_update_obj % time)
- call mpas_dmpar_bcast_char(domain % dminfo, timeStamp)
-
- write(0,*) 'Starting model with surface time ', timeStamp
-
end if
end subroutine mpas_core_init
@@ -190,25 +161,19 @@
type (mesh_type), intent(inout) :: mesh
real (kind=RKIND), intent(in) :: dt
- call mpas_dmpar_exch_halo_field2d_real(dminfo, block % state % time_levs(1) % state % u % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(1) % state % u)
if (.not. config_do_restart .or. (config_do_restart .and. config_do_DAcycling)) then
call atm_init_coupled_diagnostics( block % state % time_levs(1) % state, block % diag, mesh)
end if
call atm_compute_solve_diagnostics(dt, block % state % time_levs(1) % state, block % diag, mesh)
- call mpas_dmpar_exch_halo_field2d_real(dminfo, block % diag % pv_edge % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
- call mpas_dmpar_exch_halo_field2d_real(dminfo, block % diag % ru % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
- call mpas_dmpar_exch_halo_field2d_real(dminfo, block % diag % rw % array, &
- block % mesh % nVertLevels+1, block % mesh % nCells, &
- block % parinfo % CellsToSend, block % parinfo % CellsToRecv)
+ call mpas_dmpar_exch_halo_field(block % diag % pv_edge)
+ call mpas_dmpar_exch_halo_field(block % diag % ru)
+
+ call mpas_dmpar_exch_halo_field(block % diag % rw)
+
call mpas_rbf_interp_initialize(mesh)
call mpas_init_reconstruct(mesh)
call mpas_reconstruct(mesh, block % state % time_levs(1) % state % u % array, &
@@ -263,6 +228,7 @@
subroutine mpas_core_run(domain, output_obj, output_frame)
use mpas_grid_types
+ use mpas_kind_types
use mpas_io_output
use mpas_timer
@@ -276,7 +242,7 @@
type (block_type), pointer :: block_ptr
type (MPAS_Time_Type) :: currTime
- character(len=32) :: timeStamp
+ character(len=StrKIND) :: timeStamp
integer :: itimestep
integer :: ierr
@@ -292,17 +258,13 @@
currTime = mpas_get_clock_time(clock, MPAS_NOW, ierr)
call mpas_get_time(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr)
- write(0,*) 'Begin timestep ', timeStamp
+ write(0,*) 'Begin timestep ', trim(timeStamp)
! Input external updates (i.e. surface)
if (mpas_is_alarm_ringing(clock, sfcAlarmID, ierr=ierr)) then
call mpas_reset_clock_alarm(clock, sfcAlarmID, ierr=ierr)
- call mpas_read_and_distribute_fields(domain % dminfo, sfc_update_obj, domain % blocklist, &
- readCellStart, nReadCells, readEdgeStart, nReadEdges, readVertexStart, nReadVertices, &
- readVertLevelStart, nReadVertLevels, &
- sendCellList, recvCellList, sendEdgeList, recvEdgeList, sendVertexList, recvVertexList, &
- sendVertLevelList, recvVertLevelList)
+ call mpas_read_and_distribute_fields(sfc_update_obj)
sfc_update_obj % time = sfc_update_obj % time + 1
end if
@@ -458,6 +420,7 @@
subroutine atm_do_timestep(domain, dt, itimestep)
use mpas_grid_types
+ use mpas_kind_types
use atm_time_integration
#ifdef DO_PHYSICS
use mpas_atmphys_control
@@ -474,7 +437,7 @@
type (MPAS_Time_Type) :: startTime, currTime
type (MPAS_TimeInterval_Type) :: xtimeTime
- character(len=32) :: timeStamp
+ character(len=StrKIND) :: timeStamp
integer :: s, s_n, s_d
real (kind=RKIND) :: xtime_s
integer :: ierr
Modified: branches/atmos_physics/src/core_nhyd_atmos/mpas_atm_time_integration.F
===================================================================
--- branches/atmos_physics/src/core_nhyd_atmos/mpas_atm_time_integration.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_nhyd_atmos/mpas_atm_time_integration.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -1,6 +1,7 @@
module atm_time_integration
use mpas_grid_types
+ use mpas_kind_types
use mpas_configure
use mpas_constants
use mpas_dmpar
@@ -38,7 +39,7 @@
type (block_type), pointer :: block
type (MPAS_Time_type) :: currTime
type (MPAS_TimeInterval_type) :: dtInterval
- character (len=32) :: xtime
+ character (len=StrKIND) :: xtime
if (trim(config_time_integration) == 'SRK3') then
call atm_srk3(domain, dt, itimestep)
@@ -98,6 +99,7 @@
real (kind=RKIND) :: domain_mass, scalar_mass, scalar_min, scalar_max
real (kind=RKIND) :: global_domain_mass, global_scalar_mass, global_scalar_min, global_scalar_max
+
!
! Initialize RK weights
!
@@ -122,30 +124,22 @@
! the so-called owned edges?
- block => domain % blocklist
- do while (associated(block))
! WCS-parallel: first three and rtheta_p arise from scalar transport and microphysics update (OK). Others come from where?
! theta_m
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(1) % state % theta_m % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(1) % state % theta_m)
+
! scalars
- call mpas_dmpar_exch_halo_field3d_real(domain % dminfo, block % state % time_levs(1) % state % scalars % array(:,:,:), &
- block % state % time_levs(1) % state % num_scalars, block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(1) % state % scalars)
+
! pressure_p
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % diag % pressure_p % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % diag % pressure_p)
+
! rtheta_p
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % diag % rtheta_p % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- block => block % next
- end do
+ call mpas_dmpar_exch_halo_field(domain % blocklist % diag % rtheta_p)
+
block => domain % blocklist
do while (associated(block))
! We are setting values in the halo here, so no communications are needed.
@@ -185,9 +179,12 @@
if (debug) write(0,*) ' add physics tendencies '
block => domain % blocklist
do while (associated(block))
- call physics_addtend( domain % dminfo , block % parinfo % cellsToSend, block % parinfo % cellsToRecv, &
- block % mesh , block % state % time_levs(1) % state, block % diag, block % tend, &
- block % tend_physics , block % state % time_levs(2) % state % rho_zz % array(:,:), &
+ call physics_addtend( block % mesh, &
+ block % state % time_levs(1) % state, &
+ block % diag, &
+ block % tend, &
+ block % tend_physics, &
+ block % state % time_levs(2) % state % rho_zz % array(:,:), &
block % diag % rho_edge % array(:,:) )
block => block % next
end do
@@ -199,19 +196,13 @@
! because we are solving for all edges of owned cells
!***********************************
- block => domain % blocklist
- do while (associated(block))
! tend_u
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % u % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
- block => block % next
- end do
+ call mpas_dmpar_exch_halo_field(domain % blocklist % tend % u, (/ 1 /))
block => domain % blocklist
do while (associated(block))
call atm_set_smlstep_pert_variables( block % state % time_levs(1) % state, block % state % time_levs(2) % state, &
- block % tend, block % diag, block % mesh )
+ block % tend, block % diag, block % mesh )
call atm_compute_vert_imp_coefs( block % state % time_levs(2) % state, block % mesh, block % diag, rk_sub_timestep(rk_step) )
block => block % next
end do
@@ -223,7 +214,7 @@
block => domain % blocklist
do while (associated(block))
call atm_advance_acoustic_step( block % state % time_levs(2) % state, block % diag, block % tend, &
- block % mesh, rk_sub_timestep(rk_step) )
+ block % mesh, rk_sub_timestep(rk_step) )
block => block % next
end do
@@ -233,20 +224,12 @@
! WCS-parallel: is this a candidate for a smaller stencil? we need only communicate cells that share edges with owned cells.
- block => domain % blocklist
- do while (associated(block))
! rtheta_pp
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % diag % rtheta_pp % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- block => block % next
- end do
+ call mpas_dmpar_exch_halo_field(domain % blocklist % diag % rtheta_pp, (/ 1 /))
end do ! end of small timestep loop
! will need communications here for rho_pp
- block => domain % blocklist
- do while (associated(block))
! WCS-parallel: is communication of rw_p and rho_pp because of limiter (pd or mono scheme?),
! or is it needed for the large-step variable recovery (to get decoupled variables)?
@@ -258,43 +241,36 @@
! MGD seems necessary
! rw_p
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % diag % rw_p % array(:,:), &
- block % mesh % nVertLevels+1, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
+ !CR: SMALLER STENCIL?: call mpas_dmpar_exch_halo_field(block % diag % rw_p, (/ 1 /))
+ call mpas_dmpar_exch_halo_field(domain % blocklist % diag % rw_p)
+
! MGD seems necessary
! ru_p
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % diag % ru_p % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
+ !CR: SMALLER STENCIL?: call mpas_dmpar_exch_halo_field(block % diag % ru_p, (/ 2 /))
+ call mpas_dmpar_exch_halo_field(domain % blocklist % diag % ru_p)
+
! rho_pp
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % diag % rho_pp % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- block => block % next
- end do
+ call mpas_dmpar_exch_halo_field(domain % blocklist % diag % rho_pp)
+ ! the second layer of halo cells must be exchanged before calling atm_recover_large_step_variables
+ call mpas_dmpar_exch_halo_field(domain % blocklist % diag % rtheta_pp, (/ 2 /))
+
+
block => domain % blocklist
do while (associated(block))
- call atm_recover_large_step_variables( block % state % time_levs(2) % state, &
+ call atm_recover_large_step_variables( block % state % time_levs(2) % state, &
block % diag, block % tend, block % mesh, &
rk_timestep(rk_step), number_sub_steps(rk_step), rk_step )
-
block => block % next
end do
! ************ advection of moist variables here...
- block => domain % blocklist
- do while (associated(block))
! u
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % u % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
- block => block % next
- end do
+ !CR: SMALLER STENCIL?: call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % u, (/ 3 /))
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % u)
-
if (config_scalar_advection) then
block => domain % blocklist
@@ -305,16 +281,16 @@
! so we keep the advance_scalars routine as well
!
if (rk_step < 3 .or. (.not. config_monotonic .and. .not. config_positive_definite)) then
- call atm_advance_scalars( block % tend, &
+ call atm_advance_scalars( block % tend, &
block % state % time_levs(1) % state, block % state % time_levs(2) % state, &
block % diag, &
block % mesh, rk_timestep(rk_step) )
else
- call atm_advance_scalars_mono( block % tend, &
- block % state % time_levs(1) % state, block % state % time_levs(2) % state, &
- block % diag, &
- block % mesh, rk_timestep(rk_step), rk_step, 3, &
- domain % dminfo, block % parinfo % cellsToSend, block % parinfo % cellsToRecv )
+ block % domain = domain
+ call atm_advance_scalars_mono( block % tend, &
+ block % state % time_levs(1) % state, block % state % time_levs(2) % state, &
+ block % diag, block % mesh, &
+ rk_timestep(rk_step), rk_step, 3 )
end if
block => block % next
end do
@@ -354,33 +330,25 @@
! need communications here to fill out u, w, theta_m, p, and pp, scalars, etc
! so that they are available for next RK step or the first rk substep of the next timestep
- block => domain % blocklist
- do while (associated(block))
!MGD seems necessary
! w
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % w % array(:,:), &
- block % mesh % nVertLevels+1, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % w)
+
! pv_edge
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % diag % pv_edge % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % diag % pv_edge)
+
! rho_edge
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % diag % rho_edge % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % diag % rho_edge)
! **** this will always be needed - perhaps we can cover this with compute_solve_diagnostics
! scalars
- call mpas_dmpar_exch_halo_field3d_real(domain % dminfo, block % state % time_levs(2) % state % scalars % array(:,:,:), &
- block % state % time_levs(2) % state % num_scalars, block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- block => block % next
- end do
+ if(rk_step < 3) then
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % scalars)
+ end if
- end do ! rk_step loop
+ end do ! rk_step loop
!... compute full velocity vectors at cell centers:
block => domain % blocklist
@@ -1434,7 +1402,7 @@
!---------------------------
- subroutine atm_advance_scalars_mono( tend, s_old, s_new, diag, grid, dt, rk_step, rk_order, dminfo, cellsToSend, cellsToRecv)
+ subroutine atm_advance_scalars_mono(tend, s_old, s_new, diag, grid, dt, rk_step, rk_order)
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!
! Input: s - current model state
@@ -1442,23 +1410,20 @@
!
implicit none
- type (tend_type), intent(in) :: tend
- type (state_type), intent(in) :: s_old
- type (state_type), intent(inout) :: s_new
- type (diag_type), intent(in) :: diag
- type (mesh_type), intent(in) :: grid
- real (kind=RKIND) :: dt
+ type (tend_type),intent(in) :: tend
+ type (state_type),intent(inout) :: s_old
+ type (state_type),intent(inout) :: s_new
+ type (diag_type),intent(in) :: diag
+ type (mesh_type),intent(in) :: grid
+ real (kind=RKIND),intent(in) :: dt
+ integer, intent(in) :: rk_step, rk_order
- integer, intent(in) :: rk_step, rk_order
- type (dm_info), intent(in) :: dminfo
- type (exchange_list), pointer :: cellsToSend, cellsToRecv
-
-
+ type (block_type), pointer :: block
integer :: i, iCell, iEdge, k, iScalar, cell1, cell2
real (kind=RKIND) :: flux, scalar_edge, d2fdx2_cell1, d2fdx2_cell2, scalar_weight
real (kind=RKIND) :: scalar_weight_cell1, scalar_weight_cell2
- real (kind=RKIND), dimension(:,:,:), pointer :: scalar_old_in, scalar_new_in, scalar_tend
+ real (kind=RKIND), dimension(:,:,:), pointer :: scalar_tend
real (kind=RKIND), dimension(:,:,:), pointer :: deriv_two
real (kind=RKIND), dimension(:,:), pointer :: uhAvg, h_old, h_new, wwAvg, rho_edge, rho_zz, zgrid, kdiff
real (kind=RKIND), dimension(:), pointer :: dvEdge, dcEdge, areaCell, qv_init
@@ -1468,9 +1433,11 @@
integer, dimension(:), pointer :: nAdvCellsForEdge
real (kind=RKIND), dimension(:,:), pointer :: adv_coefs, adv_coefs_3rd
+ type (field2DReal) :: tempField
+
real (kind=RKIND), dimension( grid % nVertLevels, grid % nCells ) :: scalar_old, scalar_new
real (kind=RKIND), dimension( grid % nVertLevels, grid % nCells ) :: s_max, s_min, s_update
- real (kind=RKIND), dimension( grid % nVertLevels, grid % nCells ) :: scale_in, scale_out
+ real (kind=RKIND), dimension( grid % nVertLevels, grid % nCells ), target :: scale_in, scale_out
real (kind=RKIND), dimension( grid % nVertLevels, grid % nEdges ) :: flux_arr
real (kind=RKIND), dimension( grid % nVertLevels + 1, grid % nCells ) :: wdtn
@@ -1497,10 +1464,10 @@
flux4(q_im2, q_im1, q_i, q_ip1, ua) + &
coef3*abs(ua)*((q_ip1 - q_im2)-3.*(q_i-q_im1))/12.0
+ block => grid % block
+
coef_3rd_order = config_coef_3rd_order
- scalar_old_in => s_old % scalars % array
- scalar_new_in => s_new % scalars % array
kdiff => diag % kdiff % array
deriv_two => grid % deriv_two % array
uhAvg => diag % ruAvg % array
@@ -1545,7 +1512,7 @@
do iCell = 1,grid%nCellsSolve
do k = 1, grid%nVertLevels
do iScalar = 1,s_old%num_scalars
- scalar_old_in(iScalar,k,iCell) = scalar_old_in(iScalar,k,iCell)+dt*scalar_tend(iScalar,k,iCell) / h_old(k,iCell)
+ s_old % scalars % array(iScalar,k,iCell) = s_old % scalars % array(iScalar,k,iCell)+dt*scalar_tend(iScalar,k,iCell) / h_old(k,iCell)
scalar_tend(iScalar,k,iCell) = 0.
end do
end do
@@ -1553,12 +1520,7 @@
! halo exchange
- call mpas_dmpar_exch_halo_field3d_real( dminfo, &
- scalar_old_in(:,:,:), &
- s_old % num_scalars, &
- grid % nVertLevels, &
- grid % nCells, &
- cellsToSend, cellsToRecv )
+ call mpas_dmpar_exch_halo_field(s_old % scalars)
!
! Runge Kutta integration, so we compute fluxes from scalar_new values, update starts from scalar_old
@@ -1573,8 +1535,8 @@
do iCell = 1, grid%nCells
do k=1, grid%nVertLevels
- scalar_old(k,iCell) = scalar_old_in(iScalar,k,iCell)
- scalar_new(k,iCell) = scalar_new_in(iScalar,k,iCell)
+ scalar_old(k,iCell) = s_old % scalars % array(iScalar,k,iCell)
+ scalar_new(k,iCell) = s_new % scalars % array(iScalar,k,iCell)
end do
end do
@@ -1734,17 +1696,23 @@
!
! communicate scale factors here
!
- call mpas_dmpar_exch_halo_field2d_real( dminfo, &
- scale_in(:,:), &
- grid % nVertLevels, &
- grid % nCells, &
- cellsToSend, cellsToRecv )
- call mpas_dmpar_exch_halo_field2d_real( dminfo, &
- scale_out(:,:), &
- grid % nVertLevels, &
- grid % nCells, &
- cellsToSend, cellsToRecv )
!
+! WCS_halo_opt_2 - communicate only first halo row in these next two exchanges
+!
+
+ tempField % block => block
+ tempField % dimSizes(1) = grid % nVertLevels
+ tempField % dimSizes(2) = grid % nCells
+ tempField % sendList => block % parinfo % cellsToSend
+ tempField % recvList => block % parinfo % cellsToRecv
+
+ tempField % array => scale_in
+ call mpas_dmpar_exch_halo_field(tempField, (/ 1 /))
+
+ tempField % array => scale_out
+ call mpas_dmpar_exch_halo_field(tempField, (/ 1 /))
+
+!
! rescale the fluxes
!
do iEdge = 1, grid % nEdges
@@ -1814,7 +1782,7 @@
do iCell = 1, grid%nCells
do k=1, grid%nVertLevels
- scalar_new_in(iScalar,k,iCell) = max(0.0_RKIND,scalar_new(k,iCell))
+ s_new % scalars % array(iScalar,k,iCell) = max(0.0_RKIND,scalar_new(k,iCell))
end do
end do
Property changes on: branches/atmos_physics/src/core_ocean
___________________________________________________________________
Deleted: svn:mergeinfo
- /branches/cam_mpas_nh/src/core_ocean:1260-1270
/branches/ocean_projects/imp_vert_mix_mrp/src/core_ocean:754-986
/branches/ocean_projects/rayleigh/src/core_ocean:1298-1311
/branches/ocean_projects/split_explicit_mrp/src/core_ocean:1134-1138
/branches/ocean_projects/split_explicit_timestepping/src/core_ocean:1044-1097
/branches/ocean_projects/time_averaging/src/core_ocean:1271-1305
/branches/ocean_projects/vert_adv_mrp/src/core_ocean:704-745
/branches/source_renaming/src/core_ocean:1082-1113
/branches/time_manager/src/core_ocean:924-962
Modified: branches/atmos_physics/src/core_ocean/Makefile
===================================================================
--- branches/atmos_physics/src/core_ocean/Makefile        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_ocean/Makefile        2012-05-03 21:04:07 UTC (rev 1864)
@@ -5,6 +5,7 @@
mpas_ocn_advection.o \
         mpas_ocn_thick_hadv.o \
         mpas_ocn_thick_vadv.o \
+ mpas_ocn_gm.o \
         mpas_ocn_vel_coriolis.o \
         mpas_ocn_vel_vadv.o \
         mpas_ocn_vel_hmix.o \
@@ -36,6 +37,15 @@
         mpas_ocn_vmix_coefs_tanh.o \
         mpas_ocn_restoring.o \
         mpas_ocn_tendency.o \
+         mpas_ocn_tracer_advection.o \
+         mpas_ocn_tracer_advection_std.o \
+         mpas_ocn_tracer_advection_std_hadv.o \
+         mpas_ocn_tracer_advection_std_vadv.o \
+         mpas_ocn_tracer_advection_std_vadv2.o \
+         mpas_ocn_tracer_advection_std_vadv3.o \
+         mpas_ocn_tracer_advection_std_vadv4.o \
+         mpas_ocn_tracer_advection_mono.o \
+         mpas_ocn_tracer_advection_helpers.o \
mpas_ocn_time_integration.o \
mpas_ocn_time_integration_rk4.o \
mpas_ocn_time_integration_split.o \
@@ -70,6 +80,8 @@
mpas_ocn_thick_vadv.o:
+mpas_ocn_gm.o:
+
mpas_ocn_vel_pressure_grad.o:
mpas_ocn_vel_vadv.o:
@@ -120,6 +132,24 @@
mpas_ocn_tracer_hmix_del4.o:
+mpas_ocn_tracer_advection.o: mpas_ocn_tracer_advection_std.o mpas_ocn_tracer_advection_mono.o
+
+mpas_ocn_tracer_advection_std.o: mpas_ocn_tracer_advection_std_hadv.o mpas_ocn_tracer_advection_std_vadv.o
+
+mpas_ocn_tracer_advection_std_hadv.o: mpas_ocn_tracer_advection_helpers.o
+
+mpas_ocn_tracer_advection_std_vadv.o: mpas_ocn_tracer_advection_std_vadv2.o mpas_ocn_tracer_advection_std_vadv3.o mpas_ocn_tracer_advection_std_vadv4.o
+
+mpas_ocn_tracer_advection_std_vadv2.o: mpas_ocn_tracer_advection_helpers.o
+
+mpas_ocn_tracer_advection_std_vadv3.o: mpas_ocn_tracer_advection_helpers.o
+
+mpas_ocn_tracer_advection_std_vadv4.o: mpas_ocn_tracer_advection_helpers.o
+
+mpas_ocn_tracer_advection_mono.o: mpas_ocn_tracer_advection_helpers.o
+
+mpas_ocn_tracer_advection_helpers.o:
+
mpas_ocn_restoring.o:
mpas_ocn_vmix.o: mpas_ocn_vmix_coefs_const.o mpas_ocn_vmix_coefs_rich.o mpas_ocn_vmix_coefs_tanh.o
@@ -140,6 +170,7 @@
                         mpas_ocn_test_cases.o \
                                         mpas_ocn_advection.o \
                                         mpas_ocn_thick_hadv.o \
+ mpas_ocn_gm.o \
                                         mpas_ocn_thick_vadv.o \
                                         mpas_ocn_vel_coriolis.o \
                                         mpas_ocn_vel_vadv.o \
@@ -170,6 +201,15 @@
                                         mpas_ocn_vmix_coefs_rich.o \
                                         mpas_ocn_vmix_coefs_tanh.o \
                                         mpas_ocn_restoring.o \
+                                         mpas_ocn_tracer_advection.o \
+                                         mpas_ocn_tracer_advection_std.o \
+                                         mpas_ocn_tracer_advection_std_hadv.o \
+                                         mpas_ocn_tracer_advection_std_vadv.o \
+                                         mpas_ocn_tracer_advection_std_vadv2.o \
+                                         mpas_ocn_tracer_advection_std_vadv3.o \
+                                         mpas_ocn_tracer_advection_std_vadv4.o \
+                                         mpas_ocn_tracer_advection_mono.o \
+                                         mpas_ocn_tracer_advection_helpers.o \
                                         mpas_ocn_tendency.o \
                                         mpas_ocn_time_integration.o \
                                         mpas_ocn_time_integration_rk4.o \
Modified: branches/atmos_physics/src/core_ocean/Registry
===================================================================
--- branches/atmos_physics/src/core_ocean/Registry        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_ocean/Registry        2012-05-03 21:04:07 UTC (rev 1864)
@@ -5,23 +5,31 @@
namelist character sw_model config_time_integration RK4
namelist logical sw_model config_rk_filter_btr_mode false
namelist real sw_model config_dt 172.8
-namelist integer sw_model config_calendar_type MPAS_360DAY
+namelist character sw_model config_calendar_type 360day
namelist character sw_model config_start_time 0000-01-01_00:00:00
namelist character sw_model config_stop_time none
namelist character sw_model config_run_duration none
namelist integer sw_model config_stats_interval 100
namelist logical sw_model config_initial_stats false
+namelist logical sw_model config_prescribe_velocity false
+namelist logical sw_model config_prescribe_thickness false
namelist character io config_input_name grid.nc
namelist character io config_output_name output.nc
namelist character io config_restart_name restart.nc
namelist character io config_output_interval 24:00:00
namelist integer io config_frames_per_outfile 0
-namelist character io config_decomp_file_prefix graph.info.part.
+namelist integer io config_pio_num_iotasks 0
+namelist integer io config_pio_stride 1
+namelist character decomposition config_block_decomp_file_prefix graph.info.part.
+namelist integer decomposition config_number_of_blocks 0
+namelist logical decomposition config_explicit_proc_decomp .false.
+namelist character decomposition config_proc_decomp_file_prefix graph.info.part.
namelist logical restart config_do_restart false
namelist character restart config_restart_interval none
namelist character grid config_vert_grid_type isopycnal
namelist character grid config_pressure_type pressure
namelist real grid config_rho0 1028
+namelist logical grid config_enforce_zstar_at_restart false
namelist integer split_explicit_ts config_n_ts_iter 2
namelist integer split_explicit_ts config_n_bcl_iter_beg 2
namelist integer split_explicit_ts config_n_bcl_iter_mid 2
@@ -44,9 +52,11 @@
namelist logical hmix config_include_KE_vertex false
namelist real hmix config_h_tracer_eddy_diff2 0.0
namelist real hmix config_h_tracer_eddy_diff4 0.0
-namelist real hmix config_apvm_upwinding 0.5
+namelist real hmix config_h_kappa 0.0
+namelist real hmix config_h_kappa_q 0.0
namelist logical hmix config_rayleigh_friction false
namelist real hmix config_rayleigh_damping_coeff 0.0
+namelist real hmix config_apvm_scale_factor 0.0
namelist character vmix config_vert_visc_type const
namelist character vmix config_vert_diff_type const
namelist logical vmix config_implicit_vertical_mix .true.
@@ -66,11 +76,12 @@
namelist real vmix_tanh config_zWidth_tanh 100
namelist character eos config_eos_type linear
namelist character advection config_vert_tracer_adv stencil
-namelist integer advection config_vert_tracer_adv_order 4
-namelist integer advection config_tracer_adv_order 2
+namelist integer advection config_vert_tracer_adv_order 4
+namelist integer advection config_horiz_tracer_adv_order 2
namelist integer advection config_thickness_adv_order 2
-namelist logical advection config_positive_definite false
+namelist real advection config_coef_3rd_order 0.25
namelist logical advection config_monotonic false
+namelist logical advection config_check_monotonicity false
namelist logical restore config_restoreTS false
namelist real restore config_restoreT_timescale 90.0
namelist real restore config_restoreS_timescale 90.0
@@ -82,6 +93,7 @@
dim nEdges nEdges
dim maxEdges maxEdges
dim maxEdges2 maxEdges2
+dim nAdvectionCells maxEdges2+0
dim nVertices nVertices
dim TWO 2
dim R3 3
@@ -120,6 +132,7 @@
var persistent real meshDensity ( nCells ) 0 iro meshDensity mesh - -
var persistent real meshScalingDel2 ( nEdges ) 0 ro meshScalingDel2 mesh - -
var persistent real meshScalingDel4 ( nEdges ) 0 ro meshScalingDel4 mesh - -
+var persistent real meshScaling ( nEdges ) 0 ro meshScaling mesh - -
var persistent integer cellsOnEdge ( TWO nEdges ) 0 iro cellsOnEdge mesh - -
var persistent integer nEdgesOnCell ( nCells ) 0 iro nEdgesOnCell mesh - -
@@ -149,9 +162,17 @@
var persistent real h_s ( nCells ) 0 iro h_s mesh - -
% Space needed for advection
-var persistent real deriv_two ( FIFTEEN TWO nEdges ) 0 - deriv_two mesh - -
-var persistent integer advCells ( TWENTYONE nCells ) 0 - advCells mesh - -
+var persistent real deriv_two ( maxEdges2 TWO nEdges ) 0 - deriv_two mesh - -
+% Added for monotonic advection scheme
+var persistent real adv_coefs ( nAdvectionCells nEdges ) 0 - adv_coefs mesh - -
+var persistent real adv_coefs_2nd ( nAdvectionCells nEdges ) 0 - adv_coefs_2nd mesh - -
+var persistent real adv_coefs_3rd ( nAdvectionCells nEdges ) 0 - adv_coefs_3rd mesh - -
+var persistent integer advCellsForEdge ( nAdvectionCells nEdges ) 0 - advCellsForEdge mesh - -
+var persistent integer nAdvCellsForEdge ( nEdges ) 0 - nAdvCellsForEdge mesh - -
+var persistent integer highOrderAdvectionMask ( nVertLevels nEdges ) 0 - highOrderAdvectionMask mesh - -
+var persistent integer lowOrderAdvectionMask ( nVertLevels nEdges ) 0 - lowOrderAdvectionMask mesh - -
+
% !! NOTE: the following arrays are needed to allow the use
% !! of the module_advection.F w/o alteration
% Space needed for deformation calculation weights
@@ -171,6 +192,7 @@
var persistent real referenceBottomDepth ( nVertLevels ) 0 iro referenceBottomDepth mesh - -
var persistent real referenceBottomDepthTopOfCell ( nVertLevelsP1 ) 0 - referenceBottomDepthTopOfCell mesh - -
var persistent real hZLevel ( nVertLevels ) 0 iro hZLevel mesh - -
+var persistent real zstarWeight ( nVertLevels ) 0 - zstarWeight mesh - -
% Boundary conditions: read from input, saved in restart and written to output
var persistent integer boundaryEdge ( nVertLevels nEdges ) 0 iro boundaryEdge mesh - -
@@ -211,17 +233,27 @@
% Diagnostic fields: only written to output
var persistent real zMid ( nVertLevels nCells Time ) 2 io zMid state - -
var persistent real v ( nVertLevels nEdges Time ) 2 - v state - -
+var persistent real uTransport ( nVertLevels nEdges Time ) 2 - uTransport state - -
+var persistent real uBolusGM ( nVertLevels nEdges Time ) 2 - uBolusGM state - -
+var persistent real uBolusGMX ( nVertLevels nEdges Time ) 2 - uBolusGMX state - -
+var persistent real uBolusGMY ( nVertLevels nEdges Time ) 2 - uBolusGMY state - -
+var persistent real uBolusGMZ ( nVertLevels nEdges Time ) 2 - uBolusGMZ state - -
+var persistent real uBolusGMZonal ( nVertLevels nEdges Time ) 2 o uBolusGMZonal state - -
+var persistent real uBolusGMMeridional ( nVertLevels nEdges Time ) 2 o uBolusGMMeridional state - -
+var persistent real hEddyFlux ( nVertLevels nEdges Time ) 2 - hEddyFlux state - -
+var persistent real h_kappa ( nVertLevels nEdges Time ) 2 - h_kappa state - -
+var persistent real h_kappa_q ( nVertLevels nEdges Time ) 2 - h_kappa_q state - -
var persistent real divergence ( nVertLevels nCells Time ) 2 o divergence state - -
var persistent real vorticity ( nVertLevels nVertices Time ) 2 o vorticity state - -
-var persistent real pv_edge ( nVertLevels nEdges Time ) 2 - pv_edge state - -
+var persistent real Vor_edge ( nVertLevels nEdges Time ) 2 - Vor_edge state - -
var persistent real h_edge ( nVertLevels nEdges Time ) 2 - h_edge state - -
var persistent real h_vertex ( nVertLevels nVertices Time ) 2 - h_vertex state - -
var persistent real ke ( nVertLevels nCells Time ) 2 o ke state - -
var persistent real kev ( nVertLevels nVertices Time ) 2 o kev state - -
var persistent real kevc ( nVertLevels nCells Time ) 2 o kevc state - -
var persistent real ke_edge ( nVertLevels nEdges Time ) 2 - ke_edge state - -
-var persistent real pv_vertex ( nVertLevels nVertices Time ) 2 - pv_vertex state - -
-var persistent real pv_cell ( nVertLevels nCells Time ) 2 - pv_cell state - -
+var persistent real Vor_vertex ( nVertLevels nVertices Time ) 2 - Vor_vertex state - -
+var persistent real Vor_cell ( nVertLevels nCells Time ) 2 o Vor_cell state - -
var persistent real uReconstructX ( nVertLevels nCells Time ) 2 - uReconstructX state - -
var persistent real uReconstructY ( nVertLevels nCells Time ) 2 - uReconstructY state - -
var persistent real uReconstructZ ( nVertLevels nCells Time ) 2 - uReconstructZ state - -
@@ -235,8 +267,8 @@
% Other diagnostic variables: neither read nor written to any files
var persistent real vh ( nVertLevels nEdges Time ) 2 - vh state - -
var persistent real circulation ( nVertLevels nVertices Time ) 2 - circulation state - -
-var persistent real gradPVt ( nVertLevels nEdges Time ) 2 - gradPVt state - -
-var persistent real gradPVn ( nVertLevels nEdges Time ) 2 - gradPVn state - -
+var persistent real gradVor_t ( nVertLevels nEdges Time ) 2 - gradVor_t state - -
+var persistent real gradVor_n ( nVertLevels nEdges Time ) 2 - gradVor_n state - -
% Globally reduced diagnostic variables: only written to output
var persistent real areaCellGlobal ( Time ) 2 o areaCellGlobal state - -
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_advection.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_advection.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_advection.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -9,19 +9,25 @@
contains
- subroutine ocn_initialize_advection_rk( grid )!{{{
+ subroutine ocn_initialize_advection_rk( grid, err )!{{{
!
! compute the cell coefficients for the polynomial fit.
! this is performed during setup for model integration.
! WCS, 31 August 2009
!
+! Described in:
+! Skamarock, W. C., & Gassmann, A. (2011).
+! Conservative Transport Schemes for Spherical Geodesic Grids: High-Order Flux Operators for ODE-Based Time Integration.
+! Monthly Weather Review, 139(9), 2962-2975. doi:10.1175/MWR-D-10-05056.1
+!
implicit none
type (mesh_type), intent(in) :: grid
+ integer, intent(out) :: err
real (kind=RKIND), dimension(:,:,:), pointer :: deriv_two
- integer, dimension(:,:), pointer :: advCells
+ integer, dimension(:), pointer :: advCells
! local variables
@@ -50,9 +56,7 @@
integer :: cell1, cell2
integer, parameter :: polynomial_order = 2
-! logical, parameter :: debug = .true.
logical, parameter :: debug = .false.
-! logical, parameter :: least_squares = .false.
logical, parameter :: least_squares = .true.
logical :: add_the_cell, do_the_cell
@@ -61,11 +65,19 @@
real (kind=RKIND) :: rcell, cos2t, costsint, sin2t
real (kind=RKIND), dimension(grid%maxEdges) :: angle_2d
-!---
+!---
+ err = 0
+ if(polynomial_order > 2) then
+ write (*,*) 'Polynomial for second derivitave can only be 2'
+ err = 1
+ return
+ end if
+
pii = 2.*asin(1.0)
- advCells => grid % advCells % array
+! advCells => grid % advCells % array
+ allocate(advCells(grid % maxEdges2))
deriv_two => grid % deriv_two % array
deriv_two(:,:,:) = 0.
@@ -93,7 +105,7 @@
end do
end if
- advCells(1,iCell) = n
+ advCells(1) = n
! check to see if we are reaching outside the halo
@@ -110,10 +122,10 @@
if ( grid % on_a_sphere ) then
do i=1,n
- advCells(i+1,iCell) = cell_list(i)
- xc(i) = grid % xCell % array(advCells(i+1,iCell))/a
- yc(i) = grid % yCell % array(advCells(i+1,iCell))/a
- zc(i) = grid % zCell % array(advCells(i+1,iCell))/a
+ advCells(i+1) = cell_list(i)
+ xc(i) = grid % xCell % array(advCells(i+1))/grid % sphere_radius
+ yc(i) = grid % yCell % array(advCells(i+1))/grid % sphere_radius
+ zc(i) = grid % zCell % array(advCells(i+1))/grid % sphere_radius
end do
theta_abs(iCell) = pii/2. - sphere_angle( xc(1), yc(1), zc(1), &
@@ -131,7 +143,7 @@
xc(i+1), yc(i+1), zc(i+1), &
xc(ip2), yc(ip2), zc(ip2) )
- dl_sphere(i) = a*arc_length( xc(1), yc(1), zc(1), &
+ dl_sphere(i) = grid % sphere_radius*arc_length( xc(1), yc(1), zc(1), &
xc(i+1), yc(i+1), zc(i+1) )
end do
@@ -160,8 +172,8 @@
if ( iCell .ne. grid % CellsOnEdge % array(1,iEdge)) &
angle_2d(i) = angle_2d(i) - pii
-! xp(i) = grid % xCell % array(cell_list(i)) - grid % xCell % array(iCell)
-! yp(i) = grid % yCell % array(cell_list(i)) - grid % yCell % array(iCell)
+! xp(i) = grid % xCell % array(cell_list(i)) - grid % xCell % array(iCell)
+! yp(i) = grid % yCell % array(cell_list(i)) - grid % yCell % array(iCell)
xp(i) = grid % dcEdge % array(grid % EdgesOnCell % array(i,iCell)) * cos(angle_2d(i))
yp(i) = grid % dcEdge % array(grid % EdgesOnCell % array(i,iCell)) * sin(angle_2d(i))
@@ -262,12 +274,12 @@
if (ip1 > n-1) ip1 = 1
iEdge = grid % EdgesOnCell % array (i,iCell)
- xv1 = grid % xVertex % array(grid % verticesOnEdge % array (1,iedge))/a
- yv1 = grid % yVertex % array(grid % verticesOnEdge % array (1,iedge))/a
- zv1 = grid % zVertex % array(grid % verticesOnEdge % array (1,iedge))/a
- xv2 = grid % xVertex % array(grid % verticesOnEdge % array (2,iedge))/a
- yv2 = grid % yVertex % array(grid % verticesOnEdge % array (2,iedge))/a
- zv2 = grid % zVertex % array(grid % verticesOnEdge % array (2,iedge))/a
+ xv1 = grid % xVertex % array(grid % verticesOnEdge % array (1,iedge))/grid % sphere_radius
+ yv1 = grid % yVertex % array(grid % verticesOnEdge % array (1,iedge))/grid % sphere_radius
+ zv1 = grid % zVertex % array(grid % verticesOnEdge % array (1,iedge))/grid % sphere_radius
+ xv2 = grid % xVertex % array(grid % verticesOnEdge % array (2,iedge))/grid % sphere_radius
+ yv2 = grid % yVertex % array(grid % verticesOnEdge % array (2,iedge))/grid % sphere_radius
+ zv2 = grid % zVertex % array(grid % verticesOnEdge % array (2,iedge))/grid % sphere_radius
if ( grid % on_a_sphere ) then
call ocn_arc_bisect( xv1, yv1, zv1, &
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_equation_of_state.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_equation_of_state.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_equation_of_state.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -14,6 +14,7 @@
module ocn_equation_of_state
+ use mpas_kind_types
use mpas_grid_types
use mpas_configure
use ocn_equation_of_state_linear
@@ -86,7 +87,7 @@
type (mesh_type), intent(in) :: grid
integer, intent(out) :: err
integer :: k_displaced
- character(len=8), intent(in) :: displacement_type
+ character(len=*), intent(in) :: displacement_type
integer, dimension(:), pointer :: maxLevelCell
real (kind=RKIND), dimension(:,:), pointer :: rho
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_equation_of_state_jm.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_equation_of_state_jm.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_equation_of_state_jm.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -14,6 +14,7 @@
module ocn_equation_of_state_jm
+ use mpas_kind_types
use mpas_grid_types
use mpas_configure
@@ -85,7 +86,7 @@
type (mesh_type), intent(in) :: grid
integer :: k_displaced, indexT, indexS
- character(len=8), intent(in) :: displacement_type
+ character(len=*), intent(in) :: displacement_type
integer, intent(out) :: err
type (dm_info) :: dminfo
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_global_diagnostics.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_global_diagnostics.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_global_diagnostics.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -46,13 +46,15 @@
real (kind=RKIND) :: volumeCellGlobal, volumeEdgeGlobal, CFLNumberGlobal, localCFL, localSum, areaCellGlobal, areaEdgeGlobal, areaTriangleGlobal
real (kind=RKIND), dimension(:), pointer :: areaCell, dcEdge, dvEdge, areaTriangle, areaEdge
- real (kind=RKIND), dimension(:,:), pointer :: h, u, v, h_edge, circulation, vorticity, ke, pv_edge, pv_vertex, &
- pv_cell, gradPVn, gradPVt, pressure, MontPot, wTop, rho, tracerTemp
+ real (kind=RKIND), dimension(:,:), pointer :: h, u, v, h_edge, vorticity, ke, Vor_edge, Vor_vertex, &
+ Vor_cell, gradVor_n, gradVor_t, pressure, MontPot, wTop, rho, tracerTemp
real (kind=RKIND), dimension(:,:,:), pointer :: tracers
real (kind=RKIND), dimension(kMaxVariables) :: sums, mins, maxes, averages, verticalSumMins, verticalSumMaxes, reductions
real (kind=RKIND), dimension(kMaxVariables) :: sums_tmp, mins_tmp, maxes_tmp, averages_tmp, verticalSumMins_tmp, verticalSumMaxes_tmp
+ real (kind=RKIND), dimension(:,:), allocatable :: enstrophy
+
block => domain % blocklist
dminfo => domain % dminfo
@@ -90,14 +92,13 @@
v => state % v % array
wTop => state % wTop % array
h_edge => state % h_edge % array
- circulation => state % circulation % array
vorticity => state % vorticity % array
ke => state % ke % array
- pv_edge => state % pv_edge % array
- pv_vertex => state % pv_vertex % array
- pv_cell => state % pv_cell % array
- gradPVn => state % gradPVn % array
- gradPVt => state % gradPVt % array
+ Vor_edge => state % Vor_edge % array
+ Vor_vertex => state % Vor_vertex % array
+ Vor_cell => state % Vor_cell % array
+ gradVor_n => state % gradVor_n % array
+ gradVor_t => state % gradVor_t % array
MontPot => state % MontPot % array
pressure => state % pressure % array
@@ -144,9 +145,9 @@
verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex))
verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex))
- ! circulation
+ ! vorticity
variableIndex = variableIndex + 1
- call ocn_compute_field_local_stats(dminfo, nVertLevels, nVerticesSolve, circulation(:,1:nVerticesSolve), &
+ call ocn_compute_field_local_stats(dminfo, nVertLevels, nVerticesSolve, vorticity(:,1:nVerticesSolve), &
sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), verticalSumMaxes_tmp(variableIndex))
sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex)
mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex))
@@ -154,11 +155,14 @@
verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex))
verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex))
- ! vorticity
+ ! vorticity**2
+ allocate(enstrophy(nVertLevels,nVerticesSolve))
+ enstrophy(:,:)=vorticity(:,1:nVerticesSolve)**2
variableIndex = variableIndex + 1
call ocn_compute_field_area_weighted_local_stats(dminfo, nVertLevels, nVerticesSolve, areaTriangle(1:nVerticesSolve), &
- vorticity(:,1:nVerticesSolve), sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), &
+ enstrophy(:,:), sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), &
verticalSumMins_tmp(variableIndex), verticalSumMaxes_tmp(variableIndex))
+ deallocate(enstrophy)
sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex)
mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex))
maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex))
@@ -176,10 +180,10 @@
verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex))
verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex))
- ! pv_edge
+ ! Vor_edge
variableIndex = variableIndex + 1
call ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels, nEdgesSolve, areaEdge(1:nEdgesSolve), h_edge(:,1:nEdgesSolve), &
- pv_edge(:,1:nEdgesSolve), sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), &
+ Vor_edge(:,1:nEdgesSolve), sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), &
verticalSumMaxes_tmp(variableIndex))
sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex)
mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex))
@@ -187,10 +191,10 @@
verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex))
verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex))
- ! pv_vertex
+ ! Vor_vertex
variableIndex = variableIndex + 1
call ocn_compute_field_area_weighted_local_stats(dminfo, nVertLevels, nVerticesSolve, areaTriangle(1:nVerticesSolve), &
- pv_vertex(:,1:nVerticesSolve), sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), &
+ Vor_vertex(:,1:nVerticesSolve), sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), &
verticalSumMins_tmp(variableIndex), verticalSumMaxes_tmp(variableIndex))
sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex)
mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex))
@@ -198,10 +202,10 @@
verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex))
verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex))
- ! pv_cell
+ ! Vor_cell
variableIndex = variableIndex + 1
call ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels, nCellsSolve, areaCell(1:nCellsSolve), h(:,1:nCellsSolve), &
- pv_cell(:,1:nCellsSolve), sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), &
+ Vor_cell(:,1:nCellsSolve), sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), &
verticalSumMaxes_tmp(variableIndex))
sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex)
mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex))
@@ -209,10 +213,10 @@
verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex))
verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex))
- ! gradPVn
+ ! gradVor_n
variableIndex = variableIndex + 1
call ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels, nEdgesSolve, areaEdge(1:nEdgesSolve), h_edge(:,1:nEdgesSolve), &
- gradPVn(:,1:nEdgesSolve), sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), &
+ gradVor_n(:,1:nEdgesSolve), sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), &
verticalSumMaxes_tmp(variableIndex))
sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex)
mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex))
@@ -220,10 +224,10 @@
verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex))
verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex))
- ! gradPVt
+ ! gradVor_t
variableIndex = variableIndex + 1
call ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels, nEdgesSolve, areaEdge(1:nEdgesSolve), h_edge(:,1:nEdgesSolve), &
- gradPVt(:,1:nEdgesSolve), sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), &
+ gradVor_t(:,1:nEdgesSolve), sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), &
verticalSumMaxes_tmp(variableIndex))
sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex)
mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex))
@@ -362,7 +366,7 @@
variableIndex = variableIndex + 1
averages(variableIndex) = sums(variableIndex)/(areaEdgeGlobal*nVertLevels)
- ! circulation
+ ! vorticity
variableIndex = variableIndex + 1
averages(variableIndex) = sums(variableIndex)/(nVerticesGlobal*nVertLevels)
@@ -374,23 +378,23 @@
variableIndex = variableIndex + 1
averages(variableIndex) = sums(variableIndex)/volumeCellGlobal
- ! pv_edge
+ ! Vor_edge
variableIndex = variableIndex + 1
averages(variableIndex) = sums(variableIndex)/volumeEdgeGlobal
- ! pv_vertex
+ ! Vor_vertex
variableIndex = variableIndex + 1
averages(variableIndex) = sums(variableIndex)/(areaTriangleGlobal*nVertLevels)
- ! pv_cell
+ ! Vor_cell
variableIndex = variableIndex + 1
averages(variableIndex) = sums(variableIndex)/volumeCellGlobal
- ! gradPVn
+ ! gradVor_n
variableIndex = variableIndex + 1
averages(variableIndex) = sums(variableIndex)/volumeEdgeGlobal
- ! gradPVt
+ ! gradVor_t
variableIndex = variableIndex + 1
averages(variableIndex) = sums(variableIndex)/volumeEdgeGlobal
Copied: branches/atmos_physics/src/core_ocean/mpas_ocn_gm.F (from rev 1863, trunk/mpas/src/core_ocean/mpas_ocn_gm.F)
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_gm.F         (rev 0)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_gm.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -0,0 +1,142 @@
+module ocn_gm
+
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_timer
+
+ implicit none
+ private
+ save
+
+ !--------------------------------------------------------------------
+ !
+ ! Public parameters
+ !
+ !--------------------------------------------------------------------
+
+ !--------------------------------------------------------------------
+ !
+ ! Public member functions
+ !
+ !--------------------------------------------------------------------
+
+ public :: ocn_gm_compute_uBolus
+
+ !--------------------------------------------------------------------
+ !
+ ! Private module variables
+ !
+ !--------------------------------------------------------------------
+
+contains
+
+ subroutine ocn_gm_compute_uBolus(s, grid)
+ implicit none
+ type(state_type), intent(inout) :: s
+ type(mesh_type), intent(in) :: grid
+
+ real(kind=RKIND), dimension(:,:), pointer :: uBolusGM, hEddyFlux, h_edge
+
+ integer, dimension(:), pointer :: maxLevelEdgeTop
+ integer :: k, iEdge, nEdges
+
+ uBolusGM => s % uBolusGM % array
+ h_edge => s % h_edge % array
+ hEddyFlux => s % hEddyFlux % array
+
+ maxLevelEdgeTop => grid % maxLevelEdgeTop % array
+
+ nEdges = grid % nEdges
+
+ call ocn_gm_compute_hEddyFlux(s, grid)
+
+ if (config_vert_grid_type .EQ. 'isopycnal') then
+
+ do iEdge = 1, nEdges
+ do k = 1, maxLevelEdgeTop(iEdge)
+ uBolusGM(k,iEdge) = hEddyFlux(k,iEdge)/h_edge(k,iEdge)
+ end do
+ end do
+
+ else
+
+ ! Nothing for now for all other grid types (zlevel, zstar, ztilde)
+ uBolusGM(:,:) = 0.0
+
+ end if
+
+ end subroutine ocn_gm_compute_uBolus
+
+
+ subroutine ocn_gm_compute_hEddyFlux(s, grid)
+ implicit none
+ type(state_type), intent(inout) :: s
+ type(mesh_type), intent(in) :: grid
+
+ real(kind=RKIND), dimension(:,:), pointer :: hEddyFlux, h
+ real(kind=RKIND), dimension(:), pointer :: dcEdge
+ integer, dimension(:,:), pointer :: cellsOnEdge
+ integer, dimension(:), pointer :: maxLevelEdgeTop
+ integer :: k, cell1, cell2, iEdge, nEdges
+
+ hEddyFlux => s % hEddyFlux % array
+ h => s % h % array
+
+ dcEdge => grid % dcEdge % array
+ cellsOnEdge => grid % cellsOnEdge % array
+ maxLevelEdgeTop => grid % maxLevelEdgeTop % array
+
+ nEdges = grid % nEdges
+
+ hEddyFlux(:,:) = 0.0
+
+ if (config_vert_grid_type .EQ. 'isopycnal') then
+ do iEdge = 1,nEdges
+ cell1 = cellsOnEdge(1,iEdge)
+ cell2 = cellsOnEdge(2,iEdge)
+ do k=1,maxLevelEdgeTop(iEdge)
+ hEddyFlux(k,iEdge) = -config_h_kappa * (h(k,cell2) - h(k,cell1)) / dcEdge(iEdge)
+ end do
+ end do
+ else
+
+ !Nothing for now for all other grid types (zlevel, zstar, ztilde)
+
+ end if
+
+ end subroutine ocn_gm_compute_hEddyFlux
+
+
+
+ subroutine ocn_get_h_kappa(s, grid)
+
+ type (state_type), intent(inout) :: s
+ type (mesh_type), intent(in) :: grid
+
+ real(kind=RKIND), dimension(:,:), pointer :: h_kappa
+
+
+ h_kappa => s % h_kappa % array
+
+ h_kappa(:,:) = config_h_kappa
+
+
+ end subroutine ocn_get_h_kappa
+
+
+ subroutine ocn_get_h_kappa_q(s, grid)
+
+ type (state_type), intent(inout) :: s
+ type (mesh_type), intent(in) :: grid
+
+ real(kind=RKIND), dimension(:,:), pointer :: h_kappa_q
+
+
+ h_kappa_q => s % h_kappa_q % array
+
+ h_kappa_q(:,:) = config_h_kappa_q
+
+
+ end subroutine ocn_get_h_kappa_q
+
+end module ocn_gm
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_mpas_core.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_mpas_core.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_mpas_core.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -19,6 +19,7 @@
use ocn_tracer_hadv
use ocn_tracer_vadv
use ocn_tracer_hmix
+ use ocn_gm
use ocn_restoring
use ocn_equation_of_state
@@ -27,7 +28,7 @@
use ocn_time_average
- type (io_output_object) :: restart_obj
+ type (io_output_object), save :: restart_obj
integer :: current_outfile_frames
@@ -45,6 +46,7 @@
subroutine mpas_core_init(domain, startTimeStamp)!{{{
use mpas_grid_types
+ use mpas_ocn_tracer_advection
implicit none
@@ -88,6 +90,9 @@
call ocn_tendency_init(err_tmp)
err = ior(err,err_tmp)
+ call mpas_ocn_tracer_advection_init(err_tmp)
+ err = ior(err,err_tmp)
+
call mpas_timer_init(domain)
if(err.eq.1) then
@@ -100,6 +105,12 @@
call ocn_init_z_level(domain)
+ if (config_enforce_zstar_at_restart) then
+ call ocn_init_h_zstar(domain)
+ endif
+
+ call ocn_init_split_timestep(domain)
+
print *, ' Vertical grid type is: ',config_vert_grid_type
if (config_vert_grid_type.ne.'isopycnal'.and. &
@@ -134,7 +145,10 @@
block => domain % blocklist
do while (associated(block))
- call mpas_init_block(block, block % mesh, dt)
+ call mpas_init_block(block, block % mesh, dt, err)
+ if(err.eq.1) then
+ call mpas_dmpar_abort(dminfo)
+ endif
block % state % time_levs(1) % state % xtime % scalar = startTimeStamp
block => block % next
end do
@@ -213,25 +227,38 @@
end subroutine ocn_simulation_clock_init!}}}
- subroutine mpas_init_block(block, mesh, dt)!{{{
+ subroutine mpas_init_block(block, mesh, dt, err)!{{{
use mpas_grid_types
use mpas_rbf_interpolation
use mpas_vector_reconstruction
+ use mpas_ocn_tracer_advection
+ use ocn_advection
implicit none
type (block_type), intent(inout) :: block
type (mesh_type), intent(inout) :: mesh
real (kind=RKIND), intent(in) :: dt
+ integer, intent(out) :: err
integer :: i, iEdge, iCell, k
+ integer :: err1
+ call ocn_initialize_advection_rk(mesh, err)
+ call mpas_ocn_tracer_advection_coefficients(mesh, err1)
+ err = ior(err, err1)
+
call ocn_time_average_init(block % state % time_levs(1) % state)
call mpas_timer_start("diagnostic solve", .false., initDiagSolveTimer)
call ocn_diagnostic_solve(dt, block % state % time_levs(1) % state, mesh)
call mpas_timer_stop("diagnostic solve", initDiagSolveTimer)
+ ! Compute velocity transport, used in advection terms of h and tracer tendency
+ block % state % time_levs(1) % state % uTransport % array(:,:) &
+ = block % state % time_levs(1) % state % u % array(:,:) &
+ + block % state % time_levs(1) % state % uBolusGM % array(:,:)
+
call ocn_wtop(block % state % time_levs(1) % state,block % state % time_levs(1) % state, mesh)
call ocn_compute_mesh_scaling(mesh)
@@ -272,33 +299,18 @@
:block % mesh % nVertLevels,iCell) = 0.0
! mrp changed to 0
! :block % mesh % nVertLevels,iCell) = -1e34
-
-! mrp 110516, added just to test for conservation of tracers
- block % state % time_levs(1) % state % tracers % array(block % state % time_levs(1) % state % index_tracer1,:,iCell) = 1.0
-
end do
- if (.not. config_do_restart) then
-
-! mrp 110808 add, so that variables are copied to * variables for split explicit
- do i=2,nTimeLevs
- call mpas_copy_state(block % state % time_levs(i) % state, &
+ do i=2,nTimeLevs
+ call mpas_copy_state(block % state % time_levs(i) % state, &
block % state % time_levs(1) % state)
- end do
-! mrp 110808 add end
+ end do
-
- else
- do i=2,nTimeLevs
- call mpas_copy_state(block % state % time_levs(i) % state, &
- block % state % time_levs(1) % state)
- end do
- endif
-
end subroutine mpas_init_block!}}}
subroutine mpas_core_run(domain, output_obj, output_frame)!{{{
+ use mpas_kind_types
use mpas_grid_types
use mpas_io_output
use mpas_timer
@@ -314,7 +326,7 @@
type (block_type), pointer :: block_ptr
type (MPAS_Time_Type) :: currTime
- character(len=32) :: timeStamp
+ character(len=StrKIND) :: timeStamp
integer :: ierr
! Eventually, dt should be domain specific
@@ -322,7 +334,7 @@
currTime = mpas_get_clock_time(clock, MPAS_NOW, ierr)
call mpas_get_time(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr)
- write(0,*) 'Initial time ', timeStamp
+ write(0,*) 'Initial time ', trim(timeStamp)
call ocn_write_output_frame(output_obj, output_frame, domain)
block_ptr => domain % blocklist
@@ -342,7 +354,7 @@
currTime = mpas_get_clock_time(clock, MPAS_NOW, ierr)
call mpas_get_time(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr)
- write(0,*) 'Doing timestep ', timeStamp
+ write(0,*) 'Doing timestep ', trim(timeStamp)
call mpas_timer_start("time integration", .false., timeIntTimer)
call mpas_timestep(domain, itimestep, dt, timeStamp)
@@ -452,6 +464,7 @@
subroutine mpas_timestep(domain, itimestep, dt, timeStamp)!{{{
+ use mpas_kind_types
use mpas_grid_types
implicit none
@@ -494,7 +507,7 @@
end subroutine mpas_timestep!}}}
subroutine ocn_init_z_level(domain)!{{{
- ! Initialize maxLevel and bouncary grid variables.
+ ! Initialize zlevel-type variables
use mpas_grid_types
use mpas_configure
@@ -508,8 +521,9 @@
integer :: iTracer, cell, cell1, cell2
real (kind=RKIND) :: uhSum, hSum, hEdge1
- real (kind=RKIND), dimension(:), pointer :: &
- referenceBottomDepth, referenceBottomDepthTopOfCell
+ real (kind=RKIND), dimension(:), pointer :: referenceBottomDepth, &
+ referenceBottomDepthTopOfCell, zstarWeight, hZLevel
+
real (kind=RKIND), dimension(:,:), pointer :: h
integer :: nVertLevels
@@ -520,6 +534,8 @@
h => block % state % time_levs(1) % state % h % array
referenceBottomDepth => block % mesh % referenceBottomDepth % array
referenceBottomDepthTopOfCell => block % mesh % referenceBottomDepthTopOfCell % array
+ zstarWeight => block % mesh % zstarWeight % array
+ hZLevel => block % mesh % hZLevel % array
nVertLevels = block % mesh % nVertLevels
! mrp 120208 right now hZLevel is in the grid.nc file.
@@ -527,9 +543,9 @@
! as the defining variable instead, and will transition soon.
! When the transition is done, hZLevel can be removed from
! registry and the following four lines deleted.
- referenceBottomDepth(1) = block % mesh % hZLevel % array(1)
+ referenceBottomDepth(1) = hZLevel(1)
do k = 2,nVertLevels
- referenceBottomDepth(k) = referenceBottomDepth(k-1) + block % mesh % hZLevel % array(k)
+ referenceBottomDepth(k) = referenceBottomDepth(k-1) + hZLevel(k)
end do
! TopOfCell needed where zero depth for the very top may be referenced.
@@ -538,6 +554,65 @@
referenceBottomDepthTopOfCell(k+1) = referenceBottomDepth(k)
end do
+ ! Initialization of zstarWeights. This determines how SSH perturbations
+ ! are distributed throughout the column.
+ if (config_vert_grid_type.eq.'zlevel') then
+
+ zstarWeight = 0.0
+ zstarWeight(1) = 1.0
+
+ elseif (config_vert_grid_type.eq.'zstar') then
+
+ do k = 1,nVertLevels
+ zstarWeight(k) = hZLevel(k)
+ enddo
+
+ elseif (config_vert_grid_type.eq.'zstarWeights') then
+
+ ! This is a test with other weights, just to make sure zstar functions
+ ! using variable weights.
+
+ zstarWeight = 0.0
+ zstarWeight(1:5) = 1.0
+ do k=1,10
+ zstarWeight(5+k) = 1.0-k*0.1
+ end do
+
+ endif
+
+ block => block % next
+ end do
+
+ end subroutine ocn_init_z_level!}}}
+
+ subroutine ocn_init_split_timestep(domain)!{{{
+ ! Initialize splitting variables
+
+ use mpas_grid_types
+ use mpas_configure
+
+ implicit none
+
+ type (domain_type), intent(inout) :: domain
+
+ integer :: i, iCell, iEdge, iVertex, k
+ type (block_type), pointer :: block
+
+ integer :: iTracer, cell, cell1, cell2
+ real (kind=RKIND) :: uhSum, hSum, hEdge1
+ real (kind=RKIND), dimension(:), pointer :: referenceBottomDepth
+
+ real (kind=RKIND), dimension(:,:), pointer :: h
+ integer :: nVertLevels
+
+ ! Initialize z-level grid variables from h, read in from input file.
+ block => domain % blocklist
+ do while (associated(block))
+
+ h => block % state % time_levs(1) % state % h % array
+ referenceBottomDepth => block % mesh % referenceBottomDepth % array
+ nVertLevels = block % mesh % nVertLevels
+
! Compute barotropic velocity at first timestep
! This is only done upon start-up.
if (trim(config_time_integration) == 'unsplit_explicit') then
@@ -613,8 +688,66 @@
block => block % next
end do
- end subroutine ocn_init_z_level!}}}
+ end subroutine ocn_init_split_timestep!}}}
+ subroutine ocn_init_h_zstar(domain)!{{{
+ ! If changing from zlevel to zstar, compute h based on zstar weights,
+ ! where SSH is distributed through the layers. We only change h.
+ ! We do not remap the tracer variables, so this breaks total global
+ ! conservation.
+
+ use mpas_grid_types
+ use mpas_configure
+
+ implicit none
+
+ type (domain_type), intent(inout) :: domain
+
+ type (block_type), pointer :: block
+
+ integer :: i, iCell, iEdge, iVertex, k, nVertLevels
+ integer, dimension(:), pointer :: maxLevelCell
+
+ real (kind=RKIND) :: hSum, sumZstarWeights
+ real (kind=RKIND), dimension(:), pointer :: hZLevel, zstarWeight, &
+ referenceBottomDepth
+ real (kind=RKIND), dimension(:,:), pointer :: h
+
+ ! Initialize z-level grid variables from h, read in from input file.
+ block => domain % blocklist
+ do while (associated(block))
+
+ h => block % state % time_levs(1) % state % h % array
+ nVertLevels = block % mesh % nVertLevels
+ hZLevel => block % mesh % hZLevel % array
+ maxLevelCell => block % mesh % maxLevelCell % array
+ zstarWeight => block % mesh % zstarWeight % array
+ referenceBottomDepth => block % mesh % referenceBottomDepth % array
+
+ do iCell=1,block % mesh % nCells
+ ! Compute the total column thickness, hSum, and the sum of zstar weights.
+ hSum = 0.0
+ sumZstarWeights = 0.0
+ do k = 1,maxLevelCell(iCell)
+ hSum = hSum + h(k,iCell)
+ sumZstarWeights = sumZstarWeights + zstarWeight(k)
+ enddo
+
+ ! h_k = h_k^{zlevel} + zeta * W_k/sum(W_k)
+ ! where zeta is SSH and W_k are weights
+ do k = 1,maxLevelCell(iCell)
+ h(k,iCell) = hZLevel(k) &
+ + (hSum - referenceBottomDepth(maxLevelCell(iCell))) &
+ * zstarWeight(k)/sumZstarWeights
+ enddo
+
+ enddo
+
+ block => block % next
+ end do
+
+ end subroutine ocn_init_h_zstar!}}}
+
subroutine ocn_compute_max_level(domain)!{{{
! Initialize maxLevel and bouncary grid variables.
@@ -732,8 +865,12 @@
if (boundaryEdge(k,iEdge).eq.1) then
boundaryCell(k,cellsOnEdge(1,iEdge)) = 1
boundaryCell(k,cellsOnEdge(2,iEdge)) = 1
- cellMask(k,cellsOnEdge(1,iEdge)) = 0
- cellMask(k,cellsOnEdge(2,iEdge)) = 0
+ if(maxLevelCell(cellsOnEdge(1,iEdge)) > k) then
+ cellMask(k, cellsOnEdge(1,iEdge)) = 0
+ end if
+ if(maxLevelCell(cellsOnEdge(2,iEdge)) > k) then
+ cellMask(k, cellsOnEdge(2,iEdge)) = 0
+ end if
boundaryVertex(k,verticesOnEdge(1,iEdge)) = 1
boundaryVertex(k,verticesOnEdge(2,iEdge)) = 1
vertexMask(k,verticesOnEdge(1,iEdge)) = 0
@@ -773,23 +910,26 @@
type (mesh_type), intent(inout) :: mesh
integer :: iEdge, cell1, cell2
- real (kind=RKIND), dimension(:), pointer :: meshDensity, meshScalingDel2, meshScalingDel4
+ real (kind=RKIND), dimension(:), pointer :: meshDensity, meshScalingDel2, meshScalingDel4, meshScaling
meshDensity => mesh % meshDensity % array
meshScalingDel2 => mesh % meshScalingDel2 % array
meshScalingDel4 => mesh % meshScalingDel4 % array
+ meshScaling => mesh % meshScaling % array
!
! Compute the scaling factors to be used in the del2 and del4 dissipation
!
meshScalingDel2(:) = 1.0
meshScalingDel4(:) = 1.0
+ meshScaling(:) = 1.0
if (config_h_ScaleWithMesh) then
do iEdge=1,mesh%nEdges
cell1 = mesh % cellsOnEdge % array(1,iEdge)
cell2 = mesh % cellsOnEdge % array(2,iEdge)
- meshScalingDel2(iEdge) = 1.0 / ( (meshDensity(cell1) + meshDensity(cell2) )/2.0)**(5.0/12.0)
- meshScalingDel4(iEdge) = 1.0 / ( (meshDensity(cell1) + meshDensity(cell2) )/2.0)**(5.0/6.0)
+ meshScalingDel2(iEdge) = 1.0 / ( (meshDensity(cell1) + meshDensity(cell2) )/2.0)**(3.0/4.0) ! goes as dc**3
+ meshScalingDel4(iEdge) = 1.0 / ( (meshDensity(cell1) + meshDensity(cell2) )/2.0)**(3.0/4.0) ! goes as dc**3
+ meshScaling(iEdge) = 1.0 / ( (meshDensity(cell1) + meshDensity(cell2) )/2.0)**(1.0/4.0)
end do
end if
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_tendency.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tendency.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tendency.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -21,8 +21,11 @@
use mpas_constants
use mpas_timer
+ use mpas_ocn_tracer_advection
+
use ocn_thick_hadv
use ocn_thick_vadv
+ use ocn_gm
use ocn_vel_coriolis
use ocn_vel_pressure_grad
@@ -97,30 +100,20 @@
!
!-----------------------------------------------------------------------
- subroutine ocn_tend_h(tend, s, d, grid)!{{{
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! Compute height and normal wind tendencies, as well as diagnostic variables
- !
- ! Input: s - current model state
- ! grid - grid metadata
- !
- ! Output: tend - computed tendencies for prognostic variables
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
+ subroutine ocn_tend_h(tend, s, grid)!{{{
implicit none
- type (tend_type), intent(inout) :: tend
- type (state_type), intent(in) :: s
- type (diagnostics_type), intent(in) :: d
- type (mesh_type), intent(in) :: grid
+ type (tend_type), intent(inout) :: tend !< Input/Output: Tendency structure
+ type (state_type), intent(in) :: s !< Input: State information
+ type (mesh_type), intent(in) :: grid !< Input: Grid information
- real (kind=RKIND), dimension(:,:), pointer :: h_edge, u, wTop, tend_h
+ real (kind=RKIND), dimension(:,:), pointer :: h_edge, wTop, tend_h, uTransport
integer :: err
call mpas_timer_start("ocn_tend_h")
- u => s % u % array
+ uTransport => s % uTransport % array
wTop => s % wTop % array
h_edge => s % h_edge % array
@@ -137,8 +130,10 @@
! See Ringler et al. (2010) jcp paper, eqn 19, 21, and fig. 3.
! for explanation of divergence operator.
!
+ ! QC Comment (3/15/12): need to make sure that uTransport is the right
+ ! transport velocity here.
call mpas_timer_start("hadv", .false., thickHadvTimer)
- call ocn_thick_hadv_tend(grid, u, h_edge, tend_h, err)
+ call ocn_thick_hadv_tend(grid, uTransport, h_edge, tend_h, err)
call mpas_timer_stop("hadv", thickHadvTimer)
!
@@ -166,25 +161,16 @@
!-----------------------------------------------------------------------
subroutine ocn_tend_u(tend, s, d, grid)!{{{
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! Compute height and normal wind tendencies, as well as diagnostic variables
- !
- ! Input: s - current model state
- ! grid - grid metadata
- !
- ! Output: tend - computed tendencies for prognostic variables
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
implicit none
- type (tend_type), intent(inout) :: tend
- type (state_type), intent(in) :: s
- type (diagnostics_type), intent(in) :: d
- type (mesh_type), intent(in) :: grid
+ type (tend_type), intent(inout) :: tend !< Input/Output: Tendency structure
+ type (state_type), intent(in) :: s !< Input: State information
+ type (diagnostics_type), intent(in) :: d !< Input: Diagnostic information
+ type (mesh_type), intent(in) :: grid !< Input: Grid information
real (kind=RKIND), dimension(:,:), pointer :: &
h_edge, h, u, rho, zMid, pressure, &
- tend_u, circulation, vorticity, ke, ke_edge, pv_edge, &
+ tend_u, circulation, vorticity, ke, ke_edge, Vor_edge, &
MontPot, wTop, divergence, vertViscTopOfEdge
real (kind=RKIND), dimension(:,:), pointer :: u_src
@@ -202,7 +188,7 @@
divergence => s % divergence % array
ke => s % ke % array
ke_edge => s % ke_edge % array
- pv_edge => s % pv_edge % array
+ Vor_edge => s % Vor_edge % array
MontPot => s % MontPot % array
pressure => s % pressure % array
vertViscTopOfEdge => d % vertViscTopOfEdge % array
@@ -222,7 +208,7 @@
!
call mpas_timer_start("coriolis", .false., velCorTimer)
- call ocn_vel_coriolis_tend(grid, pv_edge, h_edge, u, ke, tend_u, err)
+ call ocn_vel_coriolis_tend(grid, Vor_edge, h_edge, u, ke, tend_u, err)
call mpas_timer_stop("coriolis", velCorTimer)
!
@@ -286,35 +272,25 @@
!> This routine computes the scalar (tracer) tendency for the ocean
!
!-----------------------------------------------------------------------
-
- subroutine ocn_tend_scalar(tend, s, d, grid)!{{{
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !
- ! Input: s - current model state
- ! grid - grid metadata
- ! note: the variable s % tracers really contains the tracers,
- ! not tracers*h
- !
- ! Output: tend - computed scalar tendencies
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
+ subroutine ocn_tend_scalar(tend, s, d, grid, dt)!{{{
implicit none
- type (tend_type), intent(inout) :: tend
- type (state_type), intent(in) :: s
- type (diagnostics_type), intent(in) :: d
- type (mesh_type), intent(in) :: grid
+ type (tend_type), intent(inout) :: tend !< Input/Output: Tendency structure
+ type (state_type), intent(in) :: s !< Input: State information
+ type (diagnostics_type), intent(in) :: d !< Input: Diagnostic information
+ type (mesh_type), intent(in) :: grid !< Input: Grid information
+ real (kind=RKIND), intent(in) :: dt !< Input: Time step
real (kind=RKIND), dimension(:,:), pointer :: &
- u,h,wTop, h_edge, vertDiffTopOfCell
+ uTransport, h,wTop, h_edge, vertDiffTopOfCell, tend_h, uh
real (kind=RKIND), dimension(:,:,:), pointer :: &
tracers, tend_tr
- integer :: err
+ integer :: err, iEdge, k
call mpas_timer_start("ocn_tend_scalar")
- u => s % u % array
+ uTransport => s % uTransport % array
h => s % h % array
wTop => s % wTop % array
tracers => s % tracers % array
@@ -322,8 +298,19 @@
vertDiffTopOfCell => d % vertDiffTopOfCell % array
tend_tr => tend % tracers % array
+ tend_h => tend % h % array
+ allocate(uh(grid % nVertLevels, grid % nEdges+1))
!
+ ! QC Comment (3/15/12): need to make sure that uTransport is the right
+ ! transport velocity for the tracer.
+ do iEdge = 1, grid % nEdges
+ do k = 1, grid % nVertLevels
+ uh(k, iEdge) = uTransport(k, iEdge) * h_edge(k, iEdge)
+ end do
+ end do
+
+ !
! initialize tracer tendency (RHS of tracer equation) to zero.
!
tend_tr(:,:,:) = 0.0
@@ -336,20 +323,12 @@
! and then change maxLevelEdgeTop to maxLevelEdgeBot in the following section.
! tracer_edge at the boundary will also need to be defined for flux boundaries.
- call mpas_timer_start("hadv", .false., tracerHadvTimer)
- call ocn_tracer_hadv_tend(grid, u, h_edge, tracers, tend_tr, err)
- call mpas_timer_stop("hadv", tracerHadvTimer)
+ ! Monotonic Advection, or standard advection
+ call mpas_timer_start("adv", .false., tracerHadvTimer)
+ call mpas_ocn_tracer_advection_tend(tracers, uh, wTop, h, h, dt, grid, tend_h, tend_tr)
+ call mpas_timer_stop("adv", tracerHadvTimer)
-
!
- ! tracer tendency: vertical advection term -d/dz( h \phi w)
- !
-
- call mpas_timer_start("vadv", .false., tracerVadvTimer)
- call ocn_tracer_vadv_tend(grid, h, wtop, tracers, tend_tr, err)
- call mpas_timer_stop("vadv", tracerVadvTimer)
-
- !
! tracer tendency: del2 horizontal tracer diffusion, div(h \kappa_2 </font>
<font color="gray">\nabla \phi)
!
call mpas_timer_start("hmix", .false., tracerHmixTimer)
@@ -391,6 +370,8 @@
10 format(2i8,10e20.10)
call mpas_timer_stop("ocn_tend_scalar")
+ deallocate(uh)
+
end subroutine ocn_tend_scalar!}}}
!***********************************************************************
@@ -407,19 +388,11 @@
!-----------------------------------------------------------------------
subroutine ocn_diagnostic_solve(dt, s, grid)!{{{
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! Compute diagnostic fields used in the tendency computations
- !
- ! Input: grid - grid metadata
- !
- ! Output: s - computed diagnostics
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
implicit none
- real (kind=RKIND), intent(in) :: dt
- type (state_type), intent(inout) :: s
- type (mesh_type), intent(in) :: grid
+ real (kind=RKIND), intent(in) :: dt !< Input: Time step
+ type (state_type), intent(inout) :: s !< Input/Output: State information
+ type (mesh_type), intent(in) :: grid !< Input: Grid information
integer :: iEdge, iCell, iVertex, k, cell1, cell2, vertex1, vertex2, eoe, i, j
@@ -441,14 +414,16 @@
real (kind=RKIND), dimension(:,:), pointer :: &
weightsOnEdge, kiteAreasOnVertex, h_edge, h, u, v, pressure,&
circulation, vorticity, ke, ke_edge, MontPot, wTop, zMid, &
- pv_edge, pv_vertex, pv_cell, gradPVn, gradPVt, divergence, &
- rho, temperature, salinity, kev, kevc
+ Vor_edge, Vor_vertex, Vor_cell, gradVor_n, gradVor_t, divergence, &
+ rho, temperature, salinity, kev, kevc, uBolusGM, uTransport
real (kind=RKIND), dimension(:,:,:), pointer :: tracers, deriv_two
real (kind=RKIND), dimension(:,:), allocatable:: div_u
character :: c1*6
h => s % h % array
u => s % u % array
+ uTransport => s % uTransport % array
+ uBolusGM => s % uBolusGM % array
v => s % v % array
h_edge => s % h_edge % array
circulation => s % circulation % array
@@ -458,11 +433,11 @@
kev => s % kev % array
kevc => s % kevc % array
ke_edge => s % ke_edge % array
- pv_edge => s % pv_edge % array
- pv_vertex => s % pv_vertex % array
- pv_cell => s % pv_cell % array
- gradPVn => s % gradPVn % array
- gradPVt => s % gradPVt % array
+ Vor_edge => s % Vor_edge % array
+ Vor_vertex => s % Vor_vertex % array
+ Vor_cell => s % Vor_cell % array
+ gradVor_n => s % gradVor_n % array
+ gradVor_t => s % gradVor_t % array
rho => s % rho % array
MontPot => s % MontPot % array
pressure => s % pressure % array
@@ -500,6 +475,7 @@
boundaryCell => grid % boundaryCell % array
+
!
! Compute height on cell edges at velocity locations
! Namelist options control the order of accuracy of the reconstructed h_edge value
@@ -509,6 +485,7 @@
! initialize h_edge to avoid divide by zero and NaN problems.
h_edge = -1.0e34
+ coef_3rd_order = config_coef_3rd_order
do iEdge=1,nEdges*hadv2nd
cell1 = cellsOnEdge(1,iEdge)
@@ -609,8 +586,9 @@
invAreaTri1 = 1.0 / areaTriangle(vertex1)
invAreaTri2 = 1.0 / areaTriangle(vertex2)
- invAreaCell1 = 1.0 / areaCell(cell1)
- invAreaCell2 = 1.0 / areaCell(cell2)
+ !dwj: 02/23/12 areaCell(nCells+1) is still 0, this is a temporary fix
+ invAreaCell1 = 1.0 / max(areaCell(cell1), 1.0)
+ invAreaCell2 = 1.0 / max(areaCell(cell2), 1.0)
do k=1,maxLevelEdgeBot(iEdge)
! Compute circulation and relative vorticity at each vertex
@@ -663,7 +641,8 @@
do iVertex = 1, nVertices*ke_vertex_flag
do i=1,grid % vertexDegree
iCell = cellsOnVertex(i,iVertex)
- invAreaCell1 = 1.0 / areaCell(iCell)
+ !dwj: 02/23/12 areaCell(nCells+1) is still 0, this is a temporary fix
+ invAreaCell1 = 1.0 / max(areaCell(iCell), 1.0)
do k=1,nVertLevels
kevc(k,iCell) = kevc(k,iCell) + kiteAreasOnVertex(i, iVertex) * kev(k, iVertex) * invAreaCell1
enddo
@@ -694,7 +673,7 @@
!
! Compute height at vertices, pv at vertices, and average pv to edge locations
- ! ( this computes pv_vertex at all vertices bounding real cells and distance-1 ghost cells )
+ ! ( this computes Vor_vertex at all vertices bounding real cells and distance-1 ghost cells )
!
do iVertex = 1,nVertices
invAreaTri1 = 1.0 / areaTriangle(iVertex)
@@ -705,35 +684,36 @@
end do
h_vertex = h_vertex * invAreaTri1
- pv_vertex(k,iVertex) = (fCoef*fVertex(iVertex) + vorticity(k,iVertex)) / h_vertex
+ Vor_vertex(k,iVertex) = (fCoef*fVertex(iVertex) + vorticity(k,iVertex)) / h_vertex
end do
end do
- pv_cell(:,:) = 0.0
- pv_edge(:,:) = 0.0
+ Vor_cell(:,:) = 0.0
+ Vor_edge(:,:) = 0.0
do iVertex = 1,nVertices
do i=1,vertexDegree
iCell = cellsOnVertex(i,iVertex)
iEdge = edgesOnVertex(i,iVertex)
- invAreaCell1 = 1.0 / areaCell(iCell)
+ !dwj: 02/23/12 areaCell(nCells+1) is still 0, this is a temporary fix
+ invAreaCell1 = 1.0 / max(areaCell(iCell), 1.0)
! Compute pv at cell centers
- ! ( this computes pv_cell for all real cells and distance-1 ghost cells )
+ ! ( this computes Vor_cell for all real cells and distance-1 ghost cells )
do k = 1,maxLevelCell(iCell)
- pv_cell(k,iCell) = pv_cell(k,iCell) + kiteAreasOnVertex(i, iVertex) * pv_vertex(k, iVertex) * invAreaCell1
+ Vor_cell(k,iCell) = Vor_cell(k,iCell) + kiteAreasOnVertex(i, iVertex) * Vor_vertex(k, iVertex) * invAreaCell1
enddo
! Compute pv at the edges
- ! ( this computes pv_edge at all edges bounding real cells )
+ ! ( this computes Vor_edge at all edges bounding real cells )
do k=1,maxLevelEdgeBot(iEdge)
- pv_edge(k,iEdge) = pv_edge(k,iEdge) + 0.5 * pv_vertex(k,iVertex)
+ Vor_edge(k,iEdge) = Vor_edge(k,iEdge) + 0.5 * Vor_vertex(k,iVertex)
enddo
enddo
enddo
-! gradPVn(:,:) = 0.0
-! gradPVt(:,:) = 0.0
+! gradVor_n(:,:) = 0.0
+! gradVor_t(:,:) = 0.0
do iEdge = 1,nEdges
cell1 = cellsOnEdge(1, iEdge)
cell2 = cellsOnEdge(2, iEdge)
@@ -742,16 +722,16 @@
invLength = 1.0 / dcEdge(iEdge)
! Compute gradient of PV in normal direction
- ! ( this computes gradPVn for all edges bounding real cells )
+ ! ( this computes gradVor_n for all edges bounding real cells )
do k=1,maxLevelEdgeTop(iEdge)
- gradPVn(k,iEdge) = (pv_cell(k,cell2) - pv_cell(k,cell1)) * invLength
+ gradVor_n(k,iEdge) = (Vor_cell(k,cell2) - Vor_cell(k,cell1)) * invLength
enddo
invLength = 1.0 / dvEdge(iEdge)
! Compute gradient of PV in the tangent direction
- ! ( this computes gradPVt at all edges bounding real cells and distance-1 ghost cells )
+ ! ( this computes gradVor_t at all edges bounding real cells and distance-1 ghost cells )
do k = 1,maxLevelEdgeBot(iEdge)
- gradPVt(k,iEdge) = (pv_vertex(k,vertex2) - pv_vertex(k,vertex1)) * invLength
+ gradVor_t(k,iEdge) = (Vor_vertex(k,vertex2) - Vor_vertex(k,vertex1)) * invLength
enddo
enddo
@@ -761,9 +741,9 @@
!
do iEdge = 1,nEdges
do k = 1,maxLevelEdgeBot(iEdge)
- pv_edge(k,iEdge) = pv_edge(k,iEdge) &
- - 0.5 * dt* ( u(k,iEdge) * gradPVn(k,iEdge) &
- + v(k,iEdge) * gradPVt(k,iEdge) )
+ Vor_edge(k,iEdge) = Vor_edge(k,iEdge) &
+ - config_apvm_scale_factor * dt* ( u(k,iEdge) * gradVor_n(k,iEdge) &
+ + v(k,iEdge) * gradVor_t(k,iEdge) )
enddo
enddo
@@ -858,6 +838,16 @@
end do
+ !
+ ! Apply the GM closure as a bolus velocity
+ !
+ if (config_h_kappa .GE. epsilon(0D0)) then
+ call ocn_gm_compute_uBolus(s,grid)
+ else
+ ! mrp efficiency note: if uBolusGM is guaranteed to be zero, this can be removed.
+ uBolusGM = 0.0
+ end if
+
end subroutine ocn_diagnostic_solve!}}}
!***********************************************************************
@@ -872,23 +862,13 @@
!> This routine computes the vertical velocity in the top layer for the ocean
!
!-----------------------------------------------------------------------
-
subroutine ocn_wtop(s1,s2, grid)!{{{
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! Compute diagnostic fields used in the tendency computations
- !
- ! Input: grid - grid metadata
- !
- ! Output: s - computed diagnostics
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
implicit none
- type (state_type), intent(inout) :: s1
- type (state_type), intent(inout) :: s2
- type (mesh_type), intent(in) :: grid
+ type (state_type), intent(inout) :: s1 !< Input/Output: State 1 information
+ type (state_type), intent(inout) :: s2 !< Input/Output: State 2 information
+ type (mesh_type), intent(in) :: grid !< Input: Grid information
- ! mrp 110512 could clean this out, remove pointers?
integer :: iEdge, iCell, iVertex, k, cell1, cell2, vertex1, vertex2, eoe, i, j, cov
real (kind=RKIND) :: flux, vorticity_abs, h_vertex, workpv, rho0Inv, hSum
@@ -896,10 +876,10 @@
real (kind=RKIND), dimension(:), pointer :: &
- h_s, fVertex, fEdge, dvEdge, dcEdge, areaCell, areaTriangle
- real (kind=RKIND), dimension(:,:), pointer :: u,h,wTop, h_edge
+ h_s, fVertex, fEdge, dvEdge, dcEdge, areaCell, areaTriangle, zstarWeight
+ real (kind=RKIND), dimension(:,:), pointer :: uTransport,h,wTop, h_edge
real (kind=RKIND), dimension(:,:), allocatable:: div_hu
- real (kind=RKIND), dimension(:), allocatable:: div_hu_btr, h_tend_col, h_weights
+ real (kind=RKIND), dimension(:), allocatable:: div_hu_btr, h_tend_col
integer, dimension(:,:), pointer :: cellsOnEdge, cellsOnVertex, &
verticesOnEdge, edgesOnCell, edgesOnEdge, edgesOnVertex, &
@@ -910,7 +890,7 @@
h => s1 % h % array
h_edge => s1 % h_edge % array
- u => s2 % u % array
+ uTransport => s2 % uTransport % array
wTop => s2 % wTop % array
areaCell => grid % areaCell % array
@@ -918,13 +898,14 @@
maxLevelCell => grid % maxLevelCell % array
maxLevelEdgeBot => grid % maxLevelEdgeBot % array
dvEdge => grid % dvEdge % array
+ zstarWeight => grid % zstarWeight % array
nCells = grid % nCells
nEdges = grid % nEdges
nVertLevels = grid % nVertLevels
allocate(div_hu(nVertLevels,nCells+1), div_hu_btr(nCells+1), &
- h_tend_col(nVertLevels), h_weights(nVertLevels))
+ h_tend_col(nVertLevels))
!
! Compute div(h^{edge} u) for each cell
@@ -935,7 +916,7 @@
cell1 = cellsOnEdge(1,iEdge)
cell2 = cellsOnEdge(2,iEdge)
do k=1,maxLevelEdgeBot(iEdge)
- flux = u(k,iEdge) * dvEdge(iEdge) * h_edge(k,iEdge)
+ flux = uTransport(k,iEdge) * dvEdge(iEdge) * h_edge(k,iEdge)
div_hu(k,cell1) = div_hu(k,cell1) + flux
div_hu(k,cell2) = div_hu(k,cell2) - flux
end do
@@ -957,51 +938,14 @@
! set vertical velocity to zero in isopycnal case
wTop=0.0
- elseif (config_vert_grid_type.eq.'zlevel') then
+ else ! zlevel or zstar type vertical grid
do iCell=1,nCells
- ! Vertical velocity through layer interface at top and
- ! bottom is zero.
- wTop(1,iCell) = 0.0
- wTop(maxLevelCell(iCell)+1,iCell) = 0.0
- do k=maxLevelCell(iCell),2,-1
- wTop(k,iCell) = wTop(k+1,iCell) - div_hu(k,iCell)
- end do
- end do
- elseif (config_vert_grid_type.eq.'zstar1') then
-
- ! This is a testing setting. The computation is similar to zstar,
- ! but the weights are all in the top layer, so is a bit-for-bit
- ! match with zlevel.
-
- do iCell=1,nCells
-
- h_tend_col = 0.0
- h_tend_col(1) = - div_hu_btr(iCell)
-
- ! Vertical velocity through layer interface at top and
- ! bottom is zero.
- wTop(1,iCell) = 0.0
- wTop(maxLevelCell(iCell)+1,iCell) = 0.0
-
- do k=maxLevelCell(iCell),2,-1
- wTop(k,iCell) = wTop(k+1,iCell) - div_hu(k,iCell) - h_tend_col(k)
- end do
- end do
-
- elseif (config_vert_grid_type.eq.'zstar') then
-
- ! Distribute the change in total column height due to the external
- ! mode, div_hu_btr, among all the layers. Distribute in proportion
- ! to the layer thickness.
-
- do iCell=1,nCells
-
hSum = 0.0
do k=1,maxLevelCell(iCell)
- h_tend_col(k) = - h(k,iCell)*div_hu_btr(iCell)
- hSum = hSum + h(k,iCell)
+ h_tend_col(k) = - zstarWeight(k)*h(k,iCell)*div_hu_btr(iCell)
+ hSum = hSum + zstarWeight(k)*h(k,iCell)
end do
h_tend_col = h_tend_col / hSum
@@ -1014,37 +958,9 @@
end do
end do
- elseif (config_vert_grid_type.eq.'zstarWeights') then
-
- ! This is a test with other weights, not meant to be permanent.
-
- h_weights = 0.0
- h_weights(1:5) = 1.0
- do k=1,10
- h_weights(5+k) = 1.0-k*0.1
- end do
-
- do iCell=1,nCells
-
- hSum = 0.0
- do k=1,maxLevelCell(iCell)
- h_tend_col(k) = - h_weights(k)*h(k,iCell)*div_hu_btr(iCell)
- hSum = hSum + h_weights(k)*h(k,iCell)
- end do
- h_tend_col = h_tend_col / hSum
-
- ! Vertical velocity through layer interface at top and
- ! bottom is zero.
- wTop(1,iCell) = 0.0
- wTop(maxLevelCell(iCell)+1,iCell) = 0.0
- do k=maxLevelCell(iCell),2,-1
- wTop(k,iCell) = wTop(k+1,iCell) - div_hu(k,iCell) - h_tend_col(k)
- end do
- end do
-
endif
- deallocate(div_hu, div_hu_btr, h_tend_col, h_weights)
+ deallocate(div_hu, div_hu_btr, h_tend_col)
end subroutine ocn_wtop!}}}
@@ -1062,19 +978,10 @@
!-----------------------------------------------------------------------
subroutine ocn_fuperp(s, grid)!{{{
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! Put f*uBcl^{perp} in u as a work variable
- !
- ! Input: s - current model state
- ! grid - grid metadata
- !
- ! Output: tend - computed tendencies for prognostic variables
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
implicit none
- type (state_type), intent(inout) :: s
- type (mesh_type), intent(in) :: grid
+ type (state_type), intent(inout) :: s !< Input/Output: State information
+ type (mesh_type), intent(in) :: grid !< Input: Grid information
! mrp 110512 I just split compute_tend into compute_tend_u and compute_tend_h.
! Some of these variables can be removed, but at a later time.
@@ -1124,6 +1031,7 @@
end subroutine ocn_fuperp!}}}
+
!***********************************************************************
!
! routine ocn_tendency_init
@@ -1137,9 +1045,8 @@
!> other tendency routines.
!
!-----------------------------------------------------------------------
-
subroutine ocn_tendency_init(err)!{{{
- integer, intent(out) :: err
+ integer, intent(out) :: err !< Output: Error flag
err = 0
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_time_integration_rk4.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_time_integration_rk4.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_time_integration_rk4.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -78,7 +78,8 @@
integer :: iCell, k, i, err
type (block_type), pointer :: block
- type (state_type) :: provis
+ type (state_type), target :: provis
+ type (state_type), pointer :: provis_ptr
integer :: rk_step, iEdge, cell1, cell2
@@ -96,10 +97,13 @@
block => domain % blocklist
- call mpas_allocate_state(provis, &
+ call mpas_allocate_state(block, provis, &
block % mesh % nCells, block % mesh % nEdges, block % mesh % maxEdges, block % mesh % maxEdges2, &
block % mesh % nVertices, block % mesh % vertexDegree, block % mesh % nVertLevels )
+ provis_ptr => provis
+ call mpas_create_state_links(provis_ptr)
+
!
! Initialize time_levs(2) with state at current time
! Initialize first RK state
@@ -142,23 +146,11 @@
! --- update halos for diagnostic variables
call mpas_timer_start("RK4-diagnostic halo update")
- block => domain % blocklist
- do while (associated(block))
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, provis % pv_edge % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
-
- if (config_h_mom_eddy_visc4 > 0.0) then
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, provis % divergence % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, provis % vorticity % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nVertices, &
- block % parinfo % verticesToSend, block % parinfo % verticesToRecv)
- end if
-
- block => block % next
- end do
+ call mpas_dmpar_exch_halo_field(provis % Vor_edge)
+ if (config_h_mom_eddy_visc4 > 0.0) then
+ call mpas_dmpar_exch_halo_field(provis % divergence)
+ call mpas_dmpar_exch_halo_field(provis % vorticity)
+ end if
call mpas_timer_stop("RK4-diagnostic halo update")
! --- compute tendencies
@@ -173,7 +165,7 @@
if (.not.config_implicit_vertical_mix) then
call ocn_vmix_coefs(block % mesh, provis, block % diagnostics, err)
end if
- call ocn_tend_h(block % tend, provis, block % diagnostics, block % mesh)
+ call ocn_tend_h(block % tend, provis, block % mesh)
call ocn_tend_u(block % tend, provis, block % diagnostics, block % mesh)
! mrp 110718 filter btr mode out of u_tend
@@ -182,7 +174,7 @@
call filter_btr_mode_tend_u(block % tend, provis, block % diagnostics, block % mesh)
endif
- call ocn_tend_scalar(block % tend, provis, block % diagnostics, block % mesh)
+ call ocn_tend_scalar(block % tend, provis, block % diagnostics, block % mesh, dt)
block => block % next
end do
call mpas_timer_stop("RK4-tendency computations")
@@ -190,19 +182,9 @@
! --- update halos for prognostic variables
call mpas_timer_start("RK4-pronostic halo update")
- block => domain % blocklist
- do while (associated(block))
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % u % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % h % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field3d_real(domain % dminfo, block % tend % tracers % array(:,:,:), &
- block % tend % num_tracers, block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- block => block % next
- end do
+ call mpas_dmpar_exch_halo_field(domain % blocklist % tend % u)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % tend % h)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % tend % tracers)
call mpas_timer_stop("RK4-pronostic halo update")
! --- compute next substep state
@@ -231,8 +213,21 @@
provis % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
end if
+ if (config_prescribe_velocity) then
+ provis % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
+ end if
+
+ if (config_prescribe_thickness) then
+ provis % h % array(:,:) = block % state % time_levs(1) % state % h % array(:,:)
+ end if
+
call ocn_diagnostic_solve(dt, provis, block % mesh)
+ ! Compute velocity transport, used in advection terms of h and tracer tendency
+ provis % uTransport % array(:,:) &
+ = provis % u % array(:,:) &
+ + provis % uBolusGM % array(:,:)
+
block => block % next
end do
end if
@@ -325,8 +320,21 @@
block % state % time_levs(2) % state % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
end if
+ if (config_prescribe_velocity) then
+ block % state % time_levs(2) % state % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
+ end if
+
+ if (config_prescribe_thickness) then
+ block % state % time_levs(2) % state % h % array(:,:) = block % state % time_levs(1) % state % h % array(:,:)
+ end if
+
call ocn_diagnostic_solve(dt, block % state % time_levs(2) % state, block % mesh)
+ ! Compute velocity transport, used in advection terms of h and tracer tendency
+ block % state % time_levs(2) % state % uTransport % array(:,:) &
+ = block % state % time_levs(2) % state % u % array(:,:) &
+ + block % state % time_levs(2) % state % uBolusGM % array(:,:)
+
call mpas_reconstruct(block % mesh, block % state % time_levs(2) % state % u % array, &
block % state % time_levs(2) % state % uReconstructX % array, &
block % state % time_levs(2) % state % uReconstructY % array, &
@@ -374,7 +382,7 @@
meshScalingDel2, meshScalingDel4
real (kind=RKIND), dimension(:,:), pointer :: &
weightsOnEdge, kiteAreasOnVertex, h_edge, h, u, v, pressure, &
- tend_u, circulation, vorticity, ke, ke_edge, pv_edge, &
+ tend_u, circulation, vorticity, ke, ke_edge, Vor_edge, &
MontPot, wTop, divergence, vertViscTopOfEdge
type (dm_info) :: dminfo
@@ -406,7 +414,7 @@
divergence => s % divergence % array
ke => s % ke % array
ke_edge => s % ke_edge % array
- pv_edge => s % pv_edge % array
+ Vor_edge => s % Vor_edge % array
MontPot => s % MontPot % array
pressure => s % pressure % array
vertViscTopOfEdge => d % vertViscTopOfEdge % array
@@ -489,7 +497,7 @@
meshScalingDel2, meshScalingDel4
real (kind=RKIND), dimension(:,:), pointer :: &
weightsOnEdge, kiteAreasOnVertex, h_edge, h, u, v, pressure, &
- tend_u, circulation, vorticity, ke, ke_edge, pv_edge, &
+ tend_u, circulation, vorticity, ke, ke_edge, Vor_edge, &
MontPot, wTop, divergence, vertViscTopOfEdge
type (dm_info) :: dminfo
@@ -521,7 +529,7 @@
divergence => s % divergence % array
ke => s % ke % array
ke_edge => s % ke_edge % array
- pv_edge => s % pv_edge % array
+ Vor_edge => s % Vor_edge % array
MontPot => s % MontPot % array
pressure => s % pressure % array
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_time_integration_split.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_time_integration_split.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_time_integration_split.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -163,27 +163,11 @@
! --- update halos for diagnostic variables
call mpas_timer_start("se halo diag", .false., timer_halo_diagnostic)
- block => domain % blocklist
- do while (associated(block))
-
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, &
- block % state % time_levs(2) % state % pv_edge % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
-
- if (config_h_mom_eddy_visc4 > 0.0) then
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, &
- block % state % time_levs(2) % state % divergence % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, &
- block % state % time_levs(2) % state % vorticity % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nVertices, &
- block % parinfo % verticesToSend, block % parinfo % verticesToRecv)
- end if
-
- block => block % next
- end do
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % Vor_edge)
+ if (config_h_mom_eddy_visc4 > 0.0) then
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % divergence)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % vorticity)
+ end if
call mpas_timer_stop("se halo diag", timer_halo_diagnostic)
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -200,7 +184,7 @@
if (.not.config_implicit_vertical_mix) then
call ocn_vmix_coefs(block % mesh, block % state % time_levs(2) % state, block % diagnostics, err)
end if
- call ocn_tend_u(block % tend, block % state % time_levs(2) % state , block % diagnostics, block % mesh)
+ call ocn_tend_u(block % tend, block % state % time_levs(2) % state, block % diagnostics, block % mesh)
block => block % next
end do
@@ -272,15 +256,7 @@
end do
call mpas_timer_start("se halo ubcl", .false., timer_halo_ubcl)
- block => domain % blocklist
- do while (associated(block))
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, &
- block % state % time_levs(2) % state % uBcl % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
-
- block => block % next
- end do
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % uBcl)
call mpas_timer_stop("se halo ubcl", timer_halo_ubcl)
end do ! do j=1,config_n_bcl_iter
@@ -312,6 +288,20 @@
block % state % time_levs(2) % state % u % array(:,:) = block % state % time_levs(2) % state % uBcl % array(:,:)
+ do iEdge=1,block % mesh % nEdges
+ do k=1,block % mesh % nVertLevels
+
+ ! uTransport = uBcl + uBolus
+ ! This is u used in advective terms for h and tracers
+ ! in tendency calls in stage 3.
+ block % state % time_levs(2) % state % uTransport % array(k,iEdge) &
+ = block % mesh % edgeMask % array(k,iEdge) &
+ *( block % state % time_levs(2) % state % uBcl % array(k,iEdge) &
+ + block % state % time_levs(1) % state % uBolusGM % array(k,iEdge) )
+
+ enddo
+ end do ! iEdge
+
block => block % next
end do ! block
@@ -392,15 +382,7 @@
! boundary update on uBtrNew
call mpas_timer_start("se halo ubtr", .false., timer_halo_ubtr)
- block => domain % blocklist
- do while (associated(block))
-
- call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, &
- block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(:), &
- block % mesh % nEdges, block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
-
- block => block % next
- end do ! block
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle)
call mpas_timer_stop("se halo ubtr", timer_halo_ubtr)
endif ! config_btr_gam1_uWt1>1.0e-12
@@ -459,15 +441,7 @@
! boundary update on SSHnew
call mpas_timer_start("se halo ssh", .false., timer_halo_ssh)
- block => domain % blocklist
- do while (associated(block))
-
- call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, &
- block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle % array(:), &
- block % mesh % nCells, block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
-
- block => block % next
- end do ! block
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle)
call mpas_timer_stop("se halo ssh", timer_halo_ssh)
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -510,14 +484,7 @@
! boundary update on uBtrNew
call mpas_timer_start("se halo ubtr", .false., timer_halo_ubtr)
- block => domain % blocklist
- do while (associated(block))
- call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, &
- block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(:), &
- block % mesh % nEdges, block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
-
- block => block % next
- end do ! block
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle)
call mpas_timer_stop("se halo ubtr", timer_halo_ubtr)
end do !do BtrCorIter=1,config_n_btr_cor_iter
@@ -570,15 +537,8 @@
! boundary update on SSHnew
call mpas_timer_start("se halo ssh", .false., timer_halo_ssh)
- block => domain % blocklist
- do while (associated(block))
- call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, &
- block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle % array(:), &
- block % mesh % nCells, block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
-
- block => block % next
- end do ! block
- call mpas_timer_stop("se halo ssh", timer_halo_ssh)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle)
+ call mpas_timer_stop("se halo ssh", timer_halo_ssh)
endif ! config_btr_solve_SSH2
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -629,14 +589,7 @@
! boundary update on F
call mpas_timer_start("se halo F", .false., timer_halo_f)
- block => domain % blocklist
- do while (associated(block))
- call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, &
- block % state % time_levs(1) % state % FBtr % array(:), &
- block % mesh % nEdges, block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
-
- block => block % next
- end do ! block
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(1) % state % FBtr)
call mpas_timer_stop("se halo F", timer_halo_f)
@@ -663,9 +616,11 @@
do iEdge=1,block % mesh % nEdges
- ! This is u^{avg}
- uTemp(:) = block % state % time_levs(2) % state % uBtr % array(iEdge) &
- + block % state % time_levs(2) % state % uBcl % array(:,iEdge)
+ ! velocity for uCorrection is uBtr + uBcl + uBolus
+ uTemp(:) &
+ = block % state % time_levs(2) % state % uBtr % array( iEdge) &
+ + block % state % time_levs(2) % state % uBcl % array(:,iEdge) &
+ + block % state % time_levs(1) % state % uBolusGM % array(:,iEdge)
! hSum is initialized outside the loop because on land boundaries
! maxLevelEdgeTop=0, but I want to initialize hSum with a
@@ -680,19 +635,20 @@
uCorr = ucorr_coef*(( block % state % time_levs(1) % state % FBtr % array(iEdge) - uhSum)/hSum)
- ! put u^{tr}, the velocity for tracer transport, in uNew
- ! mrp 060611 not sure if boundary enforcement is needed here.
- if (block % mesh % boundaryEdge % array(1,iEdge).eq.1) then
- block % state % time_levs(2) % state % u % array(:,iEdge) = 0.0
- else
- do k=1,block % mesh % maxLevelEdgeTop % array(iEdge)
- block % state % time_levs(2) % state % u % array(k,iEdge) = uTemp(k) + uCorr
- enddo
- do k=block % mesh % maxLevelEdgeTop % array(iEdge)+1,block % mesh % nVertLevels
- block % state % time_levs(2) % state % u % array(k,iEdge) = 0.0
- end do
- endif
+ do k=1,block % mesh % nVertLevels
+ ! uTransport = uBtr + uBcl + uBolus + uCorrection
+ ! This is u used in advective terms for h and tracers
+ ! in tendency calls in stage 3.
+ block % state % time_levs(2) % state % uTransport % array(k,iEdge) &
+ = block % mesh % edgeMask % array(k,iEdge) &
+ *( block % state % time_levs(2) % state % uBtr % array( iEdge) &
+ + block % state % time_levs(2) % state % uBcl % array(k,iEdge) &
+ + block % state % time_levs(1) % state % uBolusGM % array(k,iEdge) &
+ + uCorr )
+
+ enddo
+
end do ! iEdge
deallocate(uTemp)
@@ -716,28 +672,32 @@
!TDR: at this point, I am suggesting just pushing some of this code into subroutines.
!TDR: see comments farther down
+ ! dwj: 02/22/12 splitting thickness and tracer tendency computations and halo updates to allow monotonic advection.
block => domain % blocklist
do while (associated(block))
call ocn_wtop(block % state % time_levs(1) % state,block % state % time_levs(2) % state, block % mesh)
- call ocn_tend_h (block % tend, block % state % time_levs(2) % state , block % diagnostics, block % mesh)
- call ocn_tend_scalar(block % tend, block % state % time_levs(2) % state , block % diagnostics, block % mesh)
-
+ call ocn_tend_h(block % tend, block % state % time_levs(2) % state, block % mesh)
block => block % next
end do
! update halo for thickness and tracer tendencies
call mpas_timer_start("se halo h", .false., timer_halo_h)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % tend % h)
+ call mpas_timer_stop("se halo h", timer_halo_h)
+
block => domain % blocklist
do while (associated(block))
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % h % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
+ call ocn_tend_scalar(block % tend, block % state % time_levs(2) % state, block % diagnostics, block % mesh, dt)
block => block % next
end do
- call mpas_timer_stop("se halo h", timer_halo_h)
+ ! update halo for thickness and tracer tendencies
+ call mpas_timer_start("se halo tracers", .false., timer_halo_tracers)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % tend % tracers)
+ call mpas_timer_stop("se halo tracers", timer_halo_tracers)
+
block => domain % blocklist
do while (associated(block))
@@ -785,10 +745,22 @@
end do
end do ! iCell
- ! uBclNew is u'_{n+1/2}
- ! uBtrNew is {\bar u}_{avg}
- ! uNew is u^{tr}
+ do iEdge=1,block % mesh % nEdges
+ do k=1,block % mesh % nVertLevels
+
+ ! u = uBtr + uBcl
+ ! here uBcl is at time n+1/2
+ ! This is u used in next iteration or step
+ block % state % time_levs(2) % state % u % array(k,iEdge) &
+ = block % mesh % edgeMask % array(k,iEdge) &
+ *( block % state % time_levs(2) % state % uBtr % array( iEdge) &
+ + block % state % time_levs(2) % state % uBcl % array(k,iEdge) )
+
+ enddo
+
+ end do ! iEdge
+
! mrp 110512 I really only need this to compute h_edge, density, pressure, and SSH
! I can par this down later.
call ocn_diagnostic_solve(dt, block % state % time_levs(2) % state, block % mesh)
@@ -846,22 +818,8 @@
block => block % next
end do
- ! Boundary update on tracers. This is placed here, rather than
- ! on tend % tracers as in RK4, because I needed to update
- ! afterwards for the del4 diffusion operator.
- call mpas_timer_start("se halo tracers", .false., timer_halo_tracers)
- block => domain % blocklist
- do while (associated(block))
- call mpas_dmpar_exch_halo_field3d_real(domain % dminfo, block % state % time_levs(2) % state % tracers % array(:,:,:), &
- block % tend % num_tracers, block % mesh % nVertLevels, block % mesh % nCells, block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- block => block % next
- end do
- call mpas_timer_stop("se halo tracers", timer_halo_tracers)
-
-
-
end do ! split_explicit_step = 1, config_n_ts_iter
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! END large iteration loop
@@ -901,6 +859,15 @@
if (config_test_case == 1) then ! For case 1, wind field should be fixed
block % state % time_levs(2) % state % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
end if
+
+ if (config_prescribe_velocity) then
+ block % state % time_levs(2) % state % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
+ end if
+
+ if (config_prescribe_thickness) then
+ block % state % time_levs(2) % state % h % array(:,:) = block % state % time_levs(1) % state % h % array(:,:)
+ end if
+
call ocn_diagnostic_solve(dt, block % state % time_levs(2) % state, block % mesh)
call mpas_reconstruct(block % mesh, block % state % time_levs(2) % state % u % array, &
block % state % time_levs(2) % state % uReconstructX % array, &
Copied: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection.F (from rev 1863, trunk/mpas/src/core_ocean/mpas_ocn_tracer_advection.F)
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection.F         (rev 0)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -0,0 +1,316 @@
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+! ocn_tracer_advection
+!
+!> \brief MPAS ocean tracer advection driver
+!> \author Doug Jacobsen
+!> \date 03/09/12
+!> \version SVN:$Id:$
+!> \details
+!> This module contains the driver routine for tracer advection tendencies
+!> as well as the routines for setting up advection coefficients and
+!> initialization of the advection routines.
+!
+!-----------------------------------------------------------------------
+
+module mpas_ocn_tracer_advection
+
+ use mpas_kind_types
+ use mpas_grid_types
+ use mpas_configure
+
+ use mpas_ocn_tracer_advection_std
+ use mpas_ocn_tracer_advection_mono
+
+ implicit none
+ private
+ save
+
+ public :: mpas_ocn_tracer_advection_init, &
+ mpas_ocn_tracer_advection_coefficients, &
+ mpas_ocn_tracer_advection_tend
+
+ logical :: monotonicOn
+
+ contains
+
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+! routine mpas_ocn_tracer_advection_coefficients
+!
+!> \brief MPAS ocean tracer advection coefficients
+!> \author Doug Jacobsen
+!> \date 03/09/12
+!> \version SVN:$Id:$
+!> \details
+!> This routine precomputes the advection coefficients for horizontal
+!> advection of tracers.
+!
+!-----------------------------------------------------------------------
+ subroutine mpas_ocn_tracer_advection_coefficients( grid, err )!{{{
+
+ implicit none
+ type (mesh_type) :: grid !< Input: Grid information
+ integer, intent(out) :: err
+
+ real (kind=RKIND), dimension(:,:,:), pointer :: deriv_two
+ real (kind=RKIND), dimension(:,:), pointer :: adv_coefs, adv_coefs_2nd, adv_coefs_3rd
+ integer, dimension(:,:), pointer :: cellsOnCell, cellsOnEdge, advCellsForEdge, highOrderAdvectionMask, lowOrderAdvectionMask, boundaryCell
+ integer, dimension(:), pointer :: nEdgesOnCell, nAdvCellsForEdge, maxLevelCell
+
+ integer, dimension(:), pointer :: cell_list, ordered_cell_list
+ integer :: cell1, cell2, iEdge, n, i, j, j_in, iCell, k, nVertLevels
+ logical :: addcell, highOrderAdvection
+
+ deriv_two => grid % deriv_two % array
+ adv_coefs => grid % adv_coefs % array
+ adv_coefs_2nd => grid % adv_coefs_2nd % array
+ adv_coefs_3rd => grid % adv_coefs_3rd % array
+ cellsOnCell => grid % cellsOnCell % array
+ cellsOnEdge => grid % cellsOnEdge % array
+ advCellsForEdge => grid % advCellsForEdge % array
+ boundaryCell => grid % boundaryCell % array
+ highOrderAdvectionMask => grid % highOrderAdvectionMask % array
+ lowOrderAdvectionMask => grid % lowOrderAdvectionMask % array
+ nEdgesOnCell => grid % nEdgesOnCell % array
+ maxLevelCell => grid % maxLevelCell % array
+ nAdvCellsForEdge => grid % nAdvCellsForEdge % array
+
+ nVertLevels = grid % nVertLevels
+
+ allocate(cell_list(grid % maxEdges2 + 2))
+ allocate(ordered_cell_list(grid % maxEdges2 + 2))
+
+ err = 0
+
+ highOrderAdvectionMask = 0
+ lowOrderAdvectionMask = 0
+ if(config_horiz_tracer_adv_order == 2) then
+
+ end if
+
+ do iEdge = 1, grid % nEdges
+ nAdvCellsForEdge(iEdge) = 0
+ cell1 = cellsOnEdge(1,iEdge)
+ cell2 = cellsOnEdge(2,iEdge)
+
+
+ do k = 1, nVertLevels
+ if (boundaryCell(k, cell1) == 1 .or. boundaryCell(k, cell2) == 1) then
+ highOrderAdvectionMask(k, iEdge) = 0
+ lowOrderAdvectionMask(k, iEdge) = 1
+ else
+ highOrderAdvectionMask(k, iEdge) = 1
+ lowOrderAdvectionMask(k, iEdge) = 0
+ end if
+ end do
+
+ !
+ ! do only if this edge flux is needed to update owned cells
+ !
+ if (cell1 <= grid%nCells .or. cell2 <= grid%nCells) then
+
+ cell_list(1) = cell1
+ cell_list(2) = cell2
+ n = 2
+
+ ! add cells surrounding cell 1. n is number of cells currently in list
+ do i = 1, nEdgesOnCell(cell1)
+ if(cellsOnCell(i,cell1) /= cell2) then
+ n = n + 1
+ cell_list(n) = cellsOnCell(i,cell1)
+ end if
+ end do
+
+ ! add cells surrounding cell 2 (brute force approach)
+ do iCell = 1, nEdgesOnCell(cell2)
+ addcell = .true.
+ do i=1,n
+ if(cell_list(i) == cellsOnCell(iCell,cell2)) addcell = .false.
+ end do
+ if(addcell) then
+ n = n+1
+ cell_list(n) = cellsOnCell(iCell,cell2)
+ end if
+ end do
+
+ ! order the list by increasing cell number (brute force approach)
+
+ do i=1,n
+ ordered_cell_list(i) = grid % nCells + 2
+ j_in = 1
+ do j=1,n
+ if(ordered_cell_list(i) > cell_list(j) ) then
+ j_in = j
+ ordered_cell_list(i) = cell_list(j)
+ end if
+ end do
+! ordered_cell_list(i) = cell_list(j_in)
+ cell_list(j_in) = grid % nCells + 3
+ end do
+
+ nAdvCellsForEdge(iEdge) = n
+ do iCell = 1, nAdvCellsForEdge(iEdge)
+ advCellsForEdge(iCell,iEdge) = ordered_cell_list(iCell)
+ end do
+
+ ! we have the ordered list, now construct coefficients
+
+ adv_coefs(:,iEdge) = 0.
+ adv_coefs_2nd(:,iEdge) = 0.
+ adv_coefs_3rd(:,iEdge) = 0.
+
+ ! pull together third and fourth order contributions to the flux
+ ! first from cell1
+
+ j_in = 0
+ do j=1, n
+ if( ordered_cell_list(j) == cell1 ) j_in = j
+ end do
+ adv_coefs (j_in,iEdge) = adv_coefs (j_in,iEdge) + deriv_two(1,1,iEdge)
+ adv_coefs_3rd(j_in,iEdge) = adv_coefs_3rd(j_in,iEdge) + deriv_two(1,1,iEdge)
+
+ do iCell = 1, nEdgesOnCell(cell1)
+ j_in = 0
+ do j=1, n
+ if( ordered_cell_list(j) == cellsOnCell(iCell,cell1) ) j_in = j
+ end do
+ adv_coefs (j_in,iEdge) = adv_coefs (j_in,iEdge) + deriv_two(iCell+1,1,iEdge)
+ adv_coefs_3rd(j_in,iEdge) = adv_coefs_3rd(j_in,iEdge) + deriv_two(iCell+1,1,iEdge)
+ end do
+
+ ! pull together third and fourth order contributions to the flux
+ ! now from cell2
+
+ j_in = 0
+ do j=1, n
+ if( ordered_cell_list(j) == cell2 ) j_in = j
+ enddo
+ adv_coefs (j_in,iEdge) = adv_coefs (j_in,iEdge) + deriv_two(1,2,iEdge)
+ adv_coefs_3rd(j_in,iEdge) = adv_coefs_3rd(j_in,iEdge) - deriv_two(1,2,iEdge)
+
+ do iCell = 1, nEdgesOnCell(cell2)
+ j_in = 0
+ do j=1, n
+ if( ordered_cell_list(j) == cellsOnCell(iCell,cell2) ) j_in = j
+ enddo
+ adv_coefs (j_in,iEdge) = adv_coefs (j_in,iEdge) + deriv_two(iCell+1,2,iEdge)
+ adv_coefs_3rd(j_in,iEdge) = adv_coefs_3rd(j_in,iEdge) - deriv_two(iCell+1,2,iEdge)
+ end do
+
+ do j = 1,n
+ adv_coefs (j,iEdge) = - (grid % dcEdge % array (iEdge) **2) * adv_coefs (j,iEdge) / 12.
+ adv_coefs_3rd(j,iEdge) = - (grid % dcEdge % array (iEdge) **2) * adv_coefs_3rd(j,iEdge) / 12.
+ end do
+
+ ! 2nd order centered contribution - place this in the main flux weights
+
+ j_in = 0
+ do j=1, n
+ if( ordered_cell_list(j) == cell1 ) j_in = j
+ enddo
+ adv_coefs(j_in,iEdge) = adv_coefs(j_in,iEdge) + 0.5
+ adv_coefs_2nd(j_in,iEdge) = adv_coefs_2nd(j_in,iEdge) + 0.5
+
+ j_in = 0
+ do j=1, n
+ if( ordered_cell_list(j) == cell2 ) j_in = j
+ enddo
+ adv_coefs(j_in,iEdge) = adv_coefs(j_in,iEdge) + 0.5
+ adv_coefs_2nd(j_in,iEdge) = adv_coefs_2nd(j_in,iEdge) + 0.5
+
+ ! multiply by edge length - thus the flux is just dt*ru times the results of the vector-vector multiply
+
+ do j=1,n
+ adv_coefs (j,iEdge) = grid % dvEdge % array(iEdge) * adv_coefs (j,iEdge)
+ adv_coefs_2nd(j,iEdge) = grid % dvEdge % array(iEdge) * adv_coefs_2nd(j,iEdge)
+ adv_coefs_3rd(j,iEdge) = grid % dvEdge % array(iEdge) * adv_coefs_3rd(j,iEdge)
+ end do
+
+ end if ! only do for edges of owned-cells
+
+ end do ! end loop over edges
+
+ deallocate(cell_list)
+ deallocate(ordered_cell_list)
+
+ ! If 2nd order advection, set masks appropriately.
+ if(config_horiz_tracer_adv_order == 2) then
+ lowOrderAdvectionMask = 1
+ highOrderAdvectionMask = 0
+ end if
+
+ if (maxval(highOrderAdvectionMask+lowOrderAdvectionMask) > 1) then
+ write(*,*) "Masks don't sum to 1."
+ err = 1
+ endif
+
+ end subroutine mpas_ocn_tracer_advection_coefficients!}}}
+
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+! routine mpas_ocn_tracer_advection_tend
+!
+!> \brief MPAS ocean tracer advection tendency
+!> \author Doug Jacobsen
+!> \date 03/09/12
+!> \version SVN:$Id:$
+!> \details
+!> This routine is the driver routine for computing the tendency for
+!> advection of tracers.
+!
+!-----------------------------------------------------------------------
+ subroutine mpas_ocn_tracer_advection_tend(tracers, uh, w, h, verticalCellSize, dt, grid, tend_h, tend)!{{{
+
+ real (kind=RKIND), dimension(:,:,:), intent(inout) :: tend !< Input/Output: tracer tendency
+ real (kind=RKIND), dimension(:,:,:), intent(in) :: tracers !< Input/Output: tracer values
+ real (kind=RKIND), dimension(:,:), intent(in) :: uh !< Input: Thickness weighted horizontal velocity
+ real (kind=RKIND), dimension(:,:), intent(in) :: w !< Input: Vertical velocity
+ real (kind=RKIND), dimension(:,:), intent(in) :: h !< Input: Thickness field
+ real (kind=RKIND), dimension(:,:), intent(in) :: verticalCellSize !< Input: Distance between vertical interfaces of a cell
+ real (kind=RKIND), intent(in) :: dt !< Input: Time step
+ type (mesh_type), intent(in) :: grid !< Input: grid information
+ real (kind=RKIND), dimension(:,:), intent(in) :: tend_h !< Input: Thickness tendency information
+
+ if(monotonicOn) then
+ call mpas_ocn_tracer_advection_mono_tend(tracers, uh, w, h, verticalCellSize, dt, grid, tend_h, tend)
+ else
+ call mpas_ocn_tracer_advection_std_tend(tracers, uh, w, verticalCellSize, grid, tend)
+ endif
+ end subroutine mpas_ocn_tracer_advection_tend!}}}
+
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+! routine mpas_ocn_tracer_advection_init
+!
+!> \brief MPAS ocean tracer advection tendency
+!> \author Doug Jacobsen
+!> \date 03/09/12
+!> \version SVN:$Id:$
+!> \details
+!> This routine is the driver routine for initialization of
+!> the tracer advection routines.
+!
+!-----------------------------------------------------------------------
+ subroutine mpas_ocn_tracer_advection_init(err)!{{{
+
+ integer, intent(inout) :: err !< Input/Output: Error flag
+
+ integer :: err_tmp
+
+ err = 0
+
+ call mpas_ocn_tracer_advection_std_init(err_tmp)
+ call mpas_ocn_tracer_advection_mono_init(err_tmp)
+
+ err = ior(err, err_tmp)
+
+ monotonicOn = .false.
+
+ if(config_monotonic) then
+ monotonicOn = .true.
+ endif
+
+ end subroutine mpas_ocn_tracer_advection_init!}}}
+
+end module mpas_ocn_tracer_advection
Copied: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection_helpers.F (from rev 1863, trunk/mpas/src/core_ocean/mpas_ocn_tracer_advection_helpers.F)
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection_helpers.F         (rev 0)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection_helpers.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -0,0 +1,68 @@
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+! ocn_tracer_advection_helpers
+!
+!> \brief MPAS ocean tracer advection helper functions
+!> \author Doug Jacobsen
+!> \date 03/09/12
+!> \version SVN:$Id:$
+!> \details
+!> This module contains helper functions for tracer advection.
+!
+!-----------------------------------------------------------------------
+module mpas_ocn_tracer_advection_helpers
+
+ use mpas_kind_types
+
+ implicit none
+ save
+
+ contains
+
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+! function mpas_ocn_tracer_advection_vflux4
+!
+!> \brief MPAS ocean 4th order vertical tracer advection stencil
+!> \author Doug Jacobsen
+!> \date 03/09/12
+!> \version SVN:$Id:$
+!> \details
+!> This function provides the stencil for 4th order vertical advection of tracers.
+!
+!-----------------------------------------------------------------------
+ real function mpas_ocn_tracer_advection_vflux4(q_im2, q_im1, q_i, q_ip1, w)!{{{
+ real (kind=RKIND), intent(in) :: q_im2 !< Input: Tracer value at index i-2
+ real (kind=RKIND), intent(in) :: q_im1 !< Input: Tracer value at index i-1
+ real (kind=RKIND), intent(in) :: q_i !< Input: Tracer value at index i
+ real (kind=RKIND), intent(in) :: q_ip1 !< Input: Tracer value at index i+1
+ real (kind=RKIND), intent(in) :: w !< Input: vertical velocity
+ mpas_ocn_tracer_advection_vflux4 = w*( 7.*(q_i + q_im1) - (q_ip1 + q_im2) )/12.0
+ end function!}}}
+
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+! function mpas_ocn_tracer_advection_vflux3
+!
+!> \brief MPAS ocean 3rd order vertical tracer advection stencil
+!> \author Doug Jacobsen
+!> \date 03/09/12
+!> \version SVN:$Id:$
+!> \details
+!> This function provides the stencil for 3rd order vertical advection of tracers.
+!
+!-----------------------------------------------------------------------
+ real function mpas_ocn_tracer_advection_vflux3( q_im2, q_im1, q_i, q_ip1, w, coef)!{{{
+ real (kind=RKIND), intent(in) :: q_im2 !< Input: Tracer value at index i-2
+ real (kind=RKIND), intent(in) :: q_im1 !< Input: Tracer value at index i-1
+ real (kind=RKIND), intent(in) :: q_i !< Input: Tracer value at index i
+ real (kind=RKIND), intent(in) :: q_ip1 !< Input: Tracer value at index i+1
+ real (kind=RKIND), intent(in) :: w !< Input: vertical velocity
+ real (kind=RKIND), intent(in) :: coef !< Input: Advection coefficient
+
+ !dwj 02/21/12 flux3 is different in ocean and atmosphere
+ !flux3 = (u * (7.0 * (q_i + q_im1) - (q_ip1 + q_im2)) + coef * abs(u) * ((q_ip1 - q_im2) - 3.0*(q_i-q_im1)))/12.0
+ mpas_ocn_tracer_advection_vflux3 = (w * (7.0 * (q_i + q_im1) - (q_ip1 + q_im2)) - coef * abs(w) * ((q_ip1 - q_im2) - 3.0*(q_i-q_im1)))/12.0
+ end function!}}}
+
+end module mpas_ocn_tracer_advection_helpers
Copied: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection_mono.F (from rev 1863, trunk/mpas/src/core_ocean/mpas_ocn_tracer_advection_mono.F)
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection_mono.F         (rev 0)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection_mono.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -0,0 +1,380 @@
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+! ocn_tracer_advection_mono
+!
+!> \brief MPAS ocean monotonic tracer advection with FCT
+!> \author Doug Jacobsen
+!> \date 03/09/12
+!> \version SVN:$Id:$
+!> \details
+!> This module contains routines for monotonic advection of tracers using a FCT
+!
+!-----------------------------------------------------------------------
+module mpas_ocn_tracer_advection_mono
+
+ use mpas_kind_types
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_dmpar
+
+ use mpas_ocn_tracer_advection_helpers
+
+ implicit none
+ private
+ save
+
+ real (kind=RKIND) :: coef_3rd_order
+
+ public :: mpas_ocn_tracer_advection_mono_tend, &
+ mpas_ocn_tracer_advection_mono_init
+
+ contains
+
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+! routine mpas_ocn_tracer_advection_mono_tend
+!
+!> \brief MPAS ocean monotonic tracer advection tendency with FCT
+!> \author Doug Jacobsen
+!> \date 03/09/12
+!> \version SVN:$Id:$
+!> \details
+!> This routine computes the monotonic tracer advection tendency using a FCT.
+!> Both horizontal and vertical.
+!
+!-----------------------------------------------------------------------
+ subroutine mpas_ocn_tracer_advection_mono_tend(tracers, uh, w, h, verticalCellSize, dt, grid, tend_h, tend)!{{{
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !
+ ! Input: s - current model state
+ ! grid - grid metadata
+ !
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ real (kind=RKIND), dimension(:,:,:), intent(in) :: tracers !< Input: current tracer values
+ real (kind=RKIND), dimension(:,:), intent(in) :: uh !< Input: Thickness weighted velocity
+ real (kind=RKIND), dimension(:,:), intent(in) :: w !< Input: Vertical velocity
+ real (kind=RKIND), dimension(:,:), intent(in) :: h !< Input: Thickness
+ real (kind=RKIND), dimension(:,:), intent(in) :: verticalCellSize !< Input: Distance between vertical interfaces of a cell
+ real (kind=RKIND), dimension(:,:), intent(in) :: tend_h !< Input: Tendency for thickness field
+ real (kind=RKIND), intent(in) :: dt !< Input: Timestep
+ type (mesh_type), intent(in) :: grid !< Input: Grid information
+ real (kind=RKIND), dimension(:,:,:), intent(inout) :: tend !< Input/Output: Tracer tendency
+
+ integer :: i, iCell, iEdge, k, iTracer, cell1, cell2
+ integer :: nVertLevels, num_tracers, nCells, nEdges, nCellsSolve
+ integer, dimension(:), pointer :: nAdvCellsForEdge, maxLevelCell, maxLevelEdgeTop, nEdgesOnCell
+ integer, dimension(:,:), pointer :: cellsOnEdge, cellsOnCell, advCellsForEdge, highOrderAdvectionMask, lowOrderAdvectionMask
+
+ real (kind=RKIND) :: flux_upwind, tracer_min_new, tracer_max_new, tracer_upwind_new, scale_factor
+ real (kind=RKIND) :: flux, tracer_weight, invDvEdge, invAreaCell1, invAreaCell2
+ real (kind=RKIND) :: cur_max, cur_min, new_max, new_min
+ real (kind=RKIND) :: verticalWeightK, verticalWeightKm1
+ real (kind=RKIND), dimension(:), pointer :: dvEdge, areaCell
+ real (kind=RKIND), dimension(:,:), pointer :: adv_coefs, adv_coefs_2nd, adv_coefs_3rd
+ real (kind=RKIND), dimension(:,:), pointer :: tracer_cur, tracer_new, upwind_tendency, inv_h_new, tracer_max, tracer_min
+ real (kind=RKIND), dimension(:,:), pointer :: flux_incoming, flux_outgoing, high_order_horiz_flux, high_order_vert_flux
+
+ real (kind=RKIND), parameter :: eps = 1.e-10
+
+ ! Initialize pointers
+ dvEdge => grid % dvEdge % array
+ cellsOnEdge => grid % cellsOnEdge % array
+ cellsOnCell => grid % cellsOnCell % array
+ areaCell => grid % areaCell % array
+
+ nEdgesOnCell => grid % nEdgesOnCell % array
+ nAdvCellsForEdge => grid % nAdvCellsForEdge % array
+ advCellsForEdge => grid % advCellsForEdge % array
+ adv_coefs => grid % adv_coefs % array
+ adv_coefs_2nd => grid % adv_coefs_2nd % array
+ adv_coefs_3rd => grid % adv_coefs_3rd % array
+ maxLevelCell => grid % maxLevelCell % array
+ maxLevelEdgeTop => grid % maxLevelEdgeTop % array
+ highOrderAdvectionMask => grid % highOrderAdvectionMask % array
+ lowOrderAdvectionMask => grid % lowOrderAdvectionMask % array
+
+ nCells = grid % nCells
+ nCellsSolve = grid % nCellsSolve
+ nEdges = grid % nEdges
+ nVertLevels = grid % nVertLevels
+ num_tracers = size(tracers,dim=1)
+
+ ! allocate nCells arrays
+
+ allocate(tracer_new(nVertLevels, nCells))
+ allocate(tracer_cur(nVertLevels, nCells))
+ allocate(upwind_tendency(nVertLevels, nCells))
+ allocate(inv_h_new(nVertLevels, nCells))
+ allocate(tracer_max(nVertLevels, nCells))
+ allocate(tracer_min(nVertLevels, nCells))
+ allocate(flux_incoming(nVertLevels, nCells))
+ allocate(flux_outgoing(nVertLevels, nCells))
+
+ ! allocate nEdges arrays
+ allocate(high_order_horiz_flux(nVertLevels, nEdges))
+
+ ! allocate nVertLevels+1 and nCells arrays
+ allocate(high_order_vert_flux(nVertLevels+1, nCells))
+
+ do iCell = 1, nCells
+ do k=1, maxLevelCell(iCell)
+ inv_h_new(k, iCell) = 1.0 / (h(k, iCell) + dt * tend_h(k, iCell))
+ end do
+ end do
+
+ ! Loop over tracers. One tracer is advected at a time. It is copied into a temporary array in order to improve locality
+ do iTracer = 1, num_tracers
+ ! Initialize variables for use in this iTracer iteration
+ do iCell = 1, nCells
+ do k=1, maxLevelCell(iCell)
+ tracer_cur(k,iCell) = tracers(iTracer,k,iCell)
+ upwind_tendency(k, iCell) = 0.0
+
+ !tracer_new is supposed to be the "new" tracer state. This allows bounds checks.
+ if (config_check_monotonicity) then
+ tracer_new(k,iCell) = 0.0
+ end if
+ end do ! k loop
+ end do ! iCell loop
+
+ high_order_vert_flux = 0.0
+ high_order_horiz_flux = 0.0
+
+ ! Compute the high order vertical flux. Also determine bounds on tracer_cur.
+ do iCell = 1, nCells
+ k = 1
+ tracer_max(k,iCell) = max(tracer_cur(k,iCell),tracer_cur(k+1,iCell))
+ tracer_min(k,iCell) = min(tracer_cur(k,iCell),tracer_cur(k+1,iCell))
+
+ k = 2
+ verticalWeightK = verticalCellSize(k-1, iCell) / (verticalCellSize(k, iCell) + verticalCellSize(k-1, iCell))
+ verticalWeightKm1 = verticalCellSize(k, iCell) / (verticalCellSize(k, iCell) + verticalCellSize(k-1, iCell))
+ high_order_vert_flux(k,iCell) = w(k,iCell)*(verticalWeightK*tracer_cur(k,iCell)+verticalWeightKm1*tracer_cur(k-1,iCell))
+ tracer_max(k,iCell) = max(tracer_cur(k-1,iCell),tracer_cur(k,iCell),tracer_cur(k+1,iCell))
+ tracer_min(k,iCell) = min(tracer_cur(k-1,iCell),tracer_cur(k,iCell),tracer_cur(k+1,iCell))
+
+ do k=3,maxLevelCell(iCell)-1
+ high_order_vert_flux(k,iCell) = mpas_ocn_tracer_advection_vflux3( tracer_cur(k-2,iCell),tracer_cur(k-1,iCell), &
+ tracer_cur(k ,iCell),tracer_cur(k+1,iCell), &
+ w(k,iCell), coef_3rd_order )
+ tracer_max(k,iCell) = max(tracer_cur(k-1,iCell),tracer_cur(k,iCell),tracer_cur(k+1,iCell))
+ tracer_min(k,iCell) = min(tracer_cur(k-1,iCell),tracer_cur(k,iCell),tracer_cur(k+1,iCell))
+ end do
+
+ k = maxLevelCell(iCell)
+ verticalWeightK = verticalCellSize(k-1, iCell) / (verticalCellSize(k, iCell) + verticalCellSize(k-1, iCell))
+ verticalWeightKm1 = verticalCellSize(k, iCell) / (verticalCellSize(k, iCell) + verticalCellSize(k-1, iCell))
+ high_order_vert_flux(k,iCell) = w(k,iCell)*(verticalWeightK*tracer_cur(k,iCell)+verticalWeightKm1*tracer_cur(k-1,iCell))
+ tracer_max(k,iCell) = max(tracer_cur(k,iCell),tracer_cur(k-1,iCell))
+ tracer_min(k,iCell) = min(tracer_cur(k,iCell),tracer_cur(k-1,iCell))
+
+ ! pull tracer_min and tracer_max from the (horizontal) surrounding cells
+ do i = 1, nEdgesOnCell(iCell)
+ do k=1, maxLevelCell(cellsOnCell(i, iCell))
+ tracer_max(k,iCell) = max(tracer_max(k,iCell),tracer_cur(k, cellsOnCell(i,iCell)))
+ tracer_min(k,iCell) = min(tracer_min(k,iCell),tracer_cur(k, cellsOnCell(i,iCell)))
+ end do ! k loop
+ end do ! i loop over nEdgesOnCell
+ end do ! iCell Loop
+
+ ! Compute the high order horizontal flux
+ do iEdge = 1, nEdges
+ do i = 1, nAdvCellsForEdge(iEdge)
+ iCell = advCellsForEdge(i,iEdge)
+ do k = 1, maxLevelCell(iCell)
+ tracer_weight = lowOrderAdvectionMask(k, iEdge) * adv_coefs_2nd(i,iEdge) &
+ + highOrderAdvectionMask(k, iEdge) * (adv_coefs(i,iEdge) + coef_3rd_order*sign(1.,uh(k,iEdge))*adv_coefs_3rd(i,iEdge))
+
+ tracer_weight = uh(k,iEdge)*tracer_weight
+ high_order_horiz_flux(k,iEdge) = high_order_horiz_flux(k,iEdge) + tracer_weight* tracer_cur(k,iCell)
+ end do ! k loop
+ end do ! i loop over nAdvCellsForEdge
+ end do ! iEdge loop
+
+ ! low order upwind vertical flux (monotonic and diffused)
+ ! Remove low order flux from the high order flux.
+ ! Store left over high order flux in high_order_vert_flux array.
+ ! Upwind fluxes are accumulated in upwind_tendency
+ do iCell = 1, nCells
+ do k = 2, maxLevelCell(iCell)
+ ! dwj 02/03/12 Ocean and Atmosphere are different in vertical
+! flux_upwind = max(0.,w(k,iCell))*tracer_cur(k-1,iCell) + min(0.,w(k,iCell))*tracer_cur(k,iCell)
+ flux_upwind = min(0.,w(k,iCell))*tracer_cur(k-1,iCell) + max(0.,w(k,iCell))*tracer_cur(k,iCell)
+ upwind_tendency(k-1,iCell) = upwind_tendency(k-1,iCell) + flux_upwind
+ upwind_tendency(k ,iCell) = upwind_tendency(k ,iCell) - flux_upwind
+ high_order_vert_flux(k,iCell) = high_order_vert_flux(k,iCell) - flux_upwind
+ end do ! k loop
+
+ ! flux_incoming contains the total remaining high order flux into iCell
+ ! it is positive.
+ ! flux_outgoing contains the total remaining high order flux out of iCell
+ ! it is negative
+ do k = 1, maxLevelCell(iCell)
+ ! dwj 02/03/12 Ocean and Atmosphere are different in vertical
+! flux_incoming (k,iCell) = -(min(0.,high_order_vert_flux(k+1,iCell))-max(0.,high_order_vert_flux(k,iCell)))
+! flux_outgoing(k,iCell) = -(max(0.,high_order_vert_flux(k+1,iCell))-min(0.,high_order_vert_flux(k,iCell)))
+
+ flux_incoming (k, iCell) = max(0.0, high_order_vert_flux(k+1, iCell)) - min(0.0, high_order_vert_flux(k, iCell))
+ flux_outgoing(k, iCell) = min(0.0, high_order_vert_flux(k+1, iCell)) - max(0.0, high_order_vert_flux(k, iCell))
+ end do ! k Loop
+ end do ! iCell Loop
+
+ ! low order upwind horizontal flux (monotonic and diffused)
+ ! Remove low order flux from the high order flux
+ ! Store left over high order flux in high_order_horiz_flux array
+ ! Upwind fluxes are accumulated in upwind_tendency
+ do iEdge = 1, nEdges
+ cell1 = cellsOnEdge(1,iEdge)
+ cell2 = cellsOnEdge(2,iEdge)
+
+ invAreaCell1 = 1.0 / areaCell(cell1)
+ invAreaCell2 = 1.0 / areaCell(cell2)
+
+ do k = 1, maxLevelEdgeTop(iEdge)
+ flux_upwind = dvEdge(iEdge) * (max(0.,uh(k,iEdge))*tracer_cur(k,cell1) + min(0.,uh(k,iEdge))*tracer_cur(k,cell2))
+ high_order_horiz_flux(k,iEdge) = high_order_horiz_flux(k,iEdge) - flux_upwind
+
+ upwind_tendency(k,cell1) = upwind_tendency(k,cell1) - flux_upwind * invAreaCell1
+ upwind_tendency(k,cell2) = upwind_tendency(k,cell2) + flux_upwind * invAreaCell2
+
+ ! Accumulate remaining high order fluxes
+ flux_outgoing(k,cell1) = flux_outgoing(k,cell1) - max(0.,high_order_horiz_flux(k,iEdge)) * invAreaCell1
+ flux_incoming (k,cell1) = flux_incoming (k,cell1) - min(0.,high_order_horiz_flux(k,iEdge)) * invAreaCell1
+ flux_outgoing(k,cell2) = flux_outgoing(k,cell2) + min(0.,high_order_horiz_flux(k,iEdge)) * invAreaCell2
+ flux_incoming (k,cell2) = flux_incoming (k,cell2) + max(0.,high_order_horiz_flux(k,iEdge)) * invAreaCell2
+ end do ! k loop
+ end do ! iEdge loop
+
+ ! Build the factors for the FCT
+ ! Computed using the bounds that were computed previously, and the bounds on the newly updated value
+ ! Factors are placed in the flux_incoming and flux_outgoing arrays
+ do iCell = 1, nCells
+ do k = 1, maxLevelCell(iCell)
+ tracer_min_new = (tracer_cur(k,iCell)*h(k,iCell) + dt*(upwind_tendency(k,iCell)+flux_outgoing(k,iCell))) * inv_h_new(k,iCell)
+ tracer_max_new = (tracer_cur(k,iCell)*h(k,iCell) + dt*(upwind_tendency(k,iCell)+flux_incoming(k,iCell))) * inv_h_new(k,iCell)
+ tracer_upwind_new = (tracer_cur(k,iCell)*h(k,iCell) + dt*upwind_tendency(k,iCell)) * inv_h_new(k,iCell)
+
+ scale_factor = (tracer_max(k,iCell)-tracer_upwind_new)/(tracer_max_new-tracer_upwind_new+eps)
+ flux_incoming(k,iCell) = min( 1.0, max( 0.0, scale_factor) )
+
+ scale_factor = (tracer_upwind_new-tracer_min(k,iCell))/(tracer_upwind_new-tracer_min_new+eps)
+ flux_outgoing(k,iCell) = min( 1.0, max( 0.0, scale_factor) )
+ end do ! k loop
+ end do ! iCell loop
+
+ ! rescale the high order horizontal fluxes
+ do iEdge = 1, nEdges
+ cell1 = cellsOnEdge(1,iEdge)
+ cell2 = cellsOnEdge(2,iEdge)
+ do k = 1, maxLevelEdgeTop(iEdge)
+ flux = high_order_horiz_flux(k,iEdge)
+ flux = max(0.,flux) * min(flux_outgoing(k,cell1), flux_incoming(k,cell2)) &
+ + min(0.,flux) * min(flux_incoming(k,cell1), flux_outgoing(k,cell2))
+ high_order_horiz_flux(k,iEdge) = flux
+ end do ! k loop
+ end do ! iEdge loop
+
+ ! rescale the high order vertical flux
+ do iCell = 1, nCellsSolve
+ do k = 2, maxLevelCell(iCell)
+ flux = high_order_vert_flux(k,iCell)
+ ! dwj 02/03/12 Ocean and Atmosphere are different in vertical.
+! flux = max(0.,flux) * min(flux_outgoing(k-1,iCell), flux_incoming(k ,iCell)) &
+! + min(0.,flux) * min(flux_outgoing(k ,iCell), flux_incoming(k-1,iCell))
+ flux = max(0.,flux) * min(flux_outgoing(k ,iCell), flux_incoming(k-1,iCell)) &
+ + min(0.,flux) * min(flux_outgoing(k-1,iCell), flux_incoming(k ,iCell))
+ high_order_vert_flux(k,iCell) = flux
+ end do ! k loop
+ end do ! iCell loop
+
+ ! Accumulate the scaled high order horizontal tendencies
+ do iEdge = 1, nEdges
+ cell1 = cellsOnEdge(1,iEdge)
+ cell2 = cellsOnEdge(2,iEdge)
+
+ invAreaCell1 = 1.0 / areaCell(cell1)
+ invAreaCell2 = 1.0 / areaCell(cell2)
+ do k=1,maxLevelEdgeTop(iEdge)
+ tend(iTracer, k, cell1) = tend(iTracer, k, cell1) - high_order_horiz_flux(k, iEdge) * invAreaCell1
+ tend(iTracer, k, cell2) = tend(iTracer, k, cell2) + high_order_horiz_flux(k, iEdge) * invAreaCell2
+
+ if (config_check_monotonicity) then
+ !tracer_new holds a tendency for now.
+ tracer_new(k, cell1) = tracer_new(k, cell1) - high_order_horiz_flux(k, iEdge) * invAreaCell1
+ tracer_new(k, cell2) = tracer_new(k, cell2) + high_order_horiz_flux(k, iEdge) * invAreaCell2
+ end if
+ end do ! k loop
+ end do ! iEdge loop
+
+ ! Accumulate the scaled high order vertical tendencies, and the upwind tendencies
+ do iCell = 1, nCellsSolve
+ do k = 1,maxLevelCell(iCell)
+ tend(iTracer, k, iCell) = tend(iTracer, k, iCell) + (high_order_vert_flux(k+1, iCell) - high_order_vert_flux(k, iCell)) + upwind_tendency(k,iCell)
+
+ if (config_check_monotonicity) then
+ !tracer_new holds a tendency for now. Only for a check on monotonicity
+ tracer_new(k, iCell) = tracer_new(k, iCell) + (high_order_vert_flux(k+1, iCell) - high_order_vert_flux(k, iCell)) + upwind_tendency(k,iCell)
+
+ !tracer_new is now the new state of the tracer. Only for a check on monotonicity
+ tracer_new(k, iCell) = (tracer_cur(k, iCell)*h(k, iCell) + dt * tracer_new(k, iCell)) * inv_h_new(k, iCell)
+ end if
+ end do ! k loop
+ end do ! iCell loop
+
+ if (config_check_monotonicity) then
+ !build min and max bounds on old and new tracer for check on monotonicity.
+ do iCell = 1, nCellsSolve
+ do k = 1, maxLevelCell(iCell)
+ if(tracer_new(k,iCell) < tracer_min(k, iCell)-eps) then
+ write(*,*) 'Minimum out of bounds on tracer ', iTracer, tracer_min(k, iCell), tracer_new(k,iCell)
+ end if
+
+ if(tracer_new(k,iCell) > tracer_max(k,iCell)+eps) then
+ write(*,*) 'Maximum out of bounds on tracer ', iTracer, tracer_max(k, iCell), tracer_new(k,iCell)
+ end if
+ end do
+ end do
+ end if
+ end do ! iTracer loop
+
+ deallocate(tracer_new)
+ deallocate(tracer_cur)
+ deallocate(upwind_tendency)
+ deallocate(inv_h_new)
+ deallocate(tracer_max)
+ deallocate(tracer_min)
+ deallocate(flux_incoming)
+ deallocate(flux_outgoing)
+ deallocate(high_order_horiz_flux)
+ deallocate(high_order_vert_flux)
+ end subroutine mpas_ocn_tracer_advection_mono_tend!}}}
+
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+! routine mpas_ocn_tracer_advection_mono_init
+!
+!> \brief MPAS ocean initialize monotonic tracer advection tendency with FCT
+!> \author Doug Jacobsen
+!> \date 03/09/12
+!> \version SVN:$Id:$
+!> \details
+!> This routine initializes the monotonic tracer advection tendency using a FCT.
+!
+!-----------------------------------------------------------------------
+ subroutine mpas_ocn_tracer_advection_mono_init(err)!{{{
+ integer, intent(inout) :: err !< Input: Error Flags
+
+ integer :: err_tmp
+
+ err = 0
+
+ if ( config_horiz_tracer_adv_order == 3) then
+ coef_3rd_order = config_coef_3rd_order
+ else if(config_horiz_tracer_adv_order == 2 .or. config_horiz_tracer_adv_order == 4) then
+ coef_3rd_order = 0.0
+ end if
+
+ end subroutine mpas_ocn_tracer_advection_mono_init!}}}
+
+end module mpas_ocn_tracer_advection_mono
Copied: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection_std.F (from rev 1863, trunk/mpas/src/core_ocean/mpas_ocn_tracer_advection_std.F)
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection_std.F         (rev 0)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection_std.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -0,0 +1,100 @@
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+! ocn_tracer_advection_std
+!
+!> \brief MPAS ocean tracer advection driver (non-monotonic/fct)
+!> \author Doug Jacobsen
+!> \date 03/09/12
+!> \version SVN:$Id:$
+!> \details
+!> This module contains driver routine for tracer advection tendencies
+!> as well as the routines for setting up advection coefficients and
+!> initialization of the advection routines.
+!
+!-----------------------------------------------------------------------
+module mpas_ocn_tracer_advection_std
+
+ use mpas_kind_types
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_dmpar
+ use mpas_timer
+
+ use mpas_ocn_tracer_advection_std_hadv
+ use mpas_ocn_tracer_advection_std_vadv
+
+ implicit none
+ private
+ save
+
+ public :: mpas_ocn_tracer_advection_std_tend, &
+ mpas_ocn_tracer_advection_std_init
+
+ contains
+
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+! routine mpas_ocn_tracer_advection_std_tend
+!
+!> \brief MPAS ocean standard tracer advection tendency
+!> \author Doug Jacobsen
+!> \date 03/09/12
+!> \version SVN:$Id:$
+!> \details
+!> This routine is the driver routine for the standard computation of
+!> tracer advection tendencies.
+!
+!-----------------------------------------------------------------------
+ subroutine mpas_ocn_tracer_advection_std_tend(tracers, uh, w, verticalCellSize, grid, tend)!{{{
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !
+ ! Input: s - current model state
+ ! grid - grid metadata
+ !
+ ! Output: tend - computed scalar tendencies
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ real (kind=RKIND), dimension(:,:,:), intent(inout) :: tend !< Input/Output: Tracer tendency
+ real (kind=RKIND), dimension(:,:,:), intent(in) :: tracers !< Input: Tracer values
+ real (kind=RKIND), dimension(:,:), intent(in) :: uh !< Input: Thickness weighted horizontal velocity
+ real (kind=RKIND), dimension(:,:), intent(in) :: w !< Input: Vertical Velocity
+ real (kind=RKIND), dimension(:,:), intent(in) :: verticalCellSize !< Input: Distance between vertical interfaces of a cell
+ type (mesh_type), intent(in) :: grid !< Input: Grid information
+
+ call mpas_timer_start("tracer-hadv", .false.)
+ call mpas_ocn_tracer_advection_std_hadv_tend(tracers, uh, grid, tend)
+ call mpas_timer_stop("tracer-hadv")
+ call mpas_timer_start("tracer-vadv", .false.)
+ call mpas_ocn_tracer_advection_std_vadv_tend(tracers, w, verticalCellSize, grid, tend)
+ call mpas_timer_stop("tracer-vadv")
+
+ end subroutine mpas_ocn_tracer_advection_std_tend!}}}
+
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+! routine mpas_ocn_tracer_advection_std_init
+!
+!> \brief MPAS ocean standard tracer advection initialization
+!> \author Doug Jacobsen
+!> \date 03/09/12
+!> \version SVN:$Id:$
+!> \details
+!> This routine is the driver routine for the initialization of the standard
+!> tracer advection routines.
+!
+!-----------------------------------------------------------------------
+ subroutine mpas_ocn_tracer_advection_std_init(err)!{{{
+ integer, intent(inout) :: err !< Input: Error Flags
+
+ integer :: err_tmp
+
+ err = 0
+
+ call mpas_ocn_tracer_advection_std_hadv_init(err_tmp)
+ err = ior(err, err_tmp)
+ call mpas_ocn_tracer_advection_std_vadv_init(err_tmp)
+ err = ior(err, err_tmp)
+
+ end subroutine mpas_ocn_tracer_advection_std_init!}}}
+
+end module mpas_ocn_tracer_advection_std
Copied: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection_std_hadv.F (from rev 1863, trunk/mpas/src/core_ocean/mpas_ocn_tracer_advection_std_hadv.F)
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection_std_hadv.F         (rev 0)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection_std_hadv.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -0,0 +1,140 @@
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+! ocn_tracer_advection_std_hadv
+!
+!> \brief MPAS ocean standard horizontal tracer advection (non-monotonic/fct)
+!> \author Doug Jacobsen
+!> \date 03/09/12
+!> \version SVN:$Id:$
+!> \details
+!> This module contains routines for horizontal tracer advection tendencies
+!> and initialization of the horizontal advection routines.
+!
+!-----------------------------------------------------------------------
+module mpas_ocn_tracer_advection_std_hadv
+
+ use mpas_kind_types
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_dmpar
+
+ use mpas_ocn_tracer_advection_helpers
+
+ implicit none
+ private
+ save
+
+ public :: mpas_ocn_tracer_advection_std_hadv_tend, &
+ mpas_ocn_tracer_advection_std_hadv_init
+
+ real (kind=RKIND) :: coef_3rd_order
+
+ logical :: hadvOn
+
+ contains
+
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+! routine mpas_ocn_tracer_advection_std_hadv_tend
+!
+!> \brief MPAS ocean standard horizontal tracer advection tendency
+!> \author Doug Jacobsen
+!> \date 03/09/12
+!> \version SVN:$Id:$
+!> \details
+!> This routine computes the tendency for 3rd order horizontal advection of tracers.
+!
+!-----------------------------------------------------------------------
+ subroutine mpas_ocn_tracer_advection_std_hadv_tend(tracers, uh, grid, tend)!{{{
+ real (kind=RKIND), dimension(:,:,:), intent(inout) :: tend !< Input/output: Tracer tendency
+ real (kind=RKIND), dimension(:,:,:), intent(in) :: tracers !< Input: Tracer values
+ real (kind=RKIND), dimension(:,:), intent(in) :: uh !< Input: Thickness weighted horizontal velocity
+ type (mesh_type), intent(in) :: grid !< Input: Grid information
+
+ integer :: i, iCell, iEdge, k, iTracer, cell1, cell2
+ real (kind=RKIND) :: flux, tracer_weight
+
+ real (kind=RKIND), dimension(:), pointer :: areaCell
+ integer, dimension(:,:), pointer :: cellsOnEdge
+
+ integer, dimension(:,:), pointer :: advCellsForEdge, highOrderAdvectionMask, lowOrderAdvectionMask
+ integer, dimension(:), pointer :: nAdvCellsForEdge
+ real (kind=RKIND), dimension(:,:), pointer :: adv_coefs, adv_coefs_2nd, adv_coefs_3rd
+ real (kind=RKIND), dimension(:,:), allocatable :: flux_arr
+ integer :: nVertLevels, num_tracers
+
+ if (.not. hadvOn) return
+
+ cellsOnEdge => grid % cellsOnEdge % array
+ areaCell => grid % areaCell % array
+
+ nAdvCellsForEdge => grid % nAdvCellsForEdge % array
+ advCellsForEdge => grid % advCellsForEdge % array
+ adv_coefs => grid % adv_coefs % array
+ adv_coefs_2nd => grid % adv_coefs_2nd % array
+ adv_coefs_3rd => grid % adv_coefs_3rd % array
+ highOrderAdvectionMask => grid % highOrderAdvectionMask % array
+ lowOrderAdvectionMask => grid % lowOrderAdvectionMask % array
+
+ nVertLevels = grid % nVertLevels
+ num_tracers = size(tracers, dim=1)
+
+ allocate(flux_arr(num_tracers, grid % nVertLevels))
+
+ ! horizontal flux divergence, accumulate in tracer_tend
+ do iEdge=1,grid%nEdges
+ cell1 = cellsOnEdge(1,iEdge)
+ cell2 = cellsOnEdge(2,iEdge)
+ if (cell1 <= grid%nCellsSolve .or. cell2 <= grid%nCellsSolve) then ! only for owned cells
+ flux_arr(:,:) = 0.
+ do i=1,nAdvCellsForEdge(iEdge)
+ iCell = advCellsForEdge(i,iEdge)
+ do k=1,grid % nVertLevels
+ tracer_weight = lowOrderAdvectionMask(k, iEdge) * adv_coefs_2nd(i,iEdge) &
+ + highOrderAdvectionMask(k, iEdge) * (adv_coefs(i,iEdge) + coef_3rd_order*sign(1.,uh(k,iEdge))*adv_coefs_3rd(i,iEdge))
+ do iTracer=1,num_tracers
+ flux_arr(iTracer,k) = flux_arr(iTracer,k) + tracer_weight* tracers(iTracer,k,iCell)
+ end do
+ end do
+ end do
+
+ do k=1,grid % nVertLevels
+ do iTracer=1,num_tracers
+ tend(iTracer,k,cell1) = tend(iTracer,k,cell1) - uh(k,iEdge)*flux_arr(iTracer,k)/areaCell(cell1)
+ tend(iTracer,k,cell2) = tend(iTracer,k,cell2) + uh(k,iEdge)*flux_arr(iTracer,k)/areaCell(cell2)
+ end do
+ end do
+ end if
+ end do
+
+ deallocate(flux_arr)
+
+ end subroutine mpas_ocn_tracer_advection_std_hadv_tend!}}}
+
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+! routine mpas_ocn_tracer_advection_std_hadv_init
+!
+!> \brief MPAS ocean standard horizontal tracer advection initialization
+!> \author Doug Jacobsen
+!> \date 03/09/12
+!> \version SVN:$Id:$
+!> \details
+!> This routine initializes the 3rd order standard horizontal advection of tracers
+!
+!-----------------------------------------------------------------------
+ subroutine mpas_ocn_tracer_advection_std_hadv_init(err)!{{{
+ integer, intent(inout) :: err !< Input/Output: Error flag
+
+ err = 0
+
+ hadvOn =.true.
+
+ if ( config_horiz_tracer_adv_order == 3) then
+ coef_3rd_order = config_coef_3rd_order
+ else if ( config_horiz_tracer_adv_order == 2 .or. config_horiz_tracer_adv_order == 4) then
+ coef_3rd_order = 0.0
+ end if
+ end subroutine mpas_ocn_tracer_advection_std_hadv_init!}}}
+
+end module mpas_ocn_tracer_advection_std_hadv
Copied: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection_std_vadv.F (from rev 1863, trunk/mpas/src/core_ocean/mpas_ocn_tracer_advection_std_vadv.F)
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection_std_vadv.F         (rev 0)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection_std_vadv.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -0,0 +1,103 @@
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+! ocn_tracer_advection_std_vadv
+!
+!> \brief MPAS ocean vertical tracer advection driver (non-monotonic/fct)
+!> \author Doug Jacobsen
+!> \date 03/09/12
+!> \version SVN:$Id:$
+!> \details
+!> This module contains driver routines for vertical tracer advection tendencies
+!> and initialization of the advection routines.
+!
+!-----------------------------------------------------------------------
+module mpas_ocn_tracer_advection_std_vadv
+
+ use mpas_kind_types
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_dmpar
+
+ use mpas_ocn_tracer_advection_std_vadv2
+ use mpas_ocn_tracer_advection_std_vadv3
+ use mpas_ocn_tracer_advection_std_vadv4
+
+ implicit none
+ private
+ save
+
+ public :: mpas_ocn_tracer_advection_std_vadv_tend, &
+ mpas_ocn_tracer_advection_std_vadv_init
+
+ logical :: order2, order3, order4
+
+ contains
+
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+! routine mpas_ocn_tracer_advection_std_vadv_tend
+!
+!> \brief MPAS ocean standard vertical tracer advection tendency
+!> \author Doug Jacobsen
+!> \date 03/09/12
+!> \version SVN:$Id:$
+!> \details
+!> This routine is the driver routine for the standard computation of
+!> vertical tracer advection tendencies.
+!
+!-----------------------------------------------------------------------
+ subroutine mpas_ocn_tracer_advection_std_vadv_tend(tracers, w, verticalCellSize, grid, tend)!{{{
+
+ real (kind=RKIND), dimension(:,:,:), intent(inout) :: tend !< Input/Output: Tracer Tendency
+ real (kind=RKIND), dimension(:,:,:), intent(in) :: tracers !< Input: Tracer Values
+ real (kind=RKIND), dimension(:,:), intent(in) :: w !< Input: Vertical Velocity
+ real (kind=RKIND), dimension(:,:), intent(in) :: verticalCellSize !< Input: Distance between vertical interfaces of cell
+ type (mesh_type), intent(in) :: grid !< Input: Grid information
+
+ if(order2) then
+ call mpas_ocn_tracer_advection_std_vadv2_tend(tracers, w, verticalCellSize, grid, tend)
+ else if(order3) then
+ call mpas_ocn_tracer_advection_std_vadv3_tend(tracers, w, verticalCellSize, grid, tend)
+ else if(order4) then
+ call mpas_ocn_tracer_advection_std_vadv4_tend(tracers, w, verticalCellSize, grid, tend)
+ endif
+
+ end subroutine mpas_ocn_tracer_advection_std_vadv_tend!}}}
+
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+! routine mpas_ocn_tracer_advection_std_vadv_init
+!
+!> \brief MPAS ocean standard vertical tracer advection tendency
+!> \author Doug Jacobsen
+!> \date 03/09/12
+!> \version SVN:$Id:$
+!> \details
+!> This routine initializes the vertical tracer advection tendencies.
+!
+!-----------------------------------------------------------------------
+ subroutine mpas_ocn_tracer_advection_std_vadv_init(err)!{{{
+ integer, intent(inout) :: err !< Input/Output: Error flag
+
+ err = 0
+
+ order2 = .false.
+ order3 = .false.
+ order4 = .false.
+
+ if (config_vert_tracer_adv_order == 2) then
+ order2 = .true.
+ else if (config_vert_tracer_adv_order == 3) then
+ order3 = .true.
+ else if (config_vert_tracer_adv_order == 4) then
+ order4 = .true.
+ else
+ print *, 'invalid value for config_tracer_vadv_order'
+ print *, 'options are 2, 3, or 4'
+ err = 1
+ endif
+
+ end subroutine mpas_ocn_tracer_advection_std_vadv_init!}}}
+
+end module mpas_ocn_tracer_advection_std_vadv
+
Copied: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection_std_vadv2.F (from rev 1863, trunk/mpas/src/core_ocean/mpas_ocn_tracer_advection_std_vadv2.F)
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection_std_vadv2.F         (rev 0)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection_std_vadv2.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -0,0 +1,96 @@
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+! ocn_tracer_advection_std_vadv2
+!
+!> \brief MPAS ocean 2nd order vertical tracer advection driver (non-monotonic/fct)
+!> \author Doug Jacobsen
+!> \date 03/09/12
+!> \version SVN:$Id:$
+!> \details
+!> This module contains routines for 2nd order vertical tracer advection tendencies.
+!
+!-----------------------------------------------------------------------
+module mpas_ocn_tracer_advection_std_vadv2
+
+ use mpas_kind_types
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_dmpar
+
+ use mpas_ocn_tracer_advection_helpers
+
+ implicit none
+ private
+ save
+
+ public :: mpas_ocn_tracer_advection_std_vadv2_tend
+
+ contains
+
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+! routine mpas_ocn_tracer_advection_std_vadv2_tend
+!
+!> \brief MPAS ocean 2nd order standard vertical tracer advection tendency
+!> \author Doug Jacobsen
+!> \date 03/09/12
+!> \version SVN:$Id:$
+!> \details
+!> This routine is the driver routine for the 2nd order standard computation of
+!> vertical tracer advection tendencies.
+!
+!-----------------------------------------------------------------------
+ subroutine mpas_ocn_tracer_advection_std_vadv2_tend(tracers, w, verticalCellSize, grid, tend)!{{{
+ real (kind=RKIND), dimension(:,:,:), intent(inout) :: tend !< Input/Output: tracer tendency
+ real (kind=RKIND), dimension(:,:,:), intent(in) :: tracers !< Input: Tracer values
+ real (kind=RKIND), dimension(:,:), intent(in) :: w !< Input: Vertical Velocity
+ real (kind=RKIND), dimension(:,:), intent(in) :: verticalCellSize !< Input: Distance between vertical interfaces of cell
+ type (mesh_type), intent(in) :: grid !< Input: Grid information
+
+ integer :: i, iCell, iEdge, k, iTracer, cell1, cell2
+ real (kind=RKIND) :: flux, tracer_edge, tracer_weight
+ real (kind=RKIND) :: tracer_weight_cell1, tracer_weight_cell2
+
+
+ real (kind=RKIND), dimension(:,:), allocatable :: vert_flux
+ real (kind=RKIND) :: weightK, weightKm1
+ integer :: nVertLevels, num_tracers
+ integer, dimension(:), pointer :: maxLevelCell
+
+ nVertLevels = grid % nVertLevels
+ num_tracers = size(tracers, dim=1)
+ maxLevelCell => grid % maxLevelCell % array
+
+ allocate(vert_flux(num_tracers, nVertLevels+1))
+
+ !
+ ! vertical flux divergence
+ !
+
+ ! zero fluxes at top and bottom
+
+ vert_flux(:,1) = 0.
+
+ do iCell=1,grid % nCellsSolve
+ do k = 2, maxLevelCell(iCell)
+ do iTracer=1,num_tracers
+ weightK = verticalCellSize(k-1, iCell) / (verticalCellSize(k-1, iCell) + verticalCellSize(k, iCell))
+ weightKm1 = verticalCellSize(k, iCell) / (verticalCellSize(k-1, iCell) + verticalCellSize(k, iCell))
+ vert_flux(iTracer,k) = w(k,iCell)*(weightK*tracers(iTracer,k,iCell)+weightKm1*tracers(iTracer,k-1,iCell))
+ end do
+ end do
+
+ vert_flux(:,maxLevelCell(iCell)+1) = 0
+
+ do k=1,maxLevelCell(iCell)
+ do iTracer=1,num_tracers
+ tend(iTracer, k, iCell) = tend(iTracer, k, iCell) + ( vert_flux(iTracer, k+1) - vert_flux(iTracer, k))
+ end do
+ end do
+ end do
+
+ deallocate(vert_flux)
+
+ end subroutine mpas_ocn_tracer_advection_std_vadv2_tend!}}}
+
+end module mpas_ocn_tracer_advection_std_vadv2
Copied: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection_std_vadv3.F (from rev 1863, trunk/mpas/src/core_ocean/mpas_ocn_tracer_advection_std_vadv3.F)
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection_std_vadv3.F         (rev 0)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection_std_vadv3.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -0,0 +1,106 @@
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+! ocn_tracer_advection_std_vadv3
+!
+!> \brief MPAS ocean 3rd order vertical tracer advection driver (non-monotonic/fct)
+!> \author Doug Jacobsen
+!> \date 03/09/12
+!> \version SVN:$Id:$
+!> \details
+!> This module contains routines for 3rd order vertical tracer advection tendencies.
+!
+!-----------------------------------------------------------------------
+module mpas_ocn_tracer_advection_std_vadv3
+
+ use mpas_kind_types
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_dmpar
+
+ use mpas_ocn_tracer_advection_helpers
+
+ implicit none
+ private
+ save
+
+ public :: mpas_ocn_tracer_advection_std_vadv3_tend
+
+ real (kind=RKIND) :: coef_3rd_order
+
+ contains
+
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+! routine mpas_ocn_tracer_advection_std_vadv3_tend
+!
+!> \brief MPAS ocean 3rd order standard vertical tracer advection tendency
+!> \author Doug Jacobsen
+!> \date 03/09/12
+!> \version SVN:$Id:$
+!> \details
+!> This routine computes the 3rd order vertical tracer advection tendencies.
+!
+!-----------------------------------------------------------------------
+ subroutine mpas_ocn_tracer_advection_std_vadv3_tend(tracers, w, verticalCellSize, grid, tend)!{{{
+ real (kind=RKIND), dimension(:,:,:), intent(inout) :: tend !< Input/Output: Tracer Tendency
+ real (kind=RKIND), dimension(:,:,:), intent(in) :: tracers !< Input: Tracer Values
+ real (kind=RKIND), dimension(:,:), intent(in) :: w !< Input: Vertical Velocity
+ real (kind=RKIND), dimension(:,:), intent(in) :: verticalCellSize !< Input: Distance between vertical interfaces of cell
+ type (mesh_type), intent(in) :: grid !< Input: Grid information
+
+ integer :: i, iCell, iEdge, k, iTracer, cell1, cell2
+
+ real (kind=RKIND), dimension(:,:), allocatable :: vert_flux
+ real (kind=RKIND) :: weightK, weightKm1
+ integer :: nVertLevels, num_tracers
+ integer, dimension(:), pointer :: maxLevelCell
+
+ coef_3rd_order = config_coef_3rd_order
+
+ nVertLevels = grid % nVertLevels
+ num_tracers = size(tracers, dim=1)
+ maxLevelCell => grid % maxLevelCell % array
+
+ allocate(vert_flux(num_tracers, nVertLevels+1))
+
+ vert_flux(:,1) = 0.
+
+ do iCell=1,grid % nCellsSolve
+
+ k = 2
+ do iTracer=1,num_tracers
+ weightK = verticalCellSize(k-1, iCell) / (verticalCellSize(k-1, iCell) + verticalCellSize(k, iCell))
+ weightKm1 = verticalCellSize(k, iCell) / (verticalCellSize(k-1, iCell) + verticalCellSize(k, iCell))
+ vert_flux(iTracer,k) = w(k,iCell)*(weightK*tracers(iTracer,k,iCell)+weightKm1*tracers(iTracer,k-1,iCell))
+ enddo
+
+ do k=3,maxLevelCell(iCell)-1
+ do iTracer=1,num_tracers
+ vert_flux(iTracer,k) = mpas_ocn_tracer_advection_vflux3( tracers(iTracer,k-2,iCell),tracers(iTracer,k-1,iCell), &
+ tracers(iTracer,k ,iCell),tracers(iTracer,k+1,iCell), &
+ w(k,iCell), coef_3rd_order )
+ end do
+ end do
+
+ k = maxLevelCell(iCell)
+
+ do iTracer=1,num_tracers
+ weightK = verticalCellSize(k-1, iCell) / (verticalCellSize(k-1, iCell) + verticalCellSize(k, iCell))
+ weightKm1 = verticalCellSize(k, iCell) / (verticalCellSize(k-1, iCell) + verticalCellSize(k, iCell))
+ vert_flux(iTracer,k) = w(k,iCell)*(weightK*tracers(iTracer,k,iCell)+weightKm1*tracers(iTracer,k-1,iCell))
+ enddo
+
+ vert_Flux(:, maxLevelCell(iCell)+1) = 0.0
+
+ do k=1,maxLevelCell(iCell)
+ do iTracer=1,num_tracers
+ tend(iTracer, k, iCell) = tend(iTracer, k, iCell) + (vert_flux(iTracer, k+1) - vert_flux(iTracer, k))
+ end do
+ end do
+ end do
+
+ deallocate(vert_flux)
+
+ end subroutine mpas_ocn_tracer_advection_std_vadv3_tend!}}}
+
+end module mpas_ocn_tracer_advection_std_vadv3
Copied: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection_std_vadv4.F (from rev 1863, trunk/mpas/src/core_ocean/mpas_ocn_tracer_advection_std_vadv4.F)
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection_std_vadv4.F         (rev 0)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection_std_vadv4.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -0,0 +1,105 @@
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+! ocn_tracer_advection_std_vadv4
+!
+!> \brief MPAS ocean 4th order vertical tracer advection driver (non-monotonic/fct)
+!> \author Doug Jacobsen
+!> \date 03/09/12
+!> \version SVN:$Id:$
+!> \details
+!> This module contains routines for 4th order vertical tracer advection.
+!
+!-----------------------------------------------------------------------
+module mpas_ocn_tracer_advection_std_vadv4
+
+ use mpas_kind_types
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_dmpar
+
+ use mpas_ocn_tracer_advection_helpers
+
+ implicit none
+ private
+ save
+
+ public :: mpas_ocn_tracer_advection_std_vadv4_tend
+
+ contains
+
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+! routine mpas_ocn_tracer_advection_std_vadv4_tend
+!
+!> \brief MPAS ocean 4th order standard vertical tracer advection tendency
+!> \author Doug Jacobsen
+!> \date 03/09/12
+!> \version SVN:$Id:$
+!> \details
+!> This routine computes the 4th order vertical tracer advection tendencies.
+!
+!-----------------------------------------------------------------------
+ subroutine mpas_ocn_tracer_advection_std_vadv4_tend(tracers, w, verticalCellSize, grid, tend)!{{{
+ real (kind=RKIND), dimension(:,:,:), intent(inout) :: tend !< Input/Output: Tracer tendency
+ real (kind=RKIND), dimension(:,:,:), intent(in) :: tracers !< Input: Tracer Values
+ real (kind=RKIND), dimension(:,:), intent(in) :: w !< Input: Vertical Velocity
+ real (kind=RKIND), dimension(:,:), intent(in) :: verticalCellSize !< Input: Distance between vertical interfaces of cell
+ type (mesh_type), intent(in) :: grid !< Input: Grid information
+
+ integer :: i, iCell, iEdge, k, iTracer, cell1, cell2
+
+ real (kind=RKIND), dimension(:,:), allocatable :: vert_flux
+ real (kind=RKIND) :: weightK, weightKm1
+ integer :: nVertLevels, num_tracers
+ integer, dimension(:), pointer :: maxLevelCell
+
+ nVertLevels = grid % nVertLevels
+ num_tracers = size(tracers, dim=1)
+ maxLevelCell => grid % maxLevelCell % array
+
+ allocate(vert_flux(num_tracers, nVertLevels+1))
+
+ ! vertical flux divergence
+ !
+
+ ! zero fluxes at top and bottom
+
+ vert_flux(:,1) = 0.
+
+ do iCell=1,grid % nCellsSolve
+
+ k = 2
+ do iTracer=1,num_tracers
+ weightK = verticalCellSize(k-1, iCell) / (verticalCellSize(k-1, iCell) + verticalCellSize(k, iCell))
+ weightKm1 = verticalCellSize(k, iCell) / (verticalCellSize(k-1, iCell) + verticalCellSize(k, iCell))
+ vert_flux(iTracer,k) = w(k,iCell)*(weightK*tracers(iTracer,k,iCell)+weightKm1*tracers(iTracer,k-1,iCell))
+ enddo
+ do k=3,nVertLevels-1
+ do iTracer=1,num_tracers
+ vert_flux(iTracer,k) = mpas_ocn_tracer_advection_vflux4( tracers(iTracer,k-2,iCell),tracers(iTracer,k-1,iCell), &
+ tracers(iTracer,k ,iCell),tracers(iTracer,k+1,iCell), w(k,iCell) )
+ end do
+ end do
+
+ k = maxLevelCell(iCell)
+ do iTracer=1,num_tracers
+ weightK = verticalCellSize(k-1, iCell) / (verticalCellSize(k-1, iCell) + verticalCellSize(k, iCell))
+ weightKm1 = verticalCellSize(k, iCell) / (verticalCellSize(k-1, iCell) + verticalCellSize(k, iCell))
+ vert_flux(iTracer,k) = w(k,iCell)*(weightK*tracers(iTracer,k,iCell)+weightKm1*tracers(iTracer,k-1,iCell))
+ enddo
+
+ vert_flux(:,maxLevelCell(iCell)+1) = 0.0
+
+ do k=1,maxLevelCell(iCell)
+ do iTracer=1,num_tracers
+ tend(iTracer, k, iCell) = tend(iTracer, k, iCell) + (vert_flux(iTracer, k+1) - vert_flux(iTracer, k))
+ end do
+ end do
+
+ end do
+
+ deallocate(vert_flux)
+
+ end subroutine mpas_ocn_tracer_advection_std_vadv4_tend!}}}
+
+end module mpas_ocn_tracer_advection_std_vadv4
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hadv2.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hadv2.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hadv2.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -182,7 +182,7 @@
err = 0
hadv2On = .false.
- if (config_tracer_adv_order == 2) then
+ if (config_horiz_tracer_adv_order == 2) then
hadv2On = .true.
end if
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hadv3.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hadv3.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hadv3.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -221,11 +221,10 @@
err = 0
hadv3On = .false.
- if (config_tracer_adv_order == 3) then
+ if (config_horiz_tracer_adv_order == 3) then
hadv3On = .true.
- coef_3rd_order = 1.0
- if (config_monotonic) coef_3rd_order = 0.25
+ coef_3rd_order = config_coef_3rd_order
end if
!--------------------------------------------------------------------
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hadv4.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hadv4.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hadv4.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -213,7 +213,7 @@
err = 0
hadv4On = .false.
- if (config_tracer_adv_order == 4) then
+ if (config_horiz_tracer_adv_order == 4) then
hadv4On = .true.
end if
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_vel_coriolis.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_vel_coriolis.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_vel_coriolis.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -62,7 +62,7 @@
!
!-----------------------------------------------------------------------
- subroutine ocn_vel_coriolis_tend(grid, pv_edge, h_edge, u, ke, tend, err)!{{{
+ subroutine ocn_vel_coriolis_tend(grid, Vor_edge, h_edge, u, ke, tend, err)!{{{
!-----------------------------------------------------------------
!
@@ -71,7 +71,7 @@
!-----------------------------------------------------------------
real (kind=RKIND), dimension(:,:), intent(in) :: &
- pv_edge !< Input: Potential vorticity on edge
+ Vor_edge !< Input: Potential vorticity on edge
real (kind=RKIND), dimension(:,:), intent(in) :: &
h_edge !< Input: Thickness on edge
real (kind=RKIND), dimension(:,:), intent(in) :: &
@@ -138,7 +138,7 @@
q = 0.0
do j = 1,nEdgesOnEdge(iEdge)
eoe = edgesOnEdge(j,iEdge)
- workpv = 0.5 * (pv_edge(k,iEdge) + pv_edge(k,eoe))
+ workpv = 0.5 * (Vor_edge(k,iEdge) + Vor_edge(k,eoe))
q = q + weightsOnEdge(j,iEdge) * u(k,eoe) * workpv * h_edge(k,eoe)
end do
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_vel_hmix_del4.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_vel_hmix_del4.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_vel_hmix_del4.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -175,7 +175,7 @@
do k=1,maxLevelEdgeTop(iEdge)
! Compute \nabla^2 u = \nabla divergence + k \times \nabla vorticity
delsq_u = ( divergence(k,cell2) - divergence(k,cell1) ) * invDcEdge &
- -viscVortCoef *( vorticity(k,vertex2) - vorticity(k,vertex1)) * invDvEdge
+ -viscVortCoef *( vorticity(k,vertex2) - vorticity(k,vertex1)) * invDcEdge * sqrt(3.0) ! TDR
! vorticity using \nabla^2 u
r_tmp = dcEdge(iEdge) * delsq_u
@@ -203,7 +203,7 @@
do k=1,maxLevelEdgeTop(iEdge)
u_diffusion = (delsq_divergence(k,cell2) - delsq_divergence(k,cell1)) * invDcEdge &
- -viscVortCoef * (delsq_vorticity(k,vertex2) - delsq_vorticity(k,vertex1) ) * invDvEdge
+ -viscVortCoef * (delsq_vorticity(k,vertex2) - delsq_vorticity(k,vertex1) ) * invDcEdge * sqrt(3.0) ! TDR
tend(k,iEdge) = tend(k,iEdge) - edgeMask(k, iEdge) * u_diffusion * r_tmp
end do
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_vmix_coefs_rich.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_vmix_coefs_rich.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_vmix_coefs_rich.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -142,8 +142,8 @@
tracers => s % tracers % array
call mpas_timer_start("eos rich", .false., richEOSTimer)
- call ocn_equation_of_state_rho(s, grid, 0, 'relative', err)
- call ocn_equation_of_state_rho(s, grid, 1, 'relative', err)
+ call ocn_equation_of_state_rho(s, grid, 0,'relative', err)
+ call ocn_equation_of_state_rho(s, grid, 1,'relative', err)
call mpas_timer_stop("eos rich", richEOSTimer)
call ocn_vmix_get_rich_numbers(grid, indexT, indexS, u, h, h_edge, &
Modified: branches/atmos_physics/src/core_sw/Registry
===================================================================
--- branches/atmos_physics/src/core_sw/Registry        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_sw/Registry        2012-05-03 21:04:07 UTC (rev 1864)
@@ -4,7 +4,7 @@
namelist integer sw_model config_test_case 5
namelist character sw_model config_time_integration RK4
namelist real sw_model config_dt 172.8
-namelist integer sw_model config_calendar_type MPAS_360DAY
+namelist character sw_model config_calendar_type 360day
namelist character sw_model config_start_time 0000-01-01_00:00:00
namelist character sw_model config_stop_time none
namelist character sw_model config_run_duration none
@@ -26,7 +26,12 @@
namelist character io config_restart_name restart.nc
namelist character io config_output_interval 06:00:00
namelist integer io config_frames_per_outfile 0
-namelist character io config_decomp_file_prefix graph.info.part.
+namelist integer io config_pio_num_iotasks 0
+namelist integer io config_pio_stride 1
+namelist character decomposition config_block_decomp_file_prefix graph.info.part.
+namelist integer decomposition config_number_of_blocks 0
+namelist logical decomposition config_explicit_proc_decomp .false.
+namelist character decomposition config_proc_decomp_file_prefix graph.info.part.
namelist logical restart config_do_restart false
namelist character restart config_restart_interval none
Modified: branches/atmos_physics/src/core_sw/mpas_sw_mpas_core.F
===================================================================
--- branches/atmos_physics/src/core_sw/mpas_sw_mpas_core.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_sw/mpas_sw_mpas_core.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -3,7 +3,7 @@
use mpas_framework
use mpas_timekeeping
- type (io_output_object) :: restart_obj
+ type (io_output_object), save :: restart_obj
integer :: current_outfile_frames
type (MPAS_Clock_type) :: clock
@@ -143,6 +143,7 @@
subroutine mpas_core_run(domain, output_obj, output_frame)
use mpas_grid_types
+ use mpas_kind_types
use mpas_io_output
use mpas_timer
@@ -157,7 +158,7 @@
type (block_type), pointer :: block_ptr
type (MPAS_Time_Type) :: currTime
- character(len=32) :: timeStamp
+ character(len=StrKIND) :: timeStamp
integer :: ierr
! Eventually, dt should be domain specific
@@ -165,7 +166,7 @@
currTime = mpas_get_clock_time(clock, MPAS_NOW, ierr)
call mpas_get_time(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr)
- write(0,*) 'Initial timestep ', timeStamp
+ write(0,*) 'Initial timestep ', trim(timeStamp)
call write_output_frame(output_obj, output_frame, domain)
@@ -179,7 +180,7 @@
currTime = mpas_get_clock_time(clock, MPAS_NOW, ierr)
call mpas_get_time(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr)
- write(0,*) 'Doing timestep ', timeStamp
+ write(0,*) 'Doing timestep ', trim(timeStamp)
call mpas_timer_start("time integration")
call mpas_timestep(domain, itimestep, dt, timeStamp)
Modified: branches/atmos_physics/src/core_sw/mpas_sw_time_integration.F
===================================================================
--- branches/atmos_physics/src/core_sw/mpas_sw_time_integration.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/core_sw/mpas_sw_time_integration.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -63,17 +63,21 @@
integer :: iCell, k
type (block_type), pointer :: block
- type (state_type) :: provis
+ type (state_type), target :: provis
+ type (state_type), pointer :: provis_ptr
integer :: rk_step
real (kind=RKIND), dimension(4) :: rk_weights, rk_substep_weights
block => domain % blocklist
- call mpas_allocate_state(provis, &
+ call mpas_allocate_state(block, provis, &
block % mesh % nCells, block % mesh % nEdges, block % mesh % maxEdges, block % mesh % maxEdges2, &
block % mesh % nVertices, block % mesh % vertexDegree, block % mesh % nVertLevels, &
block % mesh % nTracers)
+
+ provis_ptr => provis
+ call mpas_create_state_links(provis_ptr)
!
! Initialize time_levs(2) with state at current time
@@ -116,24 +120,13 @@
! --- update halos for diagnostic variables
- block => domain % blocklist
- do while (associated(block))
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, provis % pv_edge % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
+ call mpas_dmpar_exch_halo_field(provis % pv_edge)
- if (config_h_mom_eddy_visc4 > 0.0) then
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, provis % divergence % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, provis % vorticity % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nVertices, &
- block % parinfo % verticesToSend, block % parinfo % verticesToRecv)
- end if
+ if (config_h_mom_eddy_visc4 > 0.0) then
+ call mpas_dmpar_exch_halo_field(provis % divergence)
+ call mpas_dmpar_exch_halo_field(provis % vorticity)
+ end if
- block => block % next
- end do
-
! --- compute tendencies
block => domain % blocklist
@@ -146,19 +139,9 @@
! --- update halos for prognostic variables
- block => domain % blocklist
- do while (associated(block))
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % u % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % h % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field3d_real(domain % dminfo, block % tend % tracers % array(:,:,:), &
- block % mesh % nTracers, block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- block => block % next
- end do
+ call mpas_dmpar_exch_halo_field(domain % blocklist % tend % u)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % tend % h)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % tend % tracers)
! --- compute next substep state
Modified: branches/atmos_physics/src/driver/mpas_subdriver.F
===================================================================
--- branches/atmos_physics/src/driver/mpas_subdriver.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/driver/mpas_subdriver.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -2,10 +2,11 @@
use mpas_framework
use mpas_core
+ use mpas_kind_types
type (dm_info), pointer :: dminfo
type (domain_type), pointer :: domain
- type (io_output_object) :: output_obj
+ type (io_output_object), save :: output_obj
integer :: output_frame
@@ -17,7 +18,7 @@
implicit none
real (kind=RKIND) :: dt
- character(len=32) :: timeStamp
+ character(len=StrKIND) :: timeStamp
!
! Initialize infrastructure
Modified: branches/atmos_physics/src/external/Makefile
===================================================================
--- branches/atmos_physics/src/external/Makefile        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/external/Makefile        2012-05-03 21:04:07 UTC (rev 1864)
@@ -3,7 +3,7 @@
all: esmf_time
esmf_time:
-        ( cd esmf_time_f90; make FC="$(FC) $(FFLAGS)" CPP="$(CPP)" )
+        ( cd esmf_time_f90; $(MAKE) FC="$(FC) $(FFLAGS)" CPP="$(CPP)" )
clean:
-        ( cd esmf_time_f90; make clean )
+        ( cd esmf_time_f90; $(MAKE) clean )
Modified: branches/atmos_physics/src/external/esmf_time_f90/ESMF_Calendar.F90
===================================================================
--- branches/atmos_physics/src/external/esmf_time_f90/ESMF_Calendar.F90        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/external/esmf_time_f90/ESMF_Calendar.F90        2012-05-03 21:04:07 UTC (rev 1864)
@@ -51,14 +51,23 @@
INTEGER, PARAMETER :: MONTHS_PER_YEAR = 12
- INTEGER, PARAMETER :: mday(MONTHS_PER_YEAR) &
+
+ INTEGER, PARAMETER :: daysPerMonthNoLeap(MONTHS_PER_YEAR) &
= (/31,28,31,30,31,30,31,31,30,31,30,31/)
- INTEGER, PARAMETER :: mdayleap(MONTHS_PER_YEAR) &
+ INTEGER, PARAMETER :: daysPerMonthLeap(MONTHS_PER_YEAR) &
= (/31,29,31,30,31,30,31,31,30,31,30,31/)
- INTEGER, DIMENSION(365) :: daym
- INTEGER, DIMENSION(366) :: daymleap
+ INTEGER, PARAMETER :: daysPerMonth360(MONTHS_PER_YEAR) &
+ = (/30,30,30,30,30,30,30,30,30,30,30,30/)
+
+ INTEGER, DIMENSION(MONTHS_PER_YEAR) :: mday
+ INTEGER, DIMENSION(MONTHS_PER_YEAR) :: mdayleap
+
+ INTEGER, DIMENSION(:), POINTER :: daym
+ INTEGER, DIMENSION(:), POINTER :: daymleap
+
INTEGER :: mdaycum(0:MONTHS_PER_YEAR)
INTEGER :: mdayleapcum(0:MONTHS_PER_YEAR)
+
TYPE(ESMF_BaseTime), TARGET :: monthbdys(0:MONTHS_PER_YEAR)
TYPE(ESMF_BaseTime), TARGET :: monthbdysleap(0:MONTHS_PER_YEAR)
@@ -69,7 +78,6 @@
! ! F90 "enum" type to match C++ ESMC_CalendarType enum
type ESMF_CalendarType
- private
integer :: caltype
end type
@@ -152,7 +160,10 @@
!
! !PUBLIC MEMBER FUNCTIONS:
public ESMF_CalendarCreate
+ public ESMF_CalendarDestroy
+ public ESMF_GetCalendarType
+
! Required inherited and overridden ESMF_Base class methods
public ESMF_CalendarInitialized ! Only in this implementation, intended
@@ -165,6 +176,14 @@
!==============================================================================
+
+
+ type(ESMF_CalendarType) function ESMF_GetCalendarType()
+ ESMF_GetCalendarType = defaultCal % Type
+ end function ESMF_GetCalendarType
+
+
+!==============================================================================
!BOP
! !IROUTINE: ESMF_CalendarCreate - Create a new ESMF Calendar of built-in type
@@ -210,50 +229,61 @@
type(ESMF_DaysPerYear) :: dayspy
if ( present(rc) ) rc = ESMF_FAILURE
-! Calendar type is hard-coded. Use ESMF library if more flexibility is
-! needed.
-#ifdef NO_LEAP_CALENDAR
- if ( calendartype%caltype /= ESMF_CAL_NOLEAP%caltype ) then
+
+ if ( calendartype % caltype == ESMF_CAL_GREGORIAN % caltype ) then
+ ESMF_CalendarCreate % Type = ESMF_CAL_GREGORIAN
+ mday = daysPerMonthNoLeap
+         mdayleap = daysPerMonthLeap
+         allocate(daym(365))
+         allocate(daymleap(366))
+ else if ( calendartype % caltype == ESMF_CAL_NOLEAP % caltype ) then
+ ESMF_CalendarCreate % Type = ESMF_CAL_NOLEAP
+         mday = daysPerMonthNoLeap
+         mdayleap = daysPerMonthNoLeap
+         allocate(daym(365))
+         allocate(daymleap(365))
+ else if ( calendartype % caltype == ESMF_CAL_360DAY % caltype ) then
+ ESMF_CalendarCreate % Type = ESMF_CAL_360DAY
+ mday = daysPerMonth360
+         mdayleap = daysPerMonth360
+         allocate(daym(360))
+         allocate(daymleap(360))
+ else
write(6,*) 'Not a valid calendar type for this implementation'
- write(6,*) 'This implementation only allows ESMF_CAL_NOLEAP'
- write(6,*) 'calender type set to = ', calendartype%caltype
- write(6,*) 'NO_LEAP calendar type is = ', ESMF_CAL_NOLEAP%caltype
+ write(6,*) 'The current implementation only supports ESMF_CAL_NOLEAP, ESMF_CAL_GREGORIAN, ESMF_CAL_360DAY'
return
end if
- ESMF_CalendarCreate%Type = ESMF_CAL_NOLEAP
-#else
- if ( calendartype%caltype /= ESMF_CAL_GREGORIAN%caltype ) then
- write(6,*) 'Not a valid calendar type for this implementation'
- write(6,*) 'This implementation only allows ESMF_CAL_GREGORIAN'
- write(6,*) 'calender type set to = ', calendartype%caltype
- write(6,*) 'GREGORIAN calendar type is = ', ESMF_CAL_GREGORIAN%caltype
- return
- end if
- ESMF_CalendarCreate%Type = ESMF_CAL_GREGORIAN
-#endif
-! This is a bug on some systems -- need initial value set by compiler at
-! startup.
-! However, note that some older compilers do not support compile-time
-! initialization of data members of Fortran derived data types. For example,
-! PGI 5.x compilers do not support this F95 feature. See
-! NO_DT_COMPONENT_INIT.
- ESMF_CalendarCreate%Set = .true.
- ESMF_CalendarCreate%SecondsPerDay = SECONDS_PER_DAY
-! DaysPerYear and SecondsPerYear are incorrect for Gregorian calendars...
- dayspy%D = size(daym)
-!TBH: TODO: Replace DaysPerYear and SecondsPerYear with methods
-!TBH: TODO: since they only make sense for the NO_LEAP calendar!
- ESMF_CalendarCreate%DaysPerYear = dayspy
- ESMF_CalendarCreate%SecondsPerYear = ESMF_CalendarCreate%SecondsPerDay &
- * dayspy%D
-!TBH: TODO: use mdayleap for leap-year calendar
- ESMF_CalendarCreate%DaysPerMonth(:) = mday(:)
+ ESMF_CalendarCreate % Set = .true.
+ ESMF_CalendarCreate % DaysPerMonth(:) = mday(:)
+ ESMF_CalendarCreate % SecondsPerDay = SECONDS_PER_DAY
+
+!TBH: TODO: Replace DaysPerYear and SecondsPerYear with methods
+!TBH: TODO: since they only make sense for the NO_LEAP calendar!
+ dayspy % D = size(daym)
+ ESMF_CalendarCreate % DaysPerYear = dayspy
+ ESMF_CalendarCreate % SecondsPerYear = ESMF_CalendarCreate % SecondsPerDay * dayspy % D
+
if ( present(rc) ) rc = ESMF_SUCCESS
- end function ESMF_CalendarCreate
+ end function ESMF_CalendarCreate
+ subroutine ESMF_CalendarDestroy(rc)
+
+ integer, intent(out), optional :: rc
+
+ if ( present(rc) ) rc = ESMF_FAILURE
+
+ deallocate(daym)
+ deallocate(daymleap)
+
+ if ( present(rc) ) rc = ESMF_SUCCESS
+
+ end subroutine ESMF_CalendarDestroy
+
+
+
!==============================================================================
!BOP
! !IROUTINE: ESMF_CalendarInitialized - check if calendar was created
Modified: branches/atmos_physics/src/external/esmf_time_f90/ESMF_Stubs.F90
===================================================================
--- branches/atmos_physics/src/external/esmf_time_f90/ESMF_Stubs.F90        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/external/esmf_time_f90/ESMF_Stubs.F90        2012-05-03 21:04:07 UTC (rev 1864)
@@ -91,6 +91,8 @@
! NOOP
SUBROUTINE ESMF_Finalize( rc )
USE esmf_basemod
+ USE esmf_calendarmod
+
INTEGER, INTENT( OUT), OPTIONAL :: rc
#if (defined SPMD) || (defined COUP_CSM)
#include <mpif.h>
@@ -98,6 +100,8 @@
LOGICAL :: flag
INTEGER :: ier
+ CALL ESMF_CalendarDestroy()
+
IF ( PRESENT( rc ) ) rc = ESMF_SUCCESS
#if (defined SPMD) || (defined COUP_CSM)
CALL MPI_Finalized( flag, ier )
Modified: branches/atmos_physics/src/external/esmf_time_f90/Meat.F90
===================================================================
--- branches/atmos_physics/src/external/esmf_time_f90/Meat.F90        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/external/esmf_time_f90/Meat.F90        2012-05-03 21:04:07 UTC (rev 1864)
@@ -169,26 +169,34 @@
! added from share/module_date_time in WRF.
FUNCTION nfeb ( year ) RESULT (num_days)
+ USE ESMF_CalendarMod
+
! Compute the number of days in February for the given year
IMPLICIT NONE
INTEGER :: year
INTEGER :: num_days
-! TBH: TODO: Replace this hack with run-time decision based on
-! TBH: TODO: passed-in calendar.
-#ifdef NO_LEAP_CALENDAR
- num_days = 28 ! By default, February has 28 days ...
-#else
- num_days = 28 ! By default, February has 28 days ...
- IF (MOD(year,4).eq.0) THEN
- num_days = 29 ! But every four years, it has 29 days ...
- IF (MOD(year,100).eq.0) THEN
- num_days = 28 ! Except every 100 years, when it has 28 days ...
- IF (MOD(year,400).eq.0) THEN
- num_days = 29 ! Except every 400 years, when it has 29 days.
+
+ type(ESMF_CalendarType) :: calendarType
+
+ calendarType = ESMF_GetCalendarType()
+
+ IF (calendarType % caltype == ESMF_CAL_NOLEAP % caltype) then
+ num_days = 28
+ ELSE IF (calendarType % caltype == ESMF_CAL_360DAY % caltype) then
+ num_days = 30
+ ELSE
+ num_days = 28 ! By default, February has 28 days ...
+ IF (MOD(year,4).eq.0) THEN
+ num_days = 29 ! But every four years, it has 29 days ...
+ IF (MOD(year,100).eq.0) THEN
+ num_days = 28 ! Except every 100 years, when it has 28 days ...
+ IF (MOD(year,400).eq.0) THEN
+ num_days = 29 ! Except every 400 years, when it has 29 days.
+ END IF
END IF
END IF
END IF
-#endif
+
END FUNCTION nfeb
@@ -206,6 +214,8 @@
#else
IF ( nfeb( year ) .EQ. 29 ) THEN
num_diy = 366
+ ELSE IF ( nfeb( year ) .EQ. 30 ) THEN
+ num_diy = 360
ELSE
num_diy = 365
ENDIF
Modified: branches/atmos_physics/src/framework/Makefile
===================================================================
--- branches/atmos_physics/src/framework/Makefile        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/framework/Makefile        2012-05-03 21:04:07 UTC (rev 1864)
@@ -10,11 +10,15 @@
mpas_timekeeping.o \
mpas_configure.o \
mpas_constants.o \
+ mpas_dmpar_types.o \
+ mpas_attlist.o \
mpas_grid_types.o \
mpas_hash.o \
mpas_sort.o \
mpas_block_decomp.o \
mpas_dmpar.o \
+ mpas_io.o \
+ mpas_io_streams.o \
mpas_io_input.o \
mpas_io_output.o \
$(ZOLTANOBJ) \
@@ -25,16 +29,20 @@
framework: $(OBJS)
        ar -ru libframework.a $(OBJS)
-mpas_framework.o: mpas_dmpar.o mpas_io_input.o mpas_io_output.o mpas_grid_types.o mpas_configure.o mpas_timer.o
+mpas_framework.o: mpas_dmpar.o mpas_io_input.o mpas_io_output.o mpas_io.o mpas_grid_types.o mpas_configure.o mpas_timer.o
mpas_configure.o: mpas_dmpar.o
mpas_constants.o: mpas_kind_types.o
-mpas_grid_types.o: mpas_dmpar.o
+mpas_dmpar_types.o : mpas_kind_types.o
-mpas_dmpar.o: mpas_sort.o streams.o mpas_kind_types.o
+mpas_attlist.o: mpas_kind_types.o
+mpas_grid_types.o: mpas_kind_types.o mpas_dmpar_types.o mpas_attlist.o
+
+mpas_dmpar.o: mpas_sort.o streams.o mpas_kind_types.o mpas_grid_types.o
+
mpas_sort.o: mpas_kind_types.o
mpas_timekeeping.o: mpas_kind_types.o
@@ -43,10 +51,14 @@
mpas_block_decomp.o: mpas_grid_types.o mpas_hash.o mpas_configure.o
-mpas_io_input.o: mpas_grid_types.o mpas_dmpar.o mpas_block_decomp.o mpas_sort.o mpas_configure.o mpas_timekeeping.o $(ZOLTANOBJ)
+mpas_io.o: mpas_dmpar_types.o
-mpas_io_output.o: mpas_grid_types.o mpas_dmpar.o mpas_sort.o mpas_configure.o
+mpas_io_streams.o: mpas_attlist.o mpas_grid_types.o mpas_timekeeping.o mpas_io.o
+mpas_io_input.o: mpas_grid_types.o mpas_dmpar.o mpas_block_decomp.o mpas_sort.o mpas_configure.o mpas_timekeeping.o mpas_io_streams.o $(ZOLTANOBJ)
+
+mpas_io_output.o: mpas_grid_types.o mpas_dmpar.o mpas_sort.o mpas_configure.o mpas_io_streams.o
+
clean:
        $(RM) *.o *.mod *.f90 libframework.a
Copied: branches/atmos_physics/src/framework/add_field_indices.inc (from rev 1863, trunk/mpas/src/framework/add_field_indices.inc)
===================================================================
--- branches/atmos_physics/src/framework/add_field_indices.inc         (rev 0)
+++ branches/atmos_physics/src/framework/add_field_indices.inc        2012-05-03 21:04:07 UTC (rev 1864)
@@ -0,0 +1,48 @@
+ idim = ndims
+ totalDimSize = 0
+ field_ptr => field
+ if (trim(field % dimNames(idim)) == 'nCells') then
+!write(0,*) '... outer dimension is nCells'
+ allocate(indices(0))
+ do while (associated(field_ptr))
+ call mergeArrays(indices, field_ptr % block % mesh % indexToCellID % array(1:field_ptr % block % mesh % nCellsSolve))
+ totalDimSize = totalDimSize + field_ptr % block % mesh % nCellsSolve
+ field_ptr => field_ptr % next
+ end do
+ call mpas_dmpar_sum_int(field % block % domain % dminfo, totalDimSize, globalDimSize)
+ isDecomposed = .true.
+ else if (trim(field % dimNames(idim)) == 'nEdges') then
+!write(0,*) '... outer dimension is nEdges'
+ allocate(indices(0))
+ do while (associated(field_ptr))
+ call mergeArrays(indices, field_ptr % block % mesh % indexToEdgeID % array(1:field_ptr % block % mesh % nEdgesSolve))
+ totalDimSize = totalDimSize + field_ptr % block % mesh % nEdgesSolve
+ field_ptr => field_ptr % next
+ end do
+ call mpas_dmpar_sum_int(field % block % domain % dminfo, totalDimSize, globalDimSize)
+ isDecomposed = .true.
+ else if (trim(field % dimNames(idim)) == 'nVertices') then
+!write(0,*) '... outer dimension is nVertices'
+ allocate(indices(0))
+ do while (associated(field_ptr))
+ call mergeArrays(indices, field_ptr % block % mesh % indexToVertexID % array(1:field_ptr % block % mesh % nVerticesSolve))
+ totalDimSize = totalDimSize + field_ptr % block % mesh % nVerticesSolve
+ field_ptr => field_ptr % next
+ end do
+ call mpas_dmpar_sum_int(field % block % domain % dminfo, totalDimSize, globalDimSize)
+ isDecomposed = .true.
+ else
+ isDecomposed = .false.
+ globalDimSize = field % dimSizes(idim)
+ totalDimSize = globalDimSize
+
+ if (field % block % domain % dminfo % my_proc_id == IO_NODE) then
+ ndims = 1
+ allocate(indices(field % dimSizes(ndims)))
+ do i=1,field % dimSizes(ndims)
+ indices(i) = i
+ end do
+ else
+ allocate(indices(0))
+ end if
+ end if
Copied: branches/atmos_physics/src/framework/mpas_attlist.F (from rev 1863, trunk/mpas/src/framework/mpas_attlist.F)
===================================================================
--- branches/atmos_physics/src/framework/mpas_attlist.F         (rev 0)
+++ branches/atmos_physics/src/framework/mpas_attlist.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -0,0 +1,441 @@
+module mpas_attlist
+
+ use mpas_kind_types
+
+ ! Derived type for holding field attributes
+ type att_list_type
+ character (len=StrKIND) :: attName
+ integer :: attType
+ integer :: attValueInt
+ integer, dimension(:), pointer :: attValueIntA => null()
+ real (kind=RKIND) :: attValueReal
+ real (kind=RKIND), dimension(:), pointer :: attValueRealA => null()
+ character (len=StrKIND) :: attValueText
+ type (att_list_type), pointer :: next => null()
+ end type att_list_type
+
+ interface mpas_add_att
+ module procedure mpas_add_att_int0d
+ module procedure mpas_add_att_int1d
+ module procedure mpas_add_att_real0d
+ module procedure mpas_add_att_real1d
+ module procedure mpas_add_att_text
+ end interface mpas_add_att
+
+ interface mpas_get_att
+ module procedure mpas_get_att_int0d
+ module procedure mpas_get_att_int1d
+ module procedure mpas_get_att_real0d
+ module procedure mpas_get_att_real1d
+ module procedure mpas_get_att_text
+ end interface mpas_get_att
+
+
+ !!!!! PRIVATE? !!!!!
+
+ integer, parameter :: ATT_INT = 1
+ integer, parameter :: ATT_INTA = 2
+ integer, parameter :: ATT_REAL = 3
+ integer, parameter :: ATT_REALA = 4
+ integer, parameter :: ATT_TEXT = 5
+
+
+contains
+
+
+ subroutine mpas_add_att_int0d(attList, attName, attValue, ierr)
+
+ implicit none
+
+ type (att_list_type), pointer :: attList
+ character (len=*), intent(in) :: attName
+ integer, intent(in) :: attValue
+ integer, intent(out), optional :: ierr
+
+ type (att_list_type), pointer :: cursor
+
+ if (present(ierr)) ierr = 0
+
+ if (.not. associated(attList)) then
+ allocate(attList)
+ cursor => attList
+ else
+ cursor => attList
+ do while (associated(cursor % next))
+ cursor => cursor % next
+ end do
+ allocate(cursor % next)
+ cursor => cursor % next
+ end if
+
+ cursor % attType = ATT_INT
+ write(cursor % attName,'(a)') trim(attName)
+ cursor % attValueInt = attValue
+
+ end subroutine mpas_add_att_int0d
+
+
+ subroutine mpas_add_att_int1d(attList, attName, attValue, ierr)
+
+ implicit none
+
+ type (att_list_type), pointer :: attList
+ character (len=*), intent(in) :: attName
+ integer, dimension(:), intent(in) :: attValue
+ integer, intent(out), optional :: ierr
+
+ type (att_list_type), pointer :: cursor
+
+ if (present(ierr)) ierr = 0
+
+ if (.not. associated(attList)) then
+ allocate(attList)
+ cursor => attList
+ else
+ cursor => attList
+ do while (associated(cursor % next))
+ cursor => cursor % next
+ end do
+ allocate(cursor % next)
+ cursor => cursor % next
+ end if
+
+ cursor % attType = ATT_INTA
+ allocate(cursor % attValueIntA(size(attValue)))
+ write(cursor % attName,'(a)') trim(attName)
+ cursor % attValueIntA(:) = attValue(:)
+
+ end subroutine mpas_add_att_int1d
+
+
+ subroutine mpas_add_att_real0d(attList, attName, attValue, ierr)
+
+ implicit none
+
+ type (att_list_type), pointer :: attList
+ character (len=*), intent(in) :: attName
+ real (kind=RKIND), intent(in) :: attValue
+ integer, intent(out), optional :: ierr
+
+ type (att_list_type), pointer :: cursor
+
+ if (present(ierr)) ierr = 0
+
+ if (.not. associated(attList)) then
+ allocate(attList)
+ cursor => attList
+ else
+ cursor => attList
+ do while (associated(cursor % next))
+ cursor => cursor % next
+ end do
+ allocate(cursor % next)
+ cursor => cursor % next
+ end if
+
+ cursor % attType = ATT_REAL
+ write(cursor % attName,'(a)') trim(attName)
+ cursor % attValueReal = attValue
+
+ end subroutine mpas_add_att_real0d
+
+
+ subroutine mpas_add_att_real1d(attList, attName, attValue, ierr)
+
+ implicit none
+
+ type (att_list_type), pointer :: attList
+ character (len=*), intent(in) :: attName
+ real (kind=RKIND), dimension(:), intent(in) :: attValue
+ integer, intent(out), optional :: ierr
+
+ type (att_list_type), pointer :: cursor
+
+ if (present(ierr)) ierr = 0
+
+ if (.not. associated(attList)) then
+ allocate(attList)
+ cursor => attList
+ else
+ cursor => attList
+ do while (associated(cursor % next))
+ cursor => cursor % next
+ end do
+ allocate(cursor % next)
+ cursor => cursor % next
+ end if
+
+ cursor % attType = ATT_REALA
+ allocate(cursor % attValueRealA(size(attValue)))
+ write(cursor % attName,'(a)') trim(attName)
+ cursor % attValueRealA(:) = attValue(:)
+
+ end subroutine mpas_add_att_real1d
+
+
+ subroutine mpas_add_att_text(attList, attName, attValue, ierr)
+
+ implicit none
+
+ type (att_list_type), pointer :: attList
+ character (len=*), intent(in) :: attName
+ character (len=*), intent(in) :: attValue
+ integer, intent(out), optional :: ierr
+
+ type (att_list_type), pointer :: cursor
+
+ if (present(ierr)) ierr = 0
+
+ if (.not. associated(attList)) then
+ allocate(attList)
+ cursor => attList
+ else
+ cursor => attList
+ do while (associated(cursor % next))
+ cursor => cursor % next
+ end do
+ allocate(cursor % next)
+ cursor => cursor % next
+ end if
+
+ cursor % attType = ATT_TEXT
+ write(cursor % attName,'(a)') trim(attName)
+ write(cursor % attValueText,'(a)') trim(attValue)
+
+ end subroutine mpas_add_att_text
+
+
+ subroutine mpas_get_att_int0d(attList, attName, attValue, ierr)
+
+ implicit none
+
+ type (att_list_type), pointer :: attList
+ character (len=*), intent(in) :: attName
+ integer, intent(out) :: attValue
+ integer, intent(out), optional :: ierr
+
+ type (att_list_type), pointer :: cursor
+
+ if (present(ierr)) ierr = 0
+
+ cursor => attList
+ do while (associated(cursor))
+ if (trim(attName) == trim(cursor % attName)) then
+ if (cursor % attType /= ATT_INT) then
+ if (present(ierr)) ierr = 1 ! Wrong type
+ else
+ attValue = cursor % attValueInt
+ end if
+ return
+ end if
+ cursor => cursor % next
+ end do
+
+ if (present(ierr)) ierr = 1 ! Not found
+
+ end subroutine mpas_get_att_int0d
+
+
+ subroutine mpas_get_att_int1d(attList, attName, attValue, ierr)
+
+ implicit none
+
+ type (att_list_type), pointer :: attList
+ character (len=*), intent(in) :: attName
+ integer, dimension(:), pointer :: attValue
+ integer, intent(out), optional :: ierr
+
+ type (att_list_type), pointer :: cursor
+
+ if (present(ierr)) ierr = 0
+
+ cursor => attList
+ do while (associated(cursor))
+ if (trim(attName) == trim(cursor % attName)) then
+ if (cursor % attType /= ATT_INTA) then
+ if (present(ierr)) ierr = 1 ! Wrong type
+ else
+ allocate(attValue(size(cursor % attValueIntA)))
+ attValue(:) = cursor % attValueIntA(:)
+ end if
+ return
+ end if
+ cursor => cursor % next
+ end do
+
+ if (present(ierr)) ierr = 1 ! Not found
+
+ end subroutine mpas_get_att_int1d
+
+
+ subroutine mpas_get_att_real0d(attList, attName, attValue, ierr)
+
+ implicit none
+
+ type (att_list_type), pointer :: attList
+ character (len=*), intent(in) :: attName
+ real (kind=RKIND), intent(out) :: attValue
+ integer, intent(out), optional :: ierr
+
+ type (att_list_type), pointer :: cursor
+
+ if (present(ierr)) ierr = 0
+
+ cursor => attList
+ do while (associated(cursor))
+ if (trim(attName) == trim(cursor % attName)) then
+ if (cursor % attType /= ATT_REAL) then
+ if (present(ierr)) ierr = 1 ! Wrong type
+ else
+ attValue = cursor % attValueReal
+ end if
+ return
+ end if
+ cursor => cursor % next
+ end do
+
+ if (present(ierr)) ierr = 1 ! Not found
+
+ end subroutine mpas_get_att_real0d
+
+
+ subroutine mpas_get_att_real1d(attList, attName, attValue, ierr)
+
+ implicit none
+
+ type (att_list_type), pointer :: attList
+ character (len=*), intent(in) :: attName
+ real (kind=RKIND), dimension(:), pointer :: attValue
+ integer, intent(out), optional :: ierr
+
+ type (att_list_type), pointer :: cursor
+
+ if (present(ierr)) ierr = 0
+
+ cursor => attList
+ do while (associated(cursor))
+ if (trim(attName) == trim(cursor % attName)) then
+ if (cursor % attType /= ATT_REALA) then
+ if (present(ierr)) ierr = 1 ! Wrong type
+ else
+ allocate(attValue(size(cursor % attValueRealA)))
+ attValue(:) = cursor % attValueRealA(:)
+ end if
+ return
+ end if
+ cursor => cursor % next
+ end do
+
+ if (present(ierr)) ierr = 1 ! Not found
+
+ end subroutine mpas_get_att_real1d
+
+
+ subroutine mpas_get_att_text(attList, attName, attValue, ierr)
+
+ implicit none
+
+ type (att_list_type), pointer :: attList
+ character (len=*), intent(in) :: attName
+ character (len=*), intent(out) :: attValue
+ integer, intent(out), optional :: ierr
+
+ type (att_list_type), pointer :: cursor
+
+ if (present(ierr)) ierr = 0
+
+ cursor => attList
+ do while (associated(cursor))
+ if (trim(attName) == trim(cursor % attName)) then
+ if (cursor % attType /= ATT_TEXT) then
+ if (present(ierr)) ierr = 1 ! Wrong type
+ else
+ write(attValue,'(a)') trim(cursor % attValueText)
+ end if
+ return
+ end if
+ cursor => cursor % next
+ end do
+
+ if (present(ierr)) ierr = 1 ! Not found
+
+ end subroutine mpas_get_att_text
+
+
+ subroutine mpas_remove_att(attList, attName, ierr)
+
+ implicit none
+
+ type (att_list_type), pointer :: attList
+ character (len=*), intent(in) :: attName
+ integer, intent(out), optional :: ierr
+
+ type (att_list_type), pointer :: cursor, cursor_prev
+
+ if (present(ierr)) ierr = 0
+
+ cursor => attList
+
+ ! Item is at the head of the list
+ if (trim(attName) == trim(cursor % attName)) then
+ attList => cursor % next
+ if (cursor % attType == ATT_REALA) then
+ deallocate(cursor % attValueRealA)
+ else if (cursor % attType == ATT_INTA) then
+ deallocate(cursor % attValueIntA)
+ end if
+ deallocate(cursor)
+ return
+ end if
+
+ cursor_prev => cursor
+ cursor => cursor % next
+ do while (associated(cursor))
+ if (trim(attName) == trim(cursor % attName)) then
+ cursor_prev % next => cursor % next
+
+ if (cursor % attType == ATT_REALA) then
+ deallocate(cursor % attValueRealA)
+ else if (cursor % attType == ATT_INTA) then
+ deallocate(cursor % attValueIntA)
+ end if
+ deallocate(cursor)
+
+ return
+ end if
+
+ cursor_prev => cursor
+ cursor => cursor % next
+ end do
+
+ if (present(ierr)) ierr = 1 ! Not found
+
+ end subroutine mpas_remove_att
+
+
+ subroutine mpas_deallocate_attlist(attList, ierr)
+
+ implicit none
+
+ type (att_list_type), pointer :: attList
+ integer, intent(out), optional :: ierr
+
+ type (att_list_type), pointer :: cursor
+
+ if (present(ierr)) ierr = 0
+
+ cursor => attList
+ do while (associated(cursor))
+ attList => attList % next
+ if (cursor % attType == ATT_REALA) then
+ deallocate(cursor % attValueRealA)
+ else if (cursor % attType == ATT_INTA) then
+ deallocate(cursor % attValueIntA)
+ end if
+ deallocate(cursor)
+ cursor => attList
+ end do
+
+ end subroutine mpas_deallocate_attlist
+
+end module mpas_attlist
Modified: branches/atmos_physics/src/framework/mpas_block_decomp.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_block_decomp.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/framework/mpas_block_decomp.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -2,6 +2,7 @@
use mpas_dmpar
use mpas_hash
+ use mpas_sort
type graph
integer :: nVerticesTotal
@@ -12,115 +13,202 @@
integer, dimension(:,:), pointer :: adjacencyList
end type graph
+ integer :: total_blocks
+ logical :: explicitDecomp
+ integer, dimension(:), allocatable :: block_proc_list
+ integer, dimension(:), allocatable :: block_local_id_list
contains
+ subroutine mpas_block_decomp_cells_for_proc(dminfo, partial_global_graph_info, local_cell_list, block_id, block_start, block_count)!{{{
- subroutine mpas_block_decomp_cells_for_proc(dminfo, partial_global_graph_info, local_cell_list)
-
use mpas_configure
implicit none
- type (dm_info), intent(in) :: dminfo
- type (graph), intent(in) :: partial_global_graph_info
- integer, dimension(:), pointer :: local_cell_list
+ type (dm_info), intent(in) :: dminfo !< Input: domain information
+ type (graph), intent(in) :: partial_global_graph_info !< Input: Global graph information
+ integer, dimension(:), pointer :: local_cell_list !< Output: list of cells this processor owns, ordered by block
+ integer, dimension(:), pointer :: block_id !< Output: list of global block id's this processor owns
+ integer, dimension(:), pointer :: block_start !< Output: offset in local_cell_list for this blocks list of cells
+ integer, dimension(:), pointer :: block_count !< Output: number of cells in blocks
+ integer, dimension(:), pointer :: global_block_list
integer, dimension(:), pointer :: global_cell_list
integer, dimension(:), pointer :: global_start
- integer :: i, j, owner, iunit, istatus
+ integer, dimension(:), allocatable :: local_block_list
+ integer, dimension(:,:), allocatable :: sorted_local_cell_list
+
+ integer :: i, j, global_block_id, local_block_id, owning_proc, iunit, istatus
+ integer :: blocks_per_proc
integer, dimension(:), pointer :: local_nvertices
- character (len=256) :: filename
+ character (len=StrKIND) :: filename
- if (dminfo % nprocs > 1) then
+ if(config_number_of_blocks == 0) then
+ total_blocks = dminfo % nProcs
+ else
+ total_blocks = config_number_of_blocks
+ end if
- allocate(local_nvertices(dminfo % nprocs))
- allocate(global_start(dminfo % nprocs))
- allocate(global_cell_list(partial_global_graph_info % nVerticesTotal))
+ explicitDecomp = config_explicit_proc_decomp
- if (dminfo % my_proc_id == IO_NODE) then
+ call mpas_build_block_proc_list(dminfo)
+ call mpas_get_blocks_per_proc(dminfo, dminfo % my_proc_id, blocks_per_proc)
- iunit = 50 + dminfo % my_proc_id
- if (dminfo % nprocs < 10) then
- write(filename,'(a,i1)') trim(config_decomp_file_prefix), dminfo % nprocs
- else if (dminfo % nprocs < 100) then
- write(filename,'(a,i2)') trim(config_decomp_file_prefix), dminfo % nprocs
- else if (dminfo % nprocs < 1000) then
- write(filename,'(a,i3)') trim(config_decomp_file_prefix), dminfo % nprocs
- else if (dminfo % nprocs < 10000) then
- write(filename,'(a,i4)') trim(config_decomp_file_prefix), dminfo % nprocs
- else if (dminfo % nprocs < 100000) then
- write(filename,'(a,i5)') trim(config_decomp_file_prefix), dminfo % nprocs
- end if
-
- open(unit=iunit, file=trim(filename), form='formatted', status='old', iostat=istatus)
-
- if (istatus /= 0) then
- write(0,*) 'Could not open block decomposition file for ',dminfo % nprocs,' tasks.'
- write(0,*) 'Filename: ',trim(filename)
- call mpas_dmpar_abort(dminfo)
- end if
-
- local_nvertices(:) = 0
- do i=1,partial_global_graph_info % nVerticesTotal
- read(unit=iunit, fmt=*) owner
- local_nvertices(owner+1) = local_nvertices(owner+1) + 1
- end do
-
-! allocate(global_cell_list(partial_global_graph_info % nVerticesTotal))
+ if(total_blocks > 1) then
+ allocate(local_nvertices(dminfo % nprocs))
+ allocate(global_start(dminfo % nprocs))
+ allocate(global_cell_list(partial_global_graph_info % nVerticesTotal))
+ allocate(global_block_list(partial_global_graph_info % nVerticesTotal))
+
+ if (dminfo % my_proc_id == IO_NODE) then
+
+ iunit = 50 + dminfo % my_proc_id
+ if (total_blocks < 10) then
+ write(filename,'(a,i1)') trim(config_block_decomp_file_prefix), total_blocks
+ else if (total_blocks < 100) then
+ write(filename,'(a,i2)') trim(config_block_decomp_file_prefix), total_blocks
+ else if (total_blocks < 1000) then
+ write(filename,'(a,i3)') trim(config_block_decomp_file_prefix), total_blocks
+ else if (total_blocks < 10000) then
+ write(filename,'(a,i4)') trim(config_block_decomp_file_prefix), total_blocks
+ else if (total_blocks < 100000) then
+ write(filename,'(a,i5)') trim(config_block_decomp_file_prefix), total_blocks
+ end if
+
+ open(unit=iunit, file=trim(filename), form='formatted', status='old', iostat=istatus)
+
+ if (istatus /= 0) then
+ write(0,*) 'Could not open block decomposition file for ',total_blocks,' blocks.'
+ write(0,*) 'Filename: ',trim(filename)
+ call mpas_dmpar_abort(dminfo)
+ end if
+
+ local_nvertices(:) = 0
+ do i=1,partial_global_graph_info % nVerticesTotal
+ read(unit=iunit, fmt=*) global_block_id
+ call mpas_get_owning_proc(dminfo, global_block_id, owning_proc)
+ local_nvertices(owning_proc+1) = local_nvertices(owning_proc+1) + 1
+ end do
+
+ ! NOTE(review): global_cell_list is already allocated above, before the
+ ! IO_NODE branch; a second allocate of an allocated array is a run-time error.
+ ! allocate(global_cell_list(partial_global_graph_info % nVerticesTotal))
+
+ global_start(1) = 1
+ do i=2,dminfo % nprocs
+ global_start(i) = global_start(i-1) + local_nvertices(i-1)
+ end do
+
+ rewind(unit=iunit)
+
+ do i=1,partial_global_graph_info % nVerticesTotal
+ read(unit=iunit, fmt=*) global_block_id
+ call mpas_get_owning_proc(dminfo, global_block_id, owning_proc)
+
+ global_cell_list(global_start(owning_proc+1)) = i
+ global_block_list(global_start(owning_proc+1)) = global_block_id
+ global_start(owning_proc+1) = global_start(owning_proc+1) + 1
+ end do
+
+ global_start(1) = 0
+ do i=2,dminfo % nprocs
+ global_start(i) = global_start(i-1) + local_nvertices(i-1)
+ end do
- global_start(1) = 1
- do i=2,dminfo % nprocs
- global_start(i) = global_start(i-1) + local_nvertices(i-1)
- end do
-
- rewind(unit=iunit)
-
- do i=1,partial_global_graph_info % nVerticesTotal
- read(unit=iunit, fmt=*) owner
- global_cell_list(global_start(owner+1)) = i
- global_start(owner+1) = global_start(owner+1) + 1
- end do
+ close(unit=iunit)
- global_start(1) = 0
- do i=2,dminfo % nprocs
- global_start(i) = global_start(i-1) + local_nvertices(i-1)
- end do
+ call mpas_dmpar_bcast_ints(dminfo, dminfo % nprocs, local_nvertices)
+ allocate(local_cell_list(local_nvertices(dminfo % my_proc_id + 1)))
+ allocate(local_block_list(local_nvertices(dminfo % my_proc_id + 1)))
- close(unit=iunit)
+ call mpas_dmpar_scatter_ints(dminfo, dminfo % nprocs, local_nvertices(dminfo % my_proc_id + 1), &
+ global_start, local_nvertices, global_cell_list, local_cell_list)
- call mpas_dmpar_bcast_ints(dminfo, dminfo % nprocs, local_nvertices)
- allocate(local_cell_list(local_nvertices(dminfo % my_proc_id + 1)))
+ call mpas_dmpar_scatter_ints(dminfo, dminfo % nprocs, local_nvertices(dminfo % my_proc_id + 1), &
+ global_start, local_nvertices, global_block_list, local_block_list)
- call mpas_dmpar_scatter_ints(dminfo, dminfo % nprocs, local_nvertices(dminfo % my_proc_id + 1), &
- global_start, local_nvertices, global_cell_list, local_cell_list)
+ else
- else
+ call mpas_dmpar_bcast_ints(dminfo, dminfo % nprocs, local_nvertices)
+ allocate(local_cell_list(local_nvertices(dminfo % my_proc_id + 1)))
+ allocate(local_block_list(local_nvertices(dminfo % my_proc_id + 1)))
+
+ call mpas_dmpar_scatter_ints(dminfo, dminfo % nprocs, local_nvertices(dminfo % my_proc_id + 1), &
+ global_start, local_nvertices, global_cell_list, local_cell_list)
- call mpas_dmpar_bcast_ints(dminfo, dminfo % nprocs, local_nvertices)
- allocate(local_cell_list(local_nvertices(dminfo % my_proc_id + 1)))
+ call mpas_dmpar_scatter_ints(dminfo, dminfo % nprocs, local_nvertices(dminfo % my_proc_id + 1), &
+ global_start, local_nvertices, global_block_list, local_block_list)
+ end if
+
+ allocate(sorted_local_cell_list(2, local_nvertices(dminfo % my_proc_id + 1)))
+ allocate(block_id(blocks_per_proc))
+ allocate(block_start(blocks_per_proc))
+ allocate(block_count(blocks_per_proc))
- call mpas_dmpar_scatter_ints(dminfo, dminfo % nprocs, local_nvertices(dminfo % my_proc_id + 1), &
- global_start, local_nvertices, global_cell_list, local_cell_list)
+ do i = 1, blocks_per_proc
+ block_start = 0
+ block_count = 0
+ end do
- end if
+ do i = 1,local_nvertices(dminfo % my_proc_id +1)
+ call mpas_get_local_block_id(dminfo, local_block_list(i), local_block_id)
+
+ block_id(local_block_id+1) = local_block_list(i)
+
+ sorted_local_cell_list(1, i) = local_block_list(i)
+ sorted_local_cell_list(2, i) = local_cell_list(i)
+
+ block_count(local_block_id+1) = block_count(local_block_id+1) + 1
+ end do
- deallocate(local_nvertices)
- deallocate(global_start)
- deallocate(global_cell_list)
+ call quicksort(local_nvertices(dminfo % my_proc_id + 1), sorted_local_cell_list)
+
+ do i = 1, local_nvertices(dminfo % my_proc_id+1)
+ local_cell_list(i) = sorted_local_cell_list(2, i)
+ end do
+
+ do i = 2,blocks_per_proc
+ block_start(i) = block_start(i-1) + block_count(i-1)
+ end do
+
+ !dwj 01/31/12 debugging multiple blocks
+! do i=1,local_nvertices(dminfo % my_proc_id +1)
+! call mpas_get_local_block_id(dminfo, sorted_local_cell_list(1, i), local_block_id)
+! write(*,*) sorted_local_cell_list(1, i), local_block_id, sorted_local_cell_list(2,i)
+! end do
+
+ deallocate(sorted_local_cell_list)
+ deallocate(local_block_list)
+ deallocate(local_nvertices)
+ deallocate(global_start)
+ deallocate(global_cell_list)
+ deallocate(global_block_list)
else
- allocate(local_cell_list(partial_global_graph_info % nVerticesTotal))
- do i=1,size(local_cell_list)
- local_cell_list(i) = i
- end do
- endif
+ allocate(local_cell_list(partial_global_graph_info % nVerticesTotal))
+ allocate(block_id(1))
+ allocate(block_start(1))
+ allocate(block_count(1))
+ block_id(1) = 0
+ block_start(1) = 0
+ block_count(1) = size(local_cell_list)
+ do i=1,size(local_cell_list)
+ local_cell_list(i) = i
+ end do
+ end if
- end subroutine mpas_block_decomp_cells_for_proc
+ !dwj 01/31/12 debugging multiple blocks
+! write(*,*) 'Blocks per proc = ', blocks_per_proc, 'total_blocks = ', total_blocks
+! do i=1,blocks_per_proc
+! write(*,*) block_id(i), block_start(i), block_count(i)
+! end do
- subroutine mpas_block_decomp_partitioned_edge_list(nCells, cellIDList, maxCells, nEdges, cellsOnEdge, edgeIDList, ghostEdgeStart)
+! call mpas_dmpar_abort(dminfo)
+ end subroutine mpas_block_decomp_cells_for_proc!}}}
+
+ subroutine mpas_block_decomp_partitioned_edge_list(nCells, cellIDList, maxCells, nEdges, cellsOnEdge, edgeIDList, ghostEdgeStart)!{{{
+
implicit none
integer, intent(in) :: nCells, maxCells, nEdges
@@ -172,11 +260,10 @@
call mpas_hash_destroy(h)
- end subroutine mpas_block_decomp_partitioned_edge_list
+ end subroutine mpas_block_decomp_partitioned_edge_list!}}}
+ subroutine mpas_block_decomp_all_edges_in_block(maxEdges, nCells, nEdgesOnCell, edgesOnCell, nEdges, edgeList)!{{{
- subroutine mpas_block_decomp_all_edges_in_block(maxEdges, nCells, nEdgesOnCell, edgesOnCell, nEdges, edgeList)
-
implicit none
integer, intent(in) :: maxEdges, nCells
@@ -226,11 +313,10 @@
'Listed fewer edges than expected.'
end if
- end subroutine mpas_block_decomp_all_edges_in_block
+ end subroutine mpas_block_decomp_all_edges_in_block!}}}
+ subroutine mpas_block_decomp_add_halo(dminfo, local_graph_info, local_graph_with_halo)!{{{
- subroutine mpas_block_decomp_add_halo(dminfo, local_graph_info, local_graph_with_halo)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -301,6 +387,132 @@
call mpas_hash_destroy(h)
- end subroutine mpas_block_decomp_add_halo
+ end subroutine mpas_block_decomp_add_halo!}}}
+ subroutine mpas_get_blocks_per_proc(dminfo, proc_number, blocks_per_proc)!{{{
+ type(dm_info), intent(in) :: dminfo !< Input: Domain Information
+ integer, intent(in) :: proc_number !< Input: Processor number
+ integer, intent(out) :: blocks_per_proc !< Output: Number of blocks proc_number computes on
+
+ integer :: blocks_per_proc_min, even_blocks, remaining_blocks
+
+ blocks_per_proc_min = total_blocks / dminfo % nProcs
+ remaining_blocks = total_blocks - (blocks_per_proc_min * dminfo % nProcs)
+ even_blocks = total_blocks - remaining_blocks
+
+ blocks_per_proc = blocks_per_proc_min
+
+ ! Procs 0 .. remaining_blocks-1 each take one extra block (0-based proc ids),
+ ! so the test must be .lt., and the counter being incremented is blocks_per_proc.
+ if(proc_number .lt. remaining_blocks) then
+ blocks_per_proc = blocks_per_proc + 1
+ endif
+
+ end subroutine mpas_get_blocks_per_proc!}}}
+
+ subroutine mpas_get_local_block_id(dminfo, global_block_number, local_block_number)!{{{
+ type(dm_info), intent(in) :: dminfo !< Input: Domain Information
+ integer, intent(in) :: global_block_number !< Input: Global block id from 0 to config_number_of_blocks-1
+ integer, intent(out) :: local_block_number !< Output: Local block id on owning processor from 0 to blocks_per_proc
+
+ integer :: blocks_per_proc_min, even_blocks, remaining_blocks
+
+ if(.not.explicitDecomp) then
+ blocks_per_proc_min = total_blocks / dminfo % nProcs
+ remaining_blocks = total_blocks - (blocks_per_proc_min * dminfo % nProcs)
+ even_blocks = total_blocks - remaining_blocks
+
+ if(global_block_number > even_blocks) then
+ local_block_number = blocks_per_proc_min
+ else
+ local_block_number = mod(global_block_number, blocks_per_proc_min)
+ end if
+ else
+ local_block_number = block_local_id_list(global_block_number+1)
+ end if
+ end subroutine mpas_get_local_block_id!}}}
+
+ subroutine mpas_get_owning_proc(dminfo, global_block_number, owning_proc)!{{{
+ type(dm_info), intent(in) :: dminfo !< Input: Domain Information
+ integer, intent(in) :: global_block_number !< Input: Global block id from 0 to config_number_of_blocks-1
+ integer, intent(out) :: owning_proc !< Output: Processor number that owns block global_block_number
+
+ integer :: blocks_per_proc_min, even_blocks, remaining_blocks
+
+ if(.not.explicitDecomp) then
+ blocks_per_proc_min = total_blocks / dminfo % nProcs
+ remaining_blocks = total_blocks - (blocks_per_proc_min * dminfo % nProcs)
+ even_blocks = total_blocks - remaining_blocks
+
+ if(global_block_number > even_blocks) then
+ owning_proc = global_block_number - even_blocks
+ else
+ owning_proc = global_block_number / blocks_per_proc_min
+ end if
+ else
+ owning_proc = block_proc_list(global_block_number+1)
+ end if
+ end subroutine mpas_get_owning_proc!}}}
+
+ subroutine mpas_build_block_proc_list(dminfo)!{{{
+
+ use mpas_configure
+
+ implicit none
+
+ type(dm_info), intent(in) :: dminfo
+
+ integer :: iounit, istatus, i, owning_proc
+ character (len=StrKIND) :: filename
+
+ integer, dimension(:), allocatable :: block_counter
+
+ if(.not.explicitDecomp) return
+
+ allocate(block_proc_list(total_blocks))
+ allocate(block_local_id_list(total_blocks))
+
+ if (dminfo % my_proc_id == IO_NODE) then
+ allocate(block_counter(dminfo % nProcs))
+ block_counter = 0
+
+ iounit = 51 + dminfo % my_proc_id
+ if (dminfo % nProcs < 10) then
+ write(filename,'(a,i1)') trim(config_proc_decomp_file_prefix), dminfo % nProcs
+ else if (dminfo % nProcs < 100) then
+ write(filename,'(a,i2)') trim(config_proc_decomp_file_prefix), dminfo % nProcs
+ else if (dminfo % nProcs < 1000) then
+ write(filename,'(a,i3)') trim(config_proc_decomp_file_prefix), dminfo % nProcs
+ else if (dminfo % nProcs < 10000) then
+ write(filename,'(a,i4)') trim(config_proc_decomp_file_prefix), dminfo % nProcs
+ else if (dminfo % nProcs < 100000) then
+ write(filename,'(a,i5)') trim(config_proc_decomp_file_prefix), dminfo % nProcs
+ end if
+
+ open(unit=iounit, file=trim(filename), form='formatted', status='old', iostat=istatus)
+
+ do i=1,total_blocks
+ read(unit=iounit, fmt=*) owning_proc
+
+ block_proc_list(i) = owning_proc
+ block_local_id_list(i) = block_counter(owning_proc+1)
+
+ block_counter(owning_proc+1) = block_counter(owning_proc+1) + 1
+ end do
+
+ close(unit=iounit)
+ deallocate(block_counter)
+ call mpas_dmpar_bcast_ints(dminfo, total_blocks, block_proc_list)
+ call mpas_dmpar_bcast_ints(dminfo, total_blocks, block_local_id_list)
+ else
+ call mpas_dmpar_bcast_ints(dminfo, total_blocks, block_proc_list)
+ call mpas_dmpar_bcast_ints(dminfo, total_blocks, block_local_id_list)
+ endif
+
+ end subroutine mpas_build_block_proc_list!}}}
+
+ subroutine mpas_finish_block_proc_list()!{{{
+ if(.not.explicitDecomp) return
+ deallocate(block_proc_list)
+ deallocate(block_local_id_list)
+ end subroutine mpas_finish_block_proc_list!}}}
+
end module mpas_block_decomp
Modified: branches/atmos_physics/src/framework/mpas_dmpar.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_dmpar.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/framework/mpas_dmpar.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -1,6 +1,7 @@
module mpas_dmpar
- use mpas_kind_types
+ use mpas_dmpar_types
+ use mpas_grid_types
use mpas_sort
#ifdef _MPI
@@ -18,23 +19,6 @@
integer, parameter :: BUFSIZE = 6000
- type dm_info
- integer :: nprocs, my_proc_id, comm, info
- logical :: using_external_comm
- end type dm_info
-
-
- type exchange_list
- integer :: procID
- integer :: nlist
- integer, dimension(:), pointer :: list
- type (exchange_list), pointer :: next
- real (kind=RKIND), dimension(:), pointer :: rbuffer
- integer, dimension(:), pointer :: ibuffer
- integer :: reqID
- end type exchange_list
-
-
interface mpas_dmpar_alltoall_field
module procedure mpas_dmpar_alltoall_field1d_integer
module procedure mpas_dmpar_alltoall_field2d_integer
@@ -43,7 +27,30 @@
module procedure mpas_dmpar_alltoall_field3d_real
end interface
+ private :: mpas_dmpar_alltoall_field1d_integer
+ private :: mpas_dmpar_alltoall_field2d_integer
+ private :: mpas_dmpar_alltoall_field1d_real
+ private :: mpas_dmpar_alltoall_field2d_real
+ private :: mpas_dmpar_alltoall_field3d_real
+
+ interface mpas_dmpar_exch_halo_field
+ module procedure mpas_dmpar_exch_halo_field1d_integer
+ module procedure mpas_dmpar_exch_halo_field2d_integer
+ module procedure mpas_dmpar_exch_halo_field3d_integer
+ module procedure mpas_dmpar_exch_halo_field1d_real
+ module procedure mpas_dmpar_exch_halo_field2d_real
+ module procedure mpas_dmpar_exch_halo_field3d_real
+ end interface
+
+ private :: mpas_dmpar_exch_halo_field1d_integer
+ private :: mpas_dmpar_exch_halo_field2d_integer
+ private :: mpas_dmpar_exch_halo_field3d_integer
+ private :: mpas_dmpar_exch_halo_field1d_real
+ private :: mpas_dmpar_exch_halo_field2d_real
+ private :: mpas_dmpar_exch_halo_field3d_real
+
+
contains
@@ -565,7 +572,7 @@
subroutine mpas_dmpar_get_owner_list(dminfo, &
nOwnedList, nNeededList, &
ownedList, neededList, &
- sendList, recvList)
+ sendList, recvList, inOffset)
implicit none
@@ -575,9 +582,10 @@
integer, dimension(nNeededList), intent(in) :: neededList
type (exchange_list), pointer :: sendList
type (exchange_list), pointer :: recvList
+ integer, optional :: inOffset
integer :: i, j, k, kk
- integer :: totalSize, nMesgRecv, nMesgSend, recvNeighbor, sendNeighbor, currentProc
+ integer :: totalSize, nMesgRecv, nMesgSend, recvNeighbor, sendNeighbor, currentProc, offset
integer :: numToSend, numToRecv
integer, dimension(nOwnedList) :: recipientList
integer, dimension(2,nOwnedList) :: ownedListSorted
@@ -585,6 +593,7 @@
type (exchange_list), pointer :: sendListPtr, recvListPtr
integer :: mpi_ierr, mpi_rreq, mpi_sreq
+
#ifdef _MPI
allocate(sendList)
allocate(recvList)
@@ -593,6 +602,11 @@
sendListPtr => sendList
recvListPtr => recvList
+ offset = 0
+ if(present(inOffset)) then
+ offset = inOffset
+ end if
+
do i=1,nOwnedList
ownedListSorted(1,i) = ownedList(i)
ownedListSorted(2,i) = i
@@ -635,6 +649,7 @@
allocate(sendListPtr % next)
sendListPtr => sendListPtr % next
sendListPtr % procID = currentProc
+ sendListPtr % blockID = currentProc ! Currently, we have just one block per task, so blockID = procID
sendListPtr % nlist = numToSend
allocate(sendListPtr % list(numToSend))
nullify(sendListPtr % next)
@@ -668,13 +683,14 @@
allocate(recvListPtr % next)
recvListPtr => recvListPtr % next
recvListPtr % procID = i
+ recvListPtr % blockID = i ! Currently, we have just one block per task, so blockID = procID
recvListPtr % nlist = numToRecv
allocate(recvListPtr % list(numToRecv))
nullify(recvListPtr % next)
kk = 1
do j=1,nNeededList
if (ownerListIn(j) == -i) then
- recvListPtr % list(kk) = j
+ recvListPtr % list(kk) = j + offset
kk = kk + 1
end if
end do
@@ -696,6 +712,7 @@
#else
allocate(recvList)
recvList % procID = dminfo % my_proc_id
+ recvList % blockID = dminfo % my_proc_id ! Currently, we have just one block per task, so blockID = procID
recvList % nlist = nNeededList
allocate(recvList % list(nNeededList))
nullify(recvList % next)
@@ -705,6 +722,7 @@
allocate(sendList)
sendList % procID = dminfo % my_proc_id
+ sendList % blockID = dminfo % my_proc_id ! Currently, we have just one block per task, so blockID = procID
sendList % nlist = nOwnedList
allocate(sendList % list(nOwnedList))
nullify(sendList % next)
@@ -1467,21 +1485,27 @@
end subroutine mpas_unpack_recv_buf3d_integer
- subroutine mpas_dmpar_exch_halo_field1d_integer(dminfo, array, dim1, sendList, recvList)
+ subroutine mpas_dmpar_exch_halo_field1d_integer(field, haloLayers)
implicit none
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1
- integer, dimension(*), intent(inout) :: array
+ type (field1DInteger), intent(inout) :: field
+ integer, dimension(:), intent(in), optional :: haloLayers
+
+ type (dm_info) :: dminfo
type (exchange_list), pointer :: sendList, recvList
-
type (exchange_list), pointer :: sendListPtr, recvListPtr
integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
integer :: mpi_ierr
+ integer, dimension(size(field % dimSizes)) :: dims
#ifdef _MPI
+ dminfo = field % block % domain % dminfo
+ dims = field % dimSizes
+
+ call mpas_aggregate_exchange_lists(dminfo % my_proc_id, haloLayers, field % sendList, field % recvList, sendList, recvList)
+
recvListPtr => recvList
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
@@ -1496,7 +1520,7 @@
do while (associated(sendListPtr))
if (sendListPtr % procID /= dminfo % my_proc_id) then
allocate(sendListPtr % ibuffer(sendListPtr % nlist))
- call mpas_pack_send_buf1d_integer(dim1, array, sendListPtr, 1, sendListPtr % nlist, sendListPtr % ibuffer, nPacked, lastPackedIdx)
+ call mpas_pack_send_buf1d_integer(dims(1), field % array, sendListPtr, 1, sendListPtr % nlist, sendListPtr % ibuffer, nPacked, lastPackedIdx)
call MPI_Isend(sendListPtr % ibuffer, sendListPtr % nlist, MPI_INTEGERKIND, &
sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
end if
@@ -1507,7 +1531,7 @@
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- call mpas_unpack_recv_buf1d_integer(dim1, array, recvListPtr, 1, recvListPtr % nlist, recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
+ call mpas_unpack_recv_buf1d_integer(dims(1), field % array, recvListPtr, 1, recvListPtr % nlist, recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
deallocate(recvListPtr % ibuffer)
end if
recvListPtr => recvListPtr % next
@@ -1522,31 +1546,40 @@
sendListPtr => sendListPtr % next
end do
+ call mpas_destroy_exchange_list(sendList)
+ call mpas_destroy_exchange_list(recvList)
+
#endif
end subroutine mpas_dmpar_exch_halo_field1d_integer
- subroutine mpas_dmpar_exch_halo_field2d_integer(dminfo, array, dim1, dim2, sendList, recvList)
+ subroutine mpas_dmpar_exch_halo_field2d_integer(field, haloLayers)
implicit none
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1, dim2
- integer, dimension(dim1,*), intent(inout) :: array
+ type (field2DInteger), intent(inout) :: field
+ integer, dimension(:), intent(in), optional :: haloLayers
+
+ type (dm_info) :: dminfo
type (exchange_list), pointer :: sendList, recvList
-
type (exchange_list), pointer :: sendListPtr, recvListPtr
integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
integer :: mpi_ierr
integer :: d2
+ integer, dimension(size(field % dimSizes)) :: dims
#ifdef _MPI
+ dminfo = field % block % domain % dminfo
+ dims = field % dimSizes
+
+ call mpas_aggregate_exchange_lists(dminfo % my_proc_id, haloLayers, field % sendList, field % recvList, sendList, recvList)
+
recvListPtr => recvList
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * recvListPtr % nlist
+ d2 = dims(1) * recvListPtr % nlist
allocate(recvListPtr % ibuffer(d2))
call MPI_Irecv(recvListPtr % ibuffer, d2, MPI_INTEGERKIND, &
recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
@@ -1557,9 +1590,9 @@
sendListPtr => sendList
do while (associated(sendListPtr))
if (sendListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * sendListPtr % nlist
+ d2 = dims(1) * sendListPtr % nlist
allocate(sendListPtr % ibuffer(d2))
- call mpas_pack_send_buf2d_integer(1, dim1, dim2, array, sendListPtr, 1, d2, sendListPtr % ibuffer, nPacked, lastPackedIdx)
+ call mpas_pack_send_buf2d_integer(1, dims(1), dims(2), field % array, sendListPtr, 1, d2, sendListPtr % ibuffer, nPacked, lastPackedIdx)
call MPI_Isend(sendListPtr % ibuffer, d2, MPI_INTEGERKIND, &
sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
end if
@@ -1570,8 +1603,8 @@
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d2 = dim1 * recvListPtr % nlist
- call mpas_unpack_recv_buf2d_integer(1, dim1, dim2, array, recvListPtr, 1, d2, recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
+ d2 = dims(1) * recvListPtr % nlist
+ call mpas_unpack_recv_buf2d_integer(1, dims(1), dims(2), field % array, recvListPtr, 1, d2, recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
deallocate(recvListPtr % ibuffer)
end if
recvListPtr => recvListPtr % next
@@ -1586,31 +1619,40 @@
sendListPtr => sendListPtr % next
end do
+ call mpas_destroy_exchange_list(sendList)
+ call mpas_destroy_exchange_list(recvList)
+
#endif
end subroutine mpas_dmpar_exch_halo_field2d_integer
- subroutine mpas_dmpar_exch_halo_field3d_integer(dminfo, array, dim1, dim2, dim3, sendList, recvList)
+ subroutine mpas_dmpar_exch_halo_field3d_integer(field, haloLayers)
implicit none
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1, dim2, dim3
- integer, dimension(dim1,dim2,*), intent(inout) :: array
+ type (field3DInteger), intent(inout) :: field
+ integer, dimension(:), intent(in), optional :: haloLayers
+
+ type (dm_info) :: dminfo
type (exchange_list), pointer :: sendList, recvList
-
type (exchange_list), pointer :: sendListPtr, recvListPtr
integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
integer :: mpi_ierr
integer :: d3
+ integer, dimension(size(field % dimSizes)) :: dims
#ifdef _MPI
+ dminfo = field % block % domain % dminfo
+ dims = field % dimSizes
+
+ call mpas_aggregate_exchange_lists(dminfo % my_proc_id, haloLayers, field % sendList, field % recvList, sendList, recvList)
+
recvListPtr => recvList
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
- d3 = dim1 * dim2 * recvListPtr % nlist
+ d3 = dims(1) * dims(2) * recvListPtr % nlist
allocate(recvListPtr % ibuffer(d3))
call MPI_Irecv(recvListPtr % ibuffer, d3, MPI_INTEGERKIND, &
recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
@@ -1621,9 +1663,9 @@
sendListPtr => sendList
do while (associated(sendListPtr))
if (sendListPtr % procID /= dminfo % my_proc_id) then
- d3 = dim1 * dim2 * sendListPtr % nlist
+ d3 = dims(1) * dims(2) * sendListPtr % nlist
allocate(sendListPtr % ibuffer(d3))
- call mpas_pack_send_buf3d_integer(1, dim1, 1, dim2, dim3, array, sendListPtr, 1, d3, &
+ call mpas_pack_send_buf3d_integer(1, dims(1), 1, dims(2), dims(3), field % array, sendListPtr, 1, d3, &
sendListPtr % ibuffer, nPacked, lastPackedIdx)
call MPI_Isend(sendListPtr % ibuffer, d3, MPI_INTEGERKIND, &
sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
@@ -1635,8 +1677,8 @@
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d3 = dim1 * dim2 * recvListPtr % nlist
- call mpas_unpack_recv_buf3d_integer(1, dim1, 1, dim2, dim3, array, recvListPtr, 1, d3, &
+ d3 = dims(1) * dims(2) * recvListPtr % nlist
+ call mpas_unpack_recv_buf3d_integer(1, dims(1), 1, dims(2), dims(3), field % array, recvListPtr, 1, d3, &
recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
deallocate(recvListPtr % ibuffer)
end if
@@ -1652,6 +1694,9 @@
sendListPtr => sendListPtr % next
end do
+ call mpas_destroy_exchange_list(sendList)
+ call mpas_destroy_exchange_list(recvList)
+
#endif
end subroutine mpas_dmpar_exch_halo_field3d_integer
@@ -1747,21 +1792,27 @@
end subroutine mpas_unpack_recv_buf3d_real
- subroutine mpas_dmpar_exch_halo_field1d_real(dminfo, array, dim1, sendList, recvList)
+ subroutine mpas_dmpar_exch_halo_field1d_real(field, haloLayers)
implicit none
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1
- real (kind=RKIND), dimension(*), intent(inout) :: array
+ type (field1DReal), intent(inout) :: field
+ integer, dimension(:), intent(in), optional :: haloLayers
+
+ type (dm_info) :: dminfo
type (exchange_list), pointer :: sendList, recvList
-
type (exchange_list), pointer :: sendListPtr, recvListPtr
integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
integer :: mpi_ierr
+ integer, dimension(size(field % dimSizes)) :: dims
#ifdef _MPI
+ dminfo = field % block % domain % dminfo
+ dims = field % dimSizes
+
+ call mpas_aggregate_exchange_lists(dminfo % my_proc_id, haloLayers, field % sendList, field % recvList, sendList, recvList)
+
recvListPtr => recvList
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
@@ -1771,12 +1822,12 @@
end if
recvListPtr => recvListPtr % next
end do
-
+
sendListPtr => sendList
do while (associated(sendListPtr))
if (sendListPtr % procID /= dminfo % my_proc_id) then
allocate(sendListPtr % rbuffer(sendListPtr % nlist))
- call mpas_pack_send_buf1d_real(dim1, array, sendListPtr, 1, sendListPtr % nlist, sendListPtr % rbuffer, nPacked, lastPackedIdx)
+ call mpas_pack_send_buf1d_real(dims(1), field % array, sendListPtr, 1, sendListPtr % nlist, sendListPtr % rbuffer, nPacked, lastPackedIdx)
call MPI_Isend(sendListPtr % rbuffer, sendListPtr % nlist, MPI_REALKIND, &
sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
end if
@@ -1787,7 +1838,7 @@
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- call mpas_unpack_recv_buf1d_real(dim1, array, recvListPtr, 1, recvListPtr % nlist, recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
+ call mpas_unpack_recv_buf1d_real(dims(1), field % array, recvListPtr, 1, recvListPtr % nlist, recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
deallocate(recvListPtr % rbuffer)
end if
recvListPtr => recvListPtr % next
@@ -1802,44 +1853,54 @@
sendListPtr => sendListPtr % next
end do
+ call mpas_destroy_exchange_list(sendList)
+ call mpas_destroy_exchange_list(recvList)
+
#endif
end subroutine mpas_dmpar_exch_halo_field1d_real
- subroutine mpas_dmpar_exch_halo_field2d_real(dminfo, array, dim1, dim2, sendList, recvList)
+ subroutine mpas_dmpar_exch_halo_field2d_real(field, haloLayers)
implicit none
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1, dim2
- real (kind=RKIND), dimension(dim1,*), intent(inout) :: array
+ type (field2DReal), intent(inout) :: field
+ integer, dimension(:), intent(in), optional :: haloLayers
+
+ type (dm_info) :: dminfo
type (exchange_list), pointer :: sendList, recvList
-
type (exchange_list), pointer :: sendListPtr, recvListPtr
integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
integer :: mpi_ierr
integer :: d2
+ integer, dimension(size(field % dimSizes)) :: dims
+
#ifdef _MPI
+ dminfo = field % block % domain % dminfo
+ dims = field % dimSizes
+
+ call mpas_aggregate_exchange_lists(dminfo % my_proc_id, haloLayers, field % sendList, field % recvList, sendList, recvList)
+
recvListPtr => recvList
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * recvListPtr % nlist
+ d2 = dims(1) * recvListPtr % nlist
allocate(recvListPtr % rbuffer(d2))
call MPI_Irecv(recvListPtr % rbuffer, d2, MPI_REALKIND, &
recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
end if
recvListPtr => recvListPtr % next
end do
-
+
sendListPtr => sendList
do while (associated(sendListPtr))
if (sendListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * sendListPtr % nlist
+ d2 = dims(1) * sendListPtr % nlist
allocate(sendListPtr % rbuffer(d2))
- call mpas_pack_send_buf2d_real(1, dim1, dim2, array, sendListPtr, 1, d2, sendListPtr % rbuffer, nPacked, lastPackedIdx)
+ call mpas_pack_send_buf2d_real(1, dims(1), dims(2), field % array, sendListPtr, 1, d2, sendListPtr % rbuffer, nPacked, lastPackedIdx)
call MPI_Isend(sendListPtr % rbuffer, d2, MPI_REALKIND, &
sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
end if
@@ -1850,13 +1911,13 @@
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d2 = dim1 * recvListPtr % nlist
- call mpas_unpack_recv_buf2d_real(1, dim1, dim2, array, recvListPtr, 1, d2, recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
+ d2 = dims(1) * recvListPtr % nlist
+ call mpas_unpack_recv_buf2d_real(1, dims(1), dims(2), field % array, recvListPtr, 1, d2, recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
deallocate(recvListPtr % rbuffer)
end if
recvListPtr => recvListPtr % next
end do
-
+
sendListPtr => sendList
do while (associated(sendListPtr))
if (sendListPtr % procID /= dminfo % my_proc_id) then
@@ -1866,31 +1927,40 @@
sendListPtr => sendListPtr % next
end do
+ call mpas_destroy_exchange_list(sendList)
+ call mpas_destroy_exchange_list(recvList)
+
#endif
end subroutine mpas_dmpar_exch_halo_field2d_real
- subroutine mpas_dmpar_exch_halo_field3d_real(dminfo, array, dim1, dim2, dim3, sendList, recvList)
+ subroutine mpas_dmpar_exch_halo_field3d_real(field, haloLayers)
implicit none
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1, dim2, dim3
- real (kind=RKIND), dimension(dim1,dim2,*), intent(inout) :: array
+ type (field3DReal), intent(inout) :: field
+ integer, dimension(:), intent(in), optional :: haloLayers
+
+ type (dm_info) :: dminfo
type (exchange_list), pointer :: sendList, recvList
-
type (exchange_list), pointer :: sendListPtr, recvListPtr
integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
integer :: mpi_ierr
integer :: d3
+ integer, dimension(size(field % dimSizes)) :: dims
#ifdef _MPI
+ dminfo = field % block % domain % dminfo
+ dims = field % dimSizes
+
+ call mpas_aggregate_exchange_lists(dminfo % my_proc_id, haloLayers, field % sendList, field % recvList, sendList, recvList)
+
recvListPtr => recvList
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
- d3 = dim1 * dim2 * recvListPtr % nlist
+ d3 = dims(1) * dims(2) * recvListPtr % nlist
allocate(recvListPtr % rbuffer(d3))
call MPI_Irecv(recvListPtr % rbuffer, d3, MPI_REALKIND, &
recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
@@ -1901,9 +1971,9 @@
sendListPtr => sendList
do while (associated(sendListPtr))
if (sendListPtr % procID /= dminfo % my_proc_id) then
- d3 = dim1 * dim2 * sendListPtr % nlist
+ d3 = dims(1) * dims(2) * sendListPtr % nlist
allocate(sendListPtr % rbuffer(d3))
- call mpas_pack_send_buf3d_real(1, dim1, 1, dim2, dim3, array, sendListPtr, 1, d3, &
+ call mpas_pack_send_buf3d_real(1, dims(1), 1, dims(2), dims(3), field % array, sendListPtr, 1, d3, &
sendListPtr % rbuffer, nPacked, lastPackedIdx)
call MPI_Isend(sendListPtr % rbuffer, d3, MPI_REALKIND, &
sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
@@ -1915,8 +1985,8 @@
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d3 = dim1 * dim2 * recvListPtr % nlist
- call mpas_unpack_recv_buf3d_real(1, dim1, 1, dim2, dim3, array, recvListPtr, 1, d3, &
+ d3 = dims(1) * dims(2) * recvListPtr % nlist
+ call mpas_unpack_recv_buf3d_real(1, dims(1), 1, dims(2), dims(3), field % array, recvListPtr, 1, d3, &
recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
deallocate(recvListPtr % rbuffer)
end if
@@ -1932,9 +2002,194 @@
sendListPtr => sendListPtr % next
end do
+ call mpas_destroy_exchange_list(sendList)
+ call mpas_destroy_exchange_list(recvList)
+
#endif
end subroutine mpas_dmpar_exch_halo_field3d_real
+ subroutine mpas_aggregate_exchange_lists(myProcID, haloLayersIn, sendListArray, recvListArray, aggregateSendList, aggregateRecvList)
+
+ implicit none
+
+ !--- in variables ---!
+ integer, intent(in) :: myProcID
+ integer, dimension(:), intent(in), target, optional :: haloLayersIn
+ type (exchange_list), dimension(:), pointer :: sendListArray, recvListArray
+
+ !--- out variables ---!
+ type (exchange_list), pointer :: aggregateSendList, aggregateRecvList
+
+ !--- local variables ---!
+ integer :: i, j
+ integer, dimension(:), pointer :: haloLayers
+ type (exchange_list), pointer :: inListPtr, aggListPtr
+ logical :: blockAdded
+ logical :: listInitilized
+
+ if (present(haloLayersIn)) then
+ haloLayers => haloLayersIn
+ else
+ allocate(haloLayers(size(sendListArray)))
+ do i=1, size(haloLayers)
+ haloLayers(i) = i
+ end do
+ end if
+
+ nullify(aggregateSendList)
+ nullify(aggregateRecvList)
+
+ do i=1, size(haloLayers)
+
+ inListPtr => sendListArray(haloLayers(i)) % next
+ do while(associated(inListPtr))
+
+ blockAdded = .false.
+ aggListPtr => aggregateSendList
+
+ do while(associated(aggListPtr))
+ if(inListPtr % blockID == aggListPtr % blockID) then
+ if(inListPtr % procID .ne. myProcID) then
+ call mpas_merge_integer_arrays(aggListPtr % list, aggListPtr % nlist, inListPtr % list)
+ end if
+ blockAdded = .true.
+ exit
+ end if
+ aggListPtr => aggListPtr % next
+ end do
+
+ if(.not. blockAdded) then
+
+ if (.not. associated(aggregateSendList)) then
+ allocate(aggregateSendList)
+ nullify(aggregateSendList % next)
+ aggListPtr => aggregateSendList
+ else
+ aggListPtr => aggregateSendList
+ do while(associated(aggListPtr % next))
+ aggListPtr => aggListPtr % next
+ end do
+ allocate(aggListPtr % next)
+ aggListPtr => aggListPtr % next
+ end if
+
+ nullify(aggListPtr % next)
+ aggListPtr % procID = inListPtr % procID
+ aggListPtr % blockID = inListPtr % blockID
+ aggListPtr % nlist = inListPtr % nlist
+ allocate(aggListPtr % list(inListPtr % nlist))
+ aggListPtr % list = inListPtr % list
+ aggListPtr % reqID = inListPtr % reqID
+
+ end if
+
+ inListPtr => inListPtr % next
+ end do
+
+
+ inListPtr => recvListArray(haloLayers(i)) % next
+ do while(associated(inListPtr))
+
+ blockAdded = .false.
+ aggListPtr => aggregateRecvList
+ do while(associated(aggListPtr))
+ if(inListPtr % blockID == aggListPtr % blockID) then
+ if(inListPtr % procID .ne. myProcID) then
+ call mpas_merge_integer_arrays(aggListPtr % list, aggListPtr % nlist, inListPtr % list)
+ end if
+ blockAdded = .true.
+ exit
+ end if
+ aggListPtr => aggListPtr % next
+ end do
+
+ if(.not. blockAdded) then
+
+ if (.not. associated(aggregateRecvList)) then
+ allocate(aggregateRecvList)
+ nullify(aggregateRecvList % next)
+ aggListPtr => aggregateRecvList
+ else
+ aggListPtr => aggregateRecvList
+ do while(associated(aggListPtr % next))
+ aggListPtr => aggListPtr % next
+ end do
+
+ allocate(aggListPtr % next)
+ aggListPtr => aggListPtr % next
+ nullify(aggListPtr % next)
+ end if
+
+ aggListPtr % procID = inListPtr % procID
+ aggListPtr % blockID = inListPtr % blockID
+ aggListPtr % nlist = inListPtr % nlist
+ allocate(aggListPtr % list(inListPtr % nlist))
+ aggListPtr % list = inListPtr % list
+ aggListPtr % reqID = inListPtr % reqID
+
+ end if
+
+ inListPtr => inListPtr % next
+ end do
+
+ end do
+
+ if (.not. present(haloLayersIn)) then
+ deallocate(haloLayers)
+ end if
+
+ end subroutine mpas_aggregate_exchange_lists
+
+
+ subroutine mpas_destroy_exchange_list(exchangeList)
+
+ implicit none
+
+ !--- in variables ---!
+ type (exchange_list), pointer :: exchangeList
+
+ !--- local variables ---!
+ type (exchange_list), pointer :: exchangeListPtr
+
+ do while (associated(exchangeList))
+ exchangeListPtr => exchangeList % next
+
+ deallocate(exchangeList % list)
+ deallocate(exchangeList)
+ exchangeList => exchangeListPtr
+ end do
+
+ end subroutine mpas_destroy_exchange_list
+
+
+ subroutine mpas_merge_integer_arrays(mergeArray, nMergeArray, dataToAppend)
+
+ implicit none
+
+ !--- inout variables ---!
+ integer, dimension(:), pointer :: mergeArray
+ integer, intent(inout) :: nMergeArray
+
+ !--- in variables ---!
+ integer, dimension(:), pointer :: dataToAppend
+
+ !--- local variables ---!
+ integer :: nDataToAppend, newSize
+ integer, dimension(nMergeArray) :: mergeArrayCopy
+
+
+ nDataToAppend = size(dataToAppend)
+ newSize = nMergeArray + nDataToAppend
+ mergeArrayCopy = mergeArray
+ deallocate(mergeArray)
+ allocate(mergeArray(newSize))
+ mergeArray(1:nMergeArray) = mergeArrayCopy
+ mergeArray(nMergeArray+1:newSize) = dataToAppend
+ nMergeArray = newSize
+
+ end subroutine mpas_merge_integer_arrays
+
+
end module mpas_dmpar
Copied: branches/atmos_physics/src/framework/mpas_dmpar_types.F (from rev 1863, trunk/mpas/src/framework/mpas_dmpar_types.F)
===================================================================
--- branches/atmos_physics/src/framework/mpas_dmpar_types.F         (rev 0)
+++ branches/atmos_physics/src/framework/mpas_dmpar_types.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -0,0 +1,23 @@
+module mpas_dmpar_types
+
+ use mpas_kind_types
+
+ type dm_info
+ integer :: nprocs, my_proc_id, comm, info
+ logical :: using_external_comm
+ end type dm_info
+
+
+ type exchange_list
+ integer :: procID
+ integer :: blockID
+ integer :: nlist
+ integer, dimension(:), pointer :: list
+ type (exchange_list), pointer :: next
+ real (kind=RKIND), dimension(:), pointer :: rbuffer
+ integer, dimension(:), pointer :: ibuffer
+ integer :: reqID
+
+ end type exchange_list
+
+end module mpas_dmpar_types
Modified: branches/atmos_physics/src/framework/mpas_framework.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_framework.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/framework/mpas_framework.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -7,6 +7,7 @@
use mpas_configure
use mpas_timer
use mpas_timekeeping
+ use mpas_io
contains
@@ -19,6 +20,9 @@
type (dm_info), pointer :: dminfo
type (domain_type), pointer :: domain
+ integer :: pio_num_iotasks
+ integer :: pio_stride
+
allocate(dminfo)
call mpas_dmpar_init(dminfo)
@@ -28,6 +32,13 @@
call mpas_timekeeping_init(config_calendar_type)
+ pio_num_iotasks = config_pio_num_iotasks
+ pio_stride = config_pio_stride
+ if (pio_num_iotasks == 0) then
+ pio_num_iotasks = domain % dminfo % nprocs
+ end if
+ call MPAS_io_init(dminfo, pio_num_iotasks, pio_stride)
+
end subroutine mpas_framework_init
@@ -38,6 +49,8 @@
type (dm_info), pointer :: dminfo
type (domain_type), pointer :: domain
+ call MPAS_io_finalize()
+
call mpas_deallocate_domain(domain)
call mpas_dmpar_finalize(dminfo)
Modified: branches/atmos_physics/src/framework/mpas_grid_types.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_grid_types.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/framework/mpas_grid_types.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -1,13 +1,17 @@
module mpas_grid_types
- use mpas_dmpar
+ use mpas_kind_types
+ use mpas_dmpar_types
+ use mpas_attlist
integer, parameter :: nTimeLevs = 2
! Derived type describing info for doing I/O specific to a field
type io_info
- character (len=1024) :: fieldName
+ character (len=StrKIND) :: fieldName
+ character (len=StrKIND) :: units
+ character (len=StrKIND) :: description
integer, dimension(4) :: start
integer, dimension(4) :: count
logical :: input
@@ -19,71 +23,293 @@
! Derived type for storing fields
type field3DReal
+
+ ! Back-pointer to the containing block
type (block_type), pointer :: block
+
+ ! Raw array holding field data on this block
real (kind=RKIND), dimension(:,:,:), pointer :: array
- type (io_info), pointer :: ioinfo
+
+ ! Information used by the I/O layer
+ type (io_info), pointer :: ioinfo ! to be removed later
+ character (len=StrKIND) :: fieldName
+ character (len=StrKIND), dimension(:), pointer :: constituentNames => null()
+ character (len=StrKIND), dimension(3) :: dimNames
+ integer, dimension(3) :: dimSizes
+ logical :: hasTimeDimension
+ logical :: isSuperArray
+ type (att_list_type), pointer :: attList => null()
+
+ ! Pointers to the prev and next blocks for this field on this task
+ type (field3DReal), pointer :: prev, next
+
+ ! Halo communication lists
+ type (exchange_list), dimension(:), pointer :: sendList
+ type (exchange_list), dimension(:), pointer :: recvList
+ type (exchange_list), dimension(:), pointer :: copyList
end type field3DReal
! Derived type for storing fields
type field2DReal
+
+ ! Back-pointer to the containing block
type (block_type), pointer :: block
+
+ ! Raw array holding field data on this block
real (kind=RKIND), dimension(:,:), pointer :: array
- type (io_info), pointer :: ioinfo
+
+ ! Information used by the I/O layer
+ type (io_info), pointer :: ioinfo ! to be removed later
+ character (len=StrKIND) :: fieldName
+ character (len=StrKIND), dimension(:), pointer :: constituentNames => null()
+ character (len=StrKIND), dimension(2) :: dimNames
+ integer, dimension(2) :: dimSizes
+ logical :: hasTimeDimension
+ logical :: isSuperArray
+ type (att_list_type), pointer :: attList => null()
+
+ ! Pointers to the prev and next blocks for this field on this task
+ type (field2DReal), pointer :: prev, next
+
+ ! Halo communication lists
+ type (exchange_list), dimension(:), pointer :: sendList
+ type (exchange_list), dimension(:), pointer :: recvList
+ type (exchange_list), dimension(:), pointer :: copyList
end type field2DReal
! Derived type for storing fields
type field1DReal
+
+ ! Back-pointer to the containing block
type (block_type), pointer :: block
+
+ ! Raw array holding field data on this block
real (kind=RKIND), dimension(:), pointer :: array
- type (io_info), pointer :: ioinfo
+
+ ! Information used by the I/O layer
+ type (io_info), pointer :: ioinfo ! to be removed later
+ character (len=StrKIND) :: fieldName
+ character (len=StrKIND), dimension(:), pointer :: constituentNames => null()
+ character (len=StrKIND), dimension(1) :: dimNames
+ integer, dimension(1) :: dimSizes
+ logical :: hasTimeDimension
+ logical :: isSuperArray
+ type (att_list_type), pointer :: attList => null()
+
+ ! Pointers to the prev and next blocks for this field on this task
+ type (field1DReal), pointer :: prev, next
+
+ ! Halo communication lists
+ type (exchange_list), dimension(:), pointer :: sendList
+ type (exchange_list), dimension(:), pointer :: recvList
+ type (exchange_list), dimension(:), pointer :: copyList
end type field1DReal
! Derived type for storing fields
type field0DReal
+
+ ! Back-pointer to the containing block
type (block_type), pointer :: block
+
+ ! Raw array holding field data on this block
real (kind=RKIND) :: scalar
- type (io_info), pointer :: ioinfo
+
+ ! Information used by the I/O layer
+ type (io_info), pointer :: ioinfo ! to be removed later
+ character (len=StrKIND) :: fieldName
+ character (len=StrKIND), dimension(:), pointer :: constituentNames => null()
+ logical :: hasTimeDimension
+ logical :: isSuperArray
+ type (att_list_type), pointer :: attList => null()
+
+ ! Pointers to the prev and next blocks for this field on this task
+ type (field0DReal), pointer :: prev, next
+
+ ! Halo communication lists
+ type (exchange_list), dimension(:), pointer :: sendList
+ type (exchange_list), dimension(:), pointer :: recvList
+ type (exchange_list), dimension(:), pointer :: copyList
end type field0DReal
! Derived type for storing fields
+ type field3DInteger
+
+ ! Back-pointer to the containing block
+ type (block_type), pointer :: block
+
+ ! Raw array holding field data on this block
+ integer, dimension(:,:,:), pointer :: array
+
+ ! Information used by the I/O layer
+ type (io_info), pointer :: ioinfo ! to be removed later
+ character (len=StrKIND) :: fieldName
+ character (len=StrKIND), dimension(:), pointer :: constituentNames => null()
+ character (len=StrKIND), dimension(3) :: dimNames
+ integer, dimension(3) :: dimSizes
+ logical :: hasTimeDimension
+ logical :: isSuperArray
+ type (att_list_type), pointer :: attList => null()
+
+ ! Pointers to the prev and next blocks for this field on this task
+ type (field3DInteger), pointer :: prev, next
+
+ ! Halo communication lists
+ type (exchange_list), dimension(:), pointer :: sendList
+ type (exchange_list), dimension(:), pointer :: recvList
+ type (exchange_list), dimension(:), pointer :: copyList
+ end type field3DInteger
+
+
+ ! Derived type for storing fields
type field2DInteger
+
+ ! Back-pointer to the containing block
type (block_type), pointer :: block
+
+ ! Raw array holding field data on this block
integer, dimension(:,:), pointer :: array
- type (io_info), pointer :: ioinfo
+
+ ! Information used by the I/O layer
+ type (io_info), pointer :: ioinfo ! to be removed later
+ character (len=StrKIND) :: fieldName
+ character (len=StrKIND), dimension(:), pointer :: constituentNames => null()
+ character (len=StrKIND), dimension(2) :: dimNames
+ integer, dimension(2) :: dimSizes
+ logical :: hasTimeDimension
+ logical :: isSuperArray
+ type (att_list_type), pointer :: attList => null()
+
+ ! Pointers to the prev and next blocks for this field on this task
+ type (field2DInteger), pointer :: prev, next
+
+ ! Halo communication lists
+ type (exchange_list), dimension(:), pointer :: sendList
+ type (exchange_list), dimension(:), pointer :: recvList
+ type (exchange_list), dimension(:), pointer :: copyList
end type field2DInteger
! Derived type for storing fields
type field1DInteger
+
+ ! Back-pointer to the containing block
type (block_type), pointer :: block
+
+ ! Raw array holding field data on this block
integer, dimension(:), pointer :: array
- type (io_info), pointer :: ioinfo
+
+ ! Information used by the I/O layer
+ type (io_info), pointer :: ioinfo ! to be removed later
+ character (len=StrKIND) :: fieldName
+ character (len=StrKIND), dimension(:), pointer :: constituentNames => null()
+ character (len=StrKIND), dimension(1) :: dimNames
+ integer, dimension(1) :: dimSizes
+ logical :: hasTimeDimension
+ logical :: isSuperArray
+ type (att_list_type), pointer :: attList => null()
+
+ ! Pointers to the prev and next blocks for this field on this task
+ type (field1DInteger), pointer :: prev, next
+
+ ! Halo communication lists
+ type (exchange_list), dimension(:), pointer :: sendList
+ type (exchange_list), dimension(:), pointer :: recvList
+ type (exchange_list), dimension(:), pointer :: copyList
end type field1DInteger
! Derived type for storing fields
+ type field0DInteger
+
+ ! Back-pointer to the containing block
+ type (block_type), pointer :: block
+
+ ! Raw array holding field data on this block
+ integer :: scalar
+
+ ! Information used by the I/O layer
+ type (io_info), pointer :: ioinfo ! to be removed later
+ character (len=StrKIND) :: fieldName
+ character (len=StrKIND), dimension(:), pointer :: constituentNames => null()
+ logical :: hasTimeDimension
+ logical :: isSuperArray
+ type (att_list_type), pointer :: attList => null()
+
+ ! Pointers to the prev and next blocks for this field on this task
+ type (field0DInteger), pointer :: prev, next
+
+ ! Halo communication lists
+ type (exchange_list), dimension(:), pointer :: sendList
+ type (exchange_list), dimension(:), pointer :: recvList
+ type (exchange_list), dimension(:), pointer :: copyList
+ end type field0DInteger
+
+
+ ! Derived type for storing fields
type field1DChar
+
+ ! Back-pointer to the containing block
type (block_type), pointer :: block
- character (len=64), dimension(:), pointer :: array
- type (io_info), pointer :: ioinfo
+
+ ! Raw array holding field data on this block
+ character (len=StrKIND), dimension(:), pointer :: array
+
+ ! Information used by the I/O layer
+ type (io_info), pointer :: ioinfo ! to be removed later
+ character (len=StrKIND) :: fieldName
+ character (len=StrKIND), dimension(:), pointer :: constituentNames => null()
+ character (len=StrKIND), dimension(1) :: dimNames
+ integer, dimension(1) :: dimSizes
+ logical :: hasTimeDimension
+ logical :: isSuperArray
+ type (att_list_type), pointer :: attList => null()
+
+ ! Pointers to the prev and next blocks for this field on this task
+ type (field1DChar), pointer :: prev, next
+
+ ! Halo communication lists
+ type (exchange_list), dimension(:), pointer :: sendList
+ type (exchange_list), dimension(:), pointer :: recvList
+ type (exchange_list), dimension(:), pointer :: copyList
end type field1DChar
! Derived type for storing fields
type field0DChar
+
+ ! Back-pointer to the containing block
type (block_type), pointer :: block
- character (len=64) :: scalar
- type (io_info), pointer :: ioinfo
+
+ ! Raw array holding field data on this block
+ character (len=StrKIND) :: scalar
+
+ ! Information used by the I/O layer
+ type (io_info), pointer :: ioinfo ! to be removed later
+ character (len=StrKIND) :: fieldName
+ character (len=StrKIND), dimension(:), pointer :: constituentNames => null()
+ logical :: hasTimeDimension
+ logical :: isSuperArray
+ type (att_list_type), pointer :: attList => null()
+
+ ! Pointers to the prev and next blocks for this field on this task
+ type (field0DChar), pointer :: prev, next
+
+ ! Halo communication lists
+ type (exchange_list), dimension(:), pointer :: sendList
+ type (exchange_list), dimension(:), pointer :: recvList
+ type (exchange_list), dimension(:), pointer :: copyList
end type field0DChar
! Derived type for storing grid meta-data
type mesh_type
+ type (block_type), pointer :: block
+
#include "field_dimensions.inc"
logical :: on_a_sphere
@@ -99,12 +325,17 @@
! Type for storing (possibly architecture specific) information concerning to parallelism
type parallel_info
- type (exchange_list), pointer :: cellsToSend ! List of types describing which cells to send to other blocks
- type (exchange_list), pointer :: cellsToRecv ! List of types describing which cells to receive from other blocks
- type (exchange_list), pointer :: edgesToSend ! List of types describing which edges to send to other blocks
- type (exchange_list), pointer :: edgesToRecv ! List of types describing which edges to receive from other blocks
- type (exchange_list), pointer :: verticesToSend ! List of types describing which vertices to send to other blocks
- type (exchange_list), pointer :: verticesToRecv ! List of types describing which vertices to receive from other blocks
+ type (exchange_list), dimension(:), pointer :: cellsToSend ! List of types describing which cells to send to other blocks
+ type (exchange_list), dimension(:), pointer :: cellsToRecv ! List of types describing which cells to receive from other blocks
+ type (exchange_list), dimension(:), pointer :: cellsToCopy ! List of types describing which cells to copy from other blocks
+
+ type (exchange_list), dimension(:), pointer :: edgesToSend ! List of types describing which edges to send to other blocks
+ type (exchange_list), dimension(:), pointer :: edgesToRecv ! List of types describing which edges to receive from other blocks
+ type (exchange_list), dimension(:), pointer :: edgesToCopy ! List of types describing which edges to copy from other blocks
+
+ type (exchange_list), dimension(:), pointer :: verticesToSend ! List of types describing which vertices to send to other blocks
+ type (exchange_list), dimension(:), pointer :: verticesToRecv ! List of types describing which vertices to receive from other blocks
+ type (exchange_list), dimension(:), pointer :: verticesToCopy ! List of types describing which vertices to copy from other blocks
end type parallel_info
@@ -113,6 +344,9 @@
#include "block_group_members.inc"
+ integer :: blockID ! Unique global ID number for this block
+ integer :: localBlockID ! Unique local ID number for this block
+
type (domain_type), pointer :: domain
type (parallel_info), pointer :: parinfo
@@ -147,7 +381,7 @@
end subroutine mpas_allocate_domain
- subroutine mpas_allocate_block(b, dom, &
+ subroutine mpas_allocate_block(b, dom, blockID, &
#include "dim_dummy_args.inc"
)
@@ -155,15 +389,33 @@
type (block_type), pointer :: b
type (domain_type), pointer :: dom
+ integer, intent(in) :: blockID
#include "dim_dummy_decls.inc"
+
+ integer, parameter :: nHaloLayers = 2
+
integer :: i
+ b % blockID = blockID
+
nullify(b % prev)
nullify(b % next)
allocate(b % parinfo)
+ allocate(b % parinfo % cellsToSend(nHaloLayers))
+ allocate(b % parinfo % cellsToRecv(nHaloLayers))
+ allocate(b % parinfo % cellsToCopy(nHaloLayers))
+
+ allocate(b % parinfo % edgesToSend(nHaloLayers + 1)) ! first index is owned-cell edges
+ allocate(b % parinfo % edgesToRecv(nHaloLayers + 1)) ! first index is owned-cell edges
+ allocate(b % parinfo % edgesToCopy(nHaloLayers + 1)) ! first index is owned-cell edges
+
+ allocate(b % parinfo % verticesToSend(nHaloLayers + 1)) ! first index is owned-cell vertices
+ allocate(b % parinfo % verticesToRecv(nHaloLayers + 1)) ! first index is owned-cell vertices
+ allocate(b % parinfo % verticesToCopy(nHaloLayers + 1)) ! first index is owned-cell vertices
+
b % domain => dom
#include "block_allocs.inc"
@@ -201,6 +453,21 @@
integer :: i
+ ! BUG: It seems like we should be deallocating the exchange lists before we
+ ! deallocate the array of head pointers and the parinfo type...
+
+ deallocate(b % parinfo % cellsToSend)
+ deallocate(b % parinfo % cellsToRecv)
+ deallocate(b % parinfo % cellsToCopy)
+
+ deallocate(b % parinfo % edgesToSend)
+ deallocate(b % parinfo % edgesToRecv)
+ deallocate(b % parinfo % edgesToCopy)
+
+ deallocate(b % parinfo % verticesToSend)
+ deallocate(b % parinfo % verticesToRecv)
+ deallocate(b % parinfo % verticesToCopy)
+
deallocate(b % parinfo)
#include "block_deallocs.inc"
@@ -216,4 +483,8 @@
#include "group_shift_level_routines.inc"
+
+#include "field_links.inc"
+
+
end module mpas_grid_types
Copied: branches/atmos_physics/src/framework/mpas_io.F (from rev 1863, trunk/mpas/src/framework/mpas_io.F)
===================================================================
--- branches/atmos_physics/src/framework/mpas_io.F         (rev 0)
+++ branches/atmos_physics/src/framework/mpas_io.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -0,0 +1,3475 @@
+module mpas_io
+
+ use mpas_dmpar_types
+ use mpas_dmpar
+
+ use pio
+ use piolib_mod
+ use pionfatt_mod
+ use pio_types
+
+ ! File access modes
+ integer, parameter :: MPAS_IO_READ = 1, &
+ MPAS_IO_WRITE = 2
+
+ ! I/O formats
+ integer, parameter :: MPAS_IO_NETCDF = 3, &
+ MPAS_IO_PNETCDF = 4
+
+ ! Field and attribute types
+ integer, parameter :: MPAS_IO_REAL = 5, &
+ MPAS_IO_DOUBLE = 6, &
+ MPAS_IO_INT = 7, &
+ MPAS_IO_LOGICAL = 8, &
+ MPAS_IO_CHAR = 9
+
+ ! Unlimited / record dimension
+ integer, parameter :: MPAS_IO_UNLIMITED_DIM = -123456
+
+ ! Error codes
+ integer, parameter :: MPAS_IO_NOERR = 0, &
+ MPAS_IO_ERR_INVALID_MODE = -1, &
+ MPAS_IO_ERR_INVALID_FORMAT = -2, &
+ MPAS_IO_ERR_LONG_FILENAME = -3, &
+ MPAS_IO_ERR_UNINIT_HANDLE = -4, &
+ MPAS_IO_ERR_PIO = -5, &
+ MPAS_IO_ERR_DATA_MODE = -6, &
+ MPAS_IO_ERR_NOWRITE = -7, &
+ MPAS_IO_ERR_REDEF_DIM = -8, &
+ MPAS_IO_ERR_REDEF_VAR = -9, &
+ MPAS_IO_ERR_UNDEFINED_DIM = -10, &
+ MPAS_IO_ERR_UNDEFINED_VAR = -11, &
+ MPAS_IO_ERR_REDEF_ATT = -12, &
+ MPAS_IO_ERR_WRONG_ATT_TYPE = -13, &
+ MPAS_IO_ERR_NO_DECOMP = -14, &
+ MPAS_IO_ERR_TWO_UNLIMITED_DIMS = -15, &
+ MPAS_IO_ERR_WRONG_MODE = -16, &
+ MPAS_IO_ERR_NO_UNLIMITED_DIM = -17, &
+ MPAS_IO_ERR_UNIMPLEMENTED = -18
+
+
+ type MPAS_IO_Handle_type
+ logical :: initialized = .false.
+ logical :: data_mode = .false.
+ type (file_desc_t) :: pio_file
+ character (len=StrKIND) :: filename
+ integer :: iomode
+ integer :: ioformat
+ integer :: pio_unlimited_dimid
+ integer (kind=PIO_offset) :: frame_number = 1
+ type (dimlist_type), pointer :: dimlist_head => null()
+ type (dimlist_type), pointer :: dimlist_tail => null()
+ type (fieldlist_type), pointer :: fieldlist_head => null()
+ type (fieldlist_type), pointer :: fieldlist_tail => null()
+ type (attlist_type), pointer :: attlist_head => null()
+ type (attlist_type), pointer :: attlist_tail => null()
+ end type MPAS_IO_Handle_type
+
+
+ interface MPAS_io_get_var
+ module procedure MPAS_io_get_var_int0d
+ module procedure MPAS_io_get_var_int1d
+ module procedure MPAS_io_get_var_int2d
+ module procedure MPAS_io_get_var_int3d
+ module procedure MPAS_io_get_var_int4d
+ module procedure MPAS_io_get_var_real0d
+ module procedure MPAS_io_get_var_real1d
+ module procedure MPAS_io_get_var_real2d
+ module procedure MPAS_io_get_var_real3d
+ module procedure MPAS_io_get_var_real4d
+ module procedure MPAS_io_get_var_char0d
+ end interface MPAS_io_get_var
+
+ interface MPAS_io_put_var
+ module procedure MPAS_io_put_var_int0d
+ module procedure MPAS_io_put_var_int1d
+ module procedure MPAS_io_put_var_int2d
+ module procedure MPAS_io_put_var_int3d
+ module procedure MPAS_io_put_var_int4d
+ module procedure MPAS_io_put_var_real0d
+ module procedure MPAS_io_put_var_real1d
+ module procedure MPAS_io_put_var_real2d
+ module procedure MPAS_io_put_var_real3d
+ module procedure MPAS_io_put_var_real4d
+ module procedure MPAS_io_put_var_char0d
+ end interface MPAS_io_put_var
+
+ interface MPAS_io_get_att
+ module procedure MPAS_io_get_att_int0d
+ module procedure MPAS_io_get_att_int1d
+ module procedure MPAS_io_get_att_real0d
+ module procedure MPAS_io_get_att_real1d
+ module procedure MPAS_io_get_att_text
+ end interface MPAS_io_get_att
+
+ interface MPAS_io_put_att
+ module procedure MPAS_io_put_att_int0d
+ module procedure MPAS_io_put_att_int1d
+ module procedure MPAS_io_put_att_real0d
+ module procedure MPAS_io_put_att_real1d
+ module procedure MPAS_io_put_att_text
+ end interface MPAS_io_put_att
+
+
+ !!!!!!!! PRIVATE !!!!!!!!
+
+! integer, parameter :: ATT_INT = 1
+! integer, parameter :: ATT_INTA = 2
+! integer, parameter :: ATT_REAL = 3
+! integer, parameter :: ATT_REALA = 4
+! integer, parameter :: ATT_TEXT = 5
+
+ type decomphandle_type
+ integer :: field_type
+ integer, dimension(:), pointer :: dims
+ integer, dimension(:), pointer :: indices
+ type (io_desc_t) :: pio_iodesc
+ end type decomphandle_type
+
+ type atthandle_type
+ character (len=StrKIND) :: attName
+ integer :: attType
+ integer :: attValueInt
+ integer, dimension(:), pointer :: attValueIntA => null()
+ real (kind=RKIND) :: attValueReal
+ real (kind=RKIND), dimension(:), pointer :: attValueRealA => null()
+ character (len=StrKIND) :: attValueText
+ end type atthandle_type
+
+ type dimhandle_type
+ character (len=StrKIND) :: dimname
+ logical :: is_unlimited_dim = .false.
+ integer :: dimsize
+ integer :: dimid
+ end type dimhandle_type
+
+ type fieldhandle_type
+ character (len=StrKIND) :: fieldname
+ integer :: fieldid
+ type (Var_desc_t) :: field_desc
+ integer :: field_type
+ logical :: has_unlimited_dim = .false.
+ integer :: ndims
+ type (dimhandle_type), pointer, dimension(:) :: dims
+ type (attlist_type), pointer :: attlist_head => null()
+ type (attlist_type), pointer :: attlist_tail => null()
+ type (decomphandle_type), pointer :: decomp => null()
+ end type fieldhandle_type
+
+ type decomplist_type
+ type (decomphandle_type), pointer :: decomphandle
+ type (decomplist_type), pointer :: next => null()
+ end type decomplist_type
+
+ type attlist_type
+ type (atthandle_type), pointer :: atthandle
+ type (attlist_type), pointer :: next => null()
+ end type attlist_type
+
+ type dimlist_type
+ type (dimhandle_type), pointer :: dimhandle
+ type (dimlist_type), pointer :: next => null()
+ end type dimlist_type
+
+ type fieldlist_type
+ type (fieldhandle_type), pointer :: fieldhandle
+ type (fieldlist_type), pointer :: next => null()
+ end type fieldlist_type
+
+ type (iosystem_desc_t), private, save :: pio_iosystem
+ type (decomplist_type), pointer, private :: decomp_list => null()
+ type (dm_info), private :: local_dminfo
+
+
+ contains
+
+
+ subroutine MPAS_io_init(dminfo, io_task_count, io_task_stride, ierr)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: io_task_count
+ integer, intent(in) :: io_task_stride
+ integer, intent(out), optional :: ierr
+
+! write(0,*) 'Called MPAS_io_init()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ local_dminfo = dminfo
+
+!write(0,*) 'MGD PIO_init'
+ call PIO_init(local_dminfo % my_proc_id, & ! comp_rank
+ local_dminfo % comm, & ! comp_comm
+ io_task_count, & ! num_iotasks
+ 0, & ! num_aggregator
+ io_task_stride, & ! stride
+ PIO_rearr_box, & ! rearr
+ pio_iosystem) ! iosystem
+
+ call pio_seterrorhandling(pio_iosystem, PIO_BCAST_ERROR)
+
+ end subroutine MPAS_io_init
+
+
+ type (MPAS_IO_Handle_type) function MPAS_io_open(filename, mode, ioformat, ierr)
+
+ implicit none
+
+ character (len=*), intent(in) :: filename
+ integer, intent(in) :: mode
+ integer, intent(in) :: ioformat
+ integer, intent(out), optional :: ierr
+
+ integer :: pio_iotype
+ integer :: pio_ierr
+
+! write(0,*) 'Called MPAS_io_open()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+
+ ! Sanity checks
+ if (mode /= MPAS_IO_READ .and. &
+ mode /= MPAS_IO_WRITE) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_INVALID_MODE
+ return
+ end if
+ if (ioformat /= MPAS_IO_NETCDF .and. &
+ ioformat /= MPAS_IO_PNETCDF) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_INVALID_FORMAT
+ return
+ end if
+ if (len(filename) > 1024) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_LONG_FILENAME
+ return
+ end if
+
+ MPAS_io_open % filename = filename
+ MPAS_io_open % iomode = mode
+ MPAS_io_open % ioformat = ioformat
+
+ if (ioformat == MPAS_IO_PNETCDF) then
+ pio_iotype = PIO_iotype_pnetcdf
+ else
+ pio_iotype = PIO_iotype_netcdf
+ end if
+
+ if (mode == MPAS_IO_WRITE) then
+!write(0,*) 'MGD PIO_createfile'
+ pio_ierr = PIO_createfile(pio_iosystem, MPAS_io_open % pio_file, pio_iotype, trim(filename), PIO_64BIT_OFFSET)
+ else
+!write(0,*) 'MGD PIO_openfile'
+ pio_ierr = PIO_openfile(pio_iosystem, MPAS_io_open % pio_file, pio_iotype, trim(filename), PIO_nowrite)
+ endif
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ return
+ end if
+
+ if (mode == MPAS_IO_READ) then
+!MPAS_io_open % pio_unlimited_dimid = 44
+ pio_ierr = PIO_inquire(MPAS_io_open % pio_file, unlimitedDimID=MPAS_io_open % pio_unlimited_dimid)
+!write(0,*) 'Found unlimited dim ', MPAS_io_open % pio_unlimited_dimid
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ return
+ end if
+ end if
+
+ MPAS_io_open % initialized = .true.
+
+ return
+
+ end function MPAS_io_open
+
+
+ subroutine MPAS_io_inq_unlimited_dim(handle, dimname, ierr)
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(out) :: dimname
+ integer, intent(out), optional :: ierr
+
+ integer :: pio_ierr
+
+! write(0,*) 'Called MPAS_io_inq_unlimited_dim()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ ! Sanity checks
+ if (.not. handle % initialized) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNINIT_HANDLE
+ return
+ end if
+ if (handle % iomode /= MPAS_IO_READ) then ! We could eventually handle this for write mode, too...
+ if (present(ierr)) ierr = MPAS_IO_ERR_WRONG_MODE
+ return
+ end if
+
+ pio_ierr = PIO_inq_dimname(handle % pio_file, handle % pio_unlimited_dimid, dimname)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_NO_UNLIMITED_DIM
+ dimname = ' '
+ return
+ end if
+
+ end subroutine MPAS_io_inq_unlimited_dim
+
+
+ subroutine MPAS_io_inq_dim(handle, dimname, dimsize, ierr)
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: dimname
+ integer, intent(out) :: dimsize
+ integer, intent(out), optional :: ierr
+
+ type (dimlist_type), pointer :: new_dimlist_node
+ type (dimlist_type), pointer :: dim_cursor
+ integer :: pio_ierr
+
+! write(0,*) 'Called MPAS_io_inq_dim()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ ! Sanity checks
+ if (.not. handle % initialized) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNINIT_HANDLE
+ return
+ end if
+
+
+ !
+ ! First see if we already have this dimension in our list
+ !
+ dim_cursor => handle % dimlist_head
+ do while (associated(dim_cursor))
+ if (trim(dimname) == trim(dim_cursor % dimhandle % dimname)) then
+ dimsize = dim_cursor % dimhandle % dimsize
+ return
+ end if
+ dim_cursor => dim_cursor % next
+ end do
+
+
+ !
+ ! Otherwise, query the file-level API for information about the dim
+ !
+ allocate(new_dimlist_node)
+ nullify(new_dimlist_node % next)
+ allocate(new_dimlist_node % dimhandle)
+
+ new_dimlist_node % dimhandle % dimname = dimname
+
+ pio_ierr = PIO_inq_dimid(handle % pio_file, trim(dimname), new_dimlist_node % dimhandle % dimid)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ deallocate(new_dimlist_node % dimhandle)
+ deallocate(new_dimlist_node)
+ dimsize = -1
+ return
+ end if
+
+ if (new_dimlist_node % dimhandle % dimid == handle % pio_unlimited_dimid) new_dimlist_node % dimhandle % is_unlimited_dim = .true.
+
+ pio_ierr = PIO_inq_dimlen(handle % pio_file, new_dimlist_node % dimhandle % dimid, new_dimlist_node % dimhandle % dimsize)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ deallocate(new_dimlist_node % dimhandle)
+ deallocate(new_dimlist_node)
+ dimsize = -1
+ return
+ end if
+
+ ! Keep dimension information for future reference
+ if (.not. associated(handle % dimlist_head)) then
+ handle % dimlist_head => new_dimlist_node
+ end if
+ if (.not. associated(handle % dimlist_tail)) then
+ handle % dimlist_tail => new_dimlist_node
+ else
+ handle % dimlist_tail % next => new_dimlist_node
+ handle % dimlist_tail => handle % dimlist_tail % next
+ end if
+
+ dimsize = new_dimlist_node % dimhandle % dimsize
+
+ end subroutine MPAS_io_inq_dim
+
+
+ subroutine MPAS_io_def_dim(handle, dimname, dimsize, ierr)
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: dimname
+ integer, intent(in) :: dimsize
+ integer, intent(out), optional :: ierr
+
+ integer :: pio_ierr
+ type (dimlist_type), pointer :: new_dimlist_node
+ type (dimlist_type), pointer :: dim_cursor
+
+! write(0,*) 'Called MPAS_io_def_dim()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ ! Sanity checks
+ if (.not. handle % initialized) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNINIT_HANDLE
+ return
+ end if
+ if (handle % data_mode) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_DATA_MODE
+ return
+ end if
+ if (handle % iomode /= MPAS_IO_WRITE) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_NOWRITE
+ return
+ end if
+
+
+ !
+ ! Check that this dimension hasn't already been defined
+ !
+ dim_cursor => handle % dimlist_head
+ do while (associated(dim_cursor))
+ if (trim(dimname) == trim(dim_cursor % dimhandle % dimname)) then
+ if (dimsize /= dim_cursor % dimhandle % dimsize) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_REDEF_DIM
+ end if
+ return
+ end if
+
+ ! Also, check that the user is not trying to define more than one record dimension
+ if (dimsize == MPAS_IO_UNLIMITED_DIM .and. dim_cursor % dimhandle % is_unlimited_dim) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_TWO_UNLIMITED_DIMS
+ return
+ end if
+ dim_cursor => dim_cursor % next
+ end do
+
+
+ !
+ ! Otherwise, define it
+ !
+ allocate(new_dimlist_node)
+ nullify(new_dimlist_node % next)
+ allocate(new_dimlist_node % dimhandle)
+
+ new_dimlist_node % dimhandle % dimname = dimname
+ new_dimlist_node % dimhandle % dimsize = dimsize
+ if (dimsize == MPAS_IO_UNLIMITED_DIM) then
+ new_dimlist_node % dimhandle % is_unlimited_dim = .true.
+ pio_ierr = PIO_def_dim(handle % pio_file, trim(dimname), PIO_unlimited, new_dimlist_node % dimhandle % dimid)
+ else
+ pio_ierr = PIO_def_dim(handle % pio_file, trim(dimname), dimsize, new_dimlist_node % dimhandle % dimid)
+ end if
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ deallocate(new_dimlist_node % dimhandle)
+ deallocate(new_dimlist_node)
+ return
+ end if
+
+ ! Keep dimension information
+ if (.not. associated(handle % dimlist_head)) then
+ handle % dimlist_head => new_dimlist_node
+!write(0,*) 'Assigning head for '//trim(dimname)
+ end if
+ if (.not. associated(handle % dimlist_tail)) then
+ handle % dimlist_tail => new_dimlist_node
+!write(0,*) 'Assigning tail for '//trim(dimname)
+ else
+ handle % dimlist_tail % next => new_dimlist_node
+ handle % dimlist_tail => handle % dimlist_tail % next
+!write(0,*) 'Extending tail for '//trim(dimname)
+ end if
+
+ end subroutine MPAS_io_def_dim
+
+
+ subroutine MPAS_io_inq_var(handle, fieldname, fieldtype, ndims, dimnames, dimsizes, ierr)
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ integer, intent(out), optional :: fieldtype
+ integer, intent(out), optional :: ndims
+ character (len=StrKIND), dimension(:), pointer, optional :: dimnames
+ integer, dimension(:), pointer, optional :: dimsizes
+ integer, intent(out), optional :: ierr
+
+ integer :: i
+ type (fieldlist_type), pointer :: new_fieldlist_node
+ type (fieldlist_type), pointer :: field_cursor
+ type (dimlist_type), pointer :: new_dimlist_node
+ type (dimlist_type), pointer :: dim_cursor
+ integer, dimension(:), pointer :: dimids
+ logical :: found
+ integer :: pio_ierr
+
+! write(0,*) 'Called MPAS_io_inq_var()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ ! Sanity checks
+ if (.not. handle % initialized) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNINIT_HANDLE
+ return
+ end if
+
+
+ !
+ ! See if we already have this variable in our list
+ !
+ found = .false.
+ field_cursor => handle % fieldlist_head
+ do while (associated(field_cursor))
+ if (trim(fieldname) == trim(field_cursor % fieldhandle % fieldname)) then
+!write(0,*) 'Already found variable in fieldlist'
+ found = .true.
+ exit
+ end if
+ field_cursor => field_cursor % next
+ end do
+
+ !
+ ! Otherwise, inquire through the file-level API and add it to the list
+ !
+ if (.not. found) then
+
+ allocate(new_fieldlist_node)
+ nullify(new_fieldlist_node % next)
+ allocate(new_fieldlist_node % fieldhandle)
+
+ new_fieldlist_node % fieldhandle % fieldname = fieldname
+
+ ! Get variable ID
+ pio_ierr = PIO_inq_varid(handle % pio_file, trim(fieldname), new_fieldlist_node % fieldhandle % fieldid)
+ pio_ierr = PIO_inq_varid(handle % pio_file, trim(fieldname), new_fieldlist_node % fieldhandle % field_desc)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ deallocate(new_fieldlist_node % fieldhandle)
+ deallocate(new_fieldlist_node)
+ return
+ end if
+!write(0,*) 'Inquired about variable ID', new_fieldlist_node % fieldhandle % fieldid
+
+ ! Get field type
+ pio_ierr = PIO_inq_vartype(handle % pio_file, new_fieldlist_node % fieldhandle % fieldid, new_fieldlist_node % fieldhandle % field_type)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ deallocate(new_fieldlist_node % fieldhandle)
+ deallocate(new_fieldlist_node)
+ return
+ end if
+!write(0,*) 'Inquired about variable type', new_fieldlist_node % fieldhandle % field_type
+
+ ! Convert to MPAS type
+ if (new_fieldlist_node % fieldhandle % field_type == PIO_double) then
+ new_fieldlist_node % fieldhandle % field_type = MPAS_IO_DOUBLE
+ else if (new_fieldlist_node % fieldhandle % field_type == PIO_real) then
+ new_fieldlist_node % fieldhandle % field_type = MPAS_IO_REAL
+ else if (new_fieldlist_node % fieldhandle % field_type == PIO_int) then
+ new_fieldlist_node % fieldhandle % field_type = MPAS_IO_INT
+ else if (new_fieldlist_node % fieldhandle % field_type == PIO_char) then
+ new_fieldlist_node % fieldhandle % field_type = MPAS_IO_CHAR
+!!!!!!!! PIO DOES NOT SUPPORT LOGICAL !!!!!!!!
+ end if
+
+ ! Get number of dimensions
+ pio_ierr = PIO_inq_varndims(handle % pio_file, new_fieldlist_node % fieldhandle % fieldid, new_fieldlist_node % fieldhandle % ndims)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ deallocate(new_fieldlist_node % fieldhandle)
+ deallocate(new_fieldlist_node)
+ return
+ end if
+!write(0,*) 'Inquired about number of dimensions ', new_fieldlist_node % fieldhandle % ndims
+
+ allocate(dimids(new_fieldlist_node % fieldhandle % ndims))
+
+ ! Get dimension IDs
+ if (new_fieldlist_node % fieldhandle % ndims > 0) then
+ pio_ierr = PIO_inq_vardimid(handle % pio_file, new_fieldlist_node % fieldhandle % fieldid, dimids)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ deallocate(new_fieldlist_node % fieldhandle)
+ deallocate(new_fieldlist_node)
+ deallocate(dimids)
+ return
+ end if
+!write(0,*) 'Inquired about dimension IDs ', dimids
+ end if
+
+ allocate(new_fieldlist_node % fieldhandle % dims(new_fieldlist_node % fieldhandle % ndims))
+
+ ! Get information about dimensions
+ do i=1,new_fieldlist_node % fieldhandle % ndims
+ new_fieldlist_node % fieldhandle % dims(i) % dimid = dimids(i)
+ if (dimids(i) == handle % pio_unlimited_dimid) then
+ new_fieldlist_node % fieldhandle % dims(i) % is_unlimited_dim = .true.
+ new_fieldlist_node % fieldhandle % has_unlimited_dim = .true.
+ end if
+
+ pio_ierr = PIO_inq_dimlen(handle % pio_file, dimids(i), new_fieldlist_node % fieldhandle % dims(i) % dimsize)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ deallocate(new_fieldlist_node % fieldhandle)
+ deallocate(new_fieldlist_node)
+ deallocate(dimids)
+ return
+ end if
+!write(0,*) 'Inquired about dimension size ', new_fieldlist_node % fieldhandle % dims(i) % dimsize
+
+ pio_ierr = PIO_inq_dimname(handle % pio_file, dimids(i), new_fieldlist_node % fieldhandle % dims(i) % dimname)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ deallocate(new_fieldlist_node % fieldhandle)
+ deallocate(new_fieldlist_node)
+ deallocate(dimids)
+ return
+ end if
+!write(0,*) 'Inquired about dimension name ', trim(new_fieldlist_node % fieldhandle % dims(i) % dimname)
+
+ end do
+
+ deallocate(dimids)
+
+ ! Keep variable information for future reference
+ if (.not. associated(handle % fieldlist_head)) then
+ handle % fieldlist_head => new_fieldlist_node
+!write(0,*) 'Assigning head for '//trim(fieldname)
+ end if
+ if (.not. associated(handle % fieldlist_tail)) then
+ handle % fieldlist_tail => new_fieldlist_node
+!write(0,*) 'Assigning tail for '//trim(fieldname)
+ else
+ handle % fieldlist_tail % next => new_fieldlist_node
+ handle % fieldlist_tail => handle % fieldlist_tail % next
+!write(0,*) 'Extending tail for '//trim(fieldname)
+ end if
+
+ ! Keep dimension information for any new dimensions that were encountered
+ do i=1,new_fieldlist_node % fieldhandle % ndims
+ found = .false.
+ dim_cursor => handle % dimlist_head
+ do while (associated(dim_cursor))
+ if (trim(dim_cursor % dimhandle % dimname) == trim(new_fieldlist_node % fieldhandle % dims(i) % dimname)) then
+!write(0,*) 'Already have dimension '//trim(new_fieldlist_node % fieldhandle % dims(i) % dimname)//' in our list...'
+ found = .true.
+ exit
+ end if
+ dim_cursor => dim_cursor % next
+ end do
+
+ if (.not. found) then
+ allocate(new_dimlist_node)
+ nullify(new_dimlist_node % next)
+ allocate(new_dimlist_node % dimhandle)
+ new_dimlist_node % dimhandle = new_fieldlist_node % fieldhandle % dims(i)
+ if (.not. associated(handle % dimlist_head)) then
+ handle % dimlist_head => new_dimlist_node
+!write(0,*) 'Assigning head for '//trim(new_dimlist_node % dimhandle % dimname)
+ end if
+ if (.not. associated(handle % dimlist_tail)) then
+ handle % dimlist_tail => new_dimlist_node
+!write(0,*) 'Assigning tail for '//trim(new_dimlist_node % dimhandle % dimname)
+ else
+ handle % dimlist_tail % next => new_dimlist_node
+ handle % dimlist_tail => handle % dimlist_tail % next
+!write(0,*) 'Extending tail for '//trim(new_dimlist_node % dimhandle % dimname)
+ end if
+ end if
+ end do
+ field_cursor => new_fieldlist_node
+ end if
+
+
+ !
+ ! Set output arguments
+ !
+ if (present(fieldtype)) fieldtype = field_cursor % fieldhandle % field_type
+ if (present(ndims)) ndims = field_cursor % fieldhandle % ndims
+ if (present(dimnames)) then
+ allocate(dimnames(field_cursor % fieldhandle % ndims))
+ do i=1,field_cursor % fieldhandle % ndims
+ dimnames(i) = field_cursor % fieldhandle % dims(i) % dimname
+ end do
+ end if
+ if (present(dimsizes)) then
+ allocate(dimsizes(field_cursor % fieldhandle % ndims))
+ do i=1,field_cursor % fieldhandle % ndims
+ dimsizes(i) = field_cursor % fieldhandle % dims(i) % dimsize
+ end do
+ end if
+
+ end subroutine MPAS_io_inq_var
+
+
+ subroutine MPAS_io_def_var(handle, fieldname, fieldtype, dimnames, ierr)
+
+! Define a new variable in a file opened for writing: create a fieldlist node
+! recording the field's name, MPAS type, and dimensions (resolved against the
+! handle's previously defined dimlist), and define the variable through PIO.
+! Re-defining an existing field with matching type/rank is a silent no-op.
+!
+! Arguments:
+!   handle    - I/O handle; must be initialized, in define mode, opened for write
+!   fieldname - name of the variable to define
+!   fieldtype - one of MPAS_IO_DOUBLE / MPAS_IO_REAL / MPAS_IO_INT / MPAS_IO_CHAR
+!   dimnames  - names of the field's dimensions; each must already be defined
+!   ierr      - optional status; MPAS_IO_NOERR on success
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ integer, intent(in) :: fieldtype
+ character (len=StrKIND), dimension(:), intent(in) :: dimnames
+ integer, intent(out), optional :: ierr
+
+ integer :: i
+ integer :: pio_ierr
+ integer :: pio_type
+ integer :: ndims
+ type (fieldlist_type), pointer :: new_fieldlist_node
+ type (fieldlist_type), pointer :: field_cursor
+ type (dimlist_type), pointer :: dim_cursor
+ integer, dimension(:), pointer :: dimids
+
+! write(0,*) 'Called MPAS_io_def_var()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ ! Sanity checks
+ if (.not. handle % initialized) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNINIT_HANDLE
+ return
+ end if
+ if (handle % data_mode) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_DATA_MODE
+ return
+ end if
+ if (handle % iomode /= MPAS_IO_WRITE) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_NOWRITE
+ return
+ end if
+
+
+ !
+ ! Check whether this field has already been defined
+ !
+! NOTE(review): on a name match this returns whether or not the rank/type
+! agree; a conflict only sets ierr. Dimension NAMES are never compared, so a
+! redefinition with same rank/type but different dims is silently accepted.
+ ndims = size(dimnames)
+!write(0,*) 'Defining variable with ',ndims,' dimensions'
+ field_cursor => handle % fieldlist_head
+ do while (associated(field_cursor))
+ if (trim(fieldname) == trim(field_cursor % fieldhandle % fieldname)) then
+ if (ndims /= field_cursor % fieldhandle % ndims) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_REDEF_VAR
+! write(0,*) 'Error: Field '//trim(fieldname)//' previously defined with conflicting number of dimensions: ', &
+! ndims, field_cursor % fieldhandle % ndims
+ end if
+ if (fieldtype /= field_cursor % fieldhandle % field_type) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_REDEF_VAR
+! write(0,*) 'Error: Field '//trim(fieldname)//' previously defined with conflicting type: ', &
+! fieldtype, field_cursor % fieldhandle % field_type
+ end if
+ return
+ end if
+ field_cursor => field_cursor % next
+ end do
+
+ !
+ ! Otherwise, define it
+ !
+ allocate(new_fieldlist_node)
+ nullify(new_fieldlist_node % next)
+ allocate(new_fieldlist_node % fieldhandle)
+
+ new_fieldlist_node % fieldhandle % fieldname = fieldname
+ new_fieldlist_node % fieldhandle % field_type = fieldtype
+ new_fieldlist_node % fieldhandle % ndims = ndims
+
+ ! Resolve each dimension name against the handle's dimlist; a copy of the
+ ! dimhandle is stored in the field so dimsize/dimid are available later.
+ allocate(dimids(ndims))
+ allocate(new_fieldlist_node % fieldhandle % dims(ndims))
+ do i = 1, ndims
+ dim_cursor => handle % dimlist_head
+ do while (associated(dim_cursor))
+ if (trim(dimnames(i)) == trim(dim_cursor % dimhandle % dimname)) then
+ exit
+ end if
+ dim_cursor => dim_cursor % next
+ end do
+ if (associated(dim_cursor)) then
+ dimids(i) = dim_cursor % dimhandle % dimid
+ if (dim_cursor % dimhandle % is_unlimited_dim) new_fieldlist_node % fieldhandle % has_unlimited_dim = .true.
+ new_fieldlist_node % fieldhandle % dims(i) = dim_cursor % dimhandle
+!write(0,*) 'Found dimension '//trim(new_fieldlist_node % fieldhandle % dims(i) % dimname)//' for field '//trim(fieldname)
+ else
+ ! Undefined dimension: unwind all allocations made for this field
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNDEFINED_DIM
+ deallocate(new_fieldlist_node % fieldhandle % dims)
+ deallocate(new_fieldlist_node % fieldhandle)
+ deallocate(new_fieldlist_node)
+ deallocate(dimids)
+ return
+! write(0,*) 'Error finding dimension '//trim(dimnames(i))//' for field '//trim(fieldname)
+ end if
+ end do
+
+ ! Convert from MPAS type
+! NOTE(review): if fieldtype matches none of the four cases (e.g. a logical
+! type), pio_type is left undefined and an undefined value is passed to
+! PIO_def_var below -- confirm callers never pass unsupported types.
+ if (new_fieldlist_node % fieldhandle % field_type == MPAS_IO_DOUBLE) then
+ pio_type = PIO_double
+ else if (new_fieldlist_node % fieldhandle % field_type == MPAS_IO_REAL) then
+ pio_type = PIO_real
+ else if (new_fieldlist_node % fieldhandle % field_type == MPAS_IO_INT) then
+ pio_type = PIO_int
+ else if (new_fieldlist_node % fieldhandle % field_type == MPAS_IO_CHAR) then
+ pio_type = PIO_char
+!!!!!!!! PIO DOES NOT SUPPORT LOGICAL !!!!!!!!
+ end if
+
+ ! Scalar (0-d) variables are defined without a dimid list
+ if (ndims == 0) then
+ pio_ierr = PIO_def_var(handle % pio_file, trim(fieldname), pio_type, new_fieldlist_node % fieldhandle % field_desc)
+ else
+ pio_ierr = PIO_def_var(handle % pio_file, trim(fieldname), pio_type, dimids, new_fieldlist_node % fieldhandle % field_desc)
+ end if
+! NOTE(review): on the two PIO error returns below, new_fieldlist_node and
+! dimids are not deallocated and the node is never linked into the list (leak).
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ return
+ end if
+
+ ! Get the varid for use by put_att routines
+ pio_ierr = PIO_inq_varid(handle % pio_file, trim(fieldname), new_fieldlist_node % fieldhandle % fieldid)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ return
+ end if
+
+ deallocate(dimids)
+
+ ! Keep variable information for future use
+ ! Append the new node to the handle's fieldlist (head/tail maintenance)
+ if (.not. associated(handle % fieldlist_head)) then
+ handle % fieldlist_head => new_fieldlist_node
+!write(0,*) 'Assigning head for '//trim(fieldname)
+ end if
+ if (.not. associated(handle % fieldlist_tail)) then
+ handle % fieldlist_tail => new_fieldlist_node
+!write(0,*) 'Assigning tail for '//trim(fieldname)
+ else
+ handle % fieldlist_tail % next => new_fieldlist_node
+ handle % fieldlist_tail => handle % fieldlist_tail % next
+!write(0,*) 'Extending tail for '//trim(fieldname)
+ end if
+
+ end subroutine MPAS_io_def_var
+
+
+ subroutine MPAS_io_get_var_indices(handle, fieldname, indices, ierr)
+
+! Return a copy of the decomposition indices associated with a field.
+! The indices array is freshly allocated here; the caller owns (and must
+! deallocate) it.
+!
+! Arguments:
+!   handle    - initialized I/O handle
+!   fieldname - name of a previously defined field
+!   indices   - pointer; allocated and filled with the field's decomp indices
+!   ierr      - optional status; MPAS_IO_ERR_UNDEFINED_VAR if the field is
+!               unknown, MPAS_IO_ERR_NO_DECOMP if it has no decomposition
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(in) :: handle
+ character (len=*), intent(in) :: fieldname
+ integer, dimension(:), pointer :: indices
+ integer, intent(out), optional :: ierr
+
+ type (fieldlist_type), pointer :: field_cursor
+
+! write(0,*) 'Called MPAS_io_get_var_indices()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ ! Sanity checks
+ if (.not. handle % initialized) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNINIT_HANDLE
+ return
+ end if
+
+
+ !
+ ! Check whether the field has been defined
+ !
+ field_cursor => handle % fieldlist_head
+ do while (associated(field_cursor))
+ if (trim(fieldname) == trim(field_cursor % fieldhandle % fieldname)) then
+ exit
+ end if
+ field_cursor => field_cursor % next
+ end do
+ if (.not. associated(field_cursor)) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNDEFINED_VAR
+ return
+ end if
+!write(0,*) trim(fieldname), ' has been defined'
+
+ if (.not. associated(field_cursor % fieldhandle % decomp)) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_NO_DECOMP
+ return
+ end if
+
+ ! Hand back a copy so the caller cannot mutate the shared decomposition
+ allocate(indices(size(field_cursor % fieldhandle % decomp % indices)))
+ indices(:) = field_cursor % fieldhandle % decomp % indices(:)
+
+ end subroutine MPAS_io_get_var_indices
+
+
+ subroutine MPAS_io_set_var_indices(handle, fieldname, indices, ierr)
+
+! Associate a parallel decomposition with a field: the field is decomposed
+! along its last (slowest-varying, non-record) dimension, with this task
+! owning the global indices given in `indices`. An existing matching
+! decomposition from the module-level decomp_list is reused when possible;
+! otherwise a new PIO I/O descriptor is created via PIO_initdecomp.
+!
+! Arguments:
+!   handle    - initialized I/O handle
+!   fieldname - name of a previously defined field
+!   indices   - global indices (1-based) owned by this task along the
+!               field's decomposed dimension
+!   ierr      - optional status
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(in) :: handle
+ character (len=*), intent(in) :: fieldname
+ integer, dimension(:), intent(in) :: indices
+ integer, intent(out), optional :: ierr
+
+ type (fieldlist_type), pointer :: field_cursor
+ integer :: pio_type
+ integer :: ndims, pd
+ integer :: i, i1, i2, i3, i4, i5, indx
+ integer, dimension(:), pointer :: dimlist, compdof
+ type (decomplist_type), pointer :: decomp_cursor, new_decomp
+
+! write(0,*) 'Called MPAS_io_set_var_indices()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ ! Sanity checks
+ if (.not. handle % initialized) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNINIT_HANDLE
+ return
+ end if
+
+! write(0,*) 'Assigning ', size(indices), ' indices for ', trim(fieldname)
+
+
+ !
+ ! Check whether the field has been defined
+ !
+ field_cursor => handle % fieldlist_head
+ do while (associated(field_cursor))
+ if (trim(fieldname) == trim(field_cursor % fieldhandle % fieldname)) then
+ exit
+ end if
+ field_cursor => field_cursor % next
+ end do
+ if (.not. associated(field_cursor)) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNDEFINED_VAR
+ return
+ end if
+!write(0,*) trim(fieldname), ' has been defined'
+
+ !
+ ! If this is a scalar field, just return
+ !
+ ! No decomposition is needed for: true scalars, fields whose only dimension
+ ! is the record dimension, or character fields.
+ if (field_cursor % fieldhandle % ndims == 0 .or. &
+ (field_cursor % fieldhandle % ndims == 1 .and. field_cursor % fieldhandle % has_unlimited_dim) .or. &
+ field_cursor % fieldhandle % field_type == MPAS_IO_CHAR) then
+!write(0,*) 'No need to create a decomposition for a 0d field...'
+ return
+ end if
+
+
+ !
+ ! Check whether a suitable decomposition already exists
+ !
+ ! A reusable decomposition must match in type, dimension sizes, and the
+ ! exact index set.
+ decomp_cursor => decomp_list
+!if (.not. associated(decomp_cursor)) write(0,*) 'No existing decompositions to check...'
+ DECOMP_LOOP: do while (associated(decomp_cursor))
+ if (decomp_cursor % decomphandle % field_type == field_cursor % fieldhandle % field_type) then
+ if (size(decomp_cursor % decomphandle % dims) == field_cursor % fieldhandle % ndims) then
+!write(0,*) 'Number of dimensions matches...'
+ do i=1,field_cursor % fieldhandle % ndims
+!write(0,*) 'Checking dimension ', decomp_cursor % decomphandle % dims(i), field_cursor % fieldhandle % dims(i) % dimsize
+ if (decomp_cursor % decomphandle % dims(i) /= field_cursor % fieldhandle % dims(i) % dimsize) then
+ decomp_cursor => decomp_cursor % next
+ cycle DECOMP_LOOP
+ end if
+ end do
+
+ if (size(decomp_cursor % decomphandle % indices) /= size(indices)) then
+!write(0,*) 'We do not have the same number of indices in this decomposition...'
+ decomp_cursor => decomp_cursor % next
+ cycle DECOMP_LOOP
+ end if
+
+ do i=1,size(decomp_cursor % decomphandle % indices)
+ if (indices(i) /= decomp_cursor % decomphandle % indices(i)) then
+!write(0,*) 'One of the indices does not match... ', i
+ decomp_cursor => decomp_cursor % next
+ cycle DECOMP_LOOP
+ end if
+ end do
+
+ ! OK, we have a match... just use this decomposition for the field and return
+ field_cursor % fieldhandle % decomp => decomp_cursor % decomphandle
+!write(0,*) 'Found a matching decomposition that we can use'
+ return
+ else if ((size(decomp_cursor % decomphandle % dims) == field_cursor % fieldhandle % ndims - 1) &
+ .and. field_cursor % fieldhandle % has_unlimited_dim &
+ ) then
+!write(0,*) 'Number of non-record dimensions matches...'
+! NOTE(review): this loop compares decomp dims(i) against field dims(i) with
+! the SAME index i while skipping the unlimited dim; the indexing only lines
+! up if the unlimited dimension is the field's last -- confirm that invariant.
+ do i=1,field_cursor % fieldhandle % ndims
+ if (field_cursor % fieldhandle % dims(i) % is_unlimited_dim) cycle
+!write(0,*) 'Checking dimension ', decomp_cursor % decomphandle % dims(i), field_cursor % fieldhandle % dims(i) % dimsize
+ if (decomp_cursor % decomphandle % dims(i) /= field_cursor % fieldhandle % dims(i) % dimsize) then
+ decomp_cursor => decomp_cursor % next
+ cycle DECOMP_LOOP
+ end if
+ end do
+
+ ! Type and dimensions match... what about indices?
+! NOTE(review): unlike the equal-rank branch above, indices are NOT compared
+! here before reusing the decomposition.
+
+ ! OK, we have a match... just use this decomposition for the field and return
+ field_cursor % fieldhandle % decomp => decomp_cursor % decomphandle
+!write(0,*) 'Found a matching decomposition that we can use (aside from record dimension)'
+ return
+ end if
+ end if
+ decomp_cursor => decomp_cursor % next
+ end do DECOMP_LOOP
+
+!write(0,*) 'Creating a new decomposition'
+
+
+ !
+ ! Otherwise, we need to create a new decomposition
+ !
+ ! ndims counts only non-record dimensions from here on
+ ndims = field_cursor % fieldhandle % ndims
+ if (field_cursor % fieldhandle % has_unlimited_dim) ndims = ndims - 1
+
+
+ allocate(new_decomp)
+ nullify(new_decomp % next)
+ allocate(new_decomp % decomphandle)
+ allocate(new_decomp % decomphandle % dims(ndims))
+ allocate(new_decomp % decomphandle % indices(size(indices)))
+
+ new_decomp % decomphandle % field_type = field_cursor % fieldhandle % field_type
+ new_decomp % decomphandle % indices(:) = indices(:)
+
+ ! Convert from MPAS type
+ if (field_cursor % fieldhandle % field_type == MPAS_IO_DOUBLE) then
+ pio_type = PIO_double
+ else if (field_cursor % fieldhandle % field_type == MPAS_IO_REAL) then
+ pio_type = PIO_real
+ else if (field_cursor % fieldhandle % field_type == MPAS_IO_INT) then
+ pio_type = PIO_int
+ else if (field_cursor % fieldhandle % field_type == MPAS_IO_CHAR) then
+ pio_type = PIO_char
+!!!!!!!! PIO DOES NOT SUPPORT LOGICAL !!!!!!!!
+ end if
+
+ allocate(dimlist(ndims))
+
+ ! dimlist holds global sizes for dims 1..ndims-1; for the decomposed (last)
+ ! dim it temporarily holds the LOCAL count size(indices) so pd is the number
+ ! of locally owned elements. dimlist(ndims) is restored to the global size
+ ! before PIO_initdecomp below.
+ pd = 1
+ do i=1,ndims-1
+ dimlist(i) = field_cursor % fieldhandle % dims(i) % dimsize
+ new_decomp % decomphandle % dims(i) = dimlist(i)
+ pd = pd * dimlist(i)
+ end do
+ new_decomp % decomphandle % dims(ndims) = field_cursor % fieldhandle % dims(ndims) % dimsize
+ dimlist(ndims) = size(indices)
+ pd = pd * dimlist(ndims)
+
+ allocate(compdof(pd))
+
+ ! Build the 1-based global degree-of-freedom map: column-major (Fortran)
+ ! offsets, with the last dimension remapped through indices(...)
+ indx = 1
+ if (ndims == 5) then
+ do i5=1,dimlist(5)
+ do i4=1,dimlist(4)
+ do i3=1,dimlist(3)
+ do i2=1,dimlist(2)
+ do i1=1,dimlist(1)
+ compdof(indx) = i1 &
+ + (i2-1)*dimlist(1) &
+ + (i3-1)*dimlist(2)*dimlist(1) &
+ + (i4-1)*dimlist(3)*dimlist(2)*dimlist(1) &
+ + (indices(i5)-1)*dimlist(4)*dimlist(3)*dimlist(2)*dimlist(1)
+ indx = indx + 1
+ end do
+ end do
+ end do
+ end do
+ end do
+ else if (ndims == 4) then
+ do i4=1,dimlist(4)
+ do i3=1,dimlist(3)
+ do i2=1,dimlist(2)
+ do i1=1,dimlist(1)
+ compdof(indx) = i1 &
+ + (i2-1)*dimlist(1) &
+ + (i3-1)*dimlist(2)*dimlist(1) &
+ + (indices(i4)-1)*dimlist(3)*dimlist(2)*dimlist(1)
+ indx = indx + 1
+ end do
+ end do
+ end do
+ end do
+ else if (ndims == 3) then
+ do i3=1,dimlist(3)
+ do i2=1,dimlist(2)
+ do i1=1,dimlist(1)
+ compdof(indx) = i1 + (i2-1)*dimlist(1) + (indices(i3)-1)*dimlist(2)*dimlist(1)
+ indx = indx + 1
+ end do
+ end do
+ end do
+ else if (ndims == 2) then
+ do i2=1,dimlist(2)
+ do i1=1,dimlist(1)
+ compdof(indx) = i1 + (indices(i2)-1)*dimlist(1)
+ indx = indx + 1
+ end do
+ end do
+ else if (ndims == 1) then
+ do i1=1,dimlist(1)
+ compdof(indx) = indices(i1)
+ indx = indx + 1
+ end do
+ end if
+
+ ! Restore the global size of the decomposed dimension before handing the
+ ! full global shape to PIO
+ dimlist(ndims) = field_cursor % fieldhandle % dims(ndims) % dimsize
+ call PIO_initdecomp(pio_iosystem, pio_type, dimlist, compdof, new_decomp % decomphandle % pio_iodesc)
+
+ ! Add new decomposition to the list
+ ! (prepended at the head of the module-level decomp_list)
+ if (.not. associated(decomp_list)) then
+ decomp_list => new_decomp
+!write(0,*) 'Adding first item to the decomp_list'
+ else
+ new_decomp % next => decomp_list
+ decomp_list => new_decomp
+!write(0,*) 'Adding new decomp to the head of the list'
+ end if
+
+!write(0,*) 'Setting decomp in fieldhandle'
+ field_cursor % fieldhandle % decomp => new_decomp % decomphandle
+
+ deallocate(compdof)
+ deallocate(dimlist)
+!write(0,*) 'All finished.'
+
+ end subroutine MPAS_io_set_var_indices
+
+
+ subroutine MPAS_io_get_var_generic(handle, fieldname, intVal, intArray1d, intArray2d, intArray3d, intArray4d, &
+ realVal, realArray1d, realArray2d, realArray3d, realArray4d, &
+ charVal, ierr)
+
+! Generic read driver: exactly one of the optional data arguments should be
+! supplied; it selects the read path (PIO_get_var for scalars/char,
+! PIO_read_darray for decomposed arrays). Arrays must already be allocated by
+! the caller. For fields with a record dimension, the frame stored in the
+! handle selects which record is read.
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ integer, intent(out), optional :: intVal
+ integer, dimension(:), intent(out), optional :: intArray1d
+ integer, dimension(:,:), intent(out), optional :: intArray2d
+ integer, dimension(:,:,:), intent(out), optional :: intArray3d
+ integer, dimension(:,:,:,:), intent(out), optional :: intArray4d
+ real (kind=RKIND), intent(out), optional :: realVal
+ real (kind=RKIND), dimension(:), intent(out), optional :: realArray1d
+ real (kind=RKIND), dimension(:,:), intent(out), optional :: realArray2d
+ real (kind=RKIND), dimension(:,:,:), intent(out), optional :: realArray3d
+ real (kind=RKIND), dimension(:,:,:,:), intent(out), optional :: realArray4d
+ character (len=*), intent(out), optional :: charVal
+ integer, intent(out), optional :: ierr
+
+ integer :: pio_ierr
+ integer, dimension(1) :: start1
+ integer, dimension(1) :: count1
+ integer, dimension(2) :: start2
+ integer, dimension(2) :: count2
+ character (len=StrKIND), dimension(1) :: tempchar
+ type (fieldlist_type), pointer :: field_cursor
+
+! NOTE(review): pio_ierr is never initialized; if NO optional data argument
+! is present, none of the branches below execute and the final error check
+! reads an undefined value.
+
+ ! Sanity checks
+ if (.not. handle % initialized) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNINIT_HANDLE
+ return
+ end if
+
+! write(0,*) 'Reading ', trim(fieldname)
+
+ !
+ ! Check whether the field has been defined
+ !
+! write(0,*) 'Checking if field is define'
+ field_cursor => handle % fieldlist_head
+ do while (associated(field_cursor))
+ if (trim(fieldname) == trim(field_cursor % fieldhandle % fieldname)) then
+ exit
+ end if
+ field_cursor => field_cursor % next
+ end do
+ if (.not. associated(field_cursor)) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNDEFINED_VAR
+ return
+ end if
+
+
+ !
+ ! Check that we have a decomposition for this field
+ !
+ ! (Only array reads require one; scalar int/real/char reads do not.)
+! write(0,*) 'Checking for decomposition'
+ if (.not.present(intVal) .and. .not.present(realVal) .and. .not.present(charVal)) then
+ if (.not. associated(field_cursor % fieldhandle % decomp)) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_NO_DECOMP
+ return
+ end if
+ end if
+
+!!!! Assume array was already allocated by the user
+
+ ! For record variables: select the current frame, and prepare start/count
+ ! vectors for the scalar (start1) and character (start2/count2) paths
+! write(0,*) 'Checking for unlimited dim'
+ if (field_cursor % fieldhandle % has_unlimited_dim) then
+ call PIO_setframe(field_cursor % fieldhandle % field_desc, handle % frame_number)
+ start1(1) = handle % frame_number
+ count1(1) = 1
+
+ start2(1) = 1
+ start2(2) = handle % frame_number
+ count2(2) = 1
+ end if
+
+ ! Dispatch on whichever optional data argument was supplied
+! write(0,*) 'Checking for real, int, char, etc'
+ if (present(realVal)) then
+! write (0,*) ' value is real'
+ if (field_cursor % fieldhandle % has_unlimited_dim) then
+ pio_ierr = PIO_get_var(handle % pio_file, field_cursor % fieldhandle % field_desc, start1, realVal)
+ else
+ pio_ierr = PIO_get_var(handle % pio_file, field_cursor % fieldhandle % field_desc, realVal)
+ end if
+ else if (present(intVal)) then
+! write (0,*) ' value is int'
+ if (field_cursor % fieldhandle % has_unlimited_dim) then
+ pio_ierr = PIO_get_var(handle % pio_file, field_cursor % fieldhandle % field_desc, start1, intVal)
+ else
+ pio_ierr = PIO_get_var(handle % pio_file, field_cursor % fieldhandle % field_desc, intVal)
+ end if
+ else if (present(charVal)) then
+! write (0,*) ' value is char'
+ ! Record char variables are read via the varid into a fixed-size buffer,
+ ! then the first dimsize characters are copied out
+ if (field_cursor % fieldhandle % has_unlimited_dim) then
+ count2(1) = field_cursor % fieldhandle % dims(1) % dimsize
+ pio_ierr = PIO_get_var(handle % pio_file, field_cursor % fieldhandle % fieldid, start2, count2, tempchar)
+ charVal(1:count2(1)) = tempchar(1)(1:count2(1))
+ else
+ pio_ierr = PIO_get_var(handle % pio_file, field_cursor % fieldhandle % field_desc, charVal)
+ end if
+ else if (present(realArray1d)) then
+! write (0,*) ' value is real1'
+ call PIO_read_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &
+ realArray1d, pio_ierr)
+ else if (present(realArray2d)) then
+! write (0,*) ' value is real2'
+ call PIO_read_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &
+ realArray2d, pio_ierr)
+ else if (present(realArray3d)) then
+! write (0,*) ' value is real3'
+ call PIO_read_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &
+ realArray3d, pio_ierr)
+ else if (present(realArray4d)) then
+! write (0,*) ' value is real4'
+ call PIO_read_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &
+ realArray4d, pio_ierr)
+ else if (present(intArray1d)) then
+! write (0,*) ' value is int1'
+ call PIO_read_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &
+ intArray1d, pio_ierr)
+ else if (present(intArray2d)) then
+! write (0,*) ' value is int2'
+ call PIO_read_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &
+ intArray2d, pio_ierr)
+ else if (present(intArray3d)) then
+! write (0,*) ' value is int3'
+ call PIO_read_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &
+ intArray3d, pio_ierr)
+ else if (present(intArray4d)) then
+! write (0,*) ' value is int4'
+ call PIO_read_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &
+ intArray4d, pio_ierr)
+ end if
+
+! write (0,*) 'Checking for error'
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ return
+ end if
+
+ end subroutine MPAS_io_get_var_generic
+
+
+ subroutine MPAS_io_get_var_int0d(handle, fieldname, val, ierr)
+
+! Read a scalar integer field; thin wrapper over MPAS_io_get_var_generic.
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ integer, intent(out) :: val
+ integer, intent(out), optional :: ierr
+
+ ! NOTE(review): pio_ierr, start, and field_cursor are declared but unused
+ integer :: pio_ierr
+ integer, dimension(1) :: start
+ type (fieldlist_type), pointer :: field_cursor
+
+! write(0,*) 'Called MPAS_io_get_var_int0d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ call MPAS_io_get_var_generic(handle, fieldname, intVal=val, ierr=ierr)
+
+ end subroutine MPAS_io_get_var_int0d
+
+
+ subroutine MPAS_io_get_var_int1d(handle, fieldname, array, ierr)
+
+! Read a 1-d integer field (array pre-allocated by caller); thin wrapper
+! over MPAS_io_get_var_generic.
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ integer, dimension(:), intent(out) :: array
+ integer, intent(out), optional :: ierr
+
+ ! NOTE(review): pio_ierr and field_cursor are declared but unused
+ integer :: pio_ierr
+ type (fieldlist_type), pointer :: field_cursor
+
+! write(0,*) 'Called MPAS_io_get_var_int1d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ call MPAS_io_get_var_generic(handle, fieldname, intArray1d=array, ierr=ierr)
+
+ end subroutine MPAS_io_get_var_int1d
+
+
+ subroutine MPAS_io_get_var_int2d(handle, fieldname, array, ierr)
+
+! Read a 2-d integer field (array pre-allocated by caller); thin wrapper
+! over MPAS_io_get_var_generic.
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ integer, dimension(:,:), intent(out) :: array
+ integer, intent(out), optional :: ierr
+
+ ! NOTE(review): pio_ierr and field_cursor are declared but unused
+ integer :: pio_ierr
+ type (fieldlist_type), pointer :: field_cursor
+
+! write(0,*) 'Called MPAS_io_get_var_int2d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ call MPAS_io_get_var_generic(handle, fieldname, intArray2d=array, ierr=ierr)
+
+ end subroutine MPAS_io_get_var_int2d
+
+
+ subroutine MPAS_io_get_var_int3d(handle, fieldname, array, ierr)
+
+! Read a 3-d integer field (array pre-allocated by caller); thin wrapper
+! over MPAS_io_get_var_generic.
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ integer, dimension(:,:,:), intent(out) :: array
+ integer, intent(out), optional :: ierr
+
+ ! NOTE(review): pio_ierr and field_cursor are declared but unused
+ integer :: pio_ierr
+ type (fieldlist_type), pointer :: field_cursor
+
+! write(0,*) 'Called MPAS_io_get_var_int3d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ call MPAS_io_get_var_generic(handle, fieldname, intArray3d=array, ierr=ierr)
+
+ end subroutine MPAS_io_get_var_int3d
+
+
+ subroutine MPAS_io_get_var_int4d(handle, fieldname, array, ierr)
+
+! Read a 4-d integer field (array pre-allocated by caller); thin wrapper
+! over MPAS_io_get_var_generic.
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ integer, dimension(:,:,:,:), intent(out) :: array
+ integer, intent(out), optional :: ierr
+
+ ! NOTE(review): pio_ierr and field_cursor are declared but unused
+ integer :: pio_ierr
+ type (fieldlist_type), pointer :: field_cursor
+
+! write(0,*) 'Called MPAS_io_get_var_int4d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ call MPAS_io_get_var_generic(handle, fieldname, intArray4d=array, ierr=ierr)
+
+ end subroutine MPAS_io_get_var_int4d
+
+
+ subroutine MPAS_io_get_var_real0d(handle, fieldname, val, ierr)
+
+! Read a scalar real field; thin wrapper over MPAS_io_get_var_generic.
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ real (kind=RKIND), intent(out) :: val
+ integer, intent(out), optional :: ierr
+
+ ! NOTE(review): pio_ierr, start, and field_cursor are declared but unused
+ integer :: pio_ierr
+ integer, dimension(1) :: start
+ type (fieldlist_type), pointer :: field_cursor
+
+! write(0,*) 'Called MPAS_io_get_var_real0d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ call MPAS_io_get_var_generic(handle, fieldname, realVal=val, ierr=ierr)
+
+ end subroutine MPAS_io_get_var_real0d
+
+
+ subroutine MPAS_io_get_var_real1d(handle, fieldname, array, ierr)
+
+! Read a 1-d real field (array pre-allocated by caller); thin wrapper over
+! MPAS_io_get_var_generic.
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ real (kind=RKIND), dimension(:), intent(out) :: array
+ integer, intent(out), optional :: ierr
+
+ ! NOTE(review): pio_ierr and field_cursor are declared but unused
+ integer :: pio_ierr
+ type (fieldlist_type), pointer :: field_cursor
+
+! write(0,*) 'Called MPAS_io_get_var_real1d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ call MPAS_io_get_var_generic(handle, fieldname, realArray1d=array, ierr=ierr)
+
+ end subroutine MPAS_io_get_var_real1d
+
+
+ subroutine MPAS_io_get_var_real2d(handle, fieldname, array, ierr)
+
+! Read a 2-d real field (array pre-allocated by caller); thin wrapper over
+! MPAS_io_get_var_generic.
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ real (kind=RKIND), dimension(:,:), intent(out) :: array
+ integer, intent(out), optional :: ierr
+
+ ! NOTE(review): pio_ierr and field_cursor are declared but unused
+ integer :: pio_ierr
+ type (fieldlist_type), pointer :: field_cursor
+
+! write(0,*) 'Called MPAS_io_get_var_real2d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ call MPAS_io_get_var_generic(handle, fieldname, realArray2d=array, ierr=ierr)
+
+ end subroutine MPAS_io_get_var_real2d
+
+
+ subroutine MPAS_io_get_var_real3d(handle, fieldname, array, ierr)
+
+! Read a 3-d real field (array pre-allocated by caller); thin wrapper over
+! MPAS_io_get_var_generic.
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ real (kind=RKIND), dimension(:,:,:), intent(out) :: array
+ integer, intent(out), optional :: ierr
+
+ ! NOTE(review): pio_ierr and field_cursor are declared but unused
+ integer :: pio_ierr
+ type (fieldlist_type), pointer :: field_cursor
+
+! write(0,*) 'Called MPAS_io_get_var_real3d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ call MPAS_io_get_var_generic(handle, fieldname, realArray3d=array, ierr=ierr)
+
+ end subroutine MPAS_io_get_var_real3d
+
+
+ subroutine MPAS_io_get_var_real4d(handle, fieldname, array, ierr)
+
+! Read a 4-d real field (array pre-allocated by caller); thin wrapper over
+! MPAS_io_get_var_generic.
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ real (kind=RKIND), dimension(:,:,:,:), intent(out) :: array
+ integer, intent(out), optional :: ierr
+
+ ! NOTE(review): pio_ierr and field_cursor are declared but unused
+ integer :: pio_ierr
+ type (fieldlist_type), pointer :: field_cursor
+
+! write(0,*) 'Called MPAS_io_get_var_real4d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ call MPAS_io_get_var_generic(handle, fieldname, realArray4d=array, ierr=ierr)
+
+ end subroutine MPAS_io_get_var_real4d
+
+
+ subroutine MPAS_io_get_var_char0d(handle, fieldname, val, ierr)
+
+! Read a character field; thin wrapper over MPAS_io_get_var_generic.
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ character (len=*), intent(out) :: val
+ integer, intent(out), optional :: ierr
+
+ ! NOTE(review): pio_ierr and field_cursor are declared but unused
+ integer :: pio_ierr
+ type (fieldlist_type), pointer :: field_cursor
+
+! write(0,*) 'Called MPAS_io_get_var_char0d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ call MPAS_io_get_var_generic(handle, fieldname, charVal=val, ierr=ierr)
+
+ end subroutine MPAS_io_get_var_char0d
+
+
+ subroutine MPAS_io_put_var_generic(handle, fieldname, intVal, intArray1d, intArray2d, intArray3d, intArray4d, &
+ realVal, realArray1d, realArray2d, realArray3d, realArray4d, &
+ charVal, ierr)
+
+! Generic write driver: exactly one of the optional data arguments should be
+! supplied; it selects the write path (PIO_put_var for scalars/char,
+! PIO_write_darray for decomposed arrays). The first write on a handle ends
+! PIO define mode. For record fields, the handle's frame number selects the
+! record written; non-record fields are written only on the first frame.
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ integer, intent(in), optional :: intVal
+ integer, dimension(:), intent(in), optional :: intArray1d
+ integer, dimension(:,:), intent(in), optional :: intArray2d
+ integer, dimension(:,:,:), intent(in), optional :: intArray3d
+ integer, dimension(:,:,:,:), intent(in), optional :: intArray4d
+ real (kind=RKIND), intent(in), optional :: realVal
+ real (kind=RKIND), dimension(:), intent(in), optional :: realArray1d
+ real (kind=RKIND), dimension(:,:), intent(in), optional :: realArray2d
+ real (kind=RKIND), dimension(:,:,:), intent(in), optional :: realArray3d
+ real (kind=RKIND), dimension(:,:,:,:), intent(in), optional :: realArray4d
+ character (len=*), intent(in), optional :: charVal
+ integer, intent(out), optional :: ierr
+
+ integer :: pio_ierr
+ integer, dimension(1) :: start1
+ integer, dimension(1) :: count1
+ integer, dimension(2) :: start2
+ integer, dimension(2) :: count2
+ type (fieldlist_type), pointer :: field_cursor
+
+! NOTE(review): pio_ierr is never initialized; if NO optional data argument
+! is present, none of the write branches execute and the final error check
+! reads an undefined value.
+
+ ! Sanity checks
+ if (.not. handle % initialized) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNINIT_HANDLE
+ return
+ end if
+
+ ! First write on this handle: leave PIO define mode and enter data mode
+ if (.not. handle % data_mode) then
+ handle % data_mode = .true.
+
+ pio_ierr = PIO_enddef(handle % pio_file)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ return
+ end if
+ end if
+
+! write(0,*) 'Writing ', trim(fieldname)
+
+
+ !
+ ! Check whether the field has been defined
+ !
+ field_cursor => handle % fieldlist_head
+ do while (associated(field_cursor))
+ if (trim(fieldname) == trim(field_cursor % fieldhandle % fieldname)) then
+ exit
+ end if
+ field_cursor => field_cursor % next
+ end do
+ if (.not. associated(field_cursor)) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNDEFINED_VAR
+ return
+ end if
+
+
+ !
+ ! Check that we have a decomposition for this field
+ !
+ ! (Only array writes require one; scalar int/real/char writes do not.)
+ if (.not.present(intVal) .and. .not.present(realVal) .and. .not.present(charVal)) then
+ if (.not. associated(field_cursor % fieldhandle % decomp)) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_NO_DECOMP
+ return
+ end if
+ end if
+
+ ! Record fields: select the current frame and build start/count vectors.
+ ! Non-record fields: skip the write entirely after the first frame so a
+ ! time-invariant field is not rewritten every output step.
+ if (field_cursor % fieldhandle % has_unlimited_dim) then
+ call PIO_setframe(field_cursor % fieldhandle % field_desc, handle % frame_number)
+ start1(1) = handle % frame_number
+ count1(1) = 1
+
+ start2(1) = 1
+ start2(2) = handle % frame_number
+ count2(2) = 1
+ else if (handle % frame_number > 1) then
+ if(present(ierr)) ierr = MPAS_IO_NOERR
+ return
+ end if
+
+ ! Dispatch on whichever optional data argument was supplied
+ if (present(realVal)) then
+ if (field_cursor % fieldhandle % has_unlimited_dim) then
+ pio_ierr = PIO_put_var(handle % pio_file, field_cursor % fieldhandle % field_desc, start1, realVal)
+ else
+ pio_ierr = PIO_put_var(handle % pio_file, field_cursor % fieldhandle % field_desc, realVal)
+ end if
+ else if (present(intVal)) then
+ if (field_cursor % fieldhandle % has_unlimited_dim) then
+ pio_ierr = PIO_put_var(handle % pio_file, field_cursor % fieldhandle % field_desc, start1, intVal)
+ else
+ pio_ierr = PIO_put_var(handle % pio_file, field_cursor % fieldhandle % field_desc, intVal)
+ end if
+ else if (present(charVal)) then
+ ! Record char variables are written via the varid with explicit start/count
+ if (field_cursor % fieldhandle % has_unlimited_dim) then
+ count2(1) = field_cursor % fieldhandle % dims(1) % dimsize
+ pio_ierr = PIO_put_var(handle % pio_file, field_cursor % fieldhandle % fieldid, start2, count2, (/charVal/))
+ else
+ pio_ierr = PIO_put_var(handle % pio_file, field_cursor % fieldhandle % field_desc, charVal)
+ end if
+ else if (present(realArray1d)) then
+ call PIO_write_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &
+ realArray1d, pio_ierr)
+ else if (present(realArray2d)) then
+ call PIO_write_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &
+ realArray2d, pio_ierr)
+ else if (present(realArray3d)) then
+ call PIO_write_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &
+ realArray3d, pio_ierr)
+ else if (present(realArray4d)) then
+ call PIO_write_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &
+ realArray4d, pio_ierr)
+ else if (present(intArray1d)) then
+ call PIO_write_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &
+ intArray1d, pio_ierr)
+ else if (present(intArray2d)) then
+ call PIO_write_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &
+ intArray2d, pio_ierr)
+ else if (present(intArray3d)) then
+ call PIO_write_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &
+ intArray3d, pio_ierr)
+ else if (present(intArray4d)) then
+ call PIO_write_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &
+ intArray4d, pio_ierr)
+ end if
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ return
+ end if
+
+ end subroutine MPAS_io_put_var_generic
+
+
+ subroutine MPAS_io_put_var_int0d(handle, fieldname, val, ierr)
+
+! Write a scalar integer field; thin wrapper over MPAS_io_put_var_generic.
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ integer, intent(in) :: val
+ integer, intent(out), optional :: ierr
+
+ ! NOTE(review): pio_ierr, start, and field_cursor are declared but unused
+ integer :: pio_ierr
+ integer, dimension(1) :: start
+ type (fieldlist_type), pointer :: field_cursor
+
+! write(0,*) 'Called MPAS_io_put_var_int0d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ call MPAS_io_put_var_generic(handle, fieldname, intVal=val, ierr=ierr)
+
+ end subroutine MPAS_io_put_var_int0d
+
+
+ subroutine MPAS_io_put_var_int1d(handle, fieldname, array, ierr)
+
+! Write a 1-d integer field; thin wrapper over MPAS_io_put_var_generic.
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ integer, dimension(:), intent(in) :: array
+ integer, intent(out), optional :: ierr
+
+ ! NOTE(review): pio_ierr and field_cursor are declared but unused
+ integer :: pio_ierr
+ type (fieldlist_type), pointer :: field_cursor
+
+! write(0,*) 'Called MPAS_io_put_var_int1d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ call MPAS_io_put_var_generic(handle, fieldname, intArray1d=array, ierr=ierr)
+
+ end subroutine MPAS_io_put_var_int1d
+
+
+ subroutine MPAS_io_put_var_int2d(handle, fieldname, array, ierr)
+
+ implicit none
+
+ ! Write a 2-d integer array to the field named fieldname; thin wrapper
+ ! around MPAS_io_put_var_generic (unused locals removed).
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ integer, dimension(:,:), intent(in) :: array
+ integer, intent(out), optional :: ierr     ! MPAS_IO_NOERR on success
+
+! write(0,*) 'Called MPAS_io_put_var_int2d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ call MPAS_io_put_var_generic(handle, fieldname, intArray2d=array, ierr=ierr)
+
+ end subroutine MPAS_io_put_var_int2d
+
+
+ subroutine MPAS_io_put_var_int3d(handle, fieldname, array, ierr)
+
+ implicit none
+
+ ! Write a 3-d integer array to the field named fieldname; thin wrapper
+ ! around MPAS_io_put_var_generic (unused locals removed).
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ integer, dimension(:,:,:), intent(in) :: array
+ integer, intent(out), optional :: ierr     ! MPAS_IO_NOERR on success
+
+! write(0,*) 'Called MPAS_io_put_var_int3d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ call MPAS_io_put_var_generic(handle, fieldname, intArray3d=array, ierr=ierr)
+
+ end subroutine MPAS_io_put_var_int3d
+
+
+ subroutine MPAS_io_put_var_int4d(handle, fieldname, array, ierr)
+
+ implicit none
+
+ ! Write a 4-d integer array to the field named fieldname; thin wrapper
+ ! around MPAS_io_put_var_generic (unused locals removed).
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ integer, dimension(:,:,:,:), intent(in) :: array
+ integer, intent(out), optional :: ierr     ! MPAS_IO_NOERR on success
+
+! write(0,*) 'Called MPAS_io_put_var_int4d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ call MPAS_io_put_var_generic(handle, fieldname, intArray4d=array, ierr=ierr)
+
+ end subroutine MPAS_io_put_var_int4d
+
+
+ subroutine MPAS_io_put_var_real0d(handle, fieldname, val, ierr)
+
+ implicit none
+
+ ! Write the scalar real val to the field named fieldname; thin wrapper
+ ! around MPAS_io_put_var_generic (unused locals pio_ierr, start, and
+ ! field_cursor removed).
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ real (kind=RKIND), intent(in) :: val
+ integer, intent(out), optional :: ierr     ! MPAS_IO_NOERR on success
+
+! write(0,*) 'Called MPAS_io_put_var_real0d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ call MPAS_io_put_var_generic(handle, fieldname, realVal=val, ierr=ierr)
+
+ end subroutine MPAS_io_put_var_real0d
+
+
+ subroutine MPAS_io_put_var_real1d(handle, fieldname, array, ierr)
+
+ implicit none
+
+ ! Write a 1-d real array to the field named fieldname; thin wrapper
+ ! around MPAS_io_put_var_generic (unused locals removed).
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ real (kind=RKIND), dimension(:), intent(in) :: array
+ integer, intent(out), optional :: ierr     ! MPAS_IO_NOERR on success
+
+! write(0,*) 'Called MPAS_io_put_var_real1d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ call MPAS_io_put_var_generic(handle, fieldname, realArray1d=array, ierr=ierr)
+
+ end subroutine MPAS_io_put_var_real1d
+
+
+ subroutine MPAS_io_put_var_real2d(handle, fieldname, array, ierr)
+
+ implicit none
+
+ ! Write a 2-d real array to the field named fieldname; thin wrapper
+ ! around MPAS_io_put_var_generic (unused locals removed).
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ real (kind=RKIND), dimension(:,:), intent(in) :: array
+ integer, intent(out), optional :: ierr     ! MPAS_IO_NOERR on success
+
+! write(0,*) 'Called MPAS_io_put_var_real2d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ call MPAS_io_put_var_generic(handle, fieldname, realArray2d=array, ierr=ierr)
+
+ end subroutine MPAS_io_put_var_real2d
+
+
+ subroutine MPAS_io_put_var_real3d(handle, fieldname, array, ierr)
+
+ implicit none
+
+ ! Write a 3-d real array to the field named fieldname; thin wrapper
+ ! around MPAS_io_put_var_generic (unused locals removed).
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ real (kind=RKIND), dimension(:,:,:), intent(in) :: array
+ integer, intent(out), optional :: ierr     ! MPAS_IO_NOERR on success
+
+! write(0,*) 'Called MPAS_io_put_var_real3d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ call MPAS_io_put_var_generic(handle, fieldname, realArray3d=array, ierr=ierr)
+
+ end subroutine MPAS_io_put_var_real3d
+
+
+ subroutine MPAS_io_put_var_real4d(handle, fieldname, array, ierr)
+
+ implicit none
+
+ ! Write a 4-d real array to the field named fieldname; thin wrapper
+ ! around MPAS_io_put_var_generic (unused locals removed).
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ real (kind=RKIND), dimension(:,:,:,:), intent(in) :: array
+ integer, intent(out), optional :: ierr     ! MPAS_IO_NOERR on success
+
+! write(0,*) 'Called MPAS_io_put_var_real4d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ call MPAS_io_put_var_generic(handle, fieldname, realArray4d=array, ierr=ierr)
+
+ end subroutine MPAS_io_put_var_real4d
+
+
+ subroutine MPAS_io_put_var_char0d(handle, fieldname, val, ierr)
+
+ implicit none
+
+ ! Write the character string val to the field named fieldname; thin
+ ! wrapper around MPAS_io_put_var_generic (unused locals removed).
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ character (len=*), intent(in) :: val
+ integer, intent(out), optional :: ierr     ! MPAS_IO_NOERR on success
+
+! write(0,*) 'Called MPAS_io_put_var_char0d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ call MPAS_io_put_var_generic(handle, fieldname, charVal=val, ierr=ierr)
+
+ end subroutine MPAS_io_put_var_char0d
+
+
+ subroutine MPAS_io_get_att_int0d(handle, attName, attValue, fieldname, ierr)
+
+ implicit none
+
+ ! Read the scalar integer attribute attName, from variable fieldname when
+ ! present, or from the file's global attributes otherwise. A successfully
+ ! read value is cached in the handle's attribute list so that later calls
+ ! avoid a PIO query; a cached entry of a different type yields
+ ! MPAS_IO_ERR_WRONG_ATT_TYPE.
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: attName
+ integer, intent(out) :: attValue
+ character (len=*), intent(in), optional :: fieldname
+ integer, intent(out), optional :: ierr
+
+ integer :: pio_ierr
+ integer :: varid
+ integer :: xtype, len
+ type (fieldlist_type), pointer :: field_cursor
+ type (attlist_type), pointer :: att_cursor, new_att_node
+
+! write(0,*) 'Called MPAS_io_get_att_int0d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ ! Sanity checks
+ if (.not. handle % initialized) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNINIT_HANDLE
+ return
+ end if
+
+
+ !
+ ! For variable attributes, find the structure for fieldname
+ !
+ if (present(fieldname)) then
+ field_cursor => handle % fieldlist_head
+ do while (associated(field_cursor))
+ if (trim(fieldname) == trim(field_cursor % fieldhandle % fieldname)) then
+ varid = field_cursor % fieldhandle % fieldid
+ exit
+ end if
+ field_cursor => field_cursor % next
+ end do
+ if (.not. associated(field_cursor)) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNDEFINED_VAR
+ return
+ end if
+
+ ! Check whether we have this attribute cached
+ att_cursor => field_cursor % fieldhandle % attlist_head
+ do while (associated(att_cursor))
+ if (trim(att_cursor % atthandle % attName) == trim(attName)) then
+ if (att_cursor % atthandle % attType == ATT_INT) then
+!write(0,*) 'Using cached attribute'
+ attValue = att_cursor % atthandle % attValueInt
+ else
+ ! NOTE(review): on a cached type mismatch, attValue is returned unmodified
+ if (present(ierr)) ierr=MPAS_IO_ERR_WRONG_ATT_TYPE
+ end if
+ return
+ end if
+ att_cursor => att_cursor % next
+ end do
+
+ else
+
+ ! Check whether we have this attribute cached
+ att_cursor => handle % attlist_head
+ do while (associated(att_cursor))
+ if (trim(att_cursor % atthandle % attName) == trim(attName)) then
+ if (att_cursor % atthandle % attType == ATT_INT) then
+!write(0,*) 'Using cached attribute'
+ attValue = att_cursor % atthandle % attValueInt
+ else
+ if (present(ierr)) ierr=MPAS_IO_ERR_WRONG_ATT_TYPE
+ end if
+ return
+ end if
+ att_cursor => att_cursor % next
+ end do
+
+ varid = PIO_global
+ end if
+
+ ! Query attribute value
+ pio_ierr = PIO_inq_att(handle % pio_file, varid, attName, xtype, len)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ return
+ end if
+ if (xtype /= PIO_int) then
+ if (present(ierr)) ierr=MPAS_IO_ERR_WRONG_ATT_TYPE
+ return
+ end if
+
+ pio_ierr = PIO_get_att(handle % pio_file, varid, attName, attValue)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ return
+ end if
+
+ ! Keep attribute for future reference
+ allocate(new_att_node)
+ nullify(new_att_node % next)
+ allocate(new_att_node % atthandle)
+ new_att_node % atthandle % attName = attName
+ new_att_node % atthandle % attType = ATT_INT
+ new_att_node % atthandle % attValueInt = attValue
+
+ ! Append the cached value to the field's (or the global) attribute list
+ if (present(fieldname)) then
+ if (.not. associated(field_cursor % fieldhandle % attlist_head)) then
+ field_cursor % fieldhandle % attlist_head => new_att_node
+!write(0,*) 'Assigning att head for '//trim(attName)
+ end if
+ if (.not. associated(field_cursor % fieldhandle % attlist_tail)) then
+ field_cursor % fieldhandle % attlist_tail => new_att_node
+!write(0,*) 'Assigning att tail for '//trim(attName)
+ else
+ field_cursor % fieldhandle % attlist_tail % next => new_att_node
+ field_cursor % fieldhandle % attlist_tail => field_cursor % fieldhandle % attlist_tail % next
+!write(0,*) 'Extending att tail for '//trim(attName)
+ end if
+ else
+ if (.not. associated(handle % attlist_head)) then
+ handle % attlist_head => new_att_node
+!write(0,*) 'Assigning att head for '//trim(attName)
+ end if
+ if (.not. associated(handle % attlist_tail)) then
+ handle % attlist_tail => new_att_node
+!write(0,*) 'Assigning att tail for '//trim(attName)
+ else
+ handle % attlist_tail % next => new_att_node
+ handle % attlist_tail => handle % attlist_tail % next
+!write(0,*) 'Extending att tail for '//trim(attName)
+ end if
+ end if
+
+ end subroutine MPAS_io_get_att_int0d
+
+
+ subroutine MPAS_io_get_att_int1d(handle, attName, attValue, fieldname, ierr)
+
+ implicit none
+
+ ! Read the 1-d integer attribute attName, from variable fieldname when
+ ! present, or from the file's global attributes otherwise. On success,
+ ! attValue is allocated by this routine (size = attribute length); the
+ ! caller is responsible for deallocating it. Values are cached in the
+ ! handle's attribute lists so PIO is queried at most once per attribute.
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: attName
+ integer, dimension(:), pointer :: attValue
+ character (len=*), intent(in), optional :: fieldname
+ integer, intent(out), optional :: ierr
+
+ integer :: pio_ierr
+ integer :: varid
+ integer :: xtype, len, attlen
+ type (fieldlist_type), pointer :: field_cursor
+ type (attlist_type), pointer :: att_cursor, new_att_node
+
+! write(0,*) 'Called MPAS_io_get_att_int1d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ ! Sanity checks
+ if (.not. handle % initialized) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNINIT_HANDLE
+ return
+ end if
+
+
+ !
+ ! For variable attributes, find the structure for fieldname
+ !
+ if (present(fieldname)) then
+ field_cursor => handle % fieldlist_head
+ do while (associated(field_cursor))
+ if (trim(fieldname) == trim(field_cursor % fieldhandle % fieldname)) then
+ varid = field_cursor % fieldhandle % fieldid
+ exit
+ end if
+ field_cursor => field_cursor % next
+ end do
+ if (.not. associated(field_cursor)) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNDEFINED_VAR
+ return
+ end if
+
+ ! Check whether we have this attribute cached
+ att_cursor => field_cursor % fieldhandle % attlist_head
+ do while (associated(att_cursor))
+ if (trim(att_cursor % atthandle % attName) == trim(attName)) then
+ if (att_cursor % atthandle % attType == ATT_INTA) then
+!write(0,*) 'Using cached attribute'
+ ! Return a copy of the cached array, not a pointer into the cache
+ allocate(attValue(size(att_cursor % atthandle % attValueIntA)))
+ attValue = att_cursor % atthandle % attValueIntA
+ else
+ if (present(ierr)) ierr=MPAS_IO_ERR_WRONG_ATT_TYPE
+ end if
+ return
+ end if
+ att_cursor => att_cursor % next
+ end do
+
+ else
+
+ ! Check whether we have this attribute cached
+ att_cursor => handle % attlist_head
+ do while (associated(att_cursor))
+ if (trim(att_cursor % atthandle % attName) == trim(attName)) then
+ if (att_cursor % atthandle % attType == ATT_INTA) then
+!write(0,*) 'Using cached attribute'
+ allocate(attValue(size(att_cursor % atthandle % attValueIntA)))
+ attValue = att_cursor % atthandle % attValueIntA
+ else
+ if (present(ierr)) ierr=MPAS_IO_ERR_WRONG_ATT_TYPE
+ end if
+ return
+ end if
+ att_cursor => att_cursor % next
+ end do
+
+ varid = PIO_global
+ end if
+
+ ! Query attribute value
+ pio_ierr = PIO_inq_att(handle % pio_file, varid, attName, xtype, len)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ return
+ end if
+
+ if (xtype /= PIO_int) then
+ if (present(ierr)) ierr=MPAS_IO_ERR_WRONG_ATT_TYPE
+ return
+ end if
+
+ pio_ierr = PIO_inq_attlen(handle % pio_file, varid, attName, attlen)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ return
+ end if
+
+ ! NOTE(review): if PIO_get_att fails below, attValue remains allocated
+ allocate(attValue(attlen))
+ pio_ierr = PIO_get_att(handle % pio_file, varid, attName, attValue)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ return
+ end if
+
+ ! Keep attribute for future reference
+ allocate(new_att_node)
+ nullify(new_att_node % next)
+ allocate(new_att_node % atthandle)
+ new_att_node % atthandle % attName = attName
+ new_att_node % atthandle % attType = ATT_INTA
+ allocate(new_att_node % atthandle % attValueIntA(attlen))
+ new_att_node % atthandle % attValueIntA = attValue
+
+ ! Append the cached value to the field's (or the global) attribute list
+ if (present(fieldname)) then
+ if (.not. associated(field_cursor % fieldhandle % attlist_head)) then
+ field_cursor % fieldhandle % attlist_head => new_att_node
+!write(0,*) 'Assigning att head for '//trim(attName)
+ end if
+ if (.not. associated(field_cursor % fieldhandle % attlist_tail)) then
+ field_cursor % fieldhandle % attlist_tail => new_att_node
+!write(0,*) 'Assigning att tail for '//trim(attName)
+ else
+ field_cursor % fieldhandle % attlist_tail % next => new_att_node
+ field_cursor % fieldhandle % attlist_tail => field_cursor % fieldhandle % attlist_tail % next
+!write(0,*) 'Extending att tail for '//trim(attName)
+ end if
+ else
+ if (.not. associated(handle % attlist_head)) then
+ handle % attlist_head => new_att_node
+!write(0,*) 'Assigning att head for '//trim(attName)
+ end if
+ if (.not. associated(handle % attlist_tail)) then
+ handle % attlist_tail => new_att_node
+!write(0,*) 'Assigning att tail for '//trim(attName)
+ else
+ handle % attlist_tail % next => new_att_node
+ handle % attlist_tail => handle % attlist_tail % next
+!write(0,*) 'Extending att tail for '//trim(attName)
+ end if
+ end if
+
+ end subroutine MPAS_io_get_att_int1d
+
+
+ subroutine MPAS_io_get_att_real0d(handle, attName, attValue, fieldname, ierr)
+
+ implicit none
+
+ ! Read the scalar real attribute attName, from variable fieldname when
+ ! present, or from the file's global attributes otherwise. Values are
+ ! cached in the handle's attribute lists so PIO is queried at most once
+ ! per attribute.
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: attName
+ real (kind=RKIND), intent(out) :: attValue
+ character (len=*), intent(in), optional :: fieldname
+ integer, intent(out), optional :: ierr
+
+ integer :: pio_ierr
+ integer :: varid
+ integer :: xtype, len
+ type (fieldlist_type), pointer :: field_cursor
+ type (attlist_type), pointer :: att_cursor, new_att_node
+
+! write(0,*) 'Called MPAS_io_get_att_real0d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ ! Sanity checks
+ if (.not. handle % initialized) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNINIT_HANDLE
+ return
+ end if
+
+
+ !
+ ! For variable attributes, find the structure for fieldname
+ !
+ if (present(fieldname)) then
+ field_cursor => handle % fieldlist_head
+ do while (associated(field_cursor))
+ if (trim(fieldname) == trim(field_cursor % fieldhandle % fieldname)) then
+ varid = field_cursor % fieldhandle % fieldid
+ exit
+ end if
+ field_cursor => field_cursor % next
+ end do
+ if (.not. associated(field_cursor)) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNDEFINED_VAR
+ return
+ end if
+
+ ! Check whether we have this attribute cached
+ att_cursor => field_cursor % fieldhandle % attlist_head
+ do while (associated(att_cursor))
+ if (trim(att_cursor % atthandle % attName) == trim(attName)) then
+ if (att_cursor % atthandle % attType == ATT_REAL) then
+!write(0,*) 'Using cached attribute'
+ attValue = att_cursor % atthandle % attValueReal
+ else
+ ! NOTE(review): on a cached type mismatch, attValue is returned unmodified
+ if (present(ierr)) ierr=MPAS_IO_ERR_WRONG_ATT_TYPE
+ end if
+ return
+ end if
+ att_cursor => att_cursor % next
+ end do
+
+ else
+
+ ! Check whether we have this attribute cached
+ att_cursor => handle % attlist_head
+ do while (associated(att_cursor))
+ if (trim(att_cursor % atthandle % attName) == trim(attName)) then
+ if (att_cursor % atthandle % attType == ATT_REAL) then
+!write(0,*) 'Using cached attribute'
+ attValue = att_cursor % atthandle % attValueReal
+ else
+ if (present(ierr)) ierr=MPAS_IO_ERR_WRONG_ATT_TYPE
+ end if
+ return
+ end if
+ att_cursor => att_cursor % next
+ end do
+
+ varid = PIO_global
+ end if
+
+ ! Query attribute value
+ pio_ierr = PIO_inq_att(handle % pio_file, varid, attName, xtype, len)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ return
+ end if
+ ! NOTE(review): only PIO_double is accepted here, which assumes
+ ! double-precision attributes; a single-precision (PIO_real) file
+ ! attribute would be rejected -- confirm against RKIND configuration
+ if (xtype /= PIO_double) then
+ if (present(ierr)) ierr=MPAS_IO_ERR_WRONG_ATT_TYPE
+ return
+ end if
+
+ pio_ierr = PIO_get_att(handle % pio_file, varid, attName, attValue)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ return
+ end if
+
+ ! Keep attribute for future reference
+ allocate(new_att_node)
+ nullify(new_att_node % next)
+ allocate(new_att_node % atthandle)
+ new_att_node % atthandle % attName = attName
+ new_att_node % atthandle % attType = ATT_REAL
+ new_att_node % atthandle % attValueReal = attValue
+
+ ! Append the cached value to the field's (or the global) attribute list
+ if (present(fieldname)) then
+ if (.not. associated(field_cursor % fieldhandle % attlist_head)) then
+ field_cursor % fieldhandle % attlist_head => new_att_node
+!write(0,*) 'Assigning att head for '//trim(attName)
+ end if
+ if (.not. associated(field_cursor % fieldhandle % attlist_tail)) then
+ field_cursor % fieldhandle % attlist_tail => new_att_node
+!write(0,*) 'Assigning att tail for '//trim(attName)
+ else
+ field_cursor % fieldhandle % attlist_tail % next => new_att_node
+ field_cursor % fieldhandle % attlist_tail => field_cursor % fieldhandle % attlist_tail % next
+!write(0,*) 'Extending att tail for '//trim(attName)
+ end if
+ else
+ if (.not. associated(handle % attlist_head)) then
+ handle % attlist_head => new_att_node
+!write(0,*) 'Assigning att head for '//trim(attName)
+ end if
+ if (.not. associated(handle % attlist_tail)) then
+ handle % attlist_tail => new_att_node
+!write(0,*) 'Assigning att tail for '//trim(attName)
+ else
+ handle % attlist_tail % next => new_att_node
+ handle % attlist_tail => handle % attlist_tail % next
+!write(0,*) 'Extending att tail for '//trim(attName)
+ end if
+ end if
+
+ end subroutine MPAS_io_get_att_real0d
+
+
+ subroutine MPAS_io_get_att_real1d(handle, attName, attValue, fieldname, ierr)
+
+ implicit none
+
+ ! Read the 1-d real attribute attName, from variable fieldname when
+ ! present, or from the file's global attributes otherwise. On success,
+ ! attValue is allocated by this routine (size = attribute length); the
+ ! caller is responsible for deallocating it. Values are cached in the
+ ! handle's attribute lists so PIO is queried at most once per attribute.
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: attName
+ real (kind=RKIND), dimension(:), pointer :: attValue
+ character (len=*), intent(in), optional :: fieldname
+ integer, intent(out), optional :: ierr
+
+ integer :: pio_ierr
+ integer :: varid
+ integer :: xtype, len, attlen
+ type (fieldlist_type), pointer :: field_cursor
+ type (attlist_type), pointer :: att_cursor, new_att_node
+
+! write(0,*) 'Called MPAS_io_get_att_real1d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ ! Sanity checks
+ if (.not. handle % initialized) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNINIT_HANDLE
+ return
+ end if
+
+
+ !
+ ! For variable attributes, find the structure for fieldname
+ !
+ if (present(fieldname)) then
+ field_cursor => handle % fieldlist_head
+ do while (associated(field_cursor))
+ if (trim(fieldname) == trim(field_cursor % fieldhandle % fieldname)) then
+ varid = field_cursor % fieldhandle % fieldid
+ exit
+ end if
+ field_cursor => field_cursor % next
+ end do
+ if (.not. associated(field_cursor)) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNDEFINED_VAR
+ return
+ end if
+
+ ! Check whether we have this attribute cached
+ att_cursor => field_cursor % fieldhandle % attlist_head
+ do while (associated(att_cursor))
+ if (trim(att_cursor % atthandle % attName) == trim(attName)) then
+ if (att_cursor % atthandle % attType == ATT_REALA) then
+!write(0,*) 'Using cached attribute'
+ ! Return a copy of the cached array, not a pointer into the cache
+ allocate(attValue(size(att_cursor % atthandle % attValueRealA)))
+ attValue = att_cursor % atthandle % attValueRealA
+ else
+ if (present(ierr)) ierr=MPAS_IO_ERR_WRONG_ATT_TYPE
+ end if
+ return
+ end if
+ att_cursor => att_cursor % next
+ end do
+
+ else
+
+ ! Check whether we have this attribute cached
+ att_cursor => handle % attlist_head
+ do while (associated(att_cursor))
+ if (trim(att_cursor % atthandle % attName) == trim(attName)) then
+ if (att_cursor % atthandle % attType == ATT_REALA) then
+!write(0,*) 'Using cached attribute'
+ allocate(attValue(size(att_cursor % atthandle % attValueRealA)))
+ attValue = att_cursor % atthandle % attValueRealA
+ else
+ if (present(ierr)) ierr=MPAS_IO_ERR_WRONG_ATT_TYPE
+ end if
+ return
+ end if
+ att_cursor => att_cursor % next
+ end do
+
+ varid = PIO_global
+ end if
+
+ ! Query attribute value
+ pio_ierr = PIO_inq_att(handle % pio_file, varid, attName, xtype, len)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ return
+ end if
+
+ ! NOTE(review): only PIO_double is accepted -- assumes double-precision
+ ! attributes; confirm against RKIND configuration
+ if (xtype /= PIO_double) then
+ if (present(ierr)) ierr=MPAS_IO_ERR_WRONG_ATT_TYPE
+ return
+ end if
+
+ pio_ierr = PIO_inq_attlen(handle % pio_file, varid, attName, attlen)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ return
+ end if
+
+ ! NOTE(review): if PIO_get_att fails below, attValue remains allocated
+ allocate(attValue(attlen))
+ pio_ierr = PIO_get_att(handle % pio_file, varid, attName, attValue)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ return
+ end if
+
+ ! Keep attribute for future reference
+ allocate(new_att_node)
+ nullify(new_att_node % next)
+ allocate(new_att_node % atthandle)
+ new_att_node % atthandle % attName = attName
+ new_att_node % atthandle % attType = ATT_REALA
+ allocate(new_att_node % atthandle % attValueRealA(attlen))
+ new_att_node % atthandle % attValueRealA = attValue
+
+ ! Append the cached value to the field's (or the global) attribute list
+ if (present(fieldname)) then
+ if (.not. associated(field_cursor % fieldhandle % attlist_head)) then
+ field_cursor % fieldhandle % attlist_head => new_att_node
+!write(0,*) 'Assigning att head for '//trim(attName)
+ end if
+ if (.not. associated(field_cursor % fieldhandle % attlist_tail)) then
+ field_cursor % fieldhandle % attlist_tail => new_att_node
+!write(0,*) 'Assigning att tail for '//trim(attName)
+ else
+ field_cursor % fieldhandle % attlist_tail % next => new_att_node
+ field_cursor % fieldhandle % attlist_tail => field_cursor % fieldhandle % attlist_tail % next
+!write(0,*) 'Extending att tail for '//trim(attName)
+ end if
+ else
+ if (.not. associated(handle % attlist_head)) then
+ handle % attlist_head => new_att_node
+!write(0,*) 'Assigning att head for '//trim(attName)
+ end if
+ if (.not. associated(handle % attlist_tail)) then
+ handle % attlist_tail => new_att_node
+!write(0,*) 'Assigning att tail for '//trim(attName)
+ else
+ handle % attlist_tail % next => new_att_node
+ handle % attlist_tail => handle % attlist_tail % next
+!write(0,*) 'Extending att tail for '//trim(attName)
+ end if
+ end if
+
+ end subroutine MPAS_io_get_att_real1d
+
+
+ subroutine MPAS_io_get_att_text(handle, attName, attValue, fieldname, ierr)
+
+ implicit none
+
+ ! Read the text attribute attName, from variable fieldname when present,
+ ! or from the file's global attributes otherwise. Values are cached in
+ ! the handle's attribute lists so PIO is queried at most once per
+ ! attribute. Assignment into attValue follows Fortran character
+ ! assignment rules (blank-padded or truncated to len(attValue)).
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: attName
+ character (len=*), intent(out) :: attValue
+ character (len=*), intent(in), optional :: fieldname
+ integer, intent(out), optional :: ierr
+
+ integer :: pio_ierr
+ integer :: varid
+ integer :: xtype, len
+ type (fieldlist_type), pointer :: field_cursor
+ type (attlist_type), pointer :: att_cursor, new_att_node
+
+! write(0,*) 'Called MPAS_io_get_att_text()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ ! Sanity checks
+ if (.not. handle % initialized) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNINIT_HANDLE
+ return
+ end if
+
+
+ !
+ ! For variable attributes, find the structure for fieldname
+ !
+ if (present(fieldname)) then
+ field_cursor => handle % fieldlist_head
+ do while (associated(field_cursor))
+ if (trim(fieldname) == trim(field_cursor % fieldhandle % fieldname)) then
+ varid = field_cursor % fieldhandle % fieldid
+ exit
+ end if
+ field_cursor => field_cursor % next
+ end do
+ if (.not. associated(field_cursor)) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNDEFINED_VAR
+ return
+ end if
+
+ ! Check whether we have this attribute cached
+ att_cursor => field_cursor % fieldhandle % attlist_head
+ do while (associated(att_cursor))
+ if (trim(att_cursor % atthandle % attName) == trim(attName)) then
+ if (att_cursor % atthandle % attType == ATT_TEXT) then
+!write(0,*) 'Using cached attribute'
+ attValue = att_cursor % atthandle % attValueText
+ else
+ ! NOTE(review): on a cached type mismatch, attValue is returned unmodified
+ if (present(ierr)) ierr=MPAS_IO_ERR_WRONG_ATT_TYPE
+ end if
+ return
+ end if
+ att_cursor => att_cursor % next
+ end do
+
+ else
+
+ ! Check whether we have this attribute cached
+ att_cursor => handle % attlist_head
+ do while (associated(att_cursor))
+ if (trim(att_cursor % atthandle % attName) == trim(attName)) then
+ if (att_cursor % atthandle % attType == ATT_TEXT) then
+!write(0,*) 'Using cached attribute'
+ attValue = att_cursor % atthandle % attValueText
+ else
+ if (present(ierr)) ierr=MPAS_IO_ERR_WRONG_ATT_TYPE
+ end if
+ return
+ end if
+ att_cursor => att_cursor % next
+ end do
+
+ varid = PIO_global
+ end if
+
+ ! Query attribute value
+ pio_ierr = PIO_inq_att(handle % pio_file, varid, attName, xtype, len)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ return
+ end if
+ if (xtype /= PIO_char) then
+ if (present(ierr)) ierr=MPAS_IO_ERR_WRONG_ATT_TYPE
+ return
+ end if
+
+ pio_ierr = PIO_get_att(handle % pio_file, varid, attName, attValue)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ return
+ end if
+
+ ! Keep attribute for future reference
+ allocate(new_att_node)
+ nullify(new_att_node % next)
+ allocate(new_att_node % atthandle)
+ new_att_node % atthandle % attName = attName
+ new_att_node % atthandle % attType = ATT_TEXT
+ new_att_node % atthandle % attValueText = attValue
+
+ ! Append the cached value to the field's (or the global) attribute list
+ if (present(fieldname)) then
+ if (.not. associated(field_cursor % fieldhandle % attlist_head)) then
+ field_cursor % fieldhandle % attlist_head => new_att_node
+!write(0,*) 'Assigning att head for '//trim(attName)
+ end if
+ if (.not. associated(field_cursor % fieldhandle % attlist_tail)) then
+ field_cursor % fieldhandle % attlist_tail => new_att_node
+!write(0,*) 'Assigning att tail for '//trim(attName)
+ else
+ field_cursor % fieldhandle % attlist_tail % next => new_att_node
+ field_cursor % fieldhandle % attlist_tail => field_cursor % fieldhandle % attlist_tail % next
+!write(0,*) 'Extending att tail for '//trim(attName)
+ end if
+ else
+ if (.not. associated(handle % attlist_head)) then
+ handle % attlist_head => new_att_node
+!write(0,*) 'Assigning att head for '//trim(attName)
+ end if
+ if (.not. associated(handle % attlist_tail)) then
+ handle % attlist_tail => new_att_node
+!write(0,*) 'Assigning att tail for '//trim(attName)
+ else
+ handle % attlist_tail % next => new_att_node
+ handle % attlist_tail => handle % attlist_tail % next
+!write(0,*) 'Extending att tail for '//trim(attName)
+ end if
+ end if
+
+ end subroutine MPAS_io_get_att_text
+
+
+ subroutine MPAS_io_put_att_int0d(handle, attName, attValue, fieldname, ierr)
+
+ implicit none
+
+ ! Define the scalar integer attribute attName with value attValue, on
+ ! variable fieldname when present or as a global attribute otherwise.
+ ! The attribute is recorded in the handle's in-memory attribute list and
+ ! written to the file with PIO_put_att. Re-defining an existing attribute
+ ! with a different type or value yields MPAS_IO_ERR_REDEF_ATT;
+ ! re-defining it with the identical value is a silent no-op.
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: attName
+ integer, intent(in) :: attValue
+ character (len=*), intent(in), optional :: fieldname
+ integer, intent(out), optional :: ierr
+
+ integer :: pio_ierr
+ integer :: varid
+ type (fieldlist_type), pointer :: field_cursor
+ type (attlist_type), pointer :: attlist_cursor, new_attlist_node
+
+! write(0,*) 'Called MPAS_io_put_att_int0d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ ! Sanity checks: handle initialized, still in define mode, open for writing
+ if (.not. handle % initialized) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNINIT_HANDLE
+ return
+ end if
+ if (handle % data_mode) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_DATA_MODE
+ return
+ end if
+ if (handle % iomode /= MPAS_IO_WRITE) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_NOWRITE
+ return
+ end if
+
+
+ ! Build the list node up front; it must be deallocated on every early
+ ! return below that does not link it into an attribute list
+ allocate(new_attlist_node)
+ nullify(new_attlist_node % next)
+ allocate(new_attlist_node % attHandle)
+ new_attlist_node % attHandle % attName = attName
+ new_attlist_node % attHandle % attType = ATT_INT
+ new_attlist_node % attHandle % attValueInt = attValue
+
+
+ !
+ ! For variable attributes, find the structure for fieldname
+ !
+ if (present(fieldname)) then
+ field_cursor => handle % fieldlist_head
+ do while (associated(field_cursor))
+ if (trim(fieldname) == trim(field_cursor % fieldhandle % fieldname)) then
+
+ ! Check whether attribute was already defined
+ attlist_cursor => field_cursor % fieldhandle % attlist_head
+ do while (associated(attlist_cursor))
+ if (trim(attName) == trim(attlist_cursor % atthandle % attName)) then
+!write(0,*) 'Attribute already defined'
+ if (attlist_cursor % atthandle % attType /= ATT_INT .or. &
+ attlist_cursor % atthandle % attValueInt /= attValue) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_REDEF_ATT
+ end if
+ ! BUGFIX: the new node is unneeded whether or not the existing
+ ! definition matches; previously it leaked on the matching path
+ deallocate(new_attlist_node % attHandle)
+ deallocate(new_attlist_node)
+ return
+ end if
+ attlist_cursor => attlist_cursor % next
+ end do
+
+ varid = field_cursor % fieldhandle % fieldid
+ exit
+ end if
+ field_cursor => field_cursor % next
+ end do
+ if (.not. associated(field_cursor)) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNDEFINED_VAR
+ deallocate(new_attlist_node % attHandle)
+ deallocate(new_attlist_node)
+ return
+ end if
+
+ ! Add attribute to field attribute list
+ if (.not. associated(field_cursor % fieldhandle % attlist_head)) then
+ field_cursor % fieldhandle % attlist_head => new_attlist_node
+!write(0,*) 'Assigning att head for '//trim(attname)
+ end if
+ if (.not. associated(field_cursor % fieldhandle % attlist_tail)) then
+ field_cursor % fieldhandle % attlist_tail => new_attlist_node
+!write(0,*) 'Assigning att tail for '//trim(attname)
+ else
+ field_cursor % fieldhandle % attlist_tail % next => new_attlist_node
+ field_cursor % fieldhandle % attlist_tail => field_cursor % fieldhandle % attlist_tail % next
+!write(0,*) 'Extending att tail for '//trim(attname)
+ end if
+
+ else
+
+ ! Check whether attribute was already defined
+ attlist_cursor => handle % attlist_head
+ do while (associated(attlist_cursor))
+ if (trim(attName) == trim(attlist_cursor % atthandle % attName)) then
+!write(0,*) 'Attribute already defined'
+ if (attlist_cursor % atthandle % attType /= ATT_INT .or. &
+ attlist_cursor % atthandle % attValueInt /= attValue) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_REDEF_ATT
+ end if
+ ! BUGFIX: free the unused node on both the match and mismatch paths
+ deallocate(new_attlist_node % attHandle)
+ deallocate(new_attlist_node)
+ return
+ end if
+ attlist_cursor => attlist_cursor % next
+ end do
+
+ varid = PIO_global
+
+ ! Add attribute to global attribute list
+ if (.not. associated(handle % attlist_head)) then
+ handle % attlist_head => new_attlist_node
+!write(0,*) 'Assigning att head for '//trim(attname)
+ end if
+ if (.not. associated(handle % attlist_tail)) then
+ handle % attlist_tail => new_attlist_node
+!write(0,*) 'Assigning att tail for '//trim(attname)
+ else
+ handle % attlist_tail % next => new_attlist_node
+ handle % attlist_tail => handle % attlist_tail % next
+!write(0,*) 'Extending att tail for '//trim(attname)
+ end if
+ end if
+
+ pio_ierr = PIO_put_att(handle % pio_file, varid, attName, attValue)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ return
+ end if
+
+ ! Maybe we should add attribute to list only after a successfull call to PIO?
+
+ end subroutine MPAS_io_put_att_int0d
+
+
+ subroutine MPAS_io_put_att_int1d(handle, attName, attValue, fieldname, ierr)
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: attName
+ integer, dimension(:), intent(in) :: attValue
+ character (len=*), intent(in), optional :: fieldname
+ integer, intent(out), optional :: ierr
+
+ integer :: pio_ierr
+ integer :: varid
+ type (fieldlist_type), pointer :: field_cursor
+ type (attlist_type), pointer :: attlist_cursor, new_attlist_node
+
+! write(0,*) 'Called MPAS_io_put_att_int1d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ ! Sanity checks
+ if (.not. handle % initialized) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNINIT_HANDLE
+ return
+ end if
+ if (handle % data_mode) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_DATA_MODE
+ return
+ end if
+ if (handle % iomode /= MPAS_IO_WRITE) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_NOWRITE
+ return
+ end if
+
+
+ allocate(new_attlist_node)
+ nullify(new_attlist_node % next)
+ allocate(new_attlist_node % attHandle)
+ new_attlist_node % attHandle % attName = attName
+ new_attlist_node % attHandle % attType = ATT_INTA
+ allocate(new_attlist_node % attHandle % attValueIntA(size(attValue)))
+ new_attlist_node % attHandle % attValueIntA = attValue
+
+
+ !
+ ! For variable attributes, find the structure for fieldname
+ !
+ if (present(fieldname)) then
+ field_cursor => handle % fieldlist_head
+ do while (associated(field_cursor))
+ if (trim(fieldname) == trim(field_cursor % fieldhandle % fieldname)) then
+
+ ! Check whether attribute was already defined
+ attlist_cursor => field_cursor % fieldhandle % attlist_head
+ do while (associated(attlist_cursor))
+ if (trim(attName) == trim(attlist_cursor % atthandle % attName)) then
+!write(0,*) 'Attribute already defined'
+ if (attlist_cursor % atthandle % attType /= ATT_INTA .or. &
+ size(attlist_cursor % atthandle % attValueIntA) /= size(attValue)) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_REDEF_ATT
+ deallocate(new_attlist_node % attHandle)
+ deallocate(new_attlist_node)
+! else if (attlist_cursor % atthandle % attValueIntA(:) /= attValue(:)) then ! array sizes should match based on previous if-test
+! if (present(ierr)) ierr = MPAS_IO_ERR_REDEF_ATT
+! deallocate(new_attlist_node % attHandle)
+! deallocate(new_attlist_node)
+ end if
+ return
+ end if
+ attlist_cursor => attlist_cursor % next
+ end do
+
+ varid = field_cursor % fieldhandle % fieldid
+ exit
+ end if
+ field_cursor => field_cursor % next
+ end do
+ if (.not. associated(field_cursor)) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNDEFINED_VAR
+ deallocate(new_attlist_node % attHandle)
+ deallocate(new_attlist_node)
+ return
+ end if
+
+ ! Add attribute to field attribute list
+ if (.not. associated(field_cursor % fieldhandle % attlist_head)) then
+ field_cursor % fieldhandle % attlist_head => new_attlist_node
+!write(0,*) 'Assigning att head for '//trim(attname)
+ end if
+ if (.not. associated(field_cursor % fieldhandle % attlist_tail)) then
+ field_cursor % fieldhandle % attlist_tail => new_attlist_node
+!write(0,*) 'Assigning att tail for '//trim(attname)
+ else
+ field_cursor % fieldhandle % attlist_tail % next => new_attlist_node
+ field_cursor % fieldhandle % attlist_tail => field_cursor % fieldhandle % attlist_tail % next
+!write(0,*) 'Extending att tail for '//trim(attname)
+ end if
+
+ else
+
+ ! Check whether attribute was already defined
+ attlist_cursor => handle % attlist_head
+ do while (associated(attlist_cursor))
+ if (trim(attName) == trim(attlist_cursor % atthandle % attName)) then
+!write(0,*) 'Attribute already defined'
+ if (attlist_cursor % atthandle % attType /= ATT_INTA .or. &
+ size(attlist_cursor % atthandle % attValueIntA) /= size(attValue)) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_REDEF_ATT
+ deallocate(new_attlist_node % attHandle)
+ deallocate(new_attlist_node)
+! else if (attlist_cursor % atthandle % attValueIntA /= attValue) then
+! else if (attlist_cursor % atthandle % attValueIntA /= attValue) then
+! else if (attlist_cursor % atthandle % attValueIntA /= attValue) then
+! else if (attlist_cursor % atthandle % attValueIntA /= attValue) then
+ end if
+ return
+ end if
+ attlist_cursor => attlist_cursor % next
+ end do
+
+ varid = PIO_global
+
+ ! Add attribute to global attribute list
+ if (.not. associated(handle % attlist_head)) then
+ handle % attlist_head => new_attlist_node
+!write(0,*) 'Assigning att head for '//trim(attname)
+ end if
+ if (.not. associated(handle % attlist_tail)) then
+ handle % attlist_tail => new_attlist_node
+!write(0,*) 'Assigning att tail for '//trim(attname)
+ else
+ handle % attlist_tail % next => new_attlist_node
+ handle % attlist_tail => handle % attlist_tail % next
+!write(0,*) 'Extending att tail for '//trim(attname)
+ end if
+ end if
+
+ pio_ierr = PIO_put_att(handle % pio_file, varid, attName, attValue)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ return
+ end if
+
+ ! Maybe we should add attribute to list only after a successfull call to PIO?
+
+ end subroutine MPAS_io_put_att_int1d
+
+
+ subroutine MPAS_io_put_att_real0d(handle, attName, attValue, fieldname, ierr)
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: attName
+ real (kind=RKIND), intent(in) :: attValue
+ character (len=*), intent(in), optional :: fieldname
+ integer, intent(out), optional :: ierr
+
+ integer :: pio_ierr
+ integer :: varid
+ type (fieldlist_type), pointer :: field_cursor
+ type (attlist_type), pointer :: attlist_cursor, new_attlist_node
+
+! write(0,*) 'Called MPAS_io_put_att_real0d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ ! Sanity checks
+ if (.not. handle % initialized) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNINIT_HANDLE
+ return
+ end if
+ if (handle % data_mode) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_DATA_MODE
+ return
+ end if
+ if (handle % iomode /= MPAS_IO_WRITE) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_NOWRITE
+ return
+ end if
+
+
+ allocate(new_attlist_node)
+ nullify(new_attlist_node % next)
+ allocate(new_attlist_node % attHandle)
+ new_attlist_node % attHandle % attName = attName
+ new_attlist_node % attHandle % attType = ATT_REAL
+ new_attlist_node % attHandle % attValueReal = attValue
+
+
+ !
+ ! For variable attributes, find the structure for fieldname
+ !
+ if (present(fieldname)) then
+ field_cursor => handle % fieldlist_head
+ do while (associated(field_cursor))
+ if (trim(fieldname) == trim(field_cursor % fieldhandle % fieldname)) then
+
+ ! Check whether attribute was already defined
+ attlist_cursor => field_cursor % fieldhandle % attlist_head
+ do while (associated(attlist_cursor))
+ if (trim(attName) == trim(attlist_cursor % atthandle % attName)) then
+!write(0,*) 'Attribute already defined'
+ if (attlist_cursor % atthandle % attType /= ATT_REAL .or. &
+ attlist_cursor % atthandle % attValueReal /= attValue) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_REDEF_ATT
+ deallocate(new_attlist_node % attHandle)
+ deallocate(new_attlist_node)
+ end if
+ return
+ end if
+ attlist_cursor => attlist_cursor % next
+ end do
+
+ varid = field_cursor % fieldhandle % fieldid
+ exit
+ end if
+ field_cursor => field_cursor % next
+ end do
+ if (.not. associated(field_cursor)) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNDEFINED_VAR
+ deallocate(new_attlist_node % attHandle)
+ deallocate(new_attlist_node)
+ return
+ end if
+
+ ! Add attribute to field attribute list
+ if (.not. associated(field_cursor % fieldhandle % attlist_head)) then
+ field_cursor % fieldhandle % attlist_head => new_attlist_node
+!write(0,*) 'Assigning att head for '//trim(attname)
+ end if
+ if (.not. associated(field_cursor % fieldhandle % attlist_tail)) then
+ field_cursor % fieldhandle % attlist_tail => new_attlist_node
+!write(0,*) 'Assigning att tail for '//trim(attname)
+ else
+ field_cursor % fieldhandle % attlist_tail % next => new_attlist_node
+ field_cursor % fieldhandle % attlist_tail => field_cursor % fieldhandle % attlist_tail % next
+!write(0,*) 'Extending att tail for '//trim(attname)
+ end if
+
+ else
+
+ ! Check whether attribute was already defined
+ attlist_cursor => handle % attlist_head
+ do while (associated(attlist_cursor))
+ if (trim(attName) == trim(attlist_cursor % atthandle % attName)) then
+!write(0,*) 'Attribute already defined'
+ if (attlist_cursor % atthandle % attType /= ATT_REAL .or. &
+ attlist_cursor % atthandle % attValueReal /= attValue) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_REDEF_ATT
+ deallocate(new_attlist_node % attHandle)
+ deallocate(new_attlist_node)
+ end if
+ return
+ end if
+ attlist_cursor => attlist_cursor % next
+ end do
+
+ varid = PIO_global
+
+ ! Add attribute to global attribute list
+ if (.not. associated(handle % attlist_head)) then
+ handle % attlist_head => new_attlist_node
+!write(0,*) 'Assigning att head for '//trim(attname)
+ end if
+ if (.not. associated(handle % attlist_tail)) then
+ handle % attlist_tail => new_attlist_node
+!write(0,*) 'Assigning att tail for '//trim(attname)
+ else
+ handle % attlist_tail % next => new_attlist_node
+ handle % attlist_tail => handle % attlist_tail % next
+!write(0,*) 'Extending att tail for '//trim(attname)
+ end if
+ end if
+
+ pio_ierr = PIO_put_att(handle % pio_file, varid, attName, attValue)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ return
+ end if
+
+ ! Maybe we should add attribute to list only after a successfull call to PIO?
+
+ end subroutine MPAS_io_put_att_real0d
+
+
+ subroutine MPAS_io_put_att_real1d(handle, attName, attValue, fieldname, ierr)
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: attName
+ real (kind=RKIND), dimension(:), intent(in) :: attValue
+ character (len=*), intent(in), optional :: fieldname
+ integer, intent(out), optional :: ierr
+
+ integer :: pio_ierr
+ integer :: varid
+ type (fieldlist_type), pointer :: field_cursor
+ type (attlist_type), pointer :: attlist_cursor, new_attlist_node
+
+! write(0,*) 'Called MPAS_io_put_att_real1d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ ! Sanity checks
+ if (.not. handle % initialized) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNINIT_HANDLE
+ return
+ end if
+ if (handle % data_mode) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_DATA_MODE
+ return
+ end if
+ if (handle % iomode /= MPAS_IO_WRITE) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_NOWRITE
+ return
+ end if
+
+
+ allocate(new_attlist_node)
+ nullify(new_attlist_node % next)
+ allocate(new_attlist_node % attHandle)
+ new_attlist_node % attHandle % attName = attName
+ new_attlist_node % attHandle % attType = ATT_REALA
+ allocate(new_attlist_node % attHandle % attValueRealA(size(attValue)))
+ new_attlist_node % attHandle % attValueRealA = attValue
+
+
+ !
+ ! For variable attributes, find the structure for fieldname
+ !
+ if (present(fieldname)) then
+ field_cursor => handle % fieldlist_head
+ do while (associated(field_cursor))
+ if (trim(fieldname) == trim(field_cursor % fieldhandle % fieldname)) then
+
+ ! Check whether attribute was already defined
+ attlist_cursor => field_cursor % fieldhandle % attlist_head
+ do while (associated(attlist_cursor))
+ if (trim(attName) == trim(attlist_cursor % atthandle % attName)) then
+!write(0,*) 'Attribute already defined'
+ if (attlist_cursor % atthandle % attType /= ATT_REALA .or. &
+ size(attlist_cursor % atthandle % attValueRealA) /= size(attValue)) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_REDEF_ATT
+ deallocate(new_attlist_node % attHandle)
+ deallocate(new_attlist_node)
+! else if (attlist_cursor % atthandle % attValueIntA(:) /= attValue(:)) then ! array sizes should match based on previous if-test
+! if (present(ierr)) ierr = MPAS_IO_ERR_REDEF_ATT
+! deallocate(new_attlist_node % attHandle)
+! deallocate(new_attlist_node)
+ end if
+ return
+ end if
+ attlist_cursor => attlist_cursor % next
+ end do
+
+ varid = field_cursor % fieldhandle % fieldid
+ exit
+ end if
+ field_cursor => field_cursor % next
+ end do
+ if (.not. associated(field_cursor)) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNDEFINED_VAR
+ deallocate(new_attlist_node % attHandle)
+ deallocate(new_attlist_node)
+ return
+ end if
+
+ ! Add attribute to field attribute list
+ if (.not. associated(field_cursor % fieldhandle % attlist_head)) then
+ field_cursor % fieldhandle % attlist_head => new_attlist_node
+!write(0,*) 'Assigning att head for '//trim(attname)
+ end if
+ if (.not. associated(field_cursor % fieldhandle % attlist_tail)) then
+ field_cursor % fieldhandle % attlist_tail => new_attlist_node
+!write(0,*) 'Assigning att tail for '//trim(attname)
+ else
+ field_cursor % fieldhandle % attlist_tail % next => new_attlist_node
+ field_cursor % fieldhandle % attlist_tail => field_cursor % fieldhandle % attlist_tail % next
+!write(0,*) 'Extending att tail for '//trim(attname)
+ end if
+
+ else
+
+ ! Check whether attribute was already defined
+ attlist_cursor => handle % attlist_head
+ do while (associated(attlist_cursor))
+ if (trim(attName) == trim(attlist_cursor % atthandle % attName)) then
+!write(0,*) 'Attribute already defined'
+ if (attlist_cursor % atthandle % attType /= ATT_REALA .or. &
+ size(attlist_cursor % atthandle % attValueRealA) /= size(attValue)) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_REDEF_ATT
+ deallocate(new_attlist_node % attHandle)
+ deallocate(new_attlist_node)
+! else if (attlist_cursor % atthandle % attValueIntA /= attValue) then
+! else if (attlist_cursor % atthandle % attValueIntA /= attValue) then
+! else if (attlist_cursor % atthandle % attValueIntA /= attValue) then
+! else if (attlist_cursor % atthandle % attValueIntA /= attValue) then
+ end if
+ return
+ end if
+ attlist_cursor => attlist_cursor % next
+ end do
+
+ varid = PIO_global
+
+ ! Add attribute to global attribute list
+ if (.not. associated(handle % attlist_head)) then
+ handle % attlist_head => new_attlist_node
+!write(0,*) 'Assigning att head for '//trim(attname)
+ end if
+ if (.not. associated(handle % attlist_tail)) then
+ handle % attlist_tail => new_attlist_node
+!write(0,*) 'Assigning att tail for '//trim(attname)
+ else
+ handle % attlist_tail % next => new_attlist_node
+ handle % attlist_tail => handle % attlist_tail % next
+!write(0,*) 'Extending att tail for '//trim(attname)
+ end if
+ end if
+
+ pio_ierr = PIO_put_att(handle % pio_file, varid, attName, attValue)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ return
+ end if
+
+ ! Maybe we should add attribute to list only after a successfull call to PIO?
+
+ end subroutine MPAS_io_put_att_real1d
+
+
+ subroutine MPAS_io_put_att_text(handle, attName, attValue, fieldname, ierr)
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: attName
+ character (len=*), intent(in) :: attValue
+ character (len=*), intent(in), optional :: fieldname
+ integer, intent(out), optional :: ierr
+
+ integer :: pio_ierr
+ integer :: varid
+ type (fieldlist_type), pointer :: field_cursor
+ type (attlist_type), pointer :: attlist_cursor, new_attlist_node
+
+! write(0,*) 'Called MPAS_io_put_att_text()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ ! Sanity checks
+ if (.not. handle % initialized) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNINIT_HANDLE
+ return
+ end if
+ if (handle % data_mode) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_DATA_MODE
+ return
+ end if
+ if (handle % iomode /= MPAS_IO_WRITE) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_NOWRITE
+ return
+ end if
+
+
+ allocate(new_attlist_node)
+ nullify(new_attlist_node % next)
+ allocate(new_attlist_node % attHandle)
+ new_attlist_node % attHandle % attName = attName
+ new_attlist_node % attHandle % attType = ATT_TEXT
+ new_attlist_node % attHandle % attValueText = attValue
+
+
+ !
+ ! For variable attributes, find the structure for fieldname
+ !
+ if (present(fieldname)) then
+ field_cursor => handle % fieldlist_head
+ do while (associated(field_cursor))
+ if (trim(fieldname) == trim(field_cursor % fieldhandle % fieldname)) then
+
+ ! Check whether attribute was already defined
+ attlist_cursor => field_cursor % fieldhandle % attlist_head
+ do while (associated(attlist_cursor))
+ if (trim(attName) == trim(attlist_cursor % atthandle % attName)) then
+!write(0,*) 'Attribute already defined'
+ if (attlist_cursor % atthandle % attType /= ATT_TEXT .or. &
+ trim(attlist_cursor % atthandle % attValueText) /= trim(attValue)) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_REDEF_ATT
+ deallocate(new_attlist_node % attHandle)
+ deallocate(new_attlist_node)
+ end if
+ return
+ end if
+ attlist_cursor => attlist_cursor % next
+ end do
+
+ varid = field_cursor % fieldhandle % fieldid
+ exit
+ end if
+ field_cursor => field_cursor % next
+ end do
+ if (.not. associated(field_cursor)) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNDEFINED_VAR
+ deallocate(new_attlist_node % attHandle)
+ deallocate(new_attlist_node)
+ return
+ end if
+
+ ! Add attribute to field attribute list
+ if (.not. associated(field_cursor % fieldhandle % attlist_head)) then
+ field_cursor % fieldhandle % attlist_head => new_attlist_node
+!write(0,*) 'Assigning att head for '//trim(attname)
+ end if
+ if (.not. associated(field_cursor % fieldhandle % attlist_tail)) then
+ field_cursor % fieldhandle % attlist_tail => new_attlist_node
+!write(0,*) 'Assigning att tail for '//trim(attname)
+ else
+ field_cursor % fieldhandle % attlist_tail % next => new_attlist_node
+ field_cursor % fieldhandle % attlist_tail => field_cursor % fieldhandle % attlist_tail % next
+!write(0,*) 'Extending att tail for '//trim(attname)
+ end if
+
+ else
+
+ ! Check whether attribute was already defined
+ attlist_cursor => handle % attlist_head
+ do while (associated(attlist_cursor))
+ if (trim(attName) == trim(attlist_cursor % atthandle % attName)) then
+!write(0,*) 'Attribute already defined'
+ if (attlist_cursor % atthandle % attType /= ATT_TEXT .or. &
+ trim(attlist_cursor % atthandle % attValueText) /= trim(attValue)) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_REDEF_ATT
+ deallocate(new_attlist_node % attHandle)
+ deallocate(new_attlist_node)
+ end if
+ return
+ end if
+ attlist_cursor => attlist_cursor % next
+ end do
+
+ varid = PIO_global
+
+ ! Add attribute to global attribute list
+ if (.not. associated(handle % attlist_head)) then
+ handle % attlist_head => new_attlist_node
+!write(0,*) 'Assigning att head for '//trim(attname)
+ end if
+ if (.not. associated(handle % attlist_tail)) then
+ handle % attlist_tail => new_attlist_node
+!write(0,*) 'Assigning att tail for '//trim(attname)
+ else
+ handle % attlist_tail % next => new_attlist_node
+ handle % attlist_tail => handle % attlist_tail % next
+!write(0,*) 'Extending att tail for '//trim(attname)
+ end if
+ end if
+
+ pio_ierr = PIO_put_att(handle % pio_file, varid, attName, trim(attValue))
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ return
+ end if
+
+ ! Maybe we should add attribute to list only after a successfull call to PIO?
+
+ end subroutine MPAS_io_put_att_text
+
+
+ subroutine MPAS_io_set_frame(handle, frame, ierr)
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ integer, intent(in) :: frame
+ integer, intent(out), optional :: ierr
+
+! write(0,*) 'Called MPAS_io_set_frame()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ handle % frame_number = frame
+
+ end subroutine MPAS_io_set_frame
+
+
+ subroutine MPAS_io_advance_frame(handle, ierr)
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ integer, intent(out), optional :: ierr
+
+! write(0,*) 'Called MPAS_io_advance_frame()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ handle % frame_number = handle % frame_number + 1
+
+ end subroutine MPAS_io_advance_frame
+
+
+ subroutine MPAS_io_sync(handle, ierr)
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ integer, intent(out), optional :: ierr
+
+! write(0,*) 'Called MPAS_io_sync()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ ! Sanity checks
+ if (.not. handle % initialized) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNINIT_HANDLE
+ return
+ end if
+
+ call PIO_syncfile(handle % pio_file)
+
+ end subroutine MPAS_io_sync
+
+
+ subroutine MPAS_io_close(handle, ierr)
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ integer, intent(out), optional :: ierr
+
+ type (dimlist_type), pointer :: dimlist_ptr, dimlist_del
+ type (fieldlist_type), pointer :: fieldlist_ptr, fieldlist_del
+ type (attlist_type), pointer :: attlist_ptr, attlist_del
+
+! write(0,*) 'Called MPAS_io_close()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ ! Sanity checks
+ if (.not. handle % initialized) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_UNINIT_HANDLE
+ return
+ end if
+
+ ! Deallocate memory associated with the file
+ fieldlist_ptr => handle % fieldlist_head
+ do while (associated(fieldlist_ptr))
+ fieldlist_del => fieldlist_ptr
+ fieldlist_ptr => fieldlist_ptr % next
+
+ attlist_ptr => fieldlist_del % fieldhandle % attlist_head
+ do while (associated(attlist_ptr))
+ attlist_del => attlist_ptr
+ attlist_ptr => attlist_ptr % next
+ if (attlist_del % atthandle % attType == ATT_INTA) deallocate(attlist_del % atthandle % attValueIntA)
+ if (attlist_del % atthandle % attType == ATT_REALA) deallocate(attlist_del % atthandle % attValueRealA)
+ deallocate(attlist_del % atthandle)
+ end do
+ nullify(fieldlist_del % fieldhandle % attlist_head)
+ nullify(fieldlist_del % fieldhandle % attlist_tail)
+
+ deallocate(fieldlist_del % fieldhandle % dims)
+
+ deallocate(fieldlist_del % fieldhandle)
+ end do
+ nullify(handle % fieldlist_head)
+ nullify(handle % fieldlist_tail)
+
+ dimlist_ptr => handle % dimlist_head
+ do while (associated(dimlist_ptr))
+ dimlist_del => dimlist_ptr
+ dimlist_ptr => dimlist_ptr % next
+ deallocate(dimlist_del % dimhandle)
+ end do
+ nullify(handle % dimlist_head)
+ nullify(handle % dimlist_tail)
+
+ attlist_ptr => handle % attlist_head
+ do while (associated(attlist_ptr))
+ attlist_del => attlist_ptr
+ attlist_ptr => attlist_ptr % next
+ if (attlist_del % atthandle % attType == ATT_INTA) deallocate(attlist_del % atthandle % attValueIntA)
+ if (attlist_del % atthandle % attType == ATT_REALA) deallocate(attlist_del % atthandle % attValueRealA)
+ deallocate(attlist_del % atthandle)
+ end do
+ nullify(handle % attlist_head)
+ nullify(handle % attlist_tail)
+
+ handle % initialized = .false.
+
+!write(0,*) 'MGD PIO_closefile'
+ call PIO_closefile(handle % pio_file)
+
+ end subroutine MPAS_io_close
+
+
+ subroutine MPAS_io_finalize(ierr)
+
+ implicit none
+
+ integer, intent(out), optional :: ierr
+
+ integer :: pio_ierr
+ type (decomplist_type), pointer :: decomp_cursor, decomp_del
+
+! write(0,*) 'Called MPAS_io_finalize()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ decomp_cursor => decomp_list
+ do while (associated(decomp_cursor))
+ decomp_del => decomp_cursor
+ decomp_cursor => decomp_cursor % next
+!write(0,*) 'Deallocating a decomposition...'
+!if (.not. associated(decomp_del % decomphandle)) write(0,*) 'OOPS... do not have decomphandle'
+ deallocate(decomp_del % decomphandle % dims)
+ deallocate(decomp_del % decomphandle % indices)
+ call PIO_freedecomp(pio_iosystem, decomp_del % decomphandle % pio_iodesc)
+ deallocate(decomp_del % decomphandle)
+ deallocate(decomp_del)
+ end do
+
+!write(0,*) 'MGD PIO_finalize'
+ call PIO_finalize(pio_iosystem, pio_ierr)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ return
+ end if
+
+ end subroutine MPAS_io_finalize
+
+
+ subroutine MPAS_io_err_mesg(ierr, fatal)
+
+ implicit none
+
+ integer, intent(in) :: ierr
+ logical, intent(in) :: fatal
+
+ select case (ierr)
+ case (MPAS_IO_NOERR)
+ ! ... do nothing ...
+ case (MPAS_IO_ERR_INVALID_MODE)
+ write(0,*) 'MPAS IO Error: Invalid file access mode'
+ case (MPAS_IO_ERR_INVALID_FORMAT)
+ write(0,*) 'MPAS IO Error: Invalid I/O format'
+ case (MPAS_IO_ERR_LONG_FILENAME)
+ write(0,*) 'MPAS IO Error: Filename too long'
+ case (MPAS_IO_ERR_UNINIT_HANDLE)
+ write(0,*) 'MPAS IO Error: Uninitialized I/O handle'
+ case (MPAS_IO_ERR_PIO)
+ write(0,*) 'MPAS IO Error: Bad return value from PIO'
+ case (MPAS_IO_ERR_DATA_MODE)
+ write(0,*) 'MPAS IO Error: Cannot define in data mode'
+ case (MPAS_IO_ERR_NOWRITE)
+ write(0,*) 'MPAS IO Error: File not opened for writing'
+ case (MPAS_IO_ERR_REDEF_DIM)
+ write(0,*) 'MPAS IO Error: Inconsistent redefinition of dimension'
+ case (MPAS_IO_ERR_REDEF_VAR)
+ write(0,*) 'MPAS IO Error: Inconsistent redefinition of field'
+ case (MPAS_IO_ERR_UNDEFINED_DIM)
+ write(0,*) 'MPAS IO Error: Field uses undefined dimension'
+ case (MPAS_IO_ERR_UNDEFINED_VAR)
+ write(0,*) 'MPAS IO Error: Undefined field'
+ case (MPAS_IO_ERR_REDEF_ATT)
+ write(0,*) 'MPAS IO Error: Inconsistent redefinition of attribute'
+ case (MPAS_IO_ERR_WRONG_ATT_TYPE)
+ write(0,*) 'MPAS IO Error: Wrong type for requested attribute'
+ case (MPAS_IO_ERR_NO_DECOMP)
+ write(0,*) 'MPAS IO Error: Decomposition indices not set for field'
+ case (MPAS_IO_ERR_TWO_UNLIMITED_DIMS)
+ write(0,*) 'MPAS IO Error: Defining more than one unlimited dimension'
+ case (MPAS_IO_ERR_WRONG_MODE)
+ write(0,*) 'MPAS IO Error: Operation not permitted in this file mode'
+ case (MPAS_IO_ERR_NO_UNLIMITED_DIM)
+ write(0,*) 'MPAS IO Error: No unlimited dimension found in dataset'
+ case (MPAS_IO_ERR_UNIMPLEMENTED)
+ write(0,*) 'MPAS IO Error: Unimplemented functionality'
+ case default
+ write(0,*) 'MPAS IO Error: Unrecognized error code...'
+ end select
+
+ if (fatal .and. (ierr /= MPAS_IO_NOERR)) call mpas_dmpar_abort(local_dminfo)
+
+ end subroutine MPAS_io_err_mesg
+
+end module mpas_io
Modified: branches/atmos_physics/src/framework/mpas_io_input.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_io_input.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/framework/mpas_io_input.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -6,6 +6,7 @@
use mpas_sort
use mpas_configure
use mpas_timekeeping
+ use mpas_io_streams
#ifdef HAVE_ZOLTAN
@@ -15,37 +16,17 @@
integer, parameter :: STREAM_INPUT=1, STREAM_SFC=2, STREAM_RESTART=3
type io_input_object
- character (len=1024) :: filename
+ character (len=StrKIND) :: filename
integer :: rd_ncid
integer :: stream
integer :: time
-#include "io_input_obj_decls.inc"
+ type (MPAS_Stream_type) :: io_stream
+
end type io_input_object
- interface mpas_io_input_field
- module procedure mpas_io_input_field0d_real
- module procedure mpas_io_input_field1d_real
- module procedure mpas_io_input_field2d_real
- module procedure mpas_io_input_field3d_real
- module procedure mpas_io_input_field1d_integer
- module procedure mpas_io_input_field2d_integer
- module procedure mpas_io_input_field0d_char
- module procedure mpas_io_input_field1d_char
- end interface mpas_io_input_field
-
- interface mpas_io_input_field_time
- module procedure mpas_io_input_field0d_real_time
- module procedure mpas_io_input_field1d_real_time
- module procedure mpas_io_input_field2d_real_time
- module procedure mpas_io_input_field3d_real_time
- module procedure mpas_io_input_field1d_integer_time
- module procedure mpas_io_input_field0d_char_time
- module procedure mpas_io_input_field1d_char_time
- end interface mpas_io_input_field_time
-
type (exchange_list), pointer :: sendCellList, recvCellList
type (exchange_list), pointer :: sendEdgeList, recvEdgeList
type (exchange_list), pointer :: sendVertexList, recvVertexList
@@ -70,8 +51,12 @@
type (io_input_object) :: input_obj
#include "dim_decls.inc"
- character (len=16) :: c_on_a_sphere
+ character (len=StrKIND) :: c_on_a_sphere
real (kind=RKIND) :: r_sphere_radius
+
+ integer :: ierr
+ integer, dimension(:), pointer :: readIndices
+ type (MPAS_IO_Handle_type) :: inputHandle
type (field1dInteger) :: indexToCellIDField
type (field1dInteger) :: indexToEdgeIDField
@@ -93,12 +78,15 @@
type (field1DChar) :: xtime
- integer, dimension(:), pointer :: indexToCellID_0Halo
- integer, dimension(:), pointer :: nEdgesOnCell_0Halo
+ integer, dimension(:), pointer :: indexToCellID_0Halo
+ integer, dimension(:), pointer :: nEdgesOnCell_0Halo
integer, dimension(:,:), pointer :: cellsOnCell_0Halo
-
+
+ integer, dimension(:), pointer :: nEdgesOnCell_2Halo
+
integer, dimension(:,:), pointer :: edgesOnCell_2Halo
integer, dimension(:,:), pointer :: verticesOnCell_2Halo
+
integer, dimension(:,:), pointer :: cellsOnEdge_2Halo
integer, dimension(:,:), pointer :: cellsOnVertex_2Halo
@@ -115,6 +103,7 @@
#endif
integer, dimension(:), pointer :: local_cell_list, local_edge_list, local_vertex_list
+ integer, dimension(:), pointer :: block_id, block_start, block_count
integer, dimension(:), pointer :: local_vertlevel_list, needed_vertlevel_list
integer :: nlocal_edges, nlocal_vertices
type (exchange_list), pointer :: send1Halo, recv1Halo
@@ -127,9 +116,25 @@
type (MPAS_Time_type) :: sliceTime
type (MPAS_TimeInterval_type) :: timeDiff
type (MPAS_TimeInterval_type) :: minTimeDiff
- character(len=32) :: timeStamp
- character(len=1024) :: filename
+ character(len=StrKIND) :: timeStamp
+ character(len=StrKIND) :: filename
+ integer, parameter :: nHalos = 2
+ integer, dimension(nHalos+1) :: nCellsCumulative ! own cells, halo 1 cells, halo 2 cells
+ integer, dimension(nHalos+2) :: nEdgesCumulative ! own edges, own cell's edges, halo 1 edges, halo 2 edges
+ integer, dimension(nHalos+2) :: nVerticesCumulative ! own vertices, own cell's vertices, halo 1 vertices, halo 2 vertices
+
+ integer, dimension(nHalos) :: nCellsHalo ! halo 1 cells, halo 2 cells
+ integer, dimension(nHalos+1) :: nEdgesHalo ! own cell's edges, halo 1 edges, halo 2 edges
+ integer, dimension(nHalos+1) :: nVerticesHalo ! own cell's vertices, halo 1 vertices, halo 2 vertices
+
+ integer, dimension(:), pointer :: tempIDs
+ integer :: ntempIDs, offset
+
+ integer :: nHalo, nOwnCells, nOwnEdges, nOwnVertices, cellCount, edgeCount, vertexCount, iEdge, iVertex
+ type (hashtable) :: edgeHash, vertexHash
+
+
if (config_do_restart) then
! this get followed by set is to ensure that the time is in standard format
@@ -144,7 +149,19 @@
input_obj % filename = trim(config_input_name)
input_obj % stream = STREAM_INPUT
end if
- call mpas_io_input_init(input_obj, domain % dminfo)
+ inputHandle = MPAS_io_open(trim(input_obj % filename), MPAS_IO_READ, MPAS_IO_PNETCDF, ierr)
+ if (ierr /= MPAS_IO_NOERR) then
+ write(0,*) ' '
+ if (input_obj % stream == STREAM_RESTART) then
+ write(0,*) 'Error opening restart file ''', trim(input_obj % filename), ''''
+ else if (input_obj % stream == STREAM_INPUT) then
+ write(0,*) 'Error opening input file ''', trim(input_obj % filename), ''''
+ else if (input_obj % stream == STREAM_SFC) then
+ write(0,*) 'Error opening sfc file ''', trim(input_obj % filename), ''''
+ end if
+ write(0,*) ' '
+ call mpas_dmpar_abort(domain % dminfo)
+ end if
!
@@ -157,10 +174,10 @@
! from the input file
!
call mpas_dmpar_get_index_range(domain % dminfo, 1, nCells, readCellStart, readCellEnd)
- nReadCells = readCellEnd - readCellStart + 1
+ nReadCells = readCellEnd - readCellStart + 1
call mpas_dmpar_get_index_range(domain % dminfo, 1, nEdges, readEdgeStart, readEdgeEnd)
- nReadEdges = readEdgeEnd - readEdgeStart + 1
+ nReadEdges = readEdgeEnd - readEdgeStart + 1
call mpas_dmpar_get_index_range(domain % dminfo, 1, nVertices, readVertexStart, readVertexEnd)
nReadVertices = readVertexEnd - readVertexStart + 1
@@ -181,7 +198,13 @@
indexToCellIDField % ioinfo % start(1) = readCellStart
indexToCellIDField % ioinfo % count(1) = nReadCells
allocate(indexToCellIDField % array(nReadCells))
- call mpas_io_input_field(input_obj, indexToCellIDField)
+ allocate(readIndices(nReadCells))
+ do i=1,nReadCells
+ readIndices(i) = i + readCellStart - 1
+ end do
+ call MPAS_io_inq_var(inputHandle, 'indexToCellID', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'indexToCellID', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'indexToCellID', indexToCellIDField % array, ierr)
#ifdef HAVE_ZOLTAN
#ifdef _MPI
@@ -191,7 +214,9 @@
xCellField % ioinfo % start(1) = readCellStart
xCellField % ioinfo % count(1) = nReadCells
allocate(xCellField % array(nReadCells))
- call mpas_io_input_field(input_obj, xCellField)
+ call MPAS_io_inq_var(inputHandle, 'xCell', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'xCell', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'xCell', xCellField % array, ierr)
! Cell y-coordinates (in 3d Cartesian space)
allocate(yCellField % ioinfo)
@@ -199,7 +224,9 @@
yCellField % ioinfo % start(1) = readCellStart
yCellField % ioinfo % count(1) = nReadCells
allocate(yCellField % array(nReadCells))
- call mpas_io_input_field(input_obj, yCellField)
+ call MPAS_io_inq_var(inputHandle, 'yCell', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'yCell', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'yCell', yCellField % array, ierr)
! Cell z-coordinates (in 3d Cartesian space)
allocate(zCellField % ioinfo)
@@ -207,9 +234,12 @@
zCellField % ioinfo % start(1) = readCellStart
zCellField % ioinfo % count(1) = nReadCells
allocate(zCellField % array(nReadCells))
- call mpas_io_input_field(input_obj, zCellField)
+ call MPAS_io_inq_var(inputHandle, 'zCell', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'zCell', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'zCell', zCellField % array, ierr)
#endif
#endif
+ deallocate(readIndices)
! Global edge indices
@@ -218,7 +248,14 @@
indexToEdgeIDField % ioinfo % start(1) = readEdgeStart
indexToEdgeIDField % ioinfo % count(1) = nReadEdges
allocate(indexToEdgeIDField % array(nReadEdges))
- call mpas_io_input_field(input_obj, indexToEdgeIDField)
+ allocate(indexToEdgeIDField % array(nReadEdges))
+ allocate(readIndices(nReadEdges))
+ do i=1,nReadEdges
+ readIndices(i) = i + readEdgeStart - 1
+ end do
+ call MPAS_io_inq_var(inputHandle, 'indexToEdgeID', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'indexToEdgeID', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'indexToEdgeID', indexToEdgeIDField % array, ierr)
#ifdef HAVE_ZOLTAN
#ifdef _MPI
@@ -228,7 +265,9 @@
xEdgeField % ioinfo % start(1) = readEdgeStart
xEdgeField % ioinfo % count(1) = nReadEdges
allocate(xEdgeField % array(nReadEdges))
- call mpas_io_input_field(input_obj, xEdgeField)
+ call MPAS_io_inq_var(inputHandle, 'xEdge', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'xEdge', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'xEdge', xEdgeField % array, ierr)
! Edge y-coordinates (in 3d Cartesian space)
allocate(yEdgeField % ioinfo)
@@ -236,7 +275,9 @@
yEdgeField % ioinfo % start(1) = readEdgeStart
yEdgeField % ioinfo % count(1) = nReadEdges
allocate(yEdgeField % array(nReadEdges))
- call mpas_io_input_field(input_obj, yEdgeField)
+ call MPAS_io_inq_var(inputHandle, 'yEdge', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'yEdge', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'yEdge', yEdgeField % array, ierr)
! Edge z-coordinates (in 3d Cartesian space)
allocate(zEdgeField % ioinfo)
@@ -244,17 +285,27 @@
zEdgeField % ioinfo % start(1) = readEdgeStart
zEdgeField % ioinfo % count(1) = nReadEdges
allocate(zEdgeField % array(nReadEdges))
- call mpas_io_input_field(input_obj, zEdgeField)
+ call MPAS_io_inq_var(inputHandle, 'zEdge', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'zEdge', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'zEdge', zEdgeField % array, ierr)
#endif
#endif
+ deallocate(readIndices)
+
! Global vertex indices
allocate(indexToVertexIDField % ioinfo)
indexToVertexIDField % ioinfo % fieldName = 'indexToVertexID'
indexToVertexIDField % ioinfo % start(1) = readVertexStart
indexToVertexIDField % ioinfo % count(1) = nReadVertices
allocate(indexToVertexIDField % array(nReadVertices))
- call mpas_io_input_field(input_obj, indexToVertexIDField)
+ allocate(readIndices(nReadVertices))
+ do i=1,nReadVertices
+ readIndices(i) = i + readVertexStart - 1
+ end do
+ call MPAS_io_inq_var(inputHandle, 'indexToVertexID', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'indexToVertexID', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'indexToVertexID', indexToVertexIDField % array, ierr)
#ifdef HAVE_ZOLTAN
#ifdef _MPI
@@ -264,7 +315,9 @@
xVertexField % ioinfo % start(1) = readVertexStart
xVertexField % ioinfo % count(1) = nReadVertices
allocate(xVertexField % array(nReadVertices))
- call mpas_io_input_field(input_obj, xVertexField)
+ call MPAS_io_inq_var(inputHandle, 'xVertex', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'xVertex', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'xVertex', xVertexField % array, ierr)
! Vertex y-coordinates (in 3d Cartesian space)
allocate(yVertexField % ioinfo)
@@ -272,7 +325,9 @@
yVertexField % ioinfo % start(1) = readVertexStart
yVertexField % ioinfo % count(1) = nReadVertices
allocate(yVertexField % array(nReadVertices))
- call mpas_io_input_field(input_obj, yVertexField)
+ call MPAS_io_inq_var(inputHandle, 'yVertex', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'yVertex', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'yVertex', yVertexField % array, ierr)
! Vertex z-coordinates (in 3d Cartesian space)
allocate(zVertexField % ioinfo)
@@ -280,9 +335,12 @@
zVertexField % ioinfo % start(1) = readVertexStart
zVertexField % ioinfo % count(1) = nReadVertices
allocate(zVertexField % array(nReadVertices))
- call mpas_io_input_field(input_obj, zVertexField)
+ call MPAS_io_inq_var(inputHandle, 'zVertex', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'zVertex', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'zVertex', zVertexField % array, ierr)
#endif
#endif
+ deallocate(readIndices)
! Number of cell/edges/vertices adjacent to each cell
allocate(nEdgesOnCellField % ioinfo)
@@ -290,7 +348,13 @@
nEdgesOnCellField % ioinfo % start(1) = readCellStart
nEdgesOnCellField % ioinfo % count(1) = nReadCells
allocate(nEdgesOnCellField % array(nReadCells))
- call mpas_io_input_field(input_obj, nEdgesOnCellField)
+ allocate(readIndices(nReadCells))
+ do i=1,nReadCells
+ readIndices(i) = i + readCellStart - 1
+ end do
+ call MPAS_io_inq_var(inputHandle, 'nEdgesOnCell', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'nEdgesOnCell', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'nEdgesOnCell', nEdgesOnCellField % array, ierr)
! Global indices of cells adjacent to each cell
allocate(cellsOnCellField % ioinfo)
@@ -300,7 +364,9 @@
cellsOnCellField % ioinfo % count(1) = maxEdges
cellsOnCellField % ioinfo % count(2) = nReadCells
allocate(cellsOnCellField % array(maxEdges,nReadCells))
- call mpas_io_input_field(input_obj, cellsOnCellField)
+ call MPAS_io_inq_var(inputHandle, 'cellsOnCell', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'cellsOnCell', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'cellsOnCell', cellsOnCellField % array, ierr)
! Global indices of edges adjacent to each cell
allocate(edgesOnCellField % ioinfo)
@@ -310,7 +376,9 @@
edgesOnCellField % ioinfo % count(1) = maxEdges
edgesOnCellField % ioinfo % count(2) = nReadCells
allocate(edgesOnCellField % array(maxEdges,nReadCells))
- call mpas_io_input_field(input_obj, edgesOnCellField)
+ call MPAS_io_inq_var(inputHandle, 'edgesOnCell', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'edgesOnCell', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'edgesOnCell', edgesOnCellField % array, ierr)
! Global indices of vertices adjacent to each cell
allocate(verticesOnCellField % ioinfo)
@@ -320,7 +388,10 @@
verticesOnCellField % ioinfo % count(1) = maxEdges
verticesOnCellField % ioinfo % count(2) = nReadCells
allocate(verticesOnCellField % array(maxEdges,nReadCells))
- call mpas_io_input_field(input_obj, verticesOnCellField)
+ call MPAS_io_inq_var(inputHandle, 'verticesOnCell', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'verticesOnCell', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'verticesOnCell', verticesOnCellField % array, ierr)
+ deallocate(readIndices)
! Global indices of cells adjacent to each edge
! used for determining which edges are owned by a block, where
@@ -332,7 +403,14 @@
cellsOnEdgeField % ioinfo % count(1) = 2
cellsOnEdgeField % ioinfo % count(2) = nReadEdges
allocate(cellsOnEdgeField % array(2,nReadEdges))
- call mpas_io_input_field(input_obj, cellsOnEdgeField)
+ allocate(readIndices(nReadEdges))
+ do i=1,nReadEdges
+ readIndices(i) = i + readEdgeStart - 1
+ end do
+ call MPAS_io_inq_var(inputHandle, 'cellsOnEdge', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'cellsOnEdge', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'cellsOnEdge', cellsOnEdgeField % array, ierr)
+ deallocate(readIndices)
! Global indices of cells adjacent to each vertex
! used for determining which vertices are owned by a block, where
@@ -344,7 +422,14 @@
cellsOnVertexField % ioinfo % count(1) = vertexDegree
cellsOnVertexField % ioinfo % count(2) = nReadVertices
allocate(cellsOnVertexField % array(vertexDegree,nReadVertices))
- call mpas_io_input_field(input_obj, cellsOnVertexField)
+ allocate(readIndices(nReadVertices))
+ do i=1,nReadVertices
+ readIndices(i) = i + readVertexStart - 1
+ end do
+ call MPAS_io_inq_var(inputHandle, 'cellsOnVertex', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'cellsOnVertex', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'cellsOnVertex', cellsOnVertexField % array, ierr)
+ deallocate(readIndices)
!
@@ -373,7 +458,7 @@
! Determine which cells are owned by this process
- call mpas_block_decomp_cells_for_proc(domain % dminfo, partial_global_graph_info, local_cell_list)
+ call mpas_block_decomp_cells_for_proc(domain % dminfo, partial_global_graph_info, local_cell_list, block_id, block_start, block_count)
deallocate(partial_global_graph_info % vertexID)
deallocate(partial_global_graph_info % nAdjacent)
@@ -489,6 +574,8 @@
block_graph_2Halo % nVertices = block_graph_0Halo % nVertices
block_graph_2Halo % ghostStart = block_graph_2Halo % nVertices + 1
+ nOwnCells = block_graph_2Halo % nVertices
+
#ifdef HAVE_ZOLTAN
#ifdef _MPI
!! For now, only use Zoltan with MPI
@@ -524,6 +611,7 @@
! on each cell and which vertices are on each cell from the processes that read these
! fields for each cell to the processes that own the cells
!
+ allocate(nEdgesOnCell_2Halo(block_graph_2Halo % nVerticesTotal))
allocate(edgesOnCell_2Halo(maxEdges, block_graph_2Halo % nVerticesTotal))
allocate(verticesOnCell_2Halo(maxEdges, block_graph_2Halo % nVerticesTotal))
@@ -532,6 +620,10 @@
indexToCellIDField % array, block_graph_2Halo % vertexID, &
sendCellList, recvCellList)
+ call mpas_dmpar_alltoall_field(domain % dminfo, nEdgesOnCellField % array, nEdgesOnCell_2Halo, &
+ size(indexToCellIDField % array), size(local_cell_list), &
+ sendCellList, recvCellList)
+
call mpas_dmpar_alltoall_field(domain % dminfo, edgesOnCellField % array, edgesOnCell_2Halo, &
maxEdges, nReadCells, block_graph_2Halo % nVerticesTotal, &
sendCellList, recvCellList)
@@ -576,15 +668,90 @@
sendVertexList, recvVertexList)
- call mpas_block_decomp_partitioned_edge_list(block_graph_2Halo % nVertices, &
- block_graph_2Halo % vertexID(1:block_graph_2Halo % nVertices), &
+ call mpas_block_decomp_partitioned_edge_list(nOwnCells, &
+ block_graph_2Halo % vertexID(1:nOwnCells), &
2, nlocal_edges, cellsOnEdge_2Halo, local_edge_list, ghostEdgeStart)
- call mpas_block_decomp_partitioned_edge_list(block_graph_2Halo % nVertices, &
- block_graph_2Halo % vertexID(1:block_graph_2Halo % nVertices), &
+
+ call mpas_block_decomp_partitioned_edge_list(nOwnCells, &
+ block_graph_2Halo % vertexID(1:nOwnCells), &
vertexDegree, nlocal_vertices, cellsOnVertex_2Halo, local_vertex_list, ghostVertexStart)
+ !------- set owned and halo cell indices -------!
+
+ nCellsCumulative(1) = nOwnCells
+ nCellsCumulative(2) = block_graph_1Halo % nVerticesTotal
+ nCellsCumulative(3) = block_graph_2Halo % nVerticesTotal
- ! At this point, local_edge_list(1;ghostEdgeStart-1) contains all of the owned edges for this block
+ !------- determine the perimeter and owned edges of own cells and halos -------!
+
+ nOwnEdges = ghostEdgeStart-1
+ nOwnVertices = ghostVertexStart-1
+
+ ! skip the own edges found at the beginning of local_edge_list
+ call mpas_hash_init(edgeHash)
+ do i=1,nOwnEdges
+ call mpas_hash_insert(edgeHash, local_edge_list(i))
+ end do
+
+ ! skip the own vertices found at the beginning of local_vertex_list
+ call mpas_hash_init(vertexHash)
+ do i=1,nOwnVertices
+ call mpas_hash_insert(vertexHash, local_vertex_list(i))
+ end do
+
+ cellCount = 1 !tracks the index of the local cell array
+ edgeCount = nOwnEdges !tracks where to insert the next local edge
+ vertexCount = nOwnVertices !tracks where to insert the next local vertex
+
+ nEdgesCumulative(1) = nOwnEdges
+ nVerticesCumulative(1) = nOwnVertices
+
+ !Order the local_edge_list and local_vertex_list accordingly and set the bounds of each perimeter ----
+ do i = 1, nHalos + 1 ! for the own cells and each halo...
+ do j = cellCount, nCellsCumulative(i)
+
+ ! the number of edges on a cell is the same as the number of vertices, and therefore
+ ! nEdgesOnCell_2Halo(j) will be the correct upper bound for both edges and vertices on cell
+ do k = 1, nEdgesOnCell_2Halo(j)
+ iEdge = edgesOnCell_2Halo(k,j)
+ if (.not. mpas_hash_search(edgeHash, iEdge)) then
+ edgeCount = edgeCount + 1
+ local_edge_list(edgeCount) = iEdge
+ call mpas_hash_insert(edgeHash, iEdge)
+ end if
+
+ iVertex = verticesOnCell_2Halo(k,j)
+ if (.not. mpas_hash_search(vertexHash, iVertex)) then
+ vertexCount = vertexCount + 1
+ local_vertex_list(vertexCount) = iVertex
+ call mpas_hash_insert(vertexHash, iVertex)
+ end if
+ end do
+
+ end do
+
+ cellCount = nCellsCumulative(i) + 1
+ nEdgesCumulative(i+1) = edgeCount
+ nVerticesCumulative(i+1) = vertexCount
+ end do
+
+ do i = 1, nHalos
+ nCellsHalo(i) = nCellsCumulative(i+1) - nCellsCumulative(i)
+ end do
+
+ do i = 1, nHalos + 1
+ nEdgesHalo(i) = nEdgesCumulative(i+1) - nEdgesCumulative(i)
+ end do
+
+ do i = 1, nHalos + 1
+ nVerticesHalo(i) = nVerticesCumulative(i+1) - nVerticesCumulative(i)
+ end do
+
+ call mpas_hash_destroy(edgeHash)
+ call mpas_hash_destroy(vertexHash)
+
+
+ ! At this point, local_edge_list(1:nOwnEdges) contains all of the owned edges for this block
! and local_edge_list(ghostEdgeStart:nlocal_edges) contains all of the ghost edges
! At this point, local_vertex_list(1;ghostVertexStart-1) contains all of the owned vertices for this block
@@ -656,13 +823,13 @@
!!!!!!!!!!!!!!!!!!
!! Reorder edges
!!!!!!!!!!!!!!!!!!
- call mpas_zoltan_order_loc_hsfc_edges(ghostEdgeStart-1,local_edge_list,3,xEdge,yEdge,zEdge)
+ call mpas_zoltan_order_loc_hsfc_edges(nOwnEdges,local_edge_list,3,xEdge,yEdge,zEdge)
!!!!!!!!!!!!!!!!!!
!!!!!!!!!!!!!!!!!!
!! Reorder vertices
!!!!!!!!!!!!!!!!!!
- call mpas_zoltan_order_loc_hsfc_verts(ghostVertexStart-1,local_vertex_list,3,xVertex,yVertex,zVertex)
+ call mpas_zoltan_order_loc_hsfc_verts(nOwnVertices,local_vertex_list,3,xVertex,yVertex,zVertex)
!!!!!!!!!!!!!!!!!!
deallocate(sendEdgeList % list)
@@ -730,73 +897,71 @@
nEdges = nlocal_edges
nVertices = nlocal_vertices
- call mpas_allocate_block(domain % blocklist, domain, &
+ call mpas_allocate_block(domain % blocklist, domain, domain%dminfo%my_proc_id, &
#include "dim_dummy_args.inc"
)
+!!!!!!!!!!MGD HERE WE NEED TO READ IN indexTo*ID fields !!!!!!!!!!!!!!!!!
+ call MPAS_io_inq_var(inputHandle, 'indexToCellID', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'indexToCellID', local_cell_list(1:nOwnCells), ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'indexToCellID', domain % blocklist % mesh % indexToCellID % array, ierr)
+
+ call MPAS_io_inq_var(inputHandle, 'indexToEdgeID', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'indexToEdgeID', local_edge_list(1:nOwnEdges), ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'indexToEdgeID', domain % blocklist % mesh % indexToEdgeID % array, ierr)
+
+ call MPAS_io_inq_var(inputHandle, 'indexToVertexID', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'indexToVertexID', local_vertex_list(1:nOwnVertices), ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'indexToVertexID', domain % blocklist % mesh % indexToVertexID % array, ierr)
+
+ domain % blocklist % mesh % nCellsSolve = nOwnCells
+ domain % blocklist % mesh % nEdgesSolve = nOwnEdges
+ domain % blocklist % mesh % nVerticesSolve = nOwnVertices
+ domain % blocklist % mesh % nVertLevelsSolve = domain % blocklist % mesh % nVertLevels ! No vertical decomp yet...
+
+ call mpas_io_input_init(input_obj, domain % blocklist, domain % dminfo)
+
+
!
! Read attributes
!
- call mpas_io_input_get_att_text(input_obj, 'on_a_sphere', c_on_a_sphere)
- call mpas_io_input_get_att_real(input_obj, 'sphere_radius', r_sphere_radius)
- if (index(c_on_a_sphere, 'YES') /= 0) then
+ call MPAS_readStreamAtt(input_obj % io_stream, 'sphere_radius', r_sphere_radius, ierr)
+ if (ierr /= MPAS_STREAM_NOERR) then
+ write(0,*) 'Warning: Attribute sphere_radius not found in '//trim(input_obj % filename)
+ write(0,*) ' Setting sphere_radius to 1.0'
+ domain % blocklist % mesh % sphere_radius = 1.0
+ else
+ domain % blocklist % mesh % sphere_radius = r_sphere_radius
+ end if
+
+ call MPAS_readStreamAtt(input_obj % io_stream, 'on_a_sphere', c_on_a_sphere, ierr)
+ if (ierr /= MPAS_STREAM_NOERR) then
+ write(0,*) 'Warning: Attribute on_a_sphere not found in '//trim(input_obj % filename)
+ write(0,*) ' Setting on_a_sphere to ''YES'''
domain % blocklist % mesh % on_a_sphere = .true.
else
- domain % blocklist % mesh % on_a_sphere = .false.
+ if (index(c_on_a_sphere, 'YES') /= 0) then
+ domain % blocklist % mesh % on_a_sphere = .true.
+ else
+ domain % blocklist % mesh % on_a_sphere = .false.
+ end if
end if
- domain % blocklist % mesh % sphere_radius = r_sphere_radius
if (.not. config_do_restart) then
input_obj % time = 1
else
- input_obj % time = 1
-
!
! If doing a restart, we need to decide which time slice to read from the
! restart file
!
- if (input_obj % rdLocalTime <= 0) then
- write(0,*) 'Error: Couldn''t find any times in restart file.'
+ input_obj % time = MPAS_seekStream(input_obj % io_stream, config_start_time, MPAS_STREAM_EXACT_TIME, timeStamp, ierr)
+ if (ierr == MPAS_IO_ERR) then
+ write(0,*) 'Error: restart file '//trim(filename)//' did not contain time '//trim(config_start_time)
call mpas_dmpar_abort(domain % dminfo)
end if
- if (domain % dminfo % my_proc_id == IO_NODE) then
- allocate(xtime % ioinfo)
- xtime % ioinfo % start(1) = 1
- xtime % ioinfo % count(1) = input_obj % rdLocalTime
- allocate(xtime % array(input_obj % rdLocalTime))
+write(0,*) 'MGD DEBUGGING time = ', input_obj % time
+ write(0,*) 'Restarting model from time ', trim(timeStamp)
- xtime % ioinfo % fieldName = 'xtime'
- call mpas_io_input_field(input_obj, xtime)
-
- call mpas_set_timeInterval(interval=minTimeDiff, DD=10000)
- call mpas_set_time(curr_time=startTime, dateTimeString=config_start_time)
-
- do i=1,input_obj % rdLocalTime
- call mpas_set_time(curr_time=sliceTime, dateTimeString=xtime % array(i))
- timeDiff = abs(sliceTime - startTime)
- if (timeDiff < minTimeDiff) then
- minTimeDiff = timeDiff
- input_obj % time = i
- end if
- end do
-
- ! require restart time to exactly match start time (this error should never be reached as we have by this point opened the restart file with a name containing the startTime)
- if(sliceTime /= startTime) then
- write(0,*) "Error: restart file ", filename, " did not contain time ", config_start_time
- call mpas_dmpar_abort(domain % dminfo)
- end if
-
- timeStamp = xtime % array(input_obj % time)
-
- deallocate(xtime % ioinfo)
- deallocate(xtime % array)
- end if
-
- call mpas_dmpar_bcast_int(domain % dminfo, input_obj % time)
- call mpas_dmpar_bcast_char(domain % dminfo, timeStamp)
-
- write(0,*) 'Restarting model from time ', timeStamp
-
end if
@@ -810,17 +975,130 @@
! processes that own those indices based on
! {send,recv}{Cell,Edge,Vertex,VertLevel}List
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- call mpas_read_and_distribute_fields(domain % dminfo, input_obj, domain % blocklist, &
- readCellStart, nReadCells, readEdgeStart, nReadEdges, readVertexStart, nReadVertices, &
- readVertLevelStart, nReadVertLevels, &
- sendCellList, recvCellList, sendEdgeList, recvEdgeList, sendVertexList, recvVertexList, &
- sendVertLevelList, recvVertLevelList)
+ call mpas_read_and_distribute_fields(input_obj)
-
call mpas_io_input_finalize(input_obj, domain % dminfo)
+ call MPAS_io_close(inputHandle, ierr)
+
!
+ ! Work out halo exchange lists for cells, edges, and vertices
+ ! NB: The next pointer in each element of, e.g., cellsToSend, acts as the head pointer of
+ ! the list, since Fortran does not allow arrays of pointers
+ !
+
+ !--------- Create Cell Exchange Lists ---------!
+
+ ! pass in neededList of ownedCells and halo layer 1 cells
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ nOwnCells, nCellsCumulative(2), &
+ block_graph_2Halo % vertexID(1:nOwnCells), block_graph_2Halo % vertexID(1 : nCellsCumulative(2)), &
+ domain % blocklist % parinfo % cellsToSend(1) % next, domain % blocklist % parinfo % cellsToRecv(1) % next)
+
+ ! pass in neededList of ownedCells and halo layer 2 cells; offset of number of halo 1 cells is required
+ offset = nCellsHalo(1)
+ nTempIDs = nOwnCells + nCellsHalo(2)
+ allocate(tempIDs(nTempIDs))
+ tempIDs(1:nOwnCells) = block_graph_2Halo % vertexID(1:nOwnCells)
+ tempIDs(nOwnCells+1:nTempIDs) = block_graph_2Halo % vertexID(nCellsCumulative(2)+1 : nCellsCumulative(3))
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ nOwnCells, nTempIDs, &
+ block_graph_2Halo % vertexID(1:nOwnCells), tempIDs, &
+ domain % blocklist % parinfo % cellsToSend(2) % next, domain % blocklist % parinfo % cellsToRecv(2) % next, &
+ offset)
+ deallocate(tempIDs)
+
+
+ !--------- Create Edge Exchange Lists ---------!
+
+ ! pass in neededList of ownedEdges and ownedCell perimeter edges
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ nOwnEdges, nEdgesCumulative(2), &
+ local_edge_list(1:nOwnEdges), local_edge_list(1 : nEdgesCumulative(2)), &
+ domain % blocklist % parinfo % edgesToSend(1) % next, domain % blocklist % parinfo % edgesToRecv(1) % next)
+
+ ! pass in neededList of owned edges and yet-to-be-included edges from halo 1 cells; offset of number of ownedCell perimeter edges is required
+ offset = nEdgesHalo(1)
+ nTempIDs = nOwnEdges + nEdgesHalo(2)
+ allocate(tempIDs(nTempIDs))
+ tempIDs(1:nOwnEdges) = local_edge_list(1:nOwnEdges)
+ tempIDs(nOwnEdges+1:nTempIDs) = local_edge_list(nEdgesCumulative(2)+1 : nEdgesCumulative(3))
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ nOwnEdges, nTempIDs, &
+ local_edge_list(1:nOwnEdges), tempIDs, &
+ domain % blocklist % parinfo % edgesToSend(2) % next, domain % blocklist % parinfo % edgesToRecv(2) % next, &
+ offset)
+ deallocate(tempIDs)
+
+ ! pass in neededList of owned edges and yet-to-be-included edges from halo 2 cells; offset of number of ownedCell perimeter edges and halo 1 edges is required
+ offset = nEdgesHalo(1) + nEdgesHalo(2)
+ nTempIDs = nOwnEdges + nEdgesHalo(3)
+ allocate(tempIDs(nTempIDs))
+ tempIDs(1:nOwnEdges) = local_edge_list(1:nOwnEdges)
+ tempIDs(nOwnEdges+1:nTempIDs) = local_edge_list(nEdgesCumulative(3)+1 : nEdgesCumulative(4))
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ nOwnEdges, nTempIDs, &
+ local_edge_list(1:nOwnEdges), tempIDs, &
+ domain % blocklist % parinfo % edgesToSend(3) % next, domain % blocklist % parinfo % edgesToRecv(3) % next, &
+ offset)
+ deallocate(tempIDs)
+
+
+ !--------- Create Vertex Exchange Lists ---------!
+
+
+ ! pass in neededList of ownedVertices and ownedCell perimeter vertices
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ nOwnVertices, nVerticesCumulative(2), &
+ local_vertex_list(1:nOwnVertices), local_vertex_list(1 : nVerticesCumulative(2)), &
+ domain % blocklist % parinfo % verticesToSend(1) % next, domain % blocklist % parinfo % verticesToRecv(1) % next)
+
+ ! pass in neededList of owned vertices and yet-to-be-included vertices from halo 1 cells; offset of number of ownedCell perimeter vertices is required
+ offset = nVerticesHalo(1)
+ nTempIDs = nOwnVertices + nVerticesHalo(2)
+ allocate(tempIDs(nTempIDs))
+ tempIDs(1:nOwnVertices) = local_vertex_list(1:nOwnVertices)
+ tempIDs(nOwnVertices+1:nTempIDs) = local_vertex_list(nVerticesCumulative(2)+1 : nVerticesCumulative(3))
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ nOwnVertices, nTempIDs, &
+ local_vertex_list(1:nOwnVertices), tempIDs, &
+ domain % blocklist % parinfo % verticesToSend(2) % next, domain % blocklist % parinfo % verticesToRecv(2) % next, &
+ offset)
+ deallocate(tempIDs)
+
+ ! pass in neededList of owned vertices and yet-to-be-included vertices from halo 2 cells; offset of number of ownedCell perimeter vertices and halo 1 vertices is required
+ offset = nVerticesHalo(1) + nVerticesHalo(2)
+ nTempIDs = nOwnVertices + nVerticesHalo(3)
+ allocate(tempIDs(nTempIDs))
+ tempIDs(1:nOwnVertices) = local_vertex_list(1:nOwnVertices)
+ tempIDs(nOwnVertices+1:nTempIDs) = local_vertex_list(nVerticesCumulative(3)+1 : nVerticesCumulative(4))
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ nOwnVertices, nTempIDs, &
+ local_vertex_list(1:nOwnVertices), tempIDs, &
+ domain % blocklist % parinfo % verticesToSend(3) % next, domain % blocklist % parinfo % verticesToRecv(3) % next, &
+ offset)
+ deallocate(tempIDs)
+
+
+ domain % blocklist % mesh % nCellsSolve = nOwnCells
+ domain % blocklist % mesh % nEdgesSolve = nOwnEdges
+ domain % blocklist % mesh % nVerticesSolve = ghostVertexStart-1
+ domain % blocklist % mesh % nVertLevelsSolve = domain % blocklist % mesh % nVertLevels ! No vertical decomp yet...
+
+ ! Link the sendList and recvList pointers in each field type to the appropriate lists
+ ! in parinfo, e.g., cellsToSend and cellsToRecv; in future, it can also be extended to
+ ! link blocks of fields to each other
+ call mpas_create_field_links(domain % blocklist)
+
+
+ !
+ ! Exchange halos for all of the fields that were read from the input file
+ !
+ call mpas_exch_input_field_halos(domain, input_obj)
+
+
+ !
! Rename vertices in cellsOnCell, edgesOnCell, etc. to local indices
!
allocate(cellIDSorted(2,domain % blocklist % mesh % nCells))
@@ -855,7 +1133,6 @@
domain % blocklist % mesh % cellsOnCell % array(j,i) = cellIDSorted(2,k)
else
domain % blocklist % mesh % cellsOnCell % array(j,i) = domain % blocklist % mesh % nCells + 1
-! domain % blocklist % mesh % cellsOnCell % array(j,i) = 0
end if
k = mpas_binary_search(edgeIDSorted, 2, 1, domain % blocklist % mesh % nEdges, &
@@ -864,7 +1141,6 @@
domain % blocklist % mesh % edgesOnCell % array(j,i) = edgeIDSorted(2,k)
else
domain % blocklist % mesh % edgesOnCell % array(j,i) = domain % blocklist % mesh % nEdges + 1
-! domain % blocklist % mesh % edgesOnCell % array(j,i) = 0
end if
k = mpas_binary_search(vertexIDSorted, 2, 1, domain % blocklist % mesh % nVertices, &
@@ -873,7 +1149,6 @@
domain % blocklist % mesh % verticesOnCell % array(j,i) = vertexIDSorted(2,k)
else
domain % blocklist % mesh % verticesOnCell % array(j,i) = domain % blocklist % mesh % nVertices + 1
-! domain % blocklist % mesh % verticesOnCell % array(j,i) = 0
end if
end do
@@ -888,7 +1163,6 @@
domain % blocklist % mesh % cellsOnEdge % array(j,i) = cellIDSorted(2,k)
else
domain % blocklist % mesh % cellsOnEdge % array(j,i) = domain % blocklist % mesh % nCells + 1
-! domain % blocklist % mesh % cellsOnEdge % array(j,i) = 0
end if
k = mpas_binary_search(vertexIDSorted, 2, 1, domain % blocklist % mesh % nVertices, &
@@ -897,7 +1171,6 @@
domain % blocklist % mesh % verticesOnEdge % array(j,i) = vertexIDSorted(2,k)
else
domain % blocklist % mesh % verticesOnEdge % array(j,i) = domain % blocklist % mesh % nVertices + 1
-! domain % blocklist % mesh % verticesOnEdge % array(j,i) = 0
end if
end do
@@ -910,7 +1183,6 @@
domain % blocklist % mesh % edgesOnEdge % array(j,i) = edgeIDSorted(2,k)
else
domain % blocklist % mesh % edgesOnEdge % array(j,i) = domain % blocklist % mesh % nEdges + 1
-! domain % blocklist % mesh % edgesOnEdge % array(j,i) = 0
end if
end do
@@ -925,7 +1197,6 @@
domain % blocklist % mesh % cellsOnVertex % array(j,i) = cellIDSorted(2,k)
else
domain % blocklist % mesh % cellsOnVertex % array(j,i) = domain % blocklist % mesh % nCells + 1
-! domain % blocklist % mesh % cellsOnVertex % array(j,i) = 0
end if
k = mpas_binary_search(edgeIDSorted, 2, 1, domain % blocklist % mesh % nEdges, &
@@ -934,7 +1205,6 @@
domain % blocklist % mesh % edgesOnVertex % array(j,i) = edgeIDSorted(2,k)
else
domain % blocklist % mesh % edgesOnVertex % array(j,i) = domain % blocklist % mesh % nEdges + 1
-! domain % blocklist % mesh % edgesOnVertex % array(j,i) = 0
end if
end do
@@ -944,30 +1214,6 @@
deallocate(edgeIDSorted)
deallocate(vertexIDSorted)
-
- !
- ! Work out halo exchange lists for cells, edges, and vertices
- !
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- block_graph_2Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
- block_graph_2Halo % vertexID(1:block_graph_2Halo % nVertices), block_graph_2Halo % vertexID, &
- domain % blocklist % parinfo % cellsToSend, domain % blocklist % parinfo % cellsToRecv)
-
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- ghostEdgeStart-1, nlocal_edges, &
- local_edge_list(1:ghostEdgeStart-1), local_edge_list, &
- domain % blocklist % parinfo % edgesToSend, domain % blocklist % parinfo % edgesToRecv)
-
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- ghostVertexStart-1, nlocal_vertices, &
- local_vertex_list(1:ghostVertexStart-1), local_vertex_list, &
- domain % blocklist % parinfo % verticesToSend, domain % blocklist % parinfo % verticesToRecv)
-
- domain % blocklist % mesh % nCellsSolve = block_graph_2Halo % nVertices
- domain % blocklist % mesh % nEdgesSolve = ghostEdgeStart-1
- domain % blocklist % mesh % nVerticesSolve = ghostVertexStart-1
- domain % blocklist % mesh % nVertLevelsSolve = domain % blocklist % mesh % nVertLevels ! No vertical decomp yet...
-
!
! Deallocate fields, graphs, and other memory
@@ -1003,6 +1249,7 @@
deallocate(indexToCellID_0Halo)
deallocate(cellsOnEdge_2Halo)
deallocate(cellsOnVertex_2Halo)
+ deallocate(nEdgesOnCell_2Halo)
deallocate(edgesOnCell_2Halo)
deallocate(verticesOnCell_2Halo)
deallocate(block_graph_0Halo % vertexID)
@@ -1038,92 +1285,41 @@
end if
end do
+ do i=1,len_trim(filename)
+ if (filename(i:i) == ':') filename(i:i) = '.'
+ end do
+
end subroutine mpas_insert_string_suffix
- subroutine mpas_read_and_distribute_fields(dminfo, input_obj, block, &
- readCellsStart, readCellsCount, &
- readEdgesStart, readEdgesCount, &
- readVerticesStart, readVerticesCount, &
- readVertLevelsStart, readVertLevelsCount, &
- sendCellsList, recvCellsList, &
- sendEdgesList, recvEdgesList, &
- sendVerticesList, recvVerticesList, &
- sendVertLevelsList, recvVertLevelsList)
+ subroutine mpas_read_and_distribute_fields(input_obj)
implicit none
- type (dm_info), intent(in) :: dminfo
- type (io_input_object), intent(in) :: input_obj
- type (block_type), intent(inout) :: block
- integer, intent(in) :: readCellsStart, readCellsCount, readEdgesStart, readEdgesCount, readVerticesStart, readVerticesCount
- integer, intent(in) :: readVertLevelsStart, readVertLevelsCount
- type (exchange_list), pointer :: sendCellsList, recvCellsList
- type (exchange_list), pointer :: sendEdgesList, recvEdgesList
- type (exchange_list), pointer :: sendVerticesList, recvVerticesList
- type (exchange_list), pointer :: sendVertLevelsList, recvVertLevelsList
+ type (io_input_object), intent(inout) :: input_obj
- type (field1dInteger) :: int1d
- type (field2dInteger) :: int2d
- type (field0dReal) :: real0d
- type (field1dReal) :: real1d
- type (field2dReal) :: real2d
- type (field3dReal) :: real3d
- type (field0dChar) :: char0d
- type (field1dChar) :: char1d
+ integer :: ierr
- integer :: i1, i2, i3, i4
- integer, dimension(:), pointer :: super_int1d
- integer, dimension(:,:), pointer :: super_int2d
- real (kind=RKIND) :: super_real0d
- real (kind=RKIND), dimension(:), pointer :: super_real1d
- real (kind=RKIND), dimension(:,:), pointer :: super_real2d
- real (kind=RKIND), dimension(:,:,:), pointer :: super_real3d
- character (len=64) :: super_char0d
- character (len=64), dimension(:), pointer :: super_char1d
+ call MPAS_readStream(input_obj % io_stream, 1, ierr)
- integer :: i, k
-#include "nondecomp_dims.inc"
-
- allocate(int1d % ioinfo)
- allocate(int2d % ioinfo)
- allocate(real0d % ioinfo)
- allocate(real1d % ioinfo)
- allocate(real2d % ioinfo)
- allocate(real3d % ioinfo)
- allocate(char0d % ioinfo)
- allocate(char1d % ioinfo)
-
-
-#include "io_input_fields.inc"
-
-#include "nondecomp_dims_dealloc.inc"
-
end subroutine mpas_read_and_distribute_fields
- subroutine mpas_io_input_init(input_obj, dminfo)
+ subroutine mpas_io_input_init(input_obj, blocklist, dminfo)
implicit none
type (io_input_object), intent(inout) :: input_obj
+ type (block_type), intent(in) :: blocklist
type (dm_info), intent(in) :: dminfo
- include 'netcdf.inc'
-
integer :: nferr
-
-#ifdef OFFSET64BIT
- nferr = nf_open(trim(input_obj % filename), ior(NF_SHARE,NF_64BIT_OFFSET), input_obj % rd_ncid)
-#else
- nferr = nf_open(trim(input_obj % filename), NF_SHARE, input_obj % rd_ncid)
-#endif
-
- if (nferr /= NF_NOERR) then
+ call MPAS_createStream(input_obj % io_stream, trim(input_obj % filename), MPAS_IO_PNETCDF, MPAS_IO_READ, 1, nferr)
+ if (nferr /= MPAS_STREAM_NOERR) then
write(0,*) ' '
if (input_obj % stream == STREAM_RESTART) then
write(0,*) 'Error opening restart file ''', trim(input_obj % filename), ''''
@@ -1135,9 +1331,9 @@
write(0,*) ' '
call mpas_dmpar_abort(dminfo)
end if
-
-#include "netcdf_read_ids.inc"
+#include "add_input_fields.inc"
+
end subroutine mpas_io_input_init
@@ -1149,7 +1345,7 @@
character (len=*), intent(in) :: dimname
integer, intent(out) :: dimsize
-#include "get_dimension_by_name.inc"
+!include "get_dimension_by_name.inc"
end subroutine mpas_io_input_get_dimension
@@ -1162,24 +1358,8 @@
character (len=*), intent(in) :: attname
real (kind=RKIND), intent(out) :: attvalue
- include 'netcdf.inc'
-
integer :: nferr
- if (RKIND == 8) then
- nferr = nf_get_att_double(input_obj % rd_ncid, NF_GLOBAL, attname, attvalue)
- else
- nferr = nf_get_att_real(input_obj % rd_ncid, NF_GLOBAL, attname, attvalue)
- end if
- if (nferr /= NF_NOERR) then
- write(0,*) 'Warning: Attribute '//trim(attname)//&
- ' not found in '//trim(input_obj % filename)
- if (index(attname, 'sphere_radius') /= 0) then
- write(0,*) ' Setting '//trim(attname)//' to 1.0'
- attvalue = 1.0
- end if
- end if
-
end subroutine mpas_io_input_get_att_real
@@ -1191,448 +1371,23 @@
character (len=*), intent(in) :: attname
character (len=*), intent(out) :: attvalue
- include 'netcdf.inc'
-
integer :: nferr
- nferr = nf_get_att_text(input_obj % rd_ncid, NF_GLOBAL, attname, attvalue)
- if (nferr /= NF_NOERR) then
- write(0,*) 'Warning: Attribute '//trim(attname)//&
- ' not found in '//trim(input_obj % filename)
- if (index(attname, 'on_a_sphere') /= 0) then
- write(0,*) ' Setting '//trim(attname)//' to ''YES'''
- attvalue = 'YES'
- end if
- end if
-
end subroutine mpas_io_input_get_att_text
- subroutine mpas_io_input_field0d_real(input_obj, field)
-
- implicit none
+ subroutine mpas_exch_input_field_halos(domain, input_obj)
- type (io_input_object), intent(in) :: input_obj
- type (field0dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(1) :: start1, count1
-
- start1(1) = 1
- count1(1) = 1
-
-#include "input_field0dreal.inc"
-
-#if SINGLE_PRECISION
- nferr = nf_get_vara_real(input_obj % rd_ncid, varID, start1, count1, field % scalar)
-#else
- nferr = nf_get_vara_double(input_obj % rd_ncid, varID, start1, count1, field % scalar)
-#endif
-
- end subroutine mpas_io_input_field0d_real
-
-
- subroutine mpas_io_input_field1d_real(input_obj, field)
-
implicit none
- type (io_input_object), intent(in) :: input_obj
- type (field1dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(1) :: start1, count1
-
- start1(1) = field % ioinfo % start(1)
- count1(1) = field % ioinfo % count(1)
+ type (domain_type), intent(inout) :: domain
+ type (io_input_object), intent(inout) :: input_obj
- !
- ! Special case: we may want to read the xtime variable across the
- ! time dimension as a 1d array.
- !
- if (trim(field % ioinfo % fieldName) == 'xtime') then
- varID = input_obj % rdVarIDxtime
- end if
-
-#include "input_field1dreal.inc"
+#include "exchange_input_field_halos.inc"
-#if SINGLE_PRECISION
- nferr = nf_get_vara_real(input_obj % rd_ncid, varID, start1, count1, field % array)
-#else
- nferr = nf_get_vara_double(input_obj % rd_ncid, varID, start1, count1, field % array)
-#endif
-
- end subroutine mpas_io_input_field1d_real
+ end subroutine mpas_exch_input_field_halos
- subroutine mpas_io_input_field2d_real(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field2dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start2, count2
-
- start2(1) = field % ioinfo % start(1)
- start2(2) = field % ioinfo % start(2)
- count2(1) = field % ioinfo % count(1)
- count2(2) = field % ioinfo % count(2)
-
-#include "input_field2dreal.inc"
-
-#if SINGLE_PRECISION
- nferr = nf_get_vara_real(input_obj % rd_ncid, varID, start2, count2, field % array)
-#else
- nferr = nf_get_vara_double(input_obj % rd_ncid, varID, start2, count2, field % array)
-#endif
-
- end subroutine mpas_io_input_field2d_real
-
-
- subroutine mpas_io_input_field3d_real(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field3dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(3) :: start3, count3
-
- start3(1) = field % ioinfo % start(1)
- start3(2) = field % ioinfo % start(2)
- start3(3) = field % ioinfo % start(3)
- count3(1) = field % ioinfo % count(1)
- count3(2) = field % ioinfo % count(2)
- count3(3) = field % ioinfo % count(3)
-
-#include "input_field3dreal.inc"
-
-#if SINGLE_PRECISION
- nferr = nf_get_vara_real(input_obj % rd_ncid, varID, start3, count3, field % array)
-#else
- nferr = nf_get_vara_double(input_obj % rd_ncid, varID, start3, count3, field % array)
-#endif
-
- end subroutine mpas_io_input_field3d_real
-
-
- subroutine mpas_io_input_field0d_real_time(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field0dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(1) :: start1, count1
-
- start1(1) = input_obj % time
- count1(1) = 1
-
-#include "input_field0dreal_time.inc"
-
-#if SINGLE_PRECISION
- nferr = nf_get_vara_real(input_obj % rd_ncid, varID, start1, count1, field % scalar)
-#else
- nferr = nf_get_vara_double(input_obj % rd_ncid, varID, start1, count1, field % scalar)
-#endif
-
- end subroutine mpas_io_input_field0d_real_time
-
-
- subroutine mpas_io_input_field1d_real_time(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field1dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start2, count2
-
- start2(1) = field % ioinfo % start(1)
- start2(2) = input_obj % time
- count2(1) = field % ioinfo % count(1)
- count2(2) = 1
-
-#include "input_field1dreal_time.inc"
-
-#if SINGLE_PRECISION
- nferr = nf_get_vara_real(input_obj % rd_ncid, varID, start2, count2, field % array)
-#else
- nferr = nf_get_vara_double(input_obj % rd_ncid, varID, start2, count2, field % array)
-#endif
-
- end subroutine mpas_io_input_field1d_real_time
-
-
- subroutine mpas_io_input_field2d_real_time(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field2dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(3) :: start3, count3
-
- start3(1) = field % ioinfo % start(1)
- start3(2) = field % ioinfo % start(2)
- start3(3) = input_obj % time
- count3(1) = field % ioinfo % count(1)
- count3(2) = field % ioinfo % count(2)
- count3(3) = 1
-
-#include "input_field2dreal_time.inc"
-
-#if SINGLE_PRECISION
- nferr = nf_get_vara_real(input_obj % rd_ncid, varID, start3, count3, field % array)
-#else
- nferr = nf_get_vara_double(input_obj % rd_ncid, varID, start3, count3, field % array)
-#endif
-
- end subroutine mpas_io_input_field2d_real_time
-
-
- subroutine mpas_io_input_field3d_real_time(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field3dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(4) :: start4, count4
-
- start4(1) = field % ioinfo % start(1)
- start4(2) = field % ioinfo % start(2)
- start4(3) = field % ioinfo % start(3)
- start4(4) = input_obj % time
- count4(1) = field % ioinfo % count(1)
- count4(2) = field % ioinfo % count(2)
- count4(3) = field % ioinfo % count(3)
- count4(4) = 1
-
-#include "input_field3dreal_time.inc"
-
-#if SINGLE_PRECISION
- nferr = nf_get_vara_real(input_obj % rd_ncid, varID, start4, count4, field % array)
-#else
- nferr = nf_get_vara_double(input_obj % rd_ncid, varID, start4, count4, field % array)
-#endif
-
- end subroutine mpas_io_input_field3d_real_time
-
-
- subroutine mpas_io_input_field1d_integer(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field1dInteger), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(1) :: start1, count1
-
- start1(1) = field % ioinfo % start(1)
- count1(1) = field % ioinfo % count(1)
-
-#include "input_field1dinteger.inc"
-
- nferr = nf_get_vara_int(input_obj % rd_ncid, varID, start1, count1, field % array)
-
- end subroutine mpas_io_input_field1d_integer
-
-
- subroutine mpas_io_input_field2d_integer(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field2dInteger), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start2, count2
-
- start2(1) = field % ioinfo % start(1)
- start2(2) = field % ioinfo % start(2)
- count2(1) = field % ioinfo % count(1)
- count2(2) = field % ioinfo % count(2)
-
-#include "input_field2dinteger.inc"
-
- nferr = nf_get_vara_int(input_obj % rd_ncid, varID, start2, count2, field % array)
-
- end subroutine mpas_io_input_field2d_integer
-
-
- subroutine mpas_io_input_field1d_integer_time(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field1dInteger), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start2, count2
-
- start2(1) = field % ioinfo % start(1)
- start2(2) = input_obj % time
- count2(1) = field % ioinfo % count(1)
- count2(2) = 1
-
-#include "input_field1dinteger_time.inc"
-
- nferr = nf_get_vara_int(input_obj % rd_ncid, varID, start2, count2, field % array)
-
- end subroutine mpas_io_input_field1d_integer_time
-
-
- subroutine mpas_io_input_field0d_char_time(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field0dChar), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start1, count1
-
- start1(1) = 1
- count1(1) = 64
- start1(2) = input_obj % time
- count1(2) = 1
-
-#include "input_field0dchar_time.inc"
-
- nferr = nf_get_vara_text(input_obj % rd_ncid, varID, start1, count1, field % scalar)
-
- end subroutine mpas_io_input_field0d_char_time
-
-
- subroutine mpas_io_input_field1d_char_time(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field1dChar), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(3) :: start2, count2
-
- start2(1) = 1
- start2(2) = field % ioinfo % start(1)
- start2(3) = input_obj % time
- count2(1) = 64
- count2(2) = field % ioinfo % count(1)
- count2(3) = 1
-
-#include "input_field1dchar_time.inc"
-
- nferr = nf_get_vara_text(input_obj % rd_ncid, varID, start2, count2, field % array)
-
- end subroutine mpas_io_input_field1d_char_time
-
-
- subroutine mpas_io_input_field0d_char(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field0dChar), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start1, count1
-
- start1(1) = 1
- count1(1) = 64
- start1(2) = 1
- count1(2) = 1
-
-#include "input_field0dchar.inc"
-
- nferr = nf_get_vara_text(input_obj % rd_ncid, varID, start1, count1, field % scalar)
-
- end subroutine mpas_io_input_field0d_char
-
-
- subroutine mpas_io_input_field1d_char(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field1dChar), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start1, count1
-
- start1(1) = 1
- count1(1) = 64
- start1(2) = field % ioinfo % start(1)
- count1(2) = field % ioinfo % count(1)
-
- !
- ! Special case: we may want to read the xtime variable across the
- ! time dimension as a 1d array.
- !
- if (trim(field % ioinfo % fieldName) == 'xtime') then
- varID = input_obj % rdVarIDxtime
- end if
-
-#include "input_field1dchar.inc"
-
- nferr = nf_get_vara_text(input_obj % rd_ncid, varID, start1, count1, field % array)
-
- end subroutine mpas_io_input_field1d_char
-
-
subroutine mpas_io_input_finalize(input_obj, dminfo)
implicit none
@@ -1640,11 +1395,9 @@
type (io_input_object), intent(inout) :: input_obj
type (dm_info), intent(in) :: dminfo
- include 'netcdf.inc'
-
integer :: nferr
- nferr = nf_close(input_obj % rd_ncid)
+ call MPAS_closeStream(input_obj % io_stream, nferr)
end subroutine mpas_io_input_finalize
Modified: branches/atmos_physics/src/framework/mpas_io_output.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_io_output.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/framework/mpas_io_output.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -4,52 +4,22 @@
use mpas_dmpar
use mpas_sort
use mpas_configure
+ use mpas_io_streams
integer, parameter :: OUTPUT = 1
integer, parameter :: RESTART = 2
integer, parameter :: SFC = 3
type io_output_object
- integer :: wr_ncid
- character (len=1024) :: filename
+ character (len=StrKIND) :: filename
+ integer :: stream
integer :: time
- integer :: stream
-
- integer :: wrDimIDStrLen
-#include "io_output_obj_decls.inc"
-
- logical :: validExchangeLists
- type (exchange_list), pointer :: sendCellsList, recvCellsList
- type (exchange_list), pointer :: sendEdgesList, recvEdgesList
- type (exchange_list), pointer :: sendVerticesList, recvVerticesList
- type (exchange_list), pointer :: sendVertLevelsList, recvVertLevelsList
+ type (MPAS_Stream_type) :: io_stream
end type io_output_object
- interface mpas_io_output_field
- module procedure mpas_io_output_field0d_real
- module procedure mpas_io_output_field1d_real
- module procedure mpas_io_output_field2d_real
- module procedure mpas_io_output_field3d_real
- module procedure mpas_io_output_field1d_integer
- module procedure mpas_io_output_field2d_integer
- module procedure mpas_io_output_field0d_char
- module procedure mpas_io_output_field1d_char
- end interface mpas_io_output_field
-
- interface mpas_io_output_field_time
- module procedure mpas_io_output_field0d_real_time
- module procedure mpas_io_output_field1d_real_time
- module procedure mpas_io_output_field2d_real_time
- module procedure mpas_io_output_field3d_real_time
- module procedure mpas_io_output_field1d_integer_time
- module procedure mpas_io_output_field0d_char_time
- module procedure mpas_io_output_field1d_char_time
- end interface mpas_io_output_field_time
-
-
contains
@@ -62,29 +32,12 @@
character (len=*) :: stream
character (len=*), optional :: outputSuffix
- character (len=128) :: tempfilename
+ character (len=StrKIND) :: tempfilename
type (block_type), pointer :: block_ptr
-#include "output_dim_actual_decls.inc"
block_ptr => domain % blocklist
- nullify(output_obj % sendCellsList)
- nullify(output_obj % recvCellsList)
- nullify(output_obj % sendEdgesList)
- nullify(output_obj % recvEdgesList)
- nullify(output_obj % sendVerticesList)
- nullify(output_obj % recvVerticesList)
- nullify(output_obj % sendVertLevelsList)
- nullify(output_obj % recvVertLevelsList)
- output_obj % validExchangeLists = .false.
-#include "output_dim_inits.inc"
-
- call mpas_dmpar_sum_int(domain % dminfo, block_ptr % mesh % nCellsSolve, nCellsGlobal)
- call mpas_dmpar_sum_int(domain % dminfo, block_ptr % mesh % nEdgesSolve, nEdgesGlobal)
- call mpas_dmpar_sum_int(domain % dminfo, block_ptr % mesh % nVerticesSolve, nVerticesGlobal)
- nVertLevelsGlobal = block_ptr % mesh % nVertLevels
-
if (trim(stream) == 'OUTPUT') then
if(present(outputSuffix)) then
call mpas_insert_string_suffix(config_output_name, outputSuffix, tempfilename)
@@ -109,9 +62,8 @@
! For now, we assume that a domain consists only of one block,
! although in future, work needs to be done to write model state
! from many distributed blocks
- call mpas_io_output_init(output_obj, domain % dminfo, &
- block_ptr % mesh, &
-#include "output_dim_actual_args.inc"
+ call mpas_io_output_init(domain, output_obj, domain % dminfo, &
+ block_ptr % mesh &
)
end subroutine mpas_output_state_init
@@ -136,6 +88,10 @@
end if
end do
+ do i=1,len_trim(filename)
+ if (filename(i:i) == ':') filename(i:i) = '.'
+ end do
+
end subroutine mpas_insert_string_suffix
@@ -147,15 +103,8 @@
type (domain_type), intent(inout) :: domain
integer, intent(in) :: itime
+ integer :: ierr
integer :: i, j
- integer :: nCellsGlobal
- integer :: nEdgesGlobal
- integer :: nVerticesGlobal
- integer :: nVertLevelsGlobal
- integer, dimension(:), pointer :: neededCellList
- integer, dimension(:), pointer :: neededEdgeList
- integer, dimension(:), pointer :: neededVertexList
- integer, dimension(:), pointer :: neededVertLevelList
integer, dimension(:,:), pointer :: cellsOnCell, edgesOnCell, verticesOnCell, &
cellsOnEdge, verticesOnEdge, edgesOnEdge, cellsOnVertex, edgesOnVertex
integer, dimension(:,:), pointer :: cellsOnCell_save, edgesOnCell_save, verticesOnCell_save, &
@@ -170,35 +119,8 @@
type (field0dChar) :: char0d
type (field1dChar) :: char1d
- integer :: i1, i2, i3, i4
-
- integer, dimension(:), pointer :: super_int1d
- integer, dimension(:,:), pointer :: super_int2d
- real (kind=RKIND) :: super_real0d
- real (kind=RKIND), dimension(:), pointer :: super_real1d
- real (kind=RKIND), dimension(:,:), pointer :: super_real2d
- real (kind=RKIND), dimension(:,:,:), pointer :: super_real3d
- character (len=64) :: super_char0d
- character (len=64), dimension(:), pointer :: super_char1d
-
-#include "nondecomp_outputs.inc"
-
output_obj % time = itime
- allocate(int1d % ioinfo)
- allocate(int2d % ioinfo)
- allocate(real0d % ioinfo)
- allocate(real1d % ioinfo)
- allocate(real2d % ioinfo)
- allocate(real3d % ioinfo)
- allocate(char0d % ioinfo)
- allocate(char1d % ioinfo)
-
- call mpas_dmpar_sum_int(domain % dminfo, domain % blocklist % mesh % nCellsSolve, nCellsGlobal)
- call mpas_dmpar_sum_int(domain % dminfo, domain % blocklist % mesh % nEdgesSolve, nEdgesGlobal)
- call mpas_dmpar_sum_int(domain % dminfo, domain % blocklist % mesh % nVerticesSolve, nVerticesGlobal)
- nVertLevelsGlobal = domain % blocklist % mesh % nVertLevels
-
allocate(cellsOnCell(domain % blocklist % mesh % maxEdges, domain % blocklist % mesh % nCellsSolve))
allocate(edgesOnCell(domain % blocklist % mesh % maxEdges, domain % blocklist % mesh % nCellsSolve))
allocate(verticesOnCell(domain % blocklist % mesh % maxEdges, domain % blocklist % mesh % nCellsSolve))
@@ -259,58 +181,6 @@
end do
end do
- if (domain % dminfo % my_proc_id == 0) then
- allocate(neededCellList(nCellsGlobal))
- allocate(neededEdgeList(nEdgesGlobal))
- allocate(neededVertexList(nVerticesGlobal))
- allocate(neededVertLevelList(nVertLevelsGlobal))
- do i=1,nCellsGlobal
- neededCellList(i) = i
- end do
- do i=1,nEdgesGlobal
- neededEdgeList(i) = i
- end do
- do i=1,nVerticesGlobal
- neededVertexList(i) = i
- end do
- do i=1,nVertLevelsGlobal
- neededVertLevelList(i) = i
- end do
- else
- allocate(neededCellList(0))
- allocate(neededEdgeList(0))
- allocate(neededVertexList(0))
- allocate(neededVertLevelList(0))
- end if
-
- if (.not. output_obj % validExchangeLists) then
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- domain % blocklist % mesh % nCellsSolve, size(neededCellList), &
- domain % blocklist % mesh % indexToCellID % array, neededCellList, &
- output_obj % sendCellsList, output_obj % recvCellsList)
-
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- domain % blocklist % mesh % nEdgesSolve, size(neededEdgeList), &
- domain % blocklist % mesh % indexToEdgeID % array, neededEdgeList, &
- output_obj % sendEdgesList, output_obj % recvEdgesList)
-
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- domain % blocklist % mesh % nVerticesSolve, size(neededVertexList), &
- domain % blocklist % mesh % indexToVertexID % array, neededVertexList, &
- output_obj % sendVerticesList, output_obj % recvVerticesList)
-
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(neededVertLevelList), size(neededVertLevelList), &
- neededVertLevelList, neededVertLevelList, &
- output_obj % sendVertLevelsList, output_obj % recvVertLevelsList)
-
- output_obj % validExchangeLists = .true.
- end if
-
- deallocate(neededCellList)
- deallocate(neededEdgeList)
- deallocate(neededVertexList)
-
cellsOnCell_save => domain % blocklist % mesh % cellsOnCell % array
edgesOnCell_save => domain % blocklist % mesh % edgesOnCell % array
verticesOnCell_save => domain % blocklist % mesh % verticesOnCell % array
@@ -329,7 +199,7 @@
domain % blocklist % mesh % cellsOnVertex % array => cellsOnVertex
domain % blocklist % mesh % edgesOnVertex % array => edgesOnVertex
-#include "io_output_fields.inc"
+ call MPAS_writeStream(output_obj % io_stream, output_obj % time, ierr)
domain % blocklist % mesh % cellsOnCell % array => cellsOnCell_save
domain % blocklist % mesh % edgesOnCell % array => edgesOnCell_save
@@ -349,8 +219,6 @@
deallocate(cellsOnVertex)
deallocate(edgesOnVertex)
-#include "nondecomp_outputs_dealloc.inc"
-
end subroutine mpas_output_state_for_domain
@@ -366,504 +234,47 @@
end subroutine mpas_output_state_finalize
- subroutine mpas_io_output_init( output_obj, &
+ subroutine mpas_io_output_init( domain, output_obj, &
dminfo, &
- mesh, &
-#include "dim_dummy_args.inc"
+ mesh &
)
implicit none
- include 'netcdf.inc'
-
+ type (domain_type), intent(in) :: domain
type (io_output_object), intent(inout) :: output_obj
type (dm_info), intent(in) :: dminfo
type (mesh_type), intent(in) :: mesh
-#include "dim_dummy_decls.inc"
- integer :: nferr
+ integer :: nferr, ierr
integer, dimension(10) :: dimlist
- if (dminfo % my_proc_id == 0) then
-#ifdef OFFSET64BIT
- nferr = nf_create(trim(output_obj % filename), ior(NF_CLOBBER,NF_64BIT_OFFSET), output_obj % wr_ncid)
-#else
- nferr = nf_create(trim(output_obj % filename), NF_CLOBBER, output_obj % wr_ncid)
-#endif
-
- nferr = nf_def_dim(output_obj % wr_ncid, 'StrLen', 64, output_obj % wrDimIDStrLen)
-#include "netcdf_def_dims_vars.inc"
+ call MPAS_createStream(output_obj % io_stream, trim(output_obj % filename), MPAS_IO_PNETCDF, MPAS_IO_WRITE, 1, nferr)
+#include "add_output_fields.inc"
+
if (mesh % on_a_sphere) then
- nferr = nf_put_att_text(output_obj % wr_ncid, NF_GLOBAL, 'on_a_sphere', 16, 'YES ')
+ call MPAS_writeStreamAtt(output_obj % io_stream, 'on_a_sphere', 'YES ', nferr)
else
- nferr = nf_put_att_text(output_obj % wr_ncid, NF_GLOBAL, 'on_a_sphere', 16, 'NO ')
+ call MPAS_writeStreamAtt(output_obj % io_stream, 'on_a_sphere', 'NO ', nferr)
end if
- if (RKIND == 8) then
- nferr = nf_put_att_double(output_obj % wr_ncid, NF_GLOBAL, 'sphere_radius', NF_DOUBLE, 1, mesh % sphere_radius)
- else
- nferr = nf_put_att_real(output_obj % wr_ncid, NF_GLOBAL, 'sphere_radius', NF_FLOAT, 1, mesh % sphere_radius)
- end if
+ call MPAS_writeStreamAtt(output_obj % io_stream, 'sphere_radius', mesh % sphere_radius, nferr)
+
+#include "add_output_atts.inc"
- nferr = nf_enddef(output_obj % wr_ncid)
- end if
-
end subroutine mpas_io_output_init
- subroutine mpas_io_output_field0d_real(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field0dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(1) :: start1, count1
-
- start1(1) = 1
- count1(1) = 1
-
-#include "output_field0dreal.inc"
-
-#ifdef SINGLE_PRECISION
- nferr = nf_put_vara_real(output_obj % wr_ncid, varID, start1, count1, field % scalar)
-#else
- nferr = nf_put_vara_double(output_obj % wr_ncid, varID, start1, count1, field % scalar)
-#endif
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine mpas_io_output_field0d_real
-
-
- subroutine mpas_io_output_field1d_real(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field1dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(1) :: start1, count1
-
- start1(1) = field % ioinfo % start(1)
- count1(1) = field % ioinfo % count(1)
-
-#include "output_field1dreal.inc"
-
-#ifdef SINGLE_PRECISION
- nferr = nf_put_vara_real(output_obj % wr_ncid, VarID, start1, count1, field % array)
-#else
- nferr = nf_put_vara_double(output_obj % wr_ncid, VarID, start1, count1, field % array)
-#endif
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine mpas_io_output_field1d_real
-
-
- subroutine mpas_io_output_field2d_real(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field2dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start2, count2
-
- start2(1) = field % ioinfo % start(1)
- start2(2) = field % ioinfo % start(2)
- count2(1) = field % ioinfo % count(1)
- count2(2) = field % ioinfo % count(2)
-
-#include "output_field2dreal.inc"
-
-#ifdef SINGLE_PRECISION
- nferr = nf_put_vara_real(output_obj % wr_ncid, varID, start2, count2, field % array)
-#else
- nferr = nf_put_vara_double(output_obj % wr_ncid, varID, start2, count2, field % array)
-#endif
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine mpas_io_output_field2d_real
-
-
- subroutine mpas_io_output_field3d_real(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field3dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(3) :: start3, count3
-
- start3(1) = field % ioinfo % start(1)
- start3(2) = field % ioinfo % start(2)
- start3(3) = field % ioinfo % start(3)
- count3(1) = field % ioinfo % count(1)
- count3(2) = field % ioinfo % count(2)
- count3(3) = field % ioinfo % count(3)
-
-#include "output_field3dreal.inc"
-
-#ifdef SINGLE_PRECISION
- nferr = nf_put_vara_real(output_obj % wr_ncid, varID, start3, count3, field % array)
-#else
- nferr = nf_put_vara_double(output_obj % wr_ncid, varID, start3, count3, field % array)
-#endif
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine mpas_io_output_field3d_real
-
-
- subroutine mpas_io_output_field0d_real_time(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field0dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(1) :: start1, count1
-
- start1(1) = output_obj % time
- count1(1) = 1
-
-#include "output_field0dreal_time.inc"
-
-#ifdef SINGLE_PRECISION
- nferr = nf_put_vara_real(output_obj % wr_ncid, varID, start1, count1, field % scalar)
-#else
- nferr = nf_put_vara_double(output_obj % wr_ncid, varID, start1, count1, field % scalar)
-#endif
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine mpas_io_output_field0d_real_time
-
-
- subroutine mpas_io_output_field1d_real_time(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field1dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start2, count2
-
- start2(1) = field % ioinfo % start(1)
- start2(2) = output_obj % time
- count2(1) = field % ioinfo % count(1)
- count2(2) = 1
-
-#include "output_field1dreal_time.inc"
-
-#ifdef SINGLE_PRECISION
- nferr = nf_put_vara_real(output_obj % wr_ncid, varID, start2, count2, field % array)
-#else
- nferr = nf_put_vara_double(output_obj % wr_ncid, varID, start2, count2, field % array)
-#endif
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine mpas_io_output_field1d_real_time
-
-
- subroutine mpas_io_output_field2d_real_time(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field2dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(3) :: start3, count3
-
- start3(1) = field % ioinfo % start(1)
- start3(2) = field % ioinfo % start(2)
- start3(3) = output_obj % time
- count3(1) = field % ioinfo % count(1)
- count3(2) = field % ioinfo % count(2)
- count3(3) = 1
-
-#include "output_field2dreal_time.inc"
-
-#ifdef SINGLE_PRECISION
- nferr = nf_put_vara_real(output_obj % wr_ncid, varID, start3, count3, field % array)
-#else
- nferr = nf_put_vara_double(output_obj % wr_ncid, varID, start3, count3, field % array)
-#endif
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine mpas_io_output_field2d_real_time
-
-
- subroutine mpas_io_output_field3d_real_time(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field3dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(4) :: start4, count4
-
- start4(1) = field % ioinfo % start(1)
- start4(2) = field % ioinfo % start(2)
- start4(3) = field % ioinfo % start(3)
- start4(4) = output_obj % time
- count4(1) = field % ioinfo % count(1)
- count4(2) = field % ioinfo % count(2)
- count4(3) = field % ioinfo % count(3)
- count4(4) = 1
-
-#include "output_field3dreal_time.inc"
-
-#ifdef SINGLE_PRECISION
- nferr = nf_put_vara_real(output_obj % wr_ncid, varID, start4, count4, field % array)
-#else
- nferr = nf_put_vara_double(output_obj % wr_ncid, varID, start4, count4, field % array)
-#endif
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine mpas_io_output_field3d_real_time
-
-
- subroutine mpas_io_output_field1d_integer(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field1dInteger), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(1) :: start1, count1
-
- start1(1) = field % ioinfo % start(1)
- count1(1) = field % ioinfo % count(1)
-
-#include "output_field1dinteger.inc"
-
- nferr = nf_put_vara_int(output_obj % wr_ncid, varID, start1, count1, field % array)
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine mpas_io_output_field1d_integer
-
-
- subroutine mpas_io_output_field2d_integer(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field2dInteger), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start2, count2
-
- start2(1) = field % ioinfo % start(1)
- start2(2) = field % ioinfo % start(2)
- count2(1) = field % ioinfo % count(1)
- count2(2) = field % ioinfo % count(2)
-
-#include "output_field2dinteger.inc"
-
- nferr = nf_put_vara_int(output_obj % wr_ncid, varID, start2, count2, field % array)
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine mpas_io_output_field2d_integer
-
-
- subroutine mpas_io_output_field1d_integer_time(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field1dInteger), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start2, count2
-
- start2(1) = field % ioinfo % start(1)
- start2(2) = output_obj % time
- count2(1) = field % ioinfo % count(1)
- count2(2) = 1
-
-#include "output_field1dinteger_time.inc"
-
- nferr = nf_put_vara_int(output_obj % wr_ncid, varID, start2, count2, field % array)
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine mpas_io_output_field1d_integer_time
-
-
- subroutine mpas_io_output_field0d_char_time(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field0dChar), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start1, count1
-
- start1(1) = 1
- count1(1) = 64
- start1(2) = output_obj % time
- count1(2) = 1
-
-#include "output_field0dchar_time.inc"
-
- nferr = nf_put_vara_text(output_obj % wr_ncid, varID, start1, count1, field % scalar)
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine mpas_io_output_field0d_char_time
-
-
- subroutine mpas_io_output_field1d_char_time(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field1dChar), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(3) :: start2, count2
-
- start2(1) = 1
- start2(2) = field % ioinfo % start(1)
- start2(3) = output_obj % time
- count2(1) = 64
- count2(2) = field % ioinfo % count(1)
- count2(3) = 1
-
-#include "output_field1dchar_time.inc"
-
- nferr = nf_put_vara_text(output_obj % wr_ncid, varID, start2, count2, field % array)
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine mpas_io_output_field1d_char_time
-
-
- subroutine mpas_io_output_field0d_char(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field0dChar), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start1, count1
-
- start1(1) = 1
- count1(1) = 64
- start1(2) = 1
- count1(2) = 1
-
-#include "output_field0dchar.inc"
-
- nferr = nf_put_vara_text(output_obj % wr_ncid, varID, start1, count1, field % scalar)
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine mpas_io_output_field0d_char
-
-
- subroutine mpas_io_output_field1d_char(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field1dChar), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start1, count1
-
- start1(1) = 1
- count1(1) = 64
- start1(2) = field % ioinfo % start(1)
- count1(2) = field % ioinfo % count(1)
-
-#include "output_field1dchar.inc"
-
- nferr = nf_put_vara_text(output_obj % wr_ncid, VarID, start1, count1, field % array)
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine mpas_io_output_field1d_char
-
-
subroutine mpas_io_output_finalize(output_obj, dminfo)
implicit none
- include 'netcdf.inc'
-
type (io_output_object), intent(inout) :: output_obj
type (dm_info), intent(in) :: dminfo
integer :: nferr
- if (dminfo % my_proc_id == 0) then
- nferr = nf_close(output_obj % wr_ncid)
- end if
+ call MPAS_closeStream(output_obj % io_stream, nferr)
end subroutine mpas_io_output_finalize
Copied: branches/atmos_physics/src/framework/mpas_io_streams.F (from rev 1863, trunk/mpas/src/framework/mpas_io_streams.F)
===================================================================
--- branches/atmos_physics/src/framework/mpas_io_streams.F         (rev 0)
+++ branches/atmos_physics/src/framework/mpas_io_streams.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -0,0 +1,2761 @@
+module mpas_io_streams
+
+ use mpas_attlist
+ use mpas_grid_types
+ use mpas_timekeeping
+ use mpas_io
+
+ type field_list_type
+ integer :: field_type
+ logical :: isDecomposed
+ integer :: totalDimSize ! Total size of outer dimension across all blocks for decomposed fields
+ logical, dimension(:), pointer :: isAvailable => null() ! Used for reading super-arrays where one or more
+ ! constitutent arrays may not be present in the input file
+ type (field0dInteger), pointer :: int0dField => null()
+ type (field1dInteger), pointer :: int1dField => null()
+ type (field2dInteger), pointer :: int2dField => null()
+ type (field3dInteger), pointer :: int3dField => null()
+ type (field0dReal), pointer :: real0dField => null()
+ type (field1dReal), pointer :: real1dField => null()
+ type (field2dReal), pointer :: real2dField => null()
+ type (field3dReal), pointer :: real3dField => null()
+ type (field0dChar), pointer :: char0dField => null()
+ type (field1dChar), pointer :: char1dField => null()
+ type (field_list_type), pointer :: next => null()
+ end type field_list_type
+
+ type MPAS_Stream_type
+ logical :: isInitialized = .false.
+ integer :: ioFormat
+ integer :: ioDirection
+ integer :: framesPerFile
+ type (MPAS_IO_Handle_type) :: fileHandle
+ type (att_list_type), pointer :: attList => null()
+ type (field_list_type), pointer :: fieldList => null()
+ end type MPAS_Stream_type
+
+
+ interface MPAS_streamAddField
+ module procedure MPAS_streamAddField_0dInteger
+ module procedure MPAS_streamAddField_1dInteger
+ module procedure MPAS_streamAddField_2dInteger
+ module procedure MPAS_streamAddField_3dInteger
+ module procedure MPAS_streamAddField_0dReal
+ module procedure MPAS_streamAddField_1dReal
+ module procedure MPAS_streamAddField_2dReal
+ module procedure MPAS_streamAddField_3dReal
+ module procedure MPAS_streamAddField_0dChar
+ end interface MPAS_streamAddField
+
+ interface MPAS_readStreamAtt
+ module procedure MPAS_readStreamAtt_0dInteger
+ module procedure MPAS_readStreamAtt_1dInteger
+ module procedure MPAS_readStreamAtt_0dReal
+ module procedure MPAS_readStreamAtt_1dReal
+ module procedure MPAS_readStreamAtt_text
+ end interface MPAS_readStreamAtt
+
+ interface MPAS_writeStreamAtt
+ module procedure MPAS_writeStreamAtt_0dInteger
+ module procedure MPAS_writeStreamAtt_1dInteger
+ module procedure MPAS_writeStreamAtt_0dReal
+ module procedure MPAS_writeStreamAtt_1dReal
+ module procedure MPAS_writeStreamAtt_text
+ end interface MPAS_writeStreamAtt
+
+ integer, parameter :: MPAS_STREAM_EXACT_TIME = 100, &
+ MPAS_STREAM_NEAREST = 101, &
+ MPAS_STREAM_LATEST_BEFORE = 102, &
+ MPAS_STREAM_EARLIEST_AFTER = 103
+
+
+ ! Error codes
+ integer, parameter :: MPAS_STREAM_NOERR = 0, &
+ MPAS_STREAM_NOT_INITIALIZED = -1, &
+ MPAS_IO_ERR = -2
+
+ integer, parameter :: FIELD_0D_INT = 1, &
+ FIELD_1D_INT = 2, &
+ FIELD_2D_INT = 3, &
+ FIELD_3D_INT = 4, &
+ FIELD_0D_REAL = 5, &
+ FIELD_1D_REAL = 6, &
+ FIELD_2D_REAL = 7, &
+ FIELD_3D_REAL = 8, &
+ FIELD_0D_CHAR = 9, &
+ FIELD_1D_CHAR = 10
+
+ private mergeArrays
+
+
+contains
+
+
+ subroutine MPAS_createStream(stream, fileName, ioFormat, ioDirection, framesPerFile, ierr)
+
+ implicit none
+
+ type (MPAS_Stream_type), intent(out) :: stream
+ character (len=*), intent(in) :: fileName
+ integer, intent(in) :: ioFormat
+ integer, intent(in) :: ioDirection
+ integer, intent(in) :: framesPerFile
+ integer, intent(out), optional :: ierr
+
+ integer :: io_err
+
+ if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+ stream % fileHandle = MPAS_io_open(fileName, ioDirection, ioFormat, io_err)
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ return
+ end if
+
+ stream % ioDirection = ioDirection
+ stream % ioFormat = ioFormat
+ stream % framesPerFile = framesPerFile
+
+ stream % isInitialized = .true.
+
+ end subroutine MPAS_createStream
+
+
+ integer function MPAS_seekStream(stream, seekTime, seekPosition, actualTime, ierr)
+
+ implicit none
+
+ type (MPAS_Stream_type), intent(inout) :: stream
+ character (len=*), intent(in) :: seekTime
+ integer, intent(in) :: seekPosition
+ character (len=*), intent(out) :: actualTime
+ integer, intent(out), optional :: ierr
+
+ integer :: io_err
+ integer :: i
+ integer :: timeDim
+ character (len=StrKIND), dimension(:), pointer :: xtimes
+ character (len=StrKIND) :: strTemp
+ type (MPAS_Time_type) :: sliceTime, startTime
+ type (MPAS_TimeInterval_type) :: timeDiff, minTimeDiff
+
+! write(0,*) 'Called MPAS_seekStream'
+
+ !
+ ! Initialize output arguments
+ !
+ if (present(ierr)) ierr = MPAS_STREAM_NOERR
+ MPAS_seekStream = 0
+
+
+ !
+ ! Sanity checks
+ !
+ if (.not. stream % isInitialized) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ return
+ end if
+
+ call MPAS_io_inq_dim(stream % fileHandle, 'Time', timeDim, io_err)
+ if (timeDim <= 0 .or. io_err /= MPAS_IO_NOERR) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ return
+ end if
+
+!write(0,*) 'Found ', timeDim, ' times in file'
+
+ call MPAS_io_inq_var(stream % fileHandle, 'xtime', ierr=io_err)
+ if (io_err /= MPAS_IO_NOERR) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ return
+ end if
+
+!write(0,*) 'Found xtime variable'
+
+ allocate(xtimes(timeDim))
+
+ do i=1,timeDim
+ call MPAS_io_set_frame(stream % fileHandle, i, io_err)
+ call MPAS_io_get_var(stream % fileHandle, 'xtime', xtimes(i), io_err)
+!write(0,*) '... just read in xtime='//xtimes(i)
+ end do
+
+ if (len(seekTime) > 32) then
+ write(strTemp, '(a)') seekTime(1:32)
+ else
+ write(strTemp, '(a)') trim(seekTime)
+ end if
+ call mpas_set_timeInterval(interval=minTimeDiff, DD=10000)
+ call mpas_set_time(curr_time=startTime, dateTimeString=strTemp)
+
+ do i=1,timeDim
+ write(strTemp, '(a)') trim(xtimes(i)(1:32))
+!write(0,*) '... converted strTemp='//strTemp
+ call mpas_set_time(curr_time=sliceTime, dateTimeString=strTemp)
+ if (seekPosition == MPAS_STREAM_EXACT_TIME) then
+ if (sliceTime == startTime) then
+ minTimeDiff = timeDiff
+ MPAS_seekStream = i
+ end if
+ else if (seekPosition == MPAS_STREAM_NEAREST) then
+ timeDiff = abs(sliceTime - startTime)
+ if (timeDiff < minTimeDiff) then
+ minTimeDiff = timeDiff
+ MPAS_seekStream = i
+ end if
+ else if (seekPosition == MPAS_STREAM_LATEST_BEFORE) then
+ if (sliceTime <= startTime) then
+ timeDiff = abs(sliceTime - startTime)
+ if (timeDiff < minTimeDiff) then
+ minTimeDiff = timeDiff
+ MPAS_seekStream = i
+ end if
+ end if
+ else if (seekPosition == MPAS_STREAM_EARLIEST_AFTER) then
+ if (sliceTime >= startTime) then
+ timeDiff = abs(sliceTime - startTime)
+ if (timeDiff < minTimeDiff) then
+ minTimeDiff = timeDiff
+ MPAS_seekStream = i
+ end if
+ end if
+ else
+ write(0,*) 'Error in MPAS_seekStream: unrecognized seekPosition'
+ deallocate(xtimes)
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ return
+ end if
+ end do
+
+ if (MPAS_seekStream == 0) then
+ deallocate(xtimes)
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ return
+ end if
+
+ write(actualTime, '(a)') trim(xtimes(MPAS_seekStream)(1:32))
+
+ deallocate(xtimes)
+
+ end function MPAS_seekStream
+
+
+ subroutine MPAS_streamAddField_0dInteger(stream, field, ierr)
+
+ implicit none
+
+ type (MPAS_Stream_type), intent(inout) :: stream
+ type (field0DInteger), intent(in), target :: field
+ integer, intent(out), optional :: ierr
+
+ integer :: io_err
+ integer :: i
+ integer :: idim
+ integer :: totalDimSize, globalDimSize
+ logical :: isDecomposed
+ integer :: ndims
+ type (field0dInteger), pointer :: field_ptr
+ character (len=StrKIND), dimension(:), pointer :: dimNames
+ integer, dimension(:), pointer :: indices
+ integer, dimension(:), pointer :: dimSizes
+ type (field_list_type), pointer :: field_list_cursor
+ type (field_list_type), pointer :: new_field_list_node
+
+ if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+ !
+ ! Sanity checks
+ !
+ if (.not. stream % isInitialized) then
+ if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED
+ return
+ end if
+
+!write(0,*) '... Adding field '//trim(field % fieldName)//' to stream'
+
+ ndims = 0
+
+!write(0,*) '... field has ', ndims, ' dimensions'
+
+ !
+ ! Determine whether the field is decomposed, the indices that are owned by this task's blocks,
+ ! and the total number of outer-indices owned by this task
+ !
+ idim = ndims
+ allocate(indices(0))
+ allocate(dimSizes(0))
+ allocate(dimNames(0))
+ isDecomposed = .false.
+ globalDimSize = 0
+ totalDimSize = 0
+
+ call MPAS_streamAddField_generic(stream, trim(field % fieldName), MPAS_IO_INT, dimNames, dimSizes, &
+ field % hasTimeDimension, isDecomposed, totalDimSize, globalDimSize, indices, io_err)
+
+ deallocate(indices)
+ deallocate(dimSizes)
+ deallocate(dimNames)
+ if (io_err /= MPAS_STREAM_NOERR) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ return
+ end if
+
+ call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attList)
+
+
+ !
+ ! Set field pointer and type in fieldList
+ !
+ new_field_list_node => stream % fieldList
+ do while (associated(new_field_list_node % next))
+ new_field_list_node => new_field_list_node % next
+ end do
+ new_field_list_node % field_type = FIELD_0D_INT
+ new_field_list_node % int0dField => field
+
+!write(0,*) '... done adding field'
+
+ end subroutine MPAS_streamAddField_0dInteger
+
+
+ subroutine MPAS_streamAddField_1dInteger(stream, field, ierr)
+
+ implicit none
+
+ type (MPAS_Stream_type), intent(inout) :: stream
+ type (field1DInteger), intent(in), target :: field
+ integer, intent(out), optional :: ierr
+
+ integer :: io_err
+ integer :: i
+ integer :: idim
+ integer :: totalDimSize, globalDimSize
+ logical :: isDecomposed
+ integer :: ndims
+ type (field1dInteger), pointer :: field_ptr
+ character (len=StrKIND), dimension(5) :: dimNames
+ character (len=StrKIND), dimension(:), pointer :: dimNamesInq
+ character (len=StrKIND), dimension(0) :: dimNames0
+ integer, dimension(0) :: dimSizes0
+ integer, dimension(:), pointer :: indices
+ type (field_list_type), pointer :: field_list_cursor
+ type (field_list_type), pointer :: new_field_list_node
+ logical :: any_success
+ logical, dimension(:), pointer :: isAvailable
+
+ if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+ !
+ ! Sanity checks
+ !
+ if (.not. stream % isInitialized) then
+ if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED
+ return
+ end if
+
+!write(0,*) '... Adding field '//trim(field % fieldName)//' to stream'
+
+ ndims = size(field % dimSizes)
+
+!write(0,*) '... field has ', ndims, ' dimensions'
+
+ !
+ ! Determine whether the field is decomposed, the indices that are owned by this task's blocks,
+ ! and the total number of outer-indices owned by this task
+ !
+#include "add_field_indices.inc"
+
+
+ any_success = .false.
+ if (field % isSuperArray) then
+ allocate(isAvailable(size(field % constituentNames)))
+ isAvailable(:) = .false.
+ do i=1,size(field % constituentNames)
+ call MPAS_streamAddField_generic(stream, trim(field % constituentNames(i)), MPAS_IO_INT, dimNames0, &
+ dimSizes0, field % hasTimeDimension, isDecomposed, totalDimSize, globalDimSize, &
+ indices, io_err)
+ if (io_err == MPAS_STREAM_NOERR) then
+ isAvailable(i) = .true.
+ any_success = .true.
+ end if
+ end do
+ else
+ nullify(isAvailable)
+ call MPAS_streamAddField_generic(stream, trim(field % fieldName), MPAS_IO_INT, field % dimNames, field % dimSizes, &
+ field % hasTimeDimension, isDecomposed, totalDimSize, globalDimSize, indices, io_err)
+ if (io_err == MPAS_STREAM_NOERR) then
+ any_success = .true.
+ end if
+ end if
+
+ deallocate(indices)
+ if (.not. any_success) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ return
+ end if
+
+ if (field % isSuperArray) then
+ do i=1,size(field % constituentNames)
+ call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attList)
+ end do
+ else
+ call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attList)
+ end if
+
+
+ !
+ ! Set field pointer and type in fieldList
+ !
+ new_field_list_node => stream % fieldList
+ do while (associated(new_field_list_node % next))
+ new_field_list_node => new_field_list_node % next
+ end do
+ new_field_list_node % field_type = FIELD_1D_INT
+ new_field_list_node % int1dField => field
+ new_field_list_node % isAvailable => isAvailable
+
+!write(0,*) '... done adding field'
+
+ end subroutine MPAS_streamAddField_1dInteger
+
+
+ subroutine MPAS_streamAddField_2dInteger(stream, field, ierr)
+
+ implicit none
+
+ type (MPAS_Stream_type), intent(inout) :: stream
+ type (field2DInteger), intent(in), target :: field
+ integer, intent(out), optional :: ierr
+
+ integer :: io_err
+ integer :: i
+ integer :: idim
+ integer :: totalDimSize, globalDimSize
+ logical :: isDecomposed
+ integer :: ndims
+ type (field2dInteger), pointer :: field_ptr
+ character (len=StrKIND), dimension(5) :: dimNames
+ character (len=StrKIND), dimension(:), pointer :: dimNamesInq
+ integer, dimension(:), pointer :: indices
+ type (field_list_type), pointer :: field_list_cursor
+ type (field_list_type), pointer :: new_field_list_node
+ logical :: any_success
+ logical, dimension(:), pointer :: isAvailable
+
+ if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+ !
+ ! Sanity checks
+ !
+ if (.not. stream % isInitialized) then
+ if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED
+ return
+ end if
+
+!write(0,*) '... Adding field '//trim(field % fieldName)//' to stream'
+
+ ndims = size(field % dimSizes)
+
+!write(0,*) '... field has ', ndims, ' dimensions'
+
+ !
+ ! Determine whether the field is decomposed, the indices that are owned by this task's blocks,
+ ! and the total number of outer-indices owned by this task
+ !
+#include "add_field_indices.inc"
+
+
+ any_success = .false.
+ if (field % isSuperArray) then
+ allocate(isAvailable(size(field % constituentNames)))
+ isAvailable(:) = .false.
+ do i=1,size(field % constituentNames)
+ call MPAS_streamAddField_generic(stream, trim(field % constituentNames(i)), MPAS_IO_INT, field % dimNames(2:ndims), &
+ field % dimSizes(2:ndims), field % hasTimeDimension, isDecomposed, totalDimSize, globalDimSize, &
+ indices, io_err)
+ if (io_err == MPAS_STREAM_NOERR) then
+ isAvailable(i) = .true.
+ any_success = .true.
+ end if
+ end do
+ else
+ nullify(isAvailable)
+ call MPAS_streamAddField_generic(stream, trim(field % fieldName), MPAS_IO_INT, field % dimNames, field % dimSizes, &
+ field % hasTimeDimension, isDecomposed, totalDimSize, globalDimSize, indices, io_err)
+ if (io_err == MPAS_STREAM_NOERR) then
+ any_success = .true.
+ end if
+ end if
+
+ deallocate(indices)
+ if (.not. any_success) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ return
+ end if
+
+ if (field % isSuperArray) then
+ do i=1,size(field % constituentNames)
+ call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attList)
+ end do
+ else
+ call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attList)
+ end if
+
+
+ !
+ ! Set field pointer and type in fieldList
+ !
+ new_field_list_node => stream % fieldList
+ do while (associated(new_field_list_node % next))
+ new_field_list_node => new_field_list_node % next
+ end do
+ new_field_list_node % field_type = FIELD_2D_INT
+ new_field_list_node % int2dField => field
+ new_field_list_node % isAvailable => isAvailable
+
+!write(0,*) '... done adding field'
+
+ end subroutine MPAS_streamAddField_2dInteger
+
+
+ subroutine MPAS_streamAddField_3dInteger(stream, field, ierr)
+
+ implicit none
+
+ type (MPAS_Stream_type), intent(inout) :: stream
+ type (field3DInteger), intent(in), target :: field
+ integer, intent(out), optional :: ierr
+
+ integer :: io_err
+ integer :: i
+ integer :: idim
+ integer :: totalDimSize, globalDimSize
+ logical :: isDecomposed
+ integer :: ndims
+ type (field3dInteger), pointer :: field_ptr
+ character (len=StrKIND), dimension(5) :: dimNames
+ character (len=StrKIND), dimension(:), pointer :: dimNamesInq
+ integer, dimension(:), pointer :: indices
+ type (field_list_type), pointer :: field_list_cursor
+ type (field_list_type), pointer :: new_field_list_node
+ logical :: any_success
+ logical, dimension(:), pointer :: isAvailable
+
+ if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+ !
+ ! Sanity checks
+ !
+ if (.not. stream % isInitialized) then
+ if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED
+ return
+ end if
+
+!write(0,*) '... Adding field '//trim(field % fieldName)//' to stream'
+
+ ndims = size(field % dimSizes)
+
+!write(0,*) '... field has ', ndims, ' dimensions'
+
+ !
+ ! Determine whether the field is decomposed, the indices that are owned by this task's blocks,
+ ! and the total number of outer-indices owned by this task
+ !
+#include "add_field_indices.inc"
+
+ any_success = .false.
+ if (field % isSuperArray) then
+ allocate(isAvailable(size(field % constituentNames)))
+ isAvailable(:) = .false.
+ do i=1,size(field % constituentNames)
+ call MPAS_streamAddField_generic(stream, trim(field % constituentNames(i)), MPAS_IO_INT, field % dimNames(2:ndims), &
+ field % dimSizes(2:ndims), field % hasTimeDimension, isDecomposed, totalDimSize, globalDimSize, &
+ indices, io_err)
+ if (io_err == MPAS_STREAM_NOERR) then
+ isAvailable(i) = .true.
+ any_success = .true.
+ end if
+ end do
+ else
+ nullify(isAvailable)
+ call MPAS_streamAddField_generic(stream, trim(field % fieldName), MPAS_IO_INT, field % dimNames, field % dimSizes, &
+ field % hasTimeDimension, isDecomposed, totalDimSize, globalDimSize, indices, io_err)
+ if (io_err == MPAS_STREAM_NOERR) then
+ any_success = .true.
+ end if
+ end if
+
+ deallocate(indices)
+ if (.not. any_success) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ return
+ end if
+
+ if (field % isSuperArray) then
+ do i=1,size(field % constituentNames)
+ call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attList)
+ end do
+ else
+ call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attList)
+ end if
+
+
+ !
+ ! Set field pointer and type in fieldList
+ !
+ new_field_list_node => stream % fieldList
+ do while (associated(new_field_list_node % next))
+ new_field_list_node => new_field_list_node % next
+ end do
+ new_field_list_node % field_type = FIELD_3D_INT
+ new_field_list_node % int3dField => field
+ new_field_list_node % isAvailable => isAvailable
+
+!write(0,*) '... done adding field'
+
+ end subroutine MPAS_streamAddField_3dInteger
+
+
+ subroutine MPAS_streamAddField_0dReal(stream, field, ierr)
+
+ implicit none
+
+ type (MPAS_Stream_type), intent(inout) :: stream
+ type (field0DReal), intent(in), target :: field
+ integer, intent(out), optional :: ierr
+
+ integer :: io_err
+ integer :: i
+ integer :: idim
+ integer :: totalDimSize, globalDimSize
+ logical :: isDecomposed
+ integer :: ndims
+ type (field0dReal), pointer :: field_ptr
+ character (len=StrKIND), dimension(:), pointer :: dimNames
+ integer, dimension(:), pointer :: indices
+ integer, dimension(:), pointer :: dimSizes
+ type (field_list_type), pointer :: field_list_cursor
+ type (field_list_type), pointer :: new_field_list_node
+
+ if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+ !
+ ! Sanity checks
+ !
+ if (.not. stream % isInitialized) then
+ if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED
+ return
+ end if
+
+!write(0,*) '... Adding field '//trim(field % fieldName)//' to stream'
+
+ ndims = 0
+
+!write(0,*) '... field has ', ndims, ' dimensions'
+
+ !
+ ! Determine whether the field is decomposed, the indices that are owned by this task's blocks,
+ ! and the total number of outer-indices owned by this task
+ !
+ idim = ndims
+ allocate(indices(0))
+ allocate(dimSizes(0))
+ allocate(dimNames(0))
+ isDecomposed = .false.
+ globalDimSize = 0
+ totalDimSize = 0
+
+ call MPAS_streamAddField_generic(stream, trim(field % fieldName), MPAS_IO_DOUBLE, dimNames, dimSizes, &
+ field % hasTimeDimension, isDecomposed, totalDimSize, globalDimSize, indices, io_err)
+
+ deallocate(indices)
+ deallocate(dimSizes)
+ deallocate(dimNames)
+ if (io_err /= MPAS_STREAM_NOERR) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ return
+ end if
+
+ call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attList)
+
+
+ !
+ ! Set field pointer and type in fieldList
+ !
+ new_field_list_node => stream % fieldList
+ do while (associated(new_field_list_node % next))
+ new_field_list_node => new_field_list_node % next
+ end do
+ new_field_list_node % field_type = FIELD_0D_REAL
+ new_field_list_node % real0dField => field
+
+!write(0,*) '... done adding field'
+
+ end subroutine MPAS_streamAddField_0dReal
+
+
+ subroutine MPAS_streamAddField_1dReal(stream, field, ierr)
+
+ implicit none
+
+ type (MPAS_Stream_type), intent(inout) :: stream
+ type (field1DReal), intent(in), target :: field
+ integer, intent(out), optional :: ierr
+
+ integer :: io_err
+ integer :: i
+ integer :: idim
+ integer :: totalDimSize, globalDimSize
+ logical :: isDecomposed
+ integer :: ndims
+ type (field1dReal), pointer :: field_ptr
+ character (len=StrKIND), dimension(5) :: dimNames
+ character (len=StrKIND), dimension(:), pointer :: dimNamesInq
+ character (len=StrKIND), dimension(0) :: dimNames0
+ integer, dimension(0) :: dimSizes0
+ integer, dimension(:), pointer :: indices
+ type (field_list_type), pointer :: field_list_cursor
+ type (field_list_type), pointer :: new_field_list_node
+ logical :: any_success
+ logical, dimension(:), pointer :: isAvailable
+
+ if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+ !
+ ! Sanity checks
+ !
+ if (.not. stream % isInitialized) then
+ if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED
+ return
+ end if
+
+!write(0,*) '... Adding field '//trim(field % fieldName)//' to stream'
+
+ ndims = size(field % dimSizes)
+
+!write(0,*) '... field has ', ndims, ' dimensions'
+
+ !
+ ! Determine whether the field is decomposed, the indices that are owned by this task's blocks,
+ ! and the total number of outer-indices owned by this task
+ !
+#include "add_field_indices.inc"
+
+
+ any_success = .false.
+ if (field % isSuperArray) then
+ allocate(isAvailable(size(field % constituentNames)))
+ isAvailable(:) = .false.
+ do i=1,size(field % constituentNames)
+ call MPAS_streamAddField_generic(stream, trim(field % constituentNames(i)), MPAS_IO_DOUBLE, dimNames0, &
+ dimSizes0, field % hasTimeDimension, isDecomposed, totalDimSize, globalDimSize, &
+ indices, io_err)
+ if (io_err == MPAS_STREAM_NOERR) then
+ isAvailable(i) = .true.
+ any_success = .true.
+ end if
+ end do
+ else
+ nullify(isAvailable)
+ call MPAS_streamAddField_generic(stream, trim(field % fieldName), MPAS_IO_DOUBLE, field % dimNames, field % dimSizes, &
+ field % hasTimeDimension, isDecomposed, totalDimSize, globalDimSize, indices, io_err)
+ if (io_err == MPAS_STREAM_NOERR) then
+ any_success = .true.
+ end if
+ end if
+
+ deallocate(indices)
+ if (.not. any_success) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ return
+ end if
+
+ if (field % isSuperArray) then
+ do i=1,size(field % constituentNames)
+ call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attList)
+ end do
+ else
+ call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attList)
+ end if
+
+
+ !
+ ! Set field pointer and type in fieldList
+ !
+ new_field_list_node => stream % fieldList
+ do while (associated(new_field_list_node % next))
+ new_field_list_node => new_field_list_node % next
+ end do
+ new_field_list_node % field_type = FIELD_1D_REAL
+ new_field_list_node % real1dField => field
+ new_field_list_node % isAvailable => isAvailable
+
+!write(0,*) '... done adding field'
+
+ end subroutine MPAS_streamAddField_1dReal
+
+
+ subroutine MPAS_streamAddField_2dReal(stream, field, ierr)
+
+ implicit none
+
+ type (MPAS_Stream_type), intent(inout) :: stream
+ type (field2DReal), intent(in), target :: field
+ integer, intent(out), optional :: ierr
+
+ integer :: io_err
+ integer :: i
+ integer :: idim
+ integer :: totalDimSize, globalDimSize
+ logical :: isDecomposed
+ integer :: ndims
+ type (field2dReal), pointer :: field_ptr
+ character (len=StrKIND), dimension(5) :: dimNames
+ character (len=StrKIND), dimension(:), pointer :: dimNamesInq
+ integer, dimension(:), pointer :: indices
+ type (field_list_type), pointer :: field_list_cursor
+ type (field_list_type), pointer :: new_field_list_node
+ logical :: any_success
+ logical, dimension(:), pointer :: isAvailable
+
+ if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+ !
+ ! Sanity checks
+ !
+ if (.not. stream % isInitialized) then
+ if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED
+ return
+ end if
+
+!write(0,*) '... Adding field '//trim(field % fieldName)//' to stream'
+
+ ndims = size(field % dimSizes)
+
+!write(0,*) '... field has ', ndims, ' dimensions'
+
+ !
+ ! Determine whether the field is decomposed, the indices that are owned by this task's blocks,
+ ! and the total number of outer-indices owned by this task
+ !
+#include "add_field_indices.inc"
+
+
+ any_success = .false.
+ if (field % isSuperArray) then
+ allocate(isAvailable(size(field % constituentNames)))
+ isAvailable(:) = .false.
+ do i=1,size(field % constituentNames)
+ call MPAS_streamAddField_generic(stream, trim(field % constituentNames(i)), MPAS_IO_DOUBLE, field % dimNames(2:ndims), &
+ field % dimSizes(2:ndims), field % hasTimeDimension, isDecomposed, totalDimSize, globalDimSize, &
+ indices, io_err)
+ if (io_err == MPAS_STREAM_NOERR) then
+ isAvailable(i) = .true.
+ any_success = .true.
+ end if
+ end do
+ else
+ nullify(isAvailable)
+ call MPAS_streamAddField_generic(stream, trim(field % fieldName), MPAS_IO_DOUBLE, field % dimNames, field % dimSizes, &
+ field % hasTimeDimension, isDecomposed, totalDimSize, globalDimSize, indices, io_err)
+ if (io_err == MPAS_STREAM_NOERR) then
+ any_success = .true.
+ end if
+ end if
+
+ deallocate(indices)
+ if (.not. any_success) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ return
+ end if
+
+ if (field % isSuperArray) then
+ do i=1,size(field % constituentNames)
+ call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attList)
+ end do
+ else
+ call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attList)
+ end if
+
+
+ !
+ ! Set field pointer and type in fieldList
+ !
+ new_field_list_node => stream % fieldList
+ do while (associated(new_field_list_node % next))
+ new_field_list_node => new_field_list_node % next
+ end do
+ new_field_list_node % field_type = FIELD_2D_REAL
+ new_field_list_node % real2dField => field
+ new_field_list_node % isAvailable => isAvailable
+
+!write(0,*) '... done adding field'
+
+ end subroutine MPAS_streamAddField_2dReal
+
+
+ subroutine MPAS_streamAddField_3dReal(stream, field, ierr)
+
+ implicit none
+
+ type (MPAS_Stream_type), intent(inout) :: stream
+ type (field3DReal), intent(in), target :: field
+ integer, intent(out), optional :: ierr
+
+ integer :: io_err
+ integer :: i
+ integer :: idim
+ integer :: totalDimSize, globalDimSize
+ logical :: isDecomposed
+ integer :: ndims
+ type (field3dReal), pointer :: field_ptr
+ character (len=StrKIND), dimension(5) :: dimNames
+ character (len=StrKIND), dimension(:), pointer :: dimNamesInq
+ integer, dimension(:), pointer :: indices
+ type (field_list_type), pointer :: field_list_cursor
+ type (field_list_type), pointer :: new_field_list_node
+ logical :: any_success
+ logical, dimension(:), pointer :: isAvailable
+
+ if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+ !
+ ! Sanity checks
+ !
+ if (.not. stream % isInitialized) then
+ if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED
+ return
+ end if
+
+!write(0,*) '... Adding field '//trim(field % fieldName)//' to stream'
+
+ ndims = size(field % dimSizes)
+
+!write(0,*) '... field has ', ndims, ' dimensions'
+
+ !
+ ! Determine whether the field is decomposed, the indices that are owned by this task's blocks,
+ ! and the total number of outer-indices owned by this task
+ !
+#include "add_field_indices.inc"
+
+
+ any_success = .false.
+ if (field % isSuperArray) then
+!write(0,*) '^^^^^^^^^^^^^^^^^^^^^^^^^^^ we are adding a super-array'
+ allocate(isAvailable(size(field % constituentNames)))
+ isAvailable(:) = .false.
+ do i=1,size(field % constituentNames)
+ call MPAS_streamAddField_generic(stream, trim(field % constituentNames(i)), MPAS_IO_DOUBLE, field % dimNames(2:ndims), &
+ field % dimSizes(2:ndims), field % hasTimeDimension, isDecomposed, totalDimSize, globalDimSize, &
+ indices, io_err)
+ if (io_err == MPAS_STREAM_NOERR) then
+ isAvailable(i) = .true.
+ any_success = .true.
+ end if
+ end do
+ else
+ nullify(isAvailable)
+ call MPAS_streamAddField_generic(stream, trim(field % fieldName), MPAS_IO_DOUBLE, field % dimNames, field % dimSizes, &
+ field % hasTimeDimension, isDecomposed, totalDimSize, globalDimSize, indices, io_err)
+ if (io_err == MPAS_STREAM_NOERR) then
+ any_success = .true.
+ end if
+ end if
+
+ deallocate(indices)
+ if (.not. any_success) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ return
+ end if
+
+ if (field % isSuperArray) then
+ do i=1,size(field % constituentNames)
+ call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attList)
+ end do
+ else
+ call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attList)
+ end if
+
+
+ !
+ ! Set field pointer and type in fieldList
+ !
+ new_field_list_node => stream % fieldList
+ do while (associated(new_field_list_node % next))
+ new_field_list_node => new_field_list_node % next
+ end do
+ new_field_list_node % field_type = FIELD_3D_REAL
+ new_field_list_node % real3dField => field
+ new_field_list_node % isAvailable => isAvailable
+
+!write(0,*) '... done adding field'
+!write(0,*) 'DEBUGGING : Finished adding 3d real field '//trim(field % fieldName)
+
+ end subroutine MPAS_streamAddField_3dReal
+
+
+ ! Registers a 0-d (scalar) character field with an initialized stream.
+ ! The field is presented to the low-level I/O layer as a 1-d CHAR variable
+ ! of fixed length 64 ('StrLen' dimension); it is never decomposed across
+ ! tasks. On success the field is appended to stream % fieldList.
+ !
+ !   stream - stream to add the field to (must already be initialized)
+ !   field  - head of the 0-d character field's block list
+ !   ierr   - optional status: MPAS_STREAM_NOERR, MPAS_STREAM_NOT_INITIALIZED,
+ !            or MPAS_IO_ERR
+ subroutine MPAS_streamAddField_0dChar(stream, field, ierr)
+
+ implicit none
+
+ type (MPAS_Stream_type), intent(inout) :: stream
+ type (field0DChar), intent(in), target :: field
+ integer, intent(out), optional :: ierr
+
+ integer :: io_err
+ integer :: i
+ integer :: idim
+ integer :: totalDimSize, globalDimSize
+ logical :: isDecomposed
+ integer :: ndims
+ ! NOTE(review): field_ptr, dimNamesInq, and field_list_cursor are declared
+ ! but never referenced in this routine.
+ type (field0dChar), pointer :: field_ptr
+ character (len=StrKIND), dimension(5) :: dimNames
+ character (len=StrKIND), dimension(:), pointer :: dimNamesInq
+ integer, dimension(:), pointer :: dimSizes
+ integer, dimension(:), pointer :: indices
+ type (field_list_type), pointer :: field_list_cursor
+ type (field_list_type), pointer :: new_field_list_node
+
+ if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+ !
+ ! Sanity checks
+ !
+ if (.not. stream % isInitialized) then
+ if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED
+ return
+ end if
+
+!write(0,*) '... Adding field '//trim(field % fieldName)//' to stream'
+
+ ndims = 1
+
+!write(0,*) '... field has ', ndims, ' dimensions'
+
+ !
+ ! Determine whether the field is decomposed, the indices that are owned by this task's blocks,
+ ! and the total number of outer-indices owned by this task
+ !
+ ! A scalar character variable has one fake dimension, the string length.
+ ! NOTE(review): the 64-character length is hard-coded here rather than
+ ! derived from StrKIND or the field's declared length -- confirm intended.
+ idim = ndims
+ allocate(indices(0))
+ allocate(dimSizes(1))
+ dimSizes(1) = 64
+ dimNames(1) = 'StrLen'
+ isDecomposed = .false.
+ globalDimSize = 64
+ totalDimSize = 64
+
+
+ ! For a super-array, register each constituent as its own variable;
+ ! otherwise register the field itself.
+ if (field % isSuperArray) then
+ do i=1,size(field % constituentNames)
+ call MPAS_streamAddField_generic(stream, trim(field % constituentNames(i)), MPAS_IO_CHAR, dimNames(1:1), &
+ dimSizes, field % hasTimeDimension, isDecomposed, totalDimSize, globalDimSize, &
+ indices, io_err)
+ end do
+ else
+ call MPAS_streamAddField_generic(stream, trim(field % fieldName), MPAS_IO_CHAR, dimNames(1:1), dimSizes, &
+ field % hasTimeDimension, isDecomposed, totalDimSize, globalDimSize, indices, io_err)
+ end if
+
+ deallocate(indices)
+ deallocate(dimSizes)
+ ! NOTE(review): io_err comes from MPAS_streamAddField_generic, which reports
+ ! MPAS_IO_ERR, yet it is compared against MPAS_STREAM_NOERR here; in the
+ ! super-array case only the last constituent's status is checked -- confirm.
+ if (io_err /= MPAS_STREAM_NOERR) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ return
+ end if
+
+ ! Attach (read or write, per stream % ioDirection) the field's attributes.
+ if (field % isSuperArray) then
+ do i=1,size(field % constituentNames)
+ call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attList)
+ end do
+ else
+ call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attList)
+ end if
+
+
+ !
+ ! Set field pointer and type in fieldList
+ !
+ ! MPAS_streamAddField_generic appended a new node at the tail of the list;
+ ! walk to the tail and fill in the type tag and field pointer.
+ new_field_list_node => stream % fieldList
+ do while (associated(new_field_list_node % next))
+ new_field_list_node => new_field_list_node % next
+ end do
+ new_field_list_node % field_type = FIELD_0D_CHAR
+ new_field_list_node % char0dField => field
+
+!write(0,*) '... done adding field'
+
+ end subroutine MPAS_streamAddField_0dChar
+
+
+ ! Common worker used by all the typed MPAS_streamAddField_* routines.
+ !
+ ! For a write stream: defines the field's dimensions (inner dims, the
+ ! outer-most dim at its global size, and an unlimited 'Time' dim when
+ ! requested) and then defines the variable itself in the low-level I/O
+ ! layer. For a read stream: inquires about the variable in the file and
+ ! verifies that its dimension names and sizes match what MPAS expects.
+ ! In both cases a new (untyped) node is appended to stream % fieldList;
+ ! the caller is responsible for setting the node's field_type and field
+ ! pointer afterwards.
+ !
+ !   fieldName        - variable name as it appears in the file
+ !   fieldType        - one of the MPAS_IO_* type constants
+ !   dimNames/dimSizes- the field's dimensions, outer-most last
+ !   hasTimeDimension - whether to add the unlimited 'Time' dimension
+ !   isDecomposed     - whether the outer dimension is split across tasks
+ !   totalDimSize     - outer-dim count owned by this task (decomposed case)
+ !   globalDimSize    - outer-dim count across all tasks
+ !   indices          - global indices owned by this task, passed to
+ !                      MPAS_io_set_var_indices
+ !   ierr             - optional status: MPAS_STREAM_NOERR or MPAS_IO_ERR
+ subroutine MPAS_streamAddField_generic(stream, fieldName, fieldType, dimNames, dimSizes, hasTimeDimension, isDecomposed, &
+ totalDimSize, globalDimSize, indices, ierr)
+
+ implicit none
+
+ type (MPAS_Stream_type), intent(inout) :: stream
+ character (len=*), intent(in) :: fieldName
+ integer, intent(in) :: fieldType
+ character (len=StrKIND), dimension(:), intent(in) :: dimNames
+ integer, dimension(:), intent(in) :: dimSizes
+ logical, intent(in) :: hasTimeDimension
+ logical, intent(in) :: isDecomposed
+ integer, intent(in) :: totalDimSize
+ integer, intent(in) :: globalDimSize
+ integer, dimension(:), intent(in) :: indices
+ integer, intent(out), optional :: ierr
+
+ integer :: io_err
+ integer :: i
+ integer :: idim
+ integer :: ndims
+ integer :: dimTemp
+ character (len=StrKIND), dimension(5) :: dimNamesLocal
+ character (len=StrKIND), dimension(:), pointer :: dimNamesInq
+ integer, dimension(:), pointer :: dimSizesInq
+ type (field_list_type), pointer :: field_list_cursor
+ type (field_list_type), pointer :: new_field_list_node
+
+ if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+ ndims = size(dimNames)
+
+!write(0,*) '... field has ', ndims, ' dimensions'
+
+ ! Node to be appended to the stream's field list on success; every error
+ ! path below deallocates it before returning.
+ allocate(new_field_list_node)
+ nullify(new_field_list_node % next)
+
+ if (stream % ioDirection == MPAS_IO_WRITE) then
+!write(0,*) '... defining field'
+
+ !
+ ! Define inner dimensions
+ !
+ do idim = 1, ndims-1
+!write(0,*) '... defining dimension ', trim(dimNames(idim)), dimSizes(idim)
+ write(dimNamesLocal(idim),'(a)') dimNames(idim)
+ call MPAS_io_def_dim(stream % fileHandle, trim(dimNames(idim)), dimSizes(idim), io_err)
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ deallocate(new_field_list_node)
+ return
+ end if
+ end do
+
+ !
+ ! Define outer-most dimension
+ !
+ idim = ndims
+ if (idim > 0) write(dimNamesLocal(idim),'(a)') dimNames(idim)
+
+ ! The outer dimension is defined at its *global* size; totalDimSize is
+ ! only recorded in the list node for gathering/scattering data later.
+ if (isDecomposed) then
+ new_field_list_node % totalDimSize = totalDimSize
+ else
+ new_field_list_node % totalDimSize = globalDimSize
+ end if
+
+ new_field_list_node % isDecomposed = isDecomposed
+
+ if (ndims > 0) then
+!write(0,*) '... defining dimension ', trim(dimNames(idim)), globalDimSize
+ call MPAS_io_def_dim(stream % fileHandle, trim(dimNames(idim)), globalDimSize, io_err)
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ deallocate(new_field_list_node)
+ return
+ end if
+ end if
+
+ !
+ ! Define time dimension if necessary
+ !
+ if (hasTimeDimension) then
+!write(0,*) '... defining Time dimension '
+ call MPAS_io_def_dim(stream % fileHandle, 'Time', MPAS_IO_UNLIMITED_DIM, io_err)
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ deallocate(new_field_list_node)
+ return
+ end if
+ ! 'Time' becomes the last (record) dimension of the variable.
+ ndims = ndims + 1
+ write(dimNamesLocal(ndims),'(a)') 'Time'
+ end if
+
+ !
+ ! Define variable to low-level interface
+ !
+!write(0,*) '... defining var to low-level interface with ndims ', ndims
+
+ call MPAS_io_def_var(stream % fileHandle, trim(fieldName), fieldType, dimNamesLocal(1:ndims), io_err)
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ deallocate(new_field_list_node)
+ return
+ end if
+
+ else if (stream % ioDirection == MPAS_IO_READ) then
+!write(0,*) '... inquiring about'
+
+ call MPAS_io_inq_var(stream % fileHandle, trim(fieldName), dimnames=dimNamesInq, dimsizes=dimSizesInq, ierr=io_err)
+ ! If the field does not exist in the input file, we should handle this situation gracefully at higher levels
+ ! without printing disconcerting error messages
+ !call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ deallocate(new_field_list_node)
+ return
+ end if
+
+! Here, we should probably do a check to make sure the file agrees with what MPAS expects for the field
+ ! Verify each dimension: names must match exactly; sizes must match,
+ ! except that the outer (last) dimension of a horizontally-decomposed
+ ! field is compared against globalDimSize rather than the local size.
+ do i=1,ndims
+!write(0,*) 'Comparing '//trim(dimNames(i))//' '//trim(dimNamesInq(i))
+ if (trim(dimNames(i)) /= trim(dimNamesInq(i))) then
+!write(0,*) 'Mismatched dimension name in field'
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ deallocate(new_field_list_node)
+ deallocate(dimNamesInq)
+ deallocate(dimSizesInq)
+ return
+ end if
+ if (i < ndims) then
+ dimTemp = dimSizes(i)
+ else
+ if (trim(dimNamesInq(i)) == 'nCells' .or. &
+ trim(dimNamesInq(i)) == 'nEdges' .or. &
+ trim(dimNamesInq(i)) == 'nVertices' &
+ ) then
+ dimTemp = globalDimSize
+ else
+ dimTemp = dimSizes(i)
+ end if
+ end if
+!write(0,*) 'Comparing ', dimTemp, ' ', dimSizesInq(i)
+ if (dimTemp /= dimSizesInq(i)) then
+!write(0,*) 'Mismatched dimension size in field'
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ deallocate(new_field_list_node)
+ deallocate(dimNamesInq)
+ deallocate(dimSizesInq)
+ return
+ end if
+ end do
+
+ ! Set outer dimension sizes depending on whether the field is decomposed
+ if (isDecomposed) then
+ new_field_list_node % totalDimSize = totalDimSize
+ else
+ new_field_list_node % totalDimSize = globalDimSize
+ end if
+
+ new_field_list_node % isDecomposed = isDecomposed
+
+ ! Inquiry arrays were allocated by MPAS_io_inq_var.
+ deallocate(dimNamesInq)
+ deallocate(dimSizesInq)
+
+ end if
+
+
+ !
+ ! Set variable indices
+ !
+ ! Tell the I/O layer which global outer-dimension indices this task owns.
+ if (ndims > 0) then
+ call MPAS_io_set_var_indices(stream % fileHandle, trim(fieldName), indices, io_err)
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ deallocate(new_field_list_node)
+ return
+ end if
+ end if
+
+
+ !
+ ! Add field pointer to the list
+ !
+ if (.not. associated(stream % fieldList)) then
+!write(0,*) 'Adding field to the head of the list'
+ stream % fieldList => new_field_list_node
+ else
+!write(0,*) 'Adding field to the tail of the list'
+ field_list_cursor => stream % fieldList
+ do while (associated(field_list_cursor % next))
+ field_list_cursor => field_list_cursor % next
+ end do
+ field_list_cursor % next => new_field_list_node
+ end if
+
+ end subroutine MPAS_streamAddField_generic
+
+
+ ! Reads one time frame of every field registered in 'stream'.
+ !
+ ! For each node in stream % fieldList, dispatches on field_type:
+ !  - decomposed fields: the local portion is read into a temporary array
+ !    and scattered across this task's blocks, taking only each block's
+ !    owned (nCellsSolve/nEdgesSolve/nVerticesSolve) entries;
+ !  - non-decomposed fields: the value is read once, broadcast to all
+ !    tasks via mpas_dmpar_bcast_*, and copied into every block;
+ !  - super-arrays: each available constituent is read individually (one
+ !    rank lower than the parent field) into slot j of the parent array.
+ !
+ !   frame - 1-based time frame within the file to read
+ !   ierr  - optional status: MPAS_STREAM_NOERR,
+ !           MPAS_STREAM_NOT_INITIALIZED, or MPAS_IO_ERR
+ subroutine MPAS_readStream(stream, frame, ierr)
+
+ implicit none
+
+ type (MPAS_Stream_type), intent(inout) :: stream
+ integer, intent(in) :: frame
+ integer, intent(out), optional :: ierr
+
+ integer :: io_err
+ integer :: i, j
+ integer :: ncons
+ integer :: ownedSize
+ type (field0dInteger), pointer :: field_0dint_ptr
+ type (field1dInteger), pointer :: field_1dint_ptr
+ type (field2dInteger), pointer :: field_2dint_ptr
+ type (field3dInteger), pointer :: field_3dint_ptr
+ type (field0dReal), pointer :: field_0dreal_ptr
+ type (field1dReal), pointer :: field_1dreal_ptr
+ type (field2dReal), pointer :: field_2dreal_ptr
+ type (field3dReal), pointer :: field_3dreal_ptr
+ type (field0dChar), pointer :: field_0dchar_ptr
+ type (field1dChar), pointer :: field_1dchar_ptr
+ type (field_list_type), pointer :: field_cursor
+ integer :: int0d_temp
+ integer, dimension(:), pointer :: int1d_temp
+ integer, dimension(:,:), pointer :: int2d_temp
+ integer, dimension(:,:,:), pointer :: int3d_temp
+ real (kind=RKIND) :: real0d_temp
+ real (kind=RKIND), dimension(:), pointer :: real1d_temp
+ real (kind=RKIND), dimension(:,:), pointer :: real2d_temp
+ real (kind=RKIND), dimension(:,:,:), pointer :: real3d_temp
+
+ if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+ !
+ ! Sanity checks
+ !
+ if (.not. stream % isInitialized) then
+ if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED
+ return
+ end if
+
+
+ !
+ ! Set time frame to read
+ !
+ call MPAS_io_set_frame(stream % fileHandle, frame, io_err)
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ return
+ end if
+
+
+ !
+ ! Loop over fields in the stream
+ !
+ field_cursor => stream % fieldList
+ do while (associated(field_cursor))
+ if (field_cursor % field_type == FIELD_0D_INT) then
+
+!write(0,*) 'DEBUGGING : *************** '//trim(field_cursor % int0dField % fieldName)
+!write(0,*) 'Reading in field '//trim(field_cursor % int0dField % fieldName)
+!write(0,*) ' > is the field decomposed? ', field_cursor % isDecomposed
+!write(0,*) ' > outer dimension size ', field_cursor % totalDimSize
+
+!write(0,*) 'MGD calling MPAS_io_get_var now...'
+ call MPAS_io_get_var(stream % fileHandle, field_cursor % int0dField % fieldName, int0d_temp, io_err)
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ return
+ end if
+
+!write(0,*) 'Distributing and Copying field to other blocks'
+
+ ! Scalar: broadcast to all tasks, then copy into every block's field.
+ call mpas_dmpar_bcast_int(field_cursor % int0dField % block % domain % dminfo, int0d_temp)
+ field_0dint_ptr => field_cursor % int0dField
+ do while (associated(field_0dint_ptr))
+ field_0dint_ptr % scalar = int0d_temp
+ field_0dint_ptr => field_0dint_ptr % next
+ end do
+
+ else if (field_cursor % field_type == FIELD_1D_INT) then
+!write(0,*) 'DEBUGGING : *************** '//trim(field_cursor % int1dField % fieldName)
+
+ ! 1-d integer: a super-array's constituents are scalars read one at a
+ ! time; otherwise a single buffer of this task's totalDimSize is used.
+ if (field_cursor % int1dField % isSuperArray) then
+ ncons = size(field_cursor % int1dField % constituentNames)
+ else
+ ncons = 1
+ allocate(int1d_temp(field_cursor % totalDimSize))
+ end if
+
+ do j=1,ncons
+ if (field_cursor % int1dField % isSuperArray) then
+ ! Constituents absent from the file are simply skipped.
+ if (.not. field_cursor % isAvailable(j)) cycle
+ call MPAS_io_get_var(stream % fileHandle, field_cursor % int1dField % constituentNames(j), int0d_temp, io_err)
+ else
+ call MPAS_io_get_var(stream % fileHandle, field_cursor % int1dField % fieldName, int1d_temp, io_err)
+ end if
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ if (.not. field_cursor % int1dField % isSuperArray) then
+ deallocate(int1d_temp)
+ end if
+ return
+ end if
+
+ if (field_cursor % isDecomposed) then
+ ! Distribute field to multiple blocks
+ field_1dint_ptr => field_cursor % int1dField
+ i = 1
+ ! ownedSize is taken from the first block's mesh; subsequent blocks
+ ! reuse it as the walk advances through the buffer.
+ if (trim(field_1dint_ptr % dimNames(1)) == 'nCells') then
+ ownedSize = field_1dint_ptr % block % mesh % nCellsSolve
+ else if (trim(field_1dint_ptr % dimNames(1)) == 'nEdges') then
+ ownedSize = field_1dint_ptr % block % mesh % nEdgesSolve
+ else if (trim(field_1dint_ptr % dimNames(1)) == 'nVertices') then
+ ownedSize = field_1dint_ptr % block % mesh % nVerticesSolve
+ else
+ ownedSize = field_1dint_ptr % dimSizes(1)
+ end if
+ do while (associated(field_1dint_ptr))
+ if (field_cursor % int1dField % isSuperArray) then
+ field_1dint_ptr % array(j) = int0d_temp
+ else
+ field_1dint_ptr % array(1:ownedSize) = int1d_temp(i:i+ownedSize-1)
+ end if
+ i = i + ownedSize
+ field_1dint_ptr => field_1dint_ptr % next
+ end do
+
+ else
+
+ ! Not decomposed: broadcast to all tasks, copy to all blocks.
+ if (field_cursor % int1dField % isSuperArray) then
+ call mpas_dmpar_bcast_int(field_cursor % int1dField % block % domain % dminfo, int0d_temp)
+ field_1dint_ptr => field_cursor % int1dField
+ do while (associated(field_1dint_ptr))
+ field_1dint_ptr % array(j) = int0d_temp
+ field_1dint_ptr => field_1dint_ptr % next
+ end do
+ else
+ call mpas_dmpar_bcast_ints(field_cursor % int1dField % block % domain % dminfo, size(int1d_temp), int1d_temp(:))
+ field_1dint_ptr => field_cursor % int1dField
+ do while (associated(field_1dint_ptr))
+ field_1dint_ptr % array(:) = int1d_temp(:)
+ field_1dint_ptr => field_1dint_ptr % next
+ end do
+ end if
+ end if
+ end do
+
+ if (.not. field_cursor % int1dField % isSuperArray) then
+ deallocate(int1d_temp)
+ end if
+
+ else if (field_cursor % field_type == FIELD_2D_INT) then
+
+!write(0,*) 'DEBUGGING : *************** '//trim(field_cursor % int2dField % fieldName)
+ ! 2-d integer: constituents of a super-array are 1-d slices; the outer
+ ! (last) dimension is the decomposed one throughout.
+ if (field_cursor % int2dField % isSuperArray) then
+ ncons = size(field_cursor % int2dField % constituentNames)
+ allocate(int1d_temp(field_cursor % totalDimSize))
+ else
+ ncons = 1
+ allocate(int2d_temp(field_cursor % int2dField % dimSizes(1), &
+ field_cursor % totalDimSize))
+ end if
+
+ do j=1,ncons
+ if (field_cursor % int2dField % isSuperArray) then
+ if (.not. field_cursor % isAvailable(j)) cycle
+ call MPAS_io_get_var(stream % fileHandle, field_cursor % int2dField % constituentNames(j), int1d_temp, io_err)
+ else
+ call MPAS_io_get_var(stream % fileHandle, field_cursor % int2dField % fieldName, int2d_temp, io_err)
+ end if
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ if (field_cursor % int2dField % isSuperArray) then
+ deallocate(int1d_temp)
+ else
+ deallocate(int2d_temp)
+ end if
+ return
+ end if
+
+ if (field_cursor % isDecomposed) then
+ ! Distribute field to multiple blocks
+ field_2dint_ptr => field_cursor % int2dField
+ i = 1
+ if (trim(field_2dint_ptr % dimNames(2)) == 'nCells') then
+ ownedSize = field_2dint_ptr % block % mesh % nCellsSolve
+ else if (trim(field_2dint_ptr % dimNames(2)) == 'nEdges') then
+ ownedSize = field_2dint_ptr % block % mesh % nEdgesSolve
+ else if (trim(field_2dint_ptr % dimNames(2)) == 'nVertices') then
+ ownedSize = field_2dint_ptr % block % mesh % nVerticesSolve
+ else
+ ownedSize = field_2dint_ptr % dimSizes(2)
+ end if
+ do while (associated(field_2dint_ptr))
+ if (field_cursor % int2dField % isSuperArray) then
+ field_2dint_ptr % array(j,1:ownedSize) = int1d_temp(i:i+ownedSize-1)
+ else
+ field_2dint_ptr % array(:,1:ownedSize) = int2d_temp(:,i:i+ownedSize-1)
+ end if
+ i = i + ownedSize
+ field_2dint_ptr => field_2dint_ptr % next
+ end do
+
+ else
+
+ if (field_cursor % int2dField % isSuperArray) then
+ call mpas_dmpar_bcast_ints(field_cursor % int2dField % block % domain % dminfo, size(int1d_temp), int1d_temp(:))
+ field_2dint_ptr => field_cursor % int2dField
+ do while (associated(field_2dint_ptr))
+ field_2dint_ptr % array(j,:) = int1d_temp(:)
+ field_2dint_ptr => field_2dint_ptr % next
+ end do
+ else
+ ! Broadcast the whole buffer; passing the first element of a
+ ! contiguous array with its full size.
+ call mpas_dmpar_bcast_ints(field_cursor % int2dField % block % domain % dminfo, size(int2d_temp), int2d_temp(:,1))
+ field_2dint_ptr => field_cursor % int2dField
+ do while (associated(field_2dint_ptr))
+ field_2dint_ptr % array(:,:) = int2d_temp(:,:)
+ field_2dint_ptr => field_2dint_ptr % next
+ end do
+ end if
+ end if
+ end do
+
+ if (field_cursor % int2dField % isSuperArray) then
+ deallocate(int1d_temp)
+ else
+ deallocate(int2d_temp)
+ end if
+
+ else if (field_cursor % field_type == FIELD_3D_INT) then
+
+!write(0,*) 'DEBUGGING : *************** '//trim(field_cursor % int3dField % fieldName)
+ ! 3-d integer: same pattern one rank higher.
+ if (field_cursor % int3dField % isSuperArray) then
+ ncons = size(field_cursor % int3dField % constituentNames)
+ allocate(int2d_temp(field_cursor % int3dField % dimSizes(2), &
+ field_cursor % totalDimSize))
+ else
+ ncons = 1
+ allocate(int3d_temp(field_cursor % int3dField % dimSizes(1), &
+ field_cursor % int3dField % dimSizes(2), &
+ field_cursor % totalDimSize))
+ end if
+
+ do j=1,ncons
+ if (field_cursor % int3dField % isSuperArray) then
+ if (.not. field_cursor % isAvailable(j)) cycle
+ call MPAS_io_get_var(stream % fileHandle, field_cursor % int3dField % constituentNames(j), int2d_temp, io_err)
+ else
+ call MPAS_io_get_var(stream % fileHandle, field_cursor % int3dField % fieldName, int3d_temp, io_err)
+ end if
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ if (field_cursor % int3dField % isSuperArray) then
+ deallocate(int2d_temp)
+ else
+ deallocate(int3d_temp)
+ end if
+ return
+ end if
+
+ if (field_cursor % isDecomposed) then
+ ! Distribute field to multiple blocks
+ field_3dint_ptr => field_cursor % int3dField
+ i = 1
+ if (trim(field_3dint_ptr % dimNames(3)) == 'nCells') then
+ ownedSize = field_3dint_ptr % block % mesh % nCellsSolve
+ else if (trim(field_3dint_ptr % dimNames(3)) == 'nEdges') then
+ ownedSize = field_3dint_ptr % block % mesh % nEdgesSolve
+ else if (trim(field_3dint_ptr % dimNames(3)) == 'nVertices') then
+ ownedSize = field_3dint_ptr % block % mesh % nVerticesSolve
+ else
+ ownedSize = field_3dint_ptr % dimSizes(3)
+ end if
+ do while (associated(field_3dint_ptr))
+ if (field_cursor % int3dField % isSuperArray) then
+ field_3dint_ptr % array(j,:,1:ownedSize) = int2d_temp(:,i:i+ownedSize-1)
+ else
+ field_3dint_ptr % array(:,:,1:ownedSize) = int3d_temp(:,:,i:i+ownedSize-1)
+ end if
+ i = i + ownedSize
+ field_3dint_ptr => field_3dint_ptr % next
+ end do
+
+ else
+
+ if (field_cursor % int3dField % isSuperArray) then
+ call mpas_dmpar_bcast_ints(field_cursor % int3dField % block % domain % dminfo, size(int2d_temp), int2d_temp(:,1))
+ field_3dint_ptr => field_cursor % int3dField
+ do while (associated(field_3dint_ptr))
+ field_3dint_ptr % array(j,:,:) = int2d_temp(:,:)
+ field_3dint_ptr => field_3dint_ptr % next
+ end do
+ else
+ call mpas_dmpar_bcast_ints(field_cursor % int3dField % block % domain % dminfo, size(int3d_temp), int3d_temp(:,1,1))
+ field_3dint_ptr => field_cursor % int3dField
+ do while (associated(field_3dint_ptr))
+ field_3dint_ptr % array(:,:,:) = int3d_temp(:,:,:)
+ field_3dint_ptr => field_3dint_ptr % next
+ end do
+ end if
+ end if
+ end do
+
+ if (field_cursor % int3dField % isSuperArray) then
+ deallocate(int2d_temp)
+ else
+ deallocate(int3d_temp)
+ end if
+
+ else if (field_cursor % field_type == FIELD_0D_REAL) then
+
+!write(0,*) 'DEBUGGING : *************** '//trim(field_cursor % real0dField % fieldName)
+!write(0,*) 'Reading in field '//trim(field_cursor % real0dField % fieldName)
+!write(0,*) ' > is the field decomposed? ', field_cursor % isDecomposed
+!write(0,*) ' > outer dimension size ', field_cursor % totalDimSize
+
+!write(0,*) 'MGD calling MPAS_io_get_var now...'
+ ! 0-d real: read once, broadcast, copy into every block.
+ call MPAS_io_get_var(stream % fileHandle, field_cursor % real0dField % fieldName, real0d_temp, io_err)
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ return
+ end if
+
+!write(0,*) 'Distributing and Copying field to other blocks'
+
+ call mpas_dmpar_bcast_real(field_cursor % real0dField % block % domain % dminfo, real0d_temp)
+ field_0dreal_ptr => field_cursor % real0dField
+ do while (associated(field_0dreal_ptr))
+ field_0dreal_ptr % scalar = real0d_temp
+ field_0dreal_ptr => field_0dreal_ptr % next
+ end do
+
+ else if (field_cursor % field_type == FIELD_1D_REAL) then
+
+!write(0,*) 'DEBUGGING : *************** '//trim(field_cursor % real1dField % fieldName)
+ ! 1-d real: mirrors the FIELD_1D_INT branch.
+ if (field_cursor % real1dField % isSuperArray) then
+ ncons = size(field_cursor % real1dField % constituentNames)
+ else
+ ncons = 1
+ allocate(real1d_temp(field_cursor % totalDimSize))
+ end if
+
+ do j=1,ncons
+ if (field_cursor % real1dField % isSuperArray) then
+ if (.not. field_cursor % isAvailable(j)) cycle
+ call MPAS_io_get_var(stream % fileHandle, field_cursor % real1dField % constituentNames(j), real0d_temp, io_err)
+ else
+ call MPAS_io_get_var(stream % fileHandle, field_cursor % real1dField % fieldName, real1d_temp, io_err)
+ end if
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ if (.not. field_cursor % real1dField % isSuperArray) then
+ deallocate(real1d_temp)
+ end if
+ return
+ end if
+
+ if (field_cursor % isDecomposed) then
+ ! Distribute field to multiple blocks
+ field_1dreal_ptr => field_cursor % real1dField
+ i = 1
+ if (trim(field_1dreal_ptr % dimNames(1)) == 'nCells') then
+ ownedSize = field_1dreal_ptr % block % mesh % nCellsSolve
+ else if (trim(field_1dreal_ptr % dimNames(1)) == 'nEdges') then
+ ownedSize = field_1dreal_ptr % block % mesh % nEdgesSolve
+ else if (trim(field_1dreal_ptr % dimNames(1)) == 'nVertices') then
+ ownedSize = field_1dreal_ptr % block % mesh % nVerticesSolve
+ else
+ ownedSize = field_1dreal_ptr % dimSizes(1)
+ end if
+ do while (associated(field_1dreal_ptr))
+ if (field_cursor % real1dField % isSuperArray) then
+ field_1dreal_ptr % array(j) = real0d_temp
+ else
+ field_1dreal_ptr % array(1:ownedSize) = real1d_temp(i:i+ownedSize-1)
+ end if
+ i = i + ownedSize
+ field_1dreal_ptr => field_1dreal_ptr % next
+ end do
+
+ else
+
+ if (field_cursor % real1dField % isSuperArray) then
+ call mpas_dmpar_bcast_real(field_cursor % real1dField % block % domain % dminfo, real0d_temp)
+ field_1dreal_ptr => field_cursor % real1dField
+ do while (associated(field_1dreal_ptr))
+ field_1dreal_ptr % array(j) = real0d_temp
+ field_1dreal_ptr => field_1dreal_ptr % next
+ end do
+ else
+ call mpas_dmpar_bcast_reals(field_cursor % real1dField % block % domain % dminfo, size(real1d_temp), real1d_temp(:))
+ field_1dreal_ptr => field_cursor % real1dField
+ do while (associated(field_1dreal_ptr))
+ field_1dreal_ptr % array(:) = real1d_temp(:)
+ field_1dreal_ptr => field_1dreal_ptr % next
+ end do
+ end if
+ end if
+ end do
+
+ if (.not. field_cursor % real1dField % isSuperArray) then
+ deallocate(real1d_temp)
+ end if
+
+ else if (field_cursor % field_type == FIELD_2D_REAL) then
+
+!write(0,*) 'DEBUGGING : *************** '//trim(field_cursor % real2dField % fieldName)
+ ! 2-d real: mirrors the FIELD_2D_INT branch.
+ if (field_cursor % real2dField % isSuperArray) then
+ ncons = size(field_cursor % real2dField % constituentNames)
+ allocate(real1d_temp(field_cursor % totalDimSize))
+ else
+ ncons = 1
+ allocate(real2d_temp(field_cursor % real2dField % dimSizes(1), &
+ field_cursor % totalDimSize))
+ end if
+
+ do j=1,ncons
+ if (field_cursor % real2dField % isSuperArray) then
+ if (.not. field_cursor % isAvailable(j)) cycle
+ call MPAS_io_get_var(stream % fileHandle, field_cursor % real2dField % constituentNames(j), real1d_temp, io_err)
+ else
+ call MPAS_io_get_var(stream % fileHandle, field_cursor % real2dField % fieldName, real2d_temp, io_err)
+ end if
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ if (field_cursor % real2dField % isSuperArray) then
+ deallocate(real1d_temp)
+ else
+ deallocate(real2d_temp)
+ end if
+ return
+ end if
+
+ if (field_cursor % isDecomposed) then
+ ! Distribute field to multiple blocks
+ field_2dreal_ptr => field_cursor % real2dField
+ i = 1
+ if (trim(field_2dreal_ptr % dimNames(2)) == 'nCells') then
+ ownedSize = field_2dreal_ptr % block % mesh % nCellsSolve
+ else if (trim(field_2dreal_ptr % dimNames(2)) == 'nEdges') then
+ ownedSize = field_2dreal_ptr % block % mesh % nEdgesSolve
+ else if (trim(field_2dreal_ptr % dimNames(2)) == 'nVertices') then
+ ownedSize = field_2dreal_ptr % block % mesh % nVerticesSolve
+ else
+ ownedSize = field_2dreal_ptr % dimSizes(2)
+ end if
+ do while (associated(field_2dreal_ptr))
+ if (field_cursor % real2dField % isSuperArray) then
+ field_2dreal_ptr % array(j,1:ownedSize) = real1d_temp(i:i+ownedSize-1)
+ else
+ field_2dreal_ptr % array(:,1:ownedSize) = real2d_temp(:,i:i+ownedSize-1)
+ end if
+ i = i + ownedSize
+ field_2dreal_ptr => field_2dreal_ptr % next
+ end do
+
+ else
+
+ if (field_cursor % real2dField % isSuperArray) then
+ call mpas_dmpar_bcast_reals(field_cursor % real2dField % block % domain % dminfo, size(real1d_temp), real1d_temp(:))
+ field_2dreal_ptr => field_cursor % real2dField
+ do while (associated(field_2dreal_ptr))
+ field_2dreal_ptr % array(j,:) = real1d_temp(:)
+ field_2dreal_ptr => field_2dreal_ptr % next
+ end do
+ else
+ call mpas_dmpar_bcast_reals(field_cursor % real2dField % block % domain % dminfo, size(real2d_temp), real2d_temp(:,1))
+ field_2dreal_ptr => field_cursor % real2dField
+ do while (associated(field_2dreal_ptr))
+ field_2dreal_ptr % array(:,:) = real2d_temp(:,:)
+ field_2dreal_ptr => field_2dreal_ptr % next
+ end do
+ end if
+ end if
+ end do
+
+ if (field_cursor % real2dField % isSuperArray) then
+ deallocate(real1d_temp)
+ else
+ deallocate(real2d_temp)
+ end if
+
+ else if (field_cursor % field_type == FIELD_3D_REAL) then
+
+!write(0,*) 'DEBUGGING : *************** '//trim(field_cursor % real3dField % fieldName)
+!write(0,*) 'DEBUGGING : reading a 3d real array'
+ ! 3-d real: mirrors the FIELD_3D_INT branch.
+ if (field_cursor % real3dField % isSuperArray) then
+!write(0,*) 'DEBUGGING : reading a 3d real super-array'
+ ncons = size(field_cursor % real3dField % constituentNames)
+ allocate(real2d_temp(field_cursor % real3dField % dimSizes(2), &
+ field_cursor % totalDimSize))
+ else
+ ncons = 1
+ allocate(real3d_temp(field_cursor % real3dField % dimSizes(1), &
+ field_cursor % real3dField % dimSizes(2), &
+ field_cursor % totalDimSize))
+ end if
+
+ do j=1,ncons
+ if (field_cursor % real3dField % isSuperArray) then
+ if (.not. field_cursor % isAvailable(j)) cycle
+!write(0,*) 'DEBUGGING : calling get_var for a constitutent'
+ call MPAS_io_get_var(stream % fileHandle, field_cursor % real3dField % constituentNames(j), real2d_temp, io_err)
+ else
+ call MPAS_io_get_var(stream % fileHandle, field_cursor % real3dField % fieldName, real3d_temp, io_err)
+ end if
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ if (field_cursor % real3dField % isSuperArray) then
+ deallocate(real2d_temp)
+ else
+ deallocate(real3d_temp)
+ end if
+ return
+ end if
+
+ if (field_cursor % isDecomposed) then
+ ! Distribute field to multiple blocks
+ field_3dreal_ptr => field_cursor % real3dField
+ i = 1
+ if (trim(field_3dreal_ptr % dimNames(3)) == 'nCells') then
+ ownedSize = field_3dreal_ptr % block % mesh % nCellsSolve
+ else if (trim(field_3dreal_ptr % dimNames(3)) == 'nEdges') then
+ ownedSize = field_3dreal_ptr % block % mesh % nEdgesSolve
+ else if (trim(field_3dreal_ptr % dimNames(3)) == 'nVertices') then
+ ownedSize = field_3dreal_ptr % block % mesh % nVerticesSolve
+ else
+ ownedSize = field_3dreal_ptr % dimSizes(3)
+ end if
+ do while (associated(field_3dreal_ptr))
+ if (field_cursor % real3dField % isSuperArray) then
+!write(0,*) 'DEBUGGING : copying the temporary array'
+ field_3dreal_ptr % array(j,:,1:ownedSize) = real2d_temp(:,i:i+ownedSize-1)
+ else
+ field_3dreal_ptr % array(:,:,1:ownedSize) = real3d_temp(:,:,i:i+ownedSize-1)
+ end if
+ i = i + ownedSize
+ field_3dreal_ptr => field_3dreal_ptr % next
+ end do
+
+ else
+
+ if (field_cursor % real3dField % isSuperArray) then
+ call mpas_dmpar_bcast_reals(field_cursor % real3dField % block % domain % dminfo, size(real2d_temp), real2d_temp(:,1))
+ field_3dreal_ptr => field_cursor % real3dField
+ do while (associated(field_3dreal_ptr))
+ field_3dreal_ptr % array(j,:,:) = real2d_temp(:,:)
+ field_3dreal_ptr => field_3dreal_ptr % next
+ end do
+ else
+ call mpas_dmpar_bcast_reals(field_cursor % real3dField % block % domain % dminfo, size(real3d_temp), real3d_temp(:,1,1))
+ field_3dreal_ptr => field_cursor % real3dField
+ do while (associated(field_3dreal_ptr))
+ field_3dreal_ptr % array(:,:,:) = real3d_temp(:,:,:)
+ field_3dreal_ptr => field_3dreal_ptr % next
+ end do
+ end if
+ end if
+ end do
+
+ if (field_cursor % real3dField % isSuperArray) then
+ deallocate(real2d_temp)
+ else
+ deallocate(real3d_temp)
+ end if
+
+ else if (field_cursor % field_type == FIELD_0D_CHAR) then
+
+!write(0,*) 'Reading in field '//trim(field_cursor % char0dField % fieldName)
+!write(0,*) ' > is the field decomposed? ', field_cursor % isDecomposed
+!write(0,*) ' > outer dimension size ', field_cursor % totalDimSize
+
+!write(0,*) 'MGD calling MPAS_io_get_var now...'
+ ! 0-d character: read directly into the head field, broadcast its
+ ! scalar, then copy to the remaining blocks (the first copy is a
+ ! self-assignment).
+ call MPAS_io_get_var(stream % fileHandle, field_cursor % char0dField % fieldName, field_cursor % char0dField % scalar, io_err)
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ return
+ end if
+
+!write(0,*) 'Distributing and Copying field to other blocks'
+
+ call mpas_dmpar_bcast_char(field_cursor % char0dField % block % domain % dminfo, field_cursor % char0dField % scalar)
+ field_0dchar_ptr => field_cursor % char0dField
+ do while (associated(field_0dchar_ptr))
+ field_0dchar_ptr % scalar = field_cursor % char0dField % scalar
+ field_0dchar_ptr => field_0dchar_ptr % next
+ end do
+
+ ! NOTE(review): FIELD_1D_CHAR is recognized but not implemented -- reads
+ ! of 1-d character fields are silently skipped.
+ else if (field_cursor % field_type == FIELD_1D_CHAR) then
+ end if
+ field_cursor => field_cursor % next
+ end do
+!write(0,*) 'Finished fieldlist loop...'
+
+ end subroutine MPAS_readStream
+
+
+ subroutine MPAS_writeStream(stream, frame, ierr)
+
+ implicit none
+
+ type (MPAS_Stream_type), intent(inout) :: stream
+ integer, intent(in) :: frame
+ integer, intent(out), optional :: ierr
+
+ integer :: io_err
+ integer :: i, j
+ integer :: ncons
+ integer :: ownedSize
+ type (field0dInteger), pointer :: field_0dint_ptr
+ type (field1dInteger), pointer :: field_1dint_ptr
+ type (field2dInteger), pointer :: field_2dint_ptr
+ type (field3dInteger), pointer :: field_3dint_ptr
+ type (field0dReal), pointer :: field_0dreal_ptr
+ type (field1dReal), pointer :: field_1dreal_ptr
+ type (field2dReal), pointer :: field_2dreal_ptr
+ type (field3dReal), pointer :: field_3dreal_ptr
+ type (field0dChar), pointer :: field_0dchar_ptr
+ type (field1dChar), pointer :: field_1dchar_ptr
+ type (field_list_type), pointer :: field_cursor
+ integer :: int0d_temp
+ integer, dimension(:), pointer :: int1d_temp
+ integer, dimension(:,:), pointer :: int2d_temp
+ integer, dimension(:,:,:), pointer :: int3d_temp
+ real (kind=RKIND) :: real0d_temp
+ real (kind=RKIND), dimension(:), pointer :: real1d_temp
+ real (kind=RKIND), dimension(:,:), pointer :: real2d_temp
+ real (kind=RKIND), dimension(:,:,:), pointer :: real3d_temp
+
+ if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+ !
+ ! Sanity checks
+ !
+ if (.not. stream % isInitialized) then
+ if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED
+ return
+ end if
+
+ !
+ ! Set time frame to write
+ !
+ call MPAS_io_set_frame(stream % fileHandle, frame, io_err)
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ return
+ end if
+
+ !
+ ! Loop over fields in the stream
+ !
+ field_cursor => stream % fieldList
+ do while (associated(field_cursor))
+
+ if (field_cursor % field_type == FIELD_0D_INT) then
+
+!write(0,*) 'Writing out field '//trim(field_cursor % int0dField % fieldName)
+!write(0,*) ' > is the field decomposed? ', field_cursor % isDecomposed
+!write(0,*) ' > outer dimension size ', field_cursor % totalDimSize
+
+!write(0,*) 'Copying field from first block'
+ int0d_temp = field_cursor % int0dField % scalar
+
+!write(0,*) 'MGD calling MPAS_io_put_var now...'
+ call MPAS_io_put_var(stream % fileHandle, field_cursor % int0dField % fieldName, int0d_temp, io_err)
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR .and. present(ierr)) ierr = MPAS_IO_ERR
+
+ else if (field_cursor % field_type == FIELD_1D_INT) then
+
+ if (field_cursor % int1dField % isSuperArray) then
+ ncons = size(field_cursor % int1dField % constituentNames)
+ else
+ ncons = 1
+ allocate(int1d_temp(field_cursor % totalDimSize))
+ end if
+
+ do j=1,ncons
+ if (field_cursor % isDecomposed) then
+ ! Gather field from across multiple blocks
+ field_1dint_ptr => field_cursor % int1dField
+ i = 1
+ do while (associated(field_1dint_ptr))
+ if (trim(field_1dint_ptr % dimNames(1)) == 'nCells') then
+ ownedSize = field_1dint_ptr % block % mesh % nCellsSolve
+ else if (trim(field_1dint_ptr % dimNames(1)) == 'nEdges') then
+ ownedSize = field_1dint_ptr % block % mesh % nEdgesSolve
+ else if (trim(field_1dint_ptr % dimNames(1)) == 'nVertices') then
+ ownedSize = field_1dint_ptr % block % mesh % nVerticesSolve
+ else
+ ownedSize = field_1dint_ptr % dimSizes(1)
+ end if
+
+ if (field_cursor % int1dField % isSuperArray) then
+! I suspect we will never hit this code, as it doesn't make sense, really
+ int0d_temp = field_1dint_ptr % array(j)
+ else
+ int1d_temp(i:i+ownedSize-1) = field_1dint_ptr % array(1:ownedSize)
+ end if
+ i = i + ownedSize
+ field_1dint_ptr => field_1dint_ptr % next
+ end do
+ else
+ if (field_cursor % int1dField % isSuperArray) then
+ int0d_temp = field_cursor % int1dField % array(j)
+ else
+ int1d_temp(:) = field_cursor % int1dField % array(:)
+ end if
+ end if
+
+ if (field_cursor % int1dField % isSuperArray) then
+ call MPAS_io_put_var(stream % fileHandle, field_cursor % int1dField % constituentNames(j), int0d_temp, io_err)
+ else
+ call MPAS_io_put_var(stream % fileHandle, field_cursor % int1dField % fieldName, int1d_temp, io_err)
+ end if
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR .and. present(ierr)) ierr = MPAS_IO_ERR
+ end do
+
+ if (.not. field_cursor % int1dField % isSuperArray) then
+ deallocate(int1d_temp)
+ end if
+
+ else if (field_cursor % field_type == FIELD_2D_INT) then
+
+ if (field_cursor % int2dField % isSuperArray) then
+ ncons = size(field_cursor % int2dField % constituentNames)
+ allocate(int1d_temp(field_cursor % totalDimSize))
+ else
+ ncons = 1
+ allocate(int2d_temp(field_cursor % int2dField % dimSizes(1), &
+ field_cursor % totalDimSize))
+ end if
+
+ do j=1,ncons
+ if (field_cursor % isDecomposed) then
+ ! Gather field from across multiple blocks
+ field_2dint_ptr => field_cursor % int2dField
+ i = 1
+ do while (associated(field_2dint_ptr))
+ if (trim(field_2dint_ptr % dimNames(2)) == 'nCells') then
+ ownedSize = field_2dint_ptr % block % mesh % nCellsSolve
+ else if (trim(field_2dint_ptr % dimNames(2)) == 'nEdges') then
+ ownedSize = field_2dint_ptr % block % mesh % nEdgesSolve
+ else if (trim(field_2dint_ptr % dimNames(2)) == 'nVertices') then
+ ownedSize = field_2dint_ptr % block % mesh % nVerticesSolve
+ else
+ ownedSize = field_2dint_ptr % dimSizes(2)
+ end if
+
+ if (field_cursor % int2dField % isSuperArray) then
+ int1d_temp(i:i+ownedSize-1) = field_2dint_ptr % array(j,1:ownedSize)
+ else
+ int2d_temp(:,i:i+ownedSize-1) = field_2dint_ptr % array(:,1:ownedSize)
+ end if
+ i = i + ownedSize
+ field_2dint_ptr => field_2dint_ptr % next
+ end do
+ else
+ if (field_cursor % int2dField % isSuperArray) then
+ int1d_temp(:) = field_cursor % int2dField % array(j,:)
+ else
+ int2d_temp(:,:) = field_cursor % int2dField % array(:,:)
+ end if
+ end if
+
+ if (field_cursor % int2dField % isSuperArray) then
+ call MPAS_io_put_var(stream % fileHandle, field_cursor % int2dField % constituentNames(j), int1d_temp, io_err)
+ else
+ call MPAS_io_put_var(stream % fileHandle, field_cursor % int2dField % fieldName, int2d_temp, io_err)
+ end if
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR .and. present(ierr)) ierr = MPAS_IO_ERR
+ end do
+
+ if (field_cursor % int2dField % isSuperArray) then
+ deallocate(int1d_temp)
+ else
+ deallocate(int2d_temp)
+ end if
+
+ else if (field_cursor % field_type == FIELD_3D_INT) then
+
+ if (field_cursor % int3dField % isSuperArray) then
+ ncons = size(field_cursor % int3dField % constituentNames)
+ allocate(int2d_temp(field_cursor % int3dField % dimSizes(2), &
+ field_cursor % totalDimSize))
+ else
+ ncons = 1
+ allocate(int3d_temp(field_cursor % int3dField % dimSizes(1), &
+ field_cursor % int3dField % dimSizes(2), &
+ field_cursor % totalDimSize))
+ end if
+
+ do j=1,ncons
+ if (field_cursor % isDecomposed) then
+ ! Gather field from across multiple blocks
+ field_3dint_ptr => field_cursor % int3dField
+ i = 1
+ do while (associated(field_3dint_ptr))
+ if (trim(field_3dint_ptr % dimNames(3)) == 'nCells') then
+ ownedSize = field_3dint_ptr % block % mesh % nCellsSolve
+ else if (trim(field_3dint_ptr % dimNames(3)) == 'nEdges') then
+ ownedSize = field_3dint_ptr % block % mesh % nEdgesSolve
+ else if (trim(field_3dint_ptr % dimNames(3)) == 'nVertices') then
+ ownedSize = field_3dint_ptr % block % mesh % nVerticesSolve
+ else
+ ownedSize = field_3dint_ptr % dimSizes(3)
+ end if
+
+ if (field_cursor % int3dField % isSuperArray) then
+ int2d_temp(:,i:i+ownedSize-1) = field_3dint_ptr % array(j,:,1:ownedSize)
+ else
+ int3d_temp(:,:,i:i+ownedSize-1) = field_3dint_ptr % array(:,:,1:ownedSize)
+ end if
+ i = i + ownedSize
+ field_3dint_ptr => field_3dint_ptr % next
+ end do
+ else
+ if (field_cursor % int3dField % isSuperArray) then
+ int2d_temp(:,:) = field_cursor % int3dField % array(j,:,:)
+ else
+ int3d_temp(:,:,:) = field_cursor % int3dField % array(:,:,:)
+ end if
+ end if
+
+ if (field_cursor % int3dField % isSuperArray) then
+ call MPAS_io_put_var(stream % fileHandle, field_cursor % int3dField % constituentNames(j), int2d_temp, io_err)
+ else
+ call MPAS_io_put_var(stream % fileHandle, field_cursor % int3dField % fieldName, int3d_temp, io_err)
+ end if
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR .and. present(ierr)) ierr = MPAS_IO_ERR
+ end do
+
+ if (field_cursor % int3dField % isSuperArray) then
+ deallocate(int2d_temp)
+ else
+ deallocate(int3d_temp)
+ end if
+
+ else if (field_cursor % field_type == FIELD_0D_REAL) then
+
+!write(0,*) 'Writing out field '//trim(field_cursor % real0dField % fieldName)
+!write(0,*) ' > is the field decomposed? ', field_cursor % isDecomposed
+!write(0,*) ' > outer dimension size ', field_cursor % totalDimSize
+
+!write(0,*) 'Copying field from first block'
+ real0d_temp = field_cursor % real0dField % scalar
+
+!write(0,*) 'MGD calling MPAS_io_put_var now...'
+ call MPAS_io_put_var(stream % fileHandle, field_cursor % real0dField % fieldName, real0d_temp, io_err)
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR .and. present(ierr)) ierr = MPAS_IO_ERR
+
+ else if (field_cursor % field_type == FIELD_1D_REAL) then
+
+ if (field_cursor % real1dField % isSuperArray) then
+ ncons = size(field_cursor % real1dField % constituentNames)
+ else
+ ncons = 1
+ allocate(real1d_temp(field_cursor % totalDimSize))
+ end if
+
+ do j=1,ncons
+ if (field_cursor % isDecomposed) then
+ ! Gather field from across multiple blocks
+ field_1dreal_ptr => field_cursor % real1dField
+ i = 1
+ do while (associated(field_1dreal_ptr))
+ if (trim(field_1dreal_ptr % dimNames(1)) == 'nCells') then
+ ownedSize = field_1dreal_ptr % block % mesh % nCellsSolve
+ else if (trim(field_1dreal_ptr % dimNames(1)) == 'nEdges') then
+ ownedSize = field_1dreal_ptr % block % mesh % nEdgesSolve
+ else if (trim(field_1dreal_ptr % dimNames(1)) == 'nVertices') then
+ ownedSize = field_1dreal_ptr % block % mesh % nVerticesSolve
+ else
+ ownedSize = field_1dreal_ptr % dimSizes(1)
+ end if
+
+ if (field_cursor % real1dField % isSuperArray) then
+! I suspect we will never hit this code, as it doesn't make sense, really
+ real0d_temp = field_1dreal_ptr % array(j)
+ else
+ real1d_temp(i:i+ownedSize-1) = field_1dreal_ptr % array(1:ownedSize)
+ end if
+ i = i + ownedSize
+ field_1dreal_ptr => field_1dreal_ptr % next
+ end do
+ else
+ if (field_cursor % real1dField % isSuperArray) then
+ real0d_temp = field_cursor % real1dField % array(j)
+ else
+ real1d_temp(:) = field_cursor % real1dField % array(:)
+ end if
+ end if
+
+ if (field_cursor % real1dField % isSuperArray) then
+ call MPAS_io_put_var(stream % fileHandle, field_cursor % real1dField % constituentNames(j), real0d_temp, io_err)
+ else
+ call MPAS_io_put_var(stream % fileHandle, field_cursor % real1dField % fieldName, real1d_temp, io_err)
+ end if
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR .and. present(ierr)) ierr = MPAS_IO_ERR
+ end do
+
+ if (.not. field_cursor % real1dField % isSuperArray) then
+ deallocate(real1d_temp)
+ end if
+
+ else if (field_cursor % field_type == FIELD_2D_REAL) then
+
+ if (field_cursor % real2dField % isSuperArray) then
+ ncons = size(field_cursor % real2dField % constituentNames)
+ allocate(real1d_temp(field_cursor % totalDimSize))
+ else
+ ncons = 1
+ allocate(real2d_temp(field_cursor % real2dField % dimSizes(1), &
+ field_cursor % totalDimSize))
+ end if
+
+ do j=1,ncons
+ if (field_cursor % isDecomposed) then
+ ! Gather field from across multiple blocks
+ field_2dreal_ptr => field_cursor % real2dField
+ i = 1
+ do while (associated(field_2dreal_ptr))
+ if (trim(field_2dreal_ptr % dimNames(2)) == 'nCells') then
+ ownedSize = field_2dreal_ptr % block % mesh % nCellsSolve
+ else if (trim(field_2dreal_ptr % dimNames(2)) == 'nEdges') then
+ ownedSize = field_2dreal_ptr % block % mesh % nEdgesSolve
+ else if (trim(field_2dreal_ptr % dimNames(2)) == 'nVertices') then
+ ownedSize = field_2dreal_ptr % block % mesh % nVerticesSolve
+ else
+ ownedSize = field_2dreal_ptr % dimSizes(2)
+ end if
+
+ if (field_cursor % real2dField % isSuperArray) then
+ real1d_temp(i:i+ownedSize-1) = field_2dreal_ptr % array(j,1:ownedSize)
+ else
+ real2d_temp(:,i:i+ownedSize-1) = field_2dreal_ptr % array(:,1:ownedSize)
+ end if
+ i = i + ownedSize
+ field_2dreal_ptr => field_2dreal_ptr % next
+ end do
+ else
+ if (field_cursor % real2dField % isSuperArray) then
+ real1d_temp(:) = field_cursor % real2dField % array(j,:)
+ else
+ real2d_temp(:,:) = field_cursor % real2dField % array(:,:)
+ end if
+ end if
+
+ if (field_cursor % real2dField % isSuperArray) then
+ call MPAS_io_put_var(stream % fileHandle, field_cursor % real2dField % constituentNames(j), real1d_temp, io_err)
+ else
+ call MPAS_io_put_var(stream % fileHandle, field_cursor % real2dField % fieldName, real2d_temp, io_err)
+ end if
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR .and. present(ierr)) ierr = MPAS_IO_ERR
+ end do
+
+ if (field_cursor % real2dField % isSuperArray) then
+ deallocate(real1d_temp)
+ else
+ deallocate(real2d_temp)
+ end if
+
+ else if (field_cursor % field_type == FIELD_3D_REAL) then
+
+ if (field_cursor % real3dField % isSuperArray) then
+ ncons = size(field_cursor % real3dField % constituentNames)
+ allocate(real2d_temp(field_cursor % real3dField % dimSizes(2), &
+ field_cursor % totalDimSize))
+ else
+ ncons = 1
+ allocate(real3d_temp(field_cursor % real3dField % dimSizes(1), &
+ field_cursor % real3dField % dimSizes(2), &
+ field_cursor % totalDimSize))
+ end if
+
+ do j=1,ncons
+ if (field_cursor % isDecomposed) then
+ ! Gather field from across multiple blocks
+ field_3dreal_ptr => field_cursor % real3dField
+ i = 1
+ do while (associated(field_3dreal_ptr))
+ if (trim(field_3dreal_ptr % dimNames(3)) == 'nCells') then
+ ownedSize = field_3dreal_ptr % block % mesh % nCellsSolve
+ else if (trim(field_3dreal_ptr % dimNames(3)) == 'nEdges') then
+ ownedSize = field_3dreal_ptr % block % mesh % nEdgesSolve
+ else if (trim(field_3dreal_ptr % dimNames(3)) == 'nVertices') then
+ ownedSize = field_3dreal_ptr % block % mesh % nVerticesSolve
+ else
+ ownedSize = field_3dreal_ptr % dimSizes(3)
+ end if
+
+ if (field_cursor % real3dField % isSuperArray) then
+ real2d_temp(:,i:i+ownedSize-1) = field_3dreal_ptr % array(j,:,1:ownedSize)
+ else
+ real3d_temp(:,:,i:i+ownedSize-1) = field_3dreal_ptr % array(:,:,1:ownedSize)
+ end if
+ i = i + ownedSize
+ field_3dreal_ptr => field_3dreal_ptr % next
+ end do
+ else
+ if (field_cursor % real3dField % isSuperArray) then
+ real2d_temp(:,:) = field_cursor % real3dField % array(j,:,:)
+ else
+ real3d_temp(:,:,:) = field_cursor % real3dField % array(:,:,:)
+ end if
+ end if
+
+ if (field_cursor % real3dField % isSuperArray) then
+ call MPAS_io_put_var(stream % fileHandle, field_cursor % real3dField % constituentNames(j), real2d_temp, io_err)
+ else
+ call MPAS_io_put_var(stream % fileHandle, field_cursor % real3dField % fieldName, real3d_temp, io_err)
+ end if
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR .and. present(ierr)) ierr = MPAS_IO_ERR
+ end do
+
+ if (field_cursor % real3dField % isSuperArray) then
+ deallocate(real2d_temp)
+ else
+ deallocate(real3d_temp)
+ end if
+
+ else if (field_cursor % field_type == FIELD_0D_CHAR) then
+
+!write(0,*) 'Writing out field '//trim(field_cursor % char0dField % fieldName)
+!write(0,*) ' > is the field decomposed? ', field_cursor % isDecomposed
+!write(0,*) ' > outer dimension size ', field_cursor % totalDimSize
+
+!write(0,*) 'Copying field from first block'
+!write(0,*) 'MGD calling MPAS_io_put_var now...'
+ call MPAS_io_put_var(stream % fileHandle, field_cursor % char0dField % fieldName, field_cursor % char0dField % scalar, io_err)
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR .and. present(ierr)) ierr = MPAS_IO_ERR
+
+ else if (field_cursor % field_type == FIELD_1D_CHAR) then
+ end if
+ field_cursor => field_cursor % next
+ end do
+
+
+ !
+ ! Sync all fields with disk
+ !
+ call MPAS_io_sync(stream % fileHandle)
+
+ end subroutine MPAS_writeStream
+
+
+ subroutine MPAS_readStreamAtt_0dInteger(stream, attName, attValue, ierr)
+
+ implicit none
+
+ type (MPAS_Stream_type), intent(inout) :: stream
+ character (len=*), intent(in) :: attName
+ integer, intent(out) :: attValue
+ integer, intent(out), optional :: ierr
+
+ integer :: io_err
+
+ if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+ !
+ ! Sanity checks
+ !
+ if (.not. stream % isInitialized) then
+ if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED
+ return
+ end if
+
+ call MPAS_io_get_att(stream % fileHandle, attName, attValue, ierr=io_err)
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR .and. present(ierr)) ierr = MPAS_IO_ERR
+
+ end subroutine MPAS_readStreamAtt_0dInteger
+
+
+ subroutine MPAS_readStreamAtt_1dInteger(stream, attName, attValue, ierr)
+
+ implicit none
+
+ type (MPAS_Stream_type), intent(inout) :: stream
+ character (len=*), intent(in) :: attName
+ integer, dimension(:), pointer :: attValue
+ integer, intent(out), optional :: ierr
+
+ integer :: io_err
+
+ if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+ !
+ ! Sanity checks
+ !
+ if (.not. stream % isInitialized) then
+ if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED
+ return
+ end if
+
+ call MPAS_io_get_att(stream % fileHandle, attName, attValue, ierr=io_err)
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR .and. present(ierr)) ierr = MPAS_IO_ERR
+
+ end subroutine MPAS_readStreamAtt_1dInteger
+
+
+ subroutine MPAS_readStreamAtt_0dReal(stream, attName, attValue, ierr)
+
+ implicit none
+
+ type (MPAS_Stream_type), intent(inout) :: stream
+ character (len=*), intent(in) :: attName
+ real (kind=RKIND), intent(out) :: attValue
+ integer, intent(out), optional :: ierr
+
+ integer :: io_err
+
+ if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+ !
+ ! Sanity checks
+ !
+ if (.not. stream % isInitialized) then
+ if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED
+ return
+ end if
+
+ call MPAS_io_get_att(stream % fileHandle, attName, attValue, ierr=io_err)
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR .and. present(ierr)) ierr = MPAS_IO_ERR
+
+ end subroutine MPAS_readStreamAtt_0dReal
+
+
+ subroutine MPAS_readStreamAtt_1dReal(stream, attName, attValue, ierr)
+
+ implicit none
+
+ type (MPAS_Stream_type), intent(inout) :: stream
+ character (len=*), intent(in) :: attName
+ real (kind=RKIND), dimension(:), pointer :: attValue
+ integer, intent(out), optional :: ierr
+
+ integer :: io_err
+
+ if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+ !
+ ! Sanity checks
+ !
+ if (.not. stream % isInitialized) then
+ if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED
+ return
+ end if
+
+ call MPAS_io_get_att(stream % fileHandle, attName, attValue, ierr=io_err)
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR .and. present(ierr)) ierr = MPAS_IO_ERR
+
+ end subroutine MPAS_readStreamAtt_1dReal
+
+
+ subroutine MPAS_readStreamAtt_text(stream, attName, attValue, ierr)
+
+ implicit none
+
+ type (MPAS_Stream_type), intent(inout) :: stream
+ character (len=*), intent(in) :: attName
+ character (len=*), intent(out) :: attValue
+ integer, intent(out), optional :: ierr
+
+ integer :: io_err
+
+ if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+ !
+ ! Sanity checks
+ !
+ if (.not. stream % isInitialized) then
+ if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED
+ return
+ end if
+
+ call MPAS_io_get_att(stream % fileHandle, attName, attValue, ierr=io_err)
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR .and. present(ierr)) ierr = MPAS_IO_ERR
+
+ end subroutine MPAS_readStreamAtt_text
+
+
+ subroutine MPAS_writeStreamAtt_0dInteger(stream, attName, attValue, ierr)
+
+ implicit none
+
+ type (MPAS_Stream_type), intent(inout) :: stream
+ character (len=*), intent(in) :: attName
+ integer, intent(in) :: attValue
+ integer, intent(out), optional :: ierr
+
+ integer :: io_err
+
+ if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+ !
+ ! Sanity checks
+ !
+ if (.not. stream % isInitialized) then
+ if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED
+ return
+ end if
+
+ call MPAS_io_put_att(stream % fileHandle, attName, attValue, ierr=io_err)
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR .and. present(ierr)) ierr = MPAS_IO_ERR
+
+ end subroutine MPAS_writeStreamAtt_0dInteger
+
+
+ subroutine MPAS_writeStreamAtt_1dInteger(stream, attName, attValue, ierr)
+
+ implicit none
+
+ type (MPAS_Stream_type), intent(inout) :: stream
+ character (len=*), intent(in) :: attName
+ integer, dimension(:), intent(in) :: attValue
+ integer, intent(out), optional :: ierr
+
+ integer :: io_err
+
+ if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+ !
+ ! Sanity checks
+ !
+ if (.not. stream % isInitialized) then
+ if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED
+ return
+ end if
+
+ call MPAS_io_put_att(stream % fileHandle, attName, attValue, ierr=io_err)
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR .and. present(ierr)) ierr = MPAS_IO_ERR
+
+ end subroutine MPAS_writeStreamAtt_1dInteger
+
+
+ subroutine MPAS_writeStreamAtt_0dReal(stream, attName, attValue, ierr)
+
+ implicit none
+
+ type (MPAS_Stream_type), intent(inout) :: stream
+ character (len=*), intent(in) :: attName
+ real (kind=RKIND), intent(in) :: attValue
+ integer, intent(out), optional :: ierr
+
+ integer :: io_err
+
+ if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+ !
+ ! Sanity checks
+ !
+ if (.not. stream % isInitialized) then
+ if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED
+ return
+ end if
+
+ call MPAS_io_put_att(stream % fileHandle, attName, attValue, ierr=io_err)
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR .and. present(ierr)) ierr = MPAS_IO_ERR
+
+ end subroutine MPAS_writeStreamAtt_0dReal
+
+
+ subroutine MPAS_writeStreamAtt_1dReal(stream, attName, attValue, ierr)
+
+ implicit none
+
+ type (MPAS_Stream_type), intent(inout) :: stream
+ character (len=*), intent(in) :: attName
+ real (kind=RKIND), dimension(:), intent(in) :: attValue
+ integer, intent(out), optional :: ierr
+
+ integer :: io_err
+
+ if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+ !
+ ! Sanity checks
+ !
+ if (.not. stream % isInitialized) then
+ if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED
+ return
+ end if
+
+ call MPAS_io_put_att(stream % fileHandle, attName, attValue, ierr=io_err)
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR .and. present(ierr)) ierr = MPAS_IO_ERR
+
+ end subroutine MPAS_writeStreamAtt_1dReal
+
+
+ subroutine MPAS_writeStreamAtt_text(stream, attName, attValue, ierr)
+
+ implicit none
+
+ type (MPAS_Stream_type), intent(inout) :: stream
+ character (len=*), intent(in) :: attName
+ character (len=*), intent(in) :: attValue
+ integer, intent(out), optional :: ierr
+
+ integer :: io_err
+
+ if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+ !
+ ! Sanity checks
+ !
+ if (.not. stream % isInitialized) then
+ if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED
+ return
+ end if
+
+ call MPAS_io_put_att(stream % fileHandle, attName, attValue, ierr=io_err)
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR .and. present(ierr)) ierr = MPAS_IO_ERR
+
+ end subroutine MPAS_writeStreamAtt_text
+
+
+ subroutine MPAS_closeStream(stream, ierr)
+
+ implicit none
+
+ type (MPAS_Stream_type), intent(inout) :: stream
+ integer, intent(out), optional :: ierr
+
+ integer :: io_err
+ type (field_list_type), pointer :: field_cursor
+
+ if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+ !
+ ! Sanity checks
+ !
+ if (.not. stream % isInitialized) then
+ if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED
+ return
+ end if
+
+ call MPAS_io_close(stream % fileHandle, io_err)
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR .and. present(ierr)) ierr = MPAS_IO_ERR
+
+!write(0,*) 'Deallocating global attribute list'
+ call mpas_deallocate_attlist(stream % attList)
+
+!write(0,*) 'Deallocating field list'
+ field_cursor => stream % fieldList
+ do while (associated(field_cursor))
+ if (associated(field_cursor % isAvailable)) then
+ deallocate(field_cursor % isAvailable)
+!write(0,*) 'Deallocating isAvailable array'
+ end if
+ stream % fieldList => stream % fieldList % next
+ deallocate(field_cursor)
+ field_cursor => stream % fieldList
+ end do
+
+ stream % isInitialized = .false.
+
+ end subroutine MPAS_closeStream
+
+
+ subroutine mergeArrays(array1, array2)
+
+ implicit none
+
+ integer, dimension(:), pointer :: array1
+ integer, dimension(:), intent(in) :: array2
+
+ integer :: n1, n2
+ integer, dimension(:), pointer :: newArray
+
+ n1 = size(array1)
+ n2 = size(array2)
+
+ allocate(newArray(n1+n2))
+
+ newArray(1:n1) = array1(:)
+ newArray(n1+1:n1+n2) = array2(:)
+
+ deallocate(array1)
+ array1 => newArray
+
+ end subroutine mergeArrays
+
+
+ subroutine put_get_field_atts(handle, ioDirection, fieldname, attList)
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ integer, intent(in) :: ioDirection
+ character (len=*), intent(in) :: fieldname
+ type (att_list_type), pointer :: attList
+
+ type (att_list_type), pointer :: att_cursor
+
+ if (.not. associated(attList)) return
+
+ att_cursor => attList
+ if (ioDirection == MPAS_IO_WRITE) then
+ do while (associated(att_cursor))
+ select case (att_cursor % attType)
+ case (ATT_INT)
+ call MPAS_io_put_att(handle, trim(att_cursor % attName), att_cursor % attValueInt, fieldname)
+ case (ATT_INTA)
+ call MPAS_io_put_att(handle, trim(att_cursor % attName), att_cursor % attValueIntA, fieldname)
+ case (ATT_REAL)
+ call MPAS_io_put_att(handle, trim(att_cursor % attName), att_cursor % attValueReal, fieldname)
+ case (ATT_REALA)
+ call MPAS_io_put_att(handle, trim(att_cursor % attName), att_cursor % attValueRealA, fieldname)
+ case (ATT_TEXT)
+ call MPAS_io_put_att(handle, trim(att_cursor % attName), att_cursor % attValueText, fieldname)
+ end select
+ att_cursor => att_cursor % next
+ end do
+ else
+ do while (associated(att_cursor))
+ select case (att_cursor % attType)
+ case (ATT_INT)
+ call MPAS_io_get_att(handle, trim(att_cursor % attName), att_cursor % attValueInt, fieldname)
+ case (ATT_INTA)
+ call MPAS_io_get_att(handle, trim(att_cursor % attName), att_cursor % attValueIntA, fieldname)
+ case (ATT_REAL)
+ call MPAS_io_get_att(handle, trim(att_cursor % attName), att_cursor % attValueReal, fieldname)
+ case (ATT_REALA)
+ call MPAS_io_get_att(handle, trim(att_cursor % attName), att_cursor % attValueRealA, fieldname)
+ case (ATT_TEXT)
+ call MPAS_io_get_att(handle, trim(att_cursor % attName), att_cursor % attValueText, fieldname)
+ end select
+ att_cursor => att_cursor % next
+ end do
+ end if
+
+ end subroutine put_get_field_atts
+
+end module mpas_io_streams
Modified: branches/atmos_physics/src/framework/mpas_kind_types.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_kind_types.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/framework/mpas_kind_types.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -6,6 +6,8 @@
integer, parameter :: RKIND = selected_real_kind(12)
#endif
+ integer, parameter :: StrKIND = 512
+
contains
subroutine dummy_kinds()
Modified: branches/atmos_physics/src/framework/mpas_timekeeping.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_timekeeping.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/framework/mpas_timekeeping.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -114,15 +114,16 @@
implicit none
- integer, intent(in) :: calendar
+ character (len=*), intent(in) :: calendar
- TheCalendar = calendar
-
- if (TheCalendar == MPAS_GREGORIAN) then
+ if (trim(calendar) == 'gregorian') then
+ TheCalendar = MPAS_GREGORIAN
call ESMF_Initialize(defaultCalendar=ESMF_CAL_GREGORIAN)
- else if (TheCalendar == MPAS_GREGORIAN_NOLEAP) then
+ else if (trim(calendar) == 'gregorian_noleap') then
+ TheCalendar = MPAS_GREGORIAN_NOLEAP
call ESMF_Initialize(defaultCalendar=ESMF_CAL_NOLEAP)
- else if (TheCalendar == MPAS_360DAY) then
+ else if (trim(calendar) == '360day') then
+ TheCalendar = MPAS_360DAY
call ESMF_Initialize(defaultCalendar=ESMF_CAL_360DAY)
else
write(0,*) 'ERROR: mpas_timekeeping_init: Invalid calendar type'
@@ -518,7 +519,7 @@
type (MPAS_TimeInterval_type) :: alarmTimeInterval
type (MPAS_Time_type) :: alarmTime
- character (len=32) :: printString
+ character (len=StrKIND) :: printString
ierr = 0
@@ -808,11 +809,11 @@
integer :: year, month, day, hour, min, sec
integer :: numerator, denominator, denominatorPower
- character (len=50) :: dateTimeString_
- character (len=50) :: dateSubString
- character (len=50) :: timeSubString
- character (len=50) :: secDecSubString
- character(len=50), pointer, dimension(:) :: subStrings
+ character (len=StrKIND) :: dateTimeString_
+ character (len=StrKIND) :: dateSubString
+ character (len=StrKIND) :: timeSubString
+ character (len=StrKIND) :: secDecSubString
+ character(len=StrKIND), pointer, dimension(:) :: subStrings
if (present(dateTimeString)) then
@@ -957,7 +958,7 @@
integer, intent(out), optional :: S
integer, intent(out), optional :: S_n
integer, intent(out), optional :: S_d
- character (len=32), intent(out), optional :: dateTimeString
+ character (len=StrKIND), intent(out), optional :: dateTimeString
integer, intent(out), optional :: ierr
call ESMF_TimeGet(curr_time % t, YY=YYYY, MM=MM, DD=DD, H=H, M=M, S=S, Sn=S_n, Sd=S_d, rc=ierr)
@@ -991,11 +992,11 @@
type (MPAS_TimeInterval_type) :: zeroInterval
integer :: day, hour, min, sec
- character (len=50) :: timeString_
- character (len=50) :: daySubString
- character (len=50) :: timeSubString
- character (len=50) :: secDecSubString
- character(len=50), pointer, dimension(:) :: subStrings
+ character (len=StrKIND) :: timeString_
+ character (len=StrKIND) :: daySubString
+ character (len=StrKIND) :: timeSubString
+ character (len=StrKIND) :: secDecSubString
+ character(len=StrKIND), pointer, dimension(:) :: subStrings
! if (present(DD)) then
! days = DD
@@ -1164,7 +1165,7 @@
integer, intent(out), optional :: S
integer, intent(out), optional :: S_n
integer, intent(out), optional :: S_d
- character (len=32), intent(out), optional :: timeString
+ character (len=StrKIND), intent(out), optional :: timeString
real (kind=RKIND), intent(out), optional :: dt
integer, intent(out), optional :: ierr
Modified: branches/atmos_physics/src/framework/mpas_timer.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_timer.F        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/framework/mpas_timer.F        2012-05-03 21:04:07 UTC (rev 1864)
@@ -16,7 +16,7 @@
!#endif
type timer_node
- character (len=72) :: timer_name
+ character (len=StrKIND) :: timer_name
logical :: running, printable
integer :: levels, calls
real (kind=RKIND) :: start_time, end_time, total_time
@@ -230,7 +230,7 @@
recursive subroutine mpas_timer_write(timer_ptr, total_ptr)!{{{
type (timer_node), pointer, optional :: timer_ptr
type (timer_node), pointer, optional :: total_ptr
- character (len=10) :: tname
+ character (len=StrKIND) :: tname
logical :: total_found, string_equals
type (timer_node), pointer :: current, total
Modified: branches/atmos_physics/src/framework/streams.c
===================================================================
--- branches/atmos_physics/src/framework/streams.c        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/framework/streams.c        2012-05-03 21:04:07 UTC (rev 1864)
@@ -18,6 +18,37 @@
{
char fname[128];
+#ifndef MPAS_DEBUG
+ if(*id == 0){
+         sprintf(fname, "log.%4.4i.err", *id);
+         fd_err = open(fname,O_CREAT|O_WRONLY|O_TRUNC,0644);
+         if (dup2(fd_err, 2) < 0) {
+                 printf("Error duplicating STDERR\n");
+                 return;
+         }
+
+         sprintf(fname, "log.%4.4i.out", *id);
+         fd_out = open(fname,O_CREAT|O_WRONLY|O_TRUNC,0644);
+         if (dup2(fd_out, 1) < 0) {
+                 printf("Error duplicating STDOUT\n");
+                 return;
+         }
+ } else {
+         sprintf(fname, "/dev/null", *id);
+         fd_err = open(fname,O_CREAT|O_WRONLY|O_TRUNC,0644);
+         if (dup2(fd_err, 2) < 0) {
+                 printf("Error duplicating STDERR\n");
+                 return;
+         }
+
+         sprintf(fname, "/dev/null", *id);
+         fd_out = open(fname,O_CREAT|O_WRONLY|O_TRUNC,0644);
+         if (dup2(fd_out, 1) < 0) {
+                 printf("Error duplicating STDOUT\n");
+                 return;
+         }
+ }
+#else
sprintf(fname, "log.%4.4i.err", *id);
fd_err = open(fname,O_CREAT|O_WRONLY|O_TRUNC,0644);
if (dup2(fd_err, 2) < 0) {
@@ -31,6 +62,7 @@
printf("Error duplicating STDOUT\n");
return;
}
+#endif
}
void close_streams()
Modified: branches/atmos_physics/src/registry/gen_inc.c
===================================================================
--- branches/atmos_physics/src/registry/gen_inc.c        2012-05-03 17:26:15 UTC (rev 1863)
+++ branches/atmos_physics/src/registry/gen_inc.c        2012-05-03 21:04:07 UTC (rev 1864)
@@ -14,6 +14,18 @@
return 0;
}
+
+void get_outer_dim(struct variable * var, char * last_dim)
+{
+ struct dimension_list * dimlist_ptr;
+
+
+ dimlist_ptr = var->dimlist;
+ while (dimlist_ptr->next) dimlist_ptr = dimlist_ptr->next;
+
+ strcpy(last_dim, dimlist_ptr->dim->name_in_file);
+}
+
void split_derived_dim_string(char * dim, char ** p1, char ** p2)
{
char * cp, * cm, * c;
@@ -56,7 +68,7 @@
if (nls_ptr->vtype == INTEGER) fortprintf(fd, " integer :: %s\n",nls_ptr->name);
if (nls_ptr->vtype == REAL) fortprintf(fd, " real (KIND=RKIND) :: %s\n",nls_ptr->name);
if (nls_ptr->vtype == LOGICAL) fortprintf(fd, " logical :: %s\n",nls_ptr->name);
- if (nls_ptr->vtype == CHARACTER) fortprintf(fd, " character (len=32) :: %s\n",nls_ptr->name);
+ if (nls_ptr->vtype == CHARACTER) fortprintf(fd, " character (len=StrKIND) :: %s\n",nls_ptr->name);
nls_ptr = nls_ptr->next;
}
@@ -164,15 +176,18 @@
struct variable * var_ptr2;
struct variable_list * var_list_ptr;
struct variable_list * var_list_ptr2;
+ struct variable_list * var_list_ptr3;
struct dimension * dim_ptr;
struct dimension_list * dimlist_ptr;
struct group_list * group_ptr;
FILE * fd;
char super_array[1024];
char array_class[1024];
+ char outer_dim[1024];
int i;
int class_start, class_end;
int vtype;
+ char type_str[7];
/*
@@ -261,8 +276,8 @@
fd = fopen("read_dims.inc", "w");
dim_ptr = dims;
while (dim_ptr) {
- if (dim_ptr->constant_value < 0 && !dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) fortprintf(fd, " call mpas_io_input_get_dimension(input_obj, \'%s\', %s)</font>
<font color="red">", dim_ptr->name_in_file, dim_ptr->name_in_code);
- else if (dim_ptr->constant_value < 0 && dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) fortprintf(fd, " call mpas_io_input_get_dimension(input_obj, \'%s\', %s)</font>
<font color="blue">", dim_ptr->name_in_file, dim_ptr->name_in_file);
+ if (dim_ptr->constant_value < 0 && !dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) fortprintf(fd, " call MPAS_io_inq_dim(inputHandle, \'%s\', %s, ierr)</font>
<font color="blue">", dim_ptr->name_in_file, dim_ptr->name_in_code);
+ else if (dim_ptr->constant_value < 0 && dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) fortprintf(fd, " %s = %s</font>
<font color="gray">", dim_ptr->name_in_file, dim_ptr->name_in_code);
dim_ptr = dim_ptr->next;
}
@@ -360,6 +375,8 @@
if (strncmp(group_ptr->name, "mesh", 1024)) {
fortprintf(fd, " type %s_type</font>
<font color="blue">", group_ptr->name);
+ fortprintf(fd, " type (block_type), pointer :: block</font>
<font color="gray">");
+
var_list_ptr = group_ptr->vlist;
memcpy(super_array, var_list_ptr->var->super_array, 1024);
i = 1;
@@ -482,13 +499,13 @@
fortprintf(fd, " allocate(b %% %s %% time_levs(%i))</font>
<font color="black">", group_ptr->name, group_ptr->vlist->var->ntime_levs);
fortprintf(fd, " do i=1,b %% %s %% nTimeLevels</font>
<font color="black">", group_ptr->name);
fortprintf(fd, " allocate(b %% %s %% time_levs(i) %% %s)</font>
<font color="red">", group_ptr->name, group_ptr->name);
- fortprintf(fd, " call mpas_allocate_%s(b %% %s %% time_levs(i) %% %s, &</font>
<font color="blue">", group_ptr->name, group_ptr->name, group_ptr->name);
+ fortprintf(fd, " call mpas_allocate_%s(b, b %% %s %% time_levs(i) %% %s, &</font>
<font color="black">", group_ptr->name, group_ptr->name, group_ptr->name);
fortprintf(fd, "#include \"dim_dummy_args.inc\"</font>
<font color="black">");
fortprintf(fd, " )</font>
<font color="black">");
fortprintf(fd, " end do</font>
<font color="black"></font>
<font color="red">");
}
else {
- fortprintf(fd, " call mpas_allocate_%s(b %% %s, &</font>
<font color="blue">", group_ptr->name, group_ptr->name);
+ fortprintf(fd, " call mpas_allocate_%s(b, b %% %s, &</font>
<font color="black">", group_ptr->name, group_ptr->name);
fortprintf(fd, "#include \"dim_dummy_args.inc\"</font>
<font color="black">");
fortprintf(fd, " )</font>
<font color="black"></font>
<font color="gray">");
}
@@ -520,16 +537,19 @@
fd = fopen("group_alloc_routines.inc", "w");
group_ptr = groups;
while (group_ptr) {
- fortprintf(fd, " subroutine mpas_allocate_%s(%s, &</font>
<font color="blue">", group_ptr->name, group_ptr->name);
+ fortprintf(fd, " subroutine mpas_allocate_%s(b, %s, &</font>
<font color="black">", group_ptr->name, group_ptr->name);
fortprintf(fd, "#include \"dim_dummy_args.inc\"</font>
<font color="black">");
fortprintf(fd, " )</font>
<font color="black">");
fortprintf(fd, "</font>
<font color="black">");
fortprintf(fd, " implicit none</font>
<font color="black">");
fortprintf(fd, "</font>
<font color="blue">");
+ fortprintf(fd, " type (block_type), pointer :: b</font>
<font color="black">");
fortprintf(fd, " type (%s_type), intent(inout) :: %s</font>
<font color="black">", group_ptr->name, group_ptr->name);
fortprintf(fd, "#include \"dim_dummy_decls.inc\"</font>
<font color="black">");
fortprintf(fd, "</font>
<font color="blue">");
+ fortprintf(fd, " %s %% block => b</font>
<font color="gray">", group_ptr->name);
+
if (!strncmp(group_ptr->name, "mesh", 1024)) {
dim_ptr = dims;
while (dim_ptr) {
@@ -537,6 +557,7 @@
if (dim_ptr->constant_value < 0 && dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) fortprintf(fd, " %s %% %s = %s</font>
<font color="blue">", group_ptr->name, dim_ptr->name_in_file, dim_ptr->name_in_file);
dim_ptr = dim_ptr->next;
}
+
fortprintf(fd, "</font>
<font color="gray">");
}
@@ -556,6 +577,21 @@
var_ptr2 = var_list_ptr2->var;
fortprintf(fd, " allocate(%s %% %s)</font>
<font color="black">", group_ptr->name, var_ptr2->super_array);
fortprintf(fd, " allocate(%s %% %s %% ioinfo)</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array);
+ fortprintf(fd, " %s %% %s %% fieldName = \'%s\'</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, var_ptr2->super_array);
+ fortprintf(fd, " %s %% %s %% isSuperArray = .true.</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array);
+ fortprintf(fd, " allocate(%s %% %s %% constituentNames(%i))</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, i);
+
+ /* Initialization for constituent names */
+ i = 0;
+ var_list_ptr3 = group_ptr->vlist;
+ while (var_list_ptr3) {
+ if (strncmp(super_array, var_list_ptr3->var->super_array, 1024) == 0) {
+ i++;
+ fortprintf(fd, " %s %% %s %% constituentNames(%i) = \'%s\'</font>
<font color="gray">", group_ptr->name, var_ptr2->super_array, i, var_list_ptr3->var->name_in_file);
+ }
+ var_list_ptr3 = var_list_ptr3->next;
+ }
+
fortprintf(fd, " allocate(%s %% %s %% array(%i, ", group_ptr->name, var_ptr2->super_array, i);
dimlist_ptr = var_ptr2->dimlist;
if (!strncmp(dimlist_ptr->dim->name_in_file, "nCells", 1024) ||
@@ -586,6 +622,42 @@
else if (var_ptr->vtype == CHARACTER)
fortprintf(fd, " %s %% %s %% array = \'\'</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array ); /* initialize field to zero */
+ fortprintf(fd, " %s %% %s %% dimSizes(1) = %i</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, i);
+ fortprintf(fd, " %s %% %s %% dimNames(1) = \'num_%s\'</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, var_ptr2->super_array);
+ dimlist_ptr = var_ptr2->dimlist;
+ i = 2;
+ while (dimlist_ptr) {
+ if (!strncmp(dimlist_ptr->dim->name_in_file, "nCells", 1024) ||
+ !strncmp(dimlist_ptr->dim->name_in_file, "nEdges", 1024) ||
+ !strncmp(dimlist_ptr->dim->name_in_file, "nVertices", 1024))
+ if (!dimlist_ptr->dim->namelist_defined) {
+ fortprintf(fd, " %s %% %s %% dimSizes(%i) = %s</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, i, dimlist_ptr->dim->name_in_code);
+ fortprintf(fd, " %s %% %s %% dimNames(%i) = \'%s\'</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, i, dimlist_ptr->dim->name_in_file);
+ }
+ else {
+ fortprintf(fd, " %s %% %s %% dimSizes(%i) = %s</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, i, dimlist_ptr->dim->name_in_file);
+ fortprintf(fd, " %s %% %s %% dimNames(%i) = \'%s\'</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, i, dimlist_ptr->dim->name_in_file);
+ }
+ else
+ if (dimlist_ptr->dim->namelist_defined) {
+ fortprintf(fd, " %s %% %s %% dimSizes(%i) = %s</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, i, dimlist_ptr->dim->name_in_file);
+ fortprintf(fd, " %s %% %s %% dimNames(%i) = \'%s\'</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, i, dimlist_ptr->dim->name_in_file);
+ }
+ else {
+ fortprintf(fd, " %s %% %s %% dimSizes(%i) = %s</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, i, dimlist_ptr->dim->name_in_code);
+ fortprintf(fd, " %s %% %s %% dimNames(%i) = \'%s\'</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, i, dimlist_ptr->dim->name_in_file);
+ }
+ i++;
+ dimlist_ptr = dimlist_ptr->next;
+ }
+ if (var_ptr2->timedim) fortprintf(fd, " %s %% %s %% hasTimeDimension = .true.</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array);
+ else fortprintf(fd, " %s %% %s %% hasTimeDimension = .false.</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array);
+ fortprintf(fd, " nullify(%s %% %s %% prev)</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array);
+ fortprintf(fd, " nullify(%s %% %s %% next)</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array);
+ fortprintf(fd, " nullify(%s %% %s %% sendList)</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array);
+ fortprintf(fd, " nullify(%s %% %s %% recvList)</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array);
+ fortprintf(fd, " nullify(%s %% %s %% copyList)</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array);
+
if (var_ptr2->iostreams & INPUT0)
fortprintf(fd, " %s %% %s %% ioinfo %% input = .true.</font>
<font color="gray">", group_ptr->name, var_ptr2->super_array);
else
@@ -605,11 +677,15 @@
fortprintf(fd, " %s %% %s %% ioinfo %% output = .true.</font>
<font color="black">", group_ptr->name, var_ptr2->super_array);
else
fortprintf(fd, " %s %% %s %% ioinfo %% output = .false.</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array);
+
+ fortprintf(fd, " %s %% %s %% block => b</font>
<font color="black">", group_ptr->name, var_ptr2->super_array);
fortprintf(fd, "</font>
<font color="black">");
}
else {
fortprintf(fd, " allocate(%s %% %s)</font>
<font color="black">", group_ptr->name, var_ptr->name_in_code);
fortprintf(fd, " allocate(%s %% %s %% ioinfo)</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code);
+ fortprintf(fd, " %s %% %s %% fieldName = \'%s\'</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, var_ptr->name_in_file);
+ fortprintf(fd, " %s %% %s %% isSuperArray = .false.</font>
<font color="gray">", group_ptr->name, var_ptr->name_in_code);
if (var_ptr->ndims > 0) {
fortprintf(fd, " allocate(%s %% %s %% array(", group_ptr->name, var_ptr->name_in_code);
dimlist_ptr = var_ptr->dimlist;
@@ -641,7 +717,42 @@
else if (var_ptr->vtype == CHARACTER)
fortprintf(fd, " %s %% %s %% array = \'\'</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code ); /* initialize field to zero */
+ dimlist_ptr = var_ptr->dimlist;
+ i = 1;
+ while (dimlist_ptr) {
+ if (!strncmp(dimlist_ptr->dim->name_in_file, "nCells", 1024) ||
+ !strncmp(dimlist_ptr->dim->name_in_file, "nEdges", 1024) ||
+ !strncmp(dimlist_ptr->dim->name_in_file, "nVertices", 1024))
+ if (!dimlist_ptr->dim->namelist_defined) {
+ fortprintf(fd, " %s %% %s %% dimSizes(%i) = %s</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, i, dimlist_ptr->dim->name_in_code);
+ fortprintf(fd, " %s %% %s %% dimNames(%i) = \'%s\'</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, i, dimlist_ptr->dim->name_in_file);
+ }
+ else {
+ fortprintf(fd, " %s %% %s %% dimSizes(%i) = %s</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, i, dimlist_ptr->dim->name_in_file);
+ fortprintf(fd, " %s %% %s %% dimNames(%i) = \'%s\'</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, i, dimlist_ptr->dim->name_in_file);
+ }
+ else
+ if (dimlist_ptr->dim->namelist_defined) {
+ fortprintf(fd, " %s %% %s %% dimSizes(%i) = %s</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, i, dimlist_ptr->dim->name_in_file);
+ fortprintf(fd, " %s %% %s %% dimNames(%i) = \'%s\'</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, i, dimlist_ptr->dim->name_in_file);
+ }
+ else {
+ fortprintf(fd, " %s %% %s %% dimSizes(%i) = %s</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, i, dimlist_ptr->dim->name_in_code);
+ fortprintf(fd, " %s %% %s %% dimNames(%i) = \'%s\'</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, i, dimlist_ptr->dim->name_in_file);
+ }
+ i++;
+ dimlist_ptr = dimlist_ptr->next;
+ }
}
+
+ if (var_ptr->timedim) fortprintf(fd, " %s %% %s %% hasTimeDimension = .true.</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code);
+ else fortprintf(fd, " %s %% %s %% hasTimeDimension = .false.</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code);
+ fortprintf(fd, " nullify(%s %% %s %% prev)</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code);
+ fortprintf(fd, " nullify(%s %% %s %% next)</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code);
+ fortprintf(fd, " nullify(%s %% %s %% sendList)</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code);
+ fortprintf(fd, " nullify(%s %% %s %% recvList)</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code);
+ fortprintf(fd, " nullify(%s %% %s %% copyList)</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code);
+
if (var_ptr->iostreams & INPUT0)
fortprintf(fd, " %s %% %s %% ioinfo %% input = .true.</font>
<font color="gray">", group_ptr->name, var_ptr->name_in_code);
else
@@ -661,6 +772,8 @@
fortprintf(fd, " %s %% %s %% ioinfo %% output = .true.</font>
<font color="black">", group_ptr->name, var_ptr->name_in_code);
else
fortprintf(fd, " %s %% %s %% ioinfo %% output = .false.</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code);
+
+ fortprintf(fd, " %s %% %s %% block => b</font>
<font color="black">", group_ptr->name, var_ptr->name_in_code);
fortprintf(fd, "</font>
<font color="gray">");
var_list_ptr = var_list_ptr->next;
@@ -697,16 +810,19 @@
}
fortprintf(fd, " deallocate(%s %% %s %% array)</font>
<font color="black">", group_ptr->name, var_list_ptr2->var->super_array);
fortprintf(fd, " deallocate(%s %% %s %% ioinfo)</font>
<font color="blue">", group_ptr->name, var_list_ptr2->var->super_array);
+ fortprintf(fd, " call mpas_deallocate_attlist(%s %% %s %% attList)</font>
<font color="black">", group_ptr->name, var_list_ptr2->var->super_array);
fortprintf(fd, " deallocate(%s %% %s)</font>
<font color="black"></font>
<font color="black">", group_ptr->name, var_list_ptr2->var->super_array);
}
else {
if (var_ptr->ndims > 0) {
fortprintf(fd, " deallocate(%s %% %s %% array)</font>
<font color="black">", group_ptr->name, var_ptr->name_in_code);
fortprintf(fd, " deallocate(%s %% %s %% ioinfo)</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code);
+ fortprintf(fd, " call mpas_deallocate_attlist(%s %% %s %% attList)</font>
<font color="black">", group_ptr->name, var_ptr->name_in_code);
fortprintf(fd, " deallocate(%s %% %s)</font>
<font color="black"></font>
<font color="black">", group_ptr->name, var_ptr->name_in_code);
}
else {
fortprintf(fd, " deallocate(%s %% %s %% ioinfo)</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code);
+ fortprintf(fd, " call mpas_deallocate_attlist(%s %% %s %% attList)</font>
<font color="black">", group_ptr->name, var_ptr->name_in_code);
fortprintf(fd, " deallocate(%s %% %s)</font>
<font color="black"></font>
<font color="gray">", group_ptr->name, var_ptr->name_in_code);
}
var_list_ptr = var_list_ptr->next;
@@ -773,19 +889,212 @@
fortprintf(fd, " type (%s_multilevel_type), intent(inout) :: %s</font>
<font color="black">", group_ptr->name, group_ptr->name);
fortprintf(fd, "</font>
<font color="black">");
fortprintf(fd, " integer :: i</font>
<font color="red">");
- fortprintf(fd, " type (%s_type), pointer :: sptr</font>
<font color="blue">", group_ptr->name);
+ fortprintf(fd, " real (kind=RKIND) :: real0d</font>
<font color="blue">");
+ fortprintf(fd, " real (kind=RKIND), dimension(:), pointer :: real1d</font>
<font color="blue">");
+ fortprintf(fd, " real (kind=RKIND), dimension(:,:), pointer :: real2d</font>
<font color="blue">");
+ fortprintf(fd, " real (kind=RKIND), dimension(:,:,:), pointer :: real3d</font>
<font color="blue">");
+ fortprintf(fd, " integer :: int0d</font>
<font color="blue">");
+ fortprintf(fd, " integer, dimension(:), pointer :: int1d</font>
<font color="blue">");
+ fortprintf(fd, " integer, dimension(:,:), pointer :: int2d</font>
<font color="blue">");
+ fortprintf(fd, " integer, dimension(:,:,:), pointer :: int3d</font>
<font color="blue">");
+ fortprintf(fd, " character (len=64) :: char0d</font>
<font color="blue">");
+ fortprintf(fd, " character (len=64), dimension(:), pointer :: char1d</font>
<font color="black">");
fortprintf(fd, "</font>
<font color="red">");
- fortprintf(fd, " sptr => %s %% time_levs(1) %% %s</font>
<font color="red">", group_ptr->name, group_ptr->name);
- fortprintf(fd, " do i=1,%s %% nTimeLevels-1</font>
<font color="red">", group_ptr->name);
- fortprintf(fd, " %s %% time_levs(i) %% %s => %s %% time_levs(i+1) %% %s</font>
<font color="red">", group_ptr->name, group_ptr->name, group_ptr->name, group_ptr->name);
- fortprintf(fd, " end do</font>
<font color="red">");
- fortprintf(fd, " %s %% time_levs(%s %% nTimeLevels) %% %s => sptr</font>
<font color="blue">", group_ptr->name, group_ptr->name, group_ptr->name);
+ var_list_ptr = group_ptr->vlist;
+ while (var_list_ptr) {
+ var_ptr = var_list_ptr->var;
+
+ if (strncmp(var_ptr->super_array, "-", 1024) != 0)
+ {
+ if (var_ptr->vtype == INTEGER) sprintf(type_str, "int%id", var_ptr->ndims+1);
+ else if (var_ptr->vtype == REAL) sprintf(type_str, "real%id", var_ptr->ndims+1);
+ else if (var_ptr->vtype == CHARACTER) sprintf(type_str, "char%id", var_ptr->ndims+1);
+
+ memcpy(super_array, var_ptr->super_array, 1024);
+
+ while (var_list_ptr && strncmp(super_array, var_list_ptr->var->super_array, 1024) == 0)
+ {
+ var_list_ptr2 = var_list_ptr;
+ var_list_ptr = var_list_ptr->next;
+ }
+ var_ptr2 = var_list_ptr2->var;
+
+ fortprintf(fd, " %s => %s %% time_levs(1) %% %s %% %s %% array</font>
<font color="blue">", type_str, group_ptr->name, group_ptr->name, var_ptr2->super_array);
+
+ fortprintf(fd, " do i=1,%s %% nTimeLevels-1</font>
<font color="blue">", group_ptr->name);
+ fortprintf(fd, " %s %% time_levs(i) %% %s %% %s %% array => %s %% time_levs(i+1) %% %s %% %s %% array</font>
<font color="blue">", group_ptr->name, group_ptr->name, var_ptr2->super_array, group_ptr->name, group_ptr->name, var_ptr2->super_array);
+ fortprintf(fd, " end do</font>
<font color="blue">");
+
+ fortprintf(fd, " %s %% time_levs(%s %% nTimeLevels) %% %s %% %s %% array=> %s</font>
<font color="black"></font>
<font color="blue">", group_ptr->name, group_ptr->name, group_ptr->name, var_ptr2->super_array, type_str);
+ }
+ else {
+
+ if (var_ptr->vtype == INTEGER) sprintf(type_str, "int%id", var_ptr->ndims);
+ else if (var_ptr->vtype == REAL) sprintf(type_str, "real%id", var_ptr->ndims);
+ else if (var_ptr->vtype == CHARACTER) sprintf(type_str, "char%id", var_ptr->ndims);
+
+ if (var_ptr->ndims > 0)
+ fortprintf(fd, " %s => %s %% time_levs(1) %% %s %% %s %% array</font>
<font color="blue">", type_str, group_ptr->name, group_ptr->name, var_ptr->name_in_code);
+ else
+ fortprintf(fd, " %s = %s %% time_levs(1) %% %s %% %s %% scalar</font>
<font color="blue">", type_str, group_ptr->name, group_ptr->name, var_ptr->name_in_code);
+
+ fortprintf(fd, " do i=1,%s %% nTimeLevels-1</font>
<font color="blue">", group_ptr->name);
+ if (var_ptr->ndims > 0)
+ fortprintf(fd, " %s %% time_levs(i) %% %s %% %s %% array => %s %% time_levs(i+1) %% %s %% %s %% array</font>
<font color="blue">", group_ptr->name, group_ptr->name, var_ptr->name_in_code, group_ptr->name, group_ptr->name, var_ptr->name_in_code);
+ else
+ fortprintf(fd, " %s %% time_levs(i) %% %s %% %s %% scalar = %s %% time_levs(i+1) %% %s %% %s %% scalar</font>
<font color="blue">", group_ptr->name, group_ptr->name, var_ptr->name_in_code, group_ptr->name, group_ptr->name, var_ptr->name_in_code);
+ fortprintf(fd, " end do</font>
<font color="blue">");
+
+ if (var_ptr->ndims > 0)
+ fortprintf(fd, " %s %% time_levs(%s %% nTimeLevels) %% %s %% %s %% array=> %s</font>
<font color="black"></font>
<font color="blue">", group_ptr->name, group_ptr->name, group_ptr->name, var_ptr->name_in_code, type_str);
+ else
+ fortprintf(fd, " %s %% time_levs(%s %% nTimeLevels) %% %s %% %s %% scalar = %s</font>
<font color="black"></font>
<font color="blue">", group_ptr->name, group_ptr->name, group_ptr->name, var_ptr->name_in_code, type_str);
+
+ var_list_ptr = var_list_ptr->next;
+ }
+ }
fortprintf(fd, "</font>
<font color="black">");
fortprintf(fd, " end subroutine mpas_shift_time_levels_%s</font>
<font color="black"></font>
<font color="black"></font>
<font color="blue">", group_ptr->name);
}
group_ptr = group_ptr->next;
}
fclose(fd);
+
+
+ /* Definitions of deallocate subroutines */
+ fd = fopen("field_links.inc", "w");
+
+ /* subroutine to call link subroutine for every field type */
+ fortprintf(fd, " subroutine mpas_create_field_links(b)</font>
<font color="black"></font>
<font color="blue">");
+ fortprintf(fd, " implicit none</font>
<font color="blue">");
+ fortprintf(fd, " type (block_type), pointer :: b</font>
<font color="black"></font>
<font color="blue">");
+ group_ptr = groups;
+ while (group_ptr)
+ {
+ var_list_ptr = group_ptr->vlist;
+ var_list_ptr = var_list_ptr->next;
+ var_ptr = var_list_ptr->var;
+
+
+ int ntime_levs = 1;
+
+ if (strncmp(var_ptr->super_array, "-", 1024) != 0)
+ {
+ memcpy(super_array, var_ptr->super_array, 1024);
+ memcpy(array_class, var_ptr->array_class, 1024);
+ while (var_list_ptr && strncmp(super_array, var_list_ptr->var->super_array, 1024) == 0)
+ {
+ var_list_ptr2 = var_list_ptr;
+ var_list_ptr = var_list_ptr->next;
+ }
+ var_ptr2 = var_list_ptr2->var;
+ get_outer_dim(var_ptr2, outer_dim);
+ ntime_levs = var_ptr2->ntime_levs;
+
+ if(ntime_levs > 1)
+ {
+ for(i=1; i<=ntime_levs; i++)
+ {
+ fortprintf(fd, " call mpas_create_%s_links(b %% %s %% time_levs(%i) %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name, i, group_ptr->name);
+ }        
+ }
+ else
+ {
+ fortprintf(fd, " call mpas_create_%s_links(b %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name);
+ }
+ }
+ else if (var_ptr->ndims > 0)
+ {
+ get_outer_dim(var_ptr, outer_dim);
+ ntime_levs = var_ptr->ntime_levs;
+
+ if(ntime_levs > 1)
+ {
+ for(i=1; i<=ntime_levs; i++)
+ {
+ fortprintf(fd, " call mpas_create_%s_links(b %% %s %% time_levs(%i) %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name, i, group_ptr->name);
+ }        
+ }
+ else
+ {
+ fortprintf(fd, " call mpas_create_%s_links(b %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name);
+ }
+ }
+
+ group_ptr = group_ptr->next;
+ }
+ fortprintf(fd, "</font>
<font color="black"> end subroutine mpas_create_field_links</font>
<font color="black"></font>
<font color="black"></font>
<font color="blue">");
+
+ /* subroutines for linking specific field type */
+ group_ptr = groups;
+
+ while (group_ptr) {
+ fortprintf(fd, " subroutine mpas_create_%s_links(%s)</font>
<font color="black"></font>
<font color="blue">", group_ptr->name, group_ptr->name);
+ fortprintf(fd, " implicit none</font>
<font color="blue">");
+ fortprintf(fd, " type (%s_type), pointer :: %s</font>
<font color="black"></font>
<font color="blue">", group_ptr->name, group_ptr->name);
+
+ var_list_ptr = group_ptr->vlist;
+ while (var_list_ptr) {
+ var_ptr = var_list_ptr->var;
+ if (strncmp(var_ptr->super_array, "-", 1024) != 0) {
+ memcpy(super_array, var_ptr->super_array, 1024);
+ memcpy(array_class, var_ptr->array_class, 1024);
+ while (var_list_ptr && strncmp(super_array, var_list_ptr->var->super_array, 1024) == 0) {
+ var_list_ptr2 = var_list_ptr;
+ var_list_ptr = var_list_ptr->next;
+ }
+ var_ptr2 = var_list_ptr2->var;
+ get_outer_dim(var_ptr2, outer_dim);
+
+ if (strncmp("nCells",outer_dim,1024) == 0) {
+ fortprintf(fd, " %s %% %s %% sendList => %s %% %s %% block %% parinfo %% cellsToSend</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
+ fortprintf(fd, " %s %% %s %% recvList => %s %% %s %% block %% parinfo %% cellsToRecv</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
+ fortprintf(fd, " %s %% %s %% copyList => %s %% %s %% block %% parinfo %% cellsToCopy</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
+ }
+ else if (strncmp("nEdges",outer_dim,1024) == 0) {
+ fortprintf(fd, " %s %% %s %% sendList => %s %% %s %% block %% parinfo %% edgesToSend</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
+ fortprintf(fd, " %s %% %s %% recvList => %s %% %s %% block %% parinfo %% edgesToRecv</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
+ fortprintf(fd, " %s %% %s %% copyList => %s %% %s %% block %% parinfo %% edgesToCopy</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
+ }
+ else if (strncmp("nVertices",outer_dim,1024) == 0) {
+ fortprintf(fd, " %s %% %s %% sendList => %s %% %s %% block %% parinfo %% verticesToSend</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
+ fortprintf(fd, " %s %% %s %% recvList => %s %% %s %% block %% parinfo %% verticesToRecv</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
+ fortprintf(fd, " %s %% %s %% copyList => %s %% %s %% block %% parinfo %% verticesToCopy</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
+ }
+ fortprintf(fd, "</font>
<font color="blue">");
+ }
+ else
+ {
+         if (var_ptr->ndims > 0)
+         {
+ get_outer_dim(var_ptr, outer_dim);
+
+ if (strncmp("nCells",outer_dim,1024) == 0) {
+ fortprintf(fd, " %s %% %s %% sendList => %s %% %s %% block %% parinfo %% cellsToSend</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
+ fortprintf(fd, " %s %% %s %% recvList => %s %% %s %% block %% parinfo %% cellsToRecv</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
+ fortprintf(fd, " %s %% %s %% copyList => %s %% %s %% block %% parinfo %% cellsToCopy</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
+ }
+ else if (strncmp("nEdges",outer_dim,1024) == 0) {
+ fortprintf(fd, " %s %% %s %% sendList => %s %% %s %% block %% parinfo %% edgesToSend</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
+ fortprintf(fd, " %s %% %s %% recvList => %s %% %s %% block %% parinfo %% edgesToRecv</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
+ fortprintf(fd, " %s %% %s %% copyList => %s %% %s %% block %% parinfo %% edgesToCopy</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
+ }
+ else if (strncmp("nVertices",outer_dim,1024) == 0) {
+ fortprintf(fd, " %s %% %s %% sendList => %s %% %s %% block %% parinfo %% verticesToSend</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
+ fortprintf(fd, " %s %% %s %% recvList => %s %% %s %% block %% parinfo %% verticesToRecv</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
+ fortprintf(fd, " %s %% %s %% copyList => %s %% %s %% block %% parinfo %% verticesToCopy</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
+ }
+ fortprintf(fd, "</font>
<font color="blue">");
+         }
+ var_list_ptr = var_list_ptr->next;
+         }
+ }
+
+ fortprintf(fd, " end subroutine mpas_create_%s_links</font>
<font color="black"></font>
<font color="black"></font>
<font color="gray">", group_ptr->name);
+
+ group_ptr = group_ptr->next;
+ }
+ fclose(fd);
}
@@ -801,12 +1110,14 @@
FILE * fd;
char vtype[5];
char fname[32];
+ char super_array[1024];
char struct_deref[1024];
char * cp1, * cp2;
int i, j;
int ivtype;
+#ifdef LEGACY_CODE
/*
* Generate declarations of IDs belonging in io_input_object
*/
@@ -836,6 +1147,7 @@
fclose(fd);
+
/*
* Definitions of read bounds and exchange lists for non-decomposed fields
*/
@@ -1493,9 +1805,116 @@
}
fclose(fd);
+#endif
/*
+ * MGD NEW CODE
+ */
+ fd = fopen("add_input_fields.inc", "w");
+
+ group_ptr = groups;
+ while (group_ptr) {
+ var_list_ptr = group_ptr->vlist;
+ while (var_list_ptr) {
+ var_ptr = var_list_ptr->var;
+
+ if (var_ptr->ntime_levs > 1)
+ snprintf(struct_deref, 1024, "blocklist %% %s %% time_levs(1) %% %s", group_ptr->name, group_ptr->name);
+ else
+ snprintf(struct_deref, 1024, "blocklist %% %s", group_ptr->name);
+
+ if (strncmp(var_ptr->super_array, "-", 1024) != 0) {
+ fortprintf(fd, " if ((%s %% %s %% ioinfo %% input .and. input_obj %% stream == STREAM_INPUT) .or. &</font>
<font color="blue">", struct_deref, var_ptr->super_array);
+ fortprintf(fd, " (%s %% %s %% ioinfo %% restart .and. input_obj %% stream == STREAM_RESTART) .or. &</font>
<font color="blue">", struct_deref, var_ptr->super_array);
+ fortprintf(fd, " (%s %% %s %% ioinfo %% sfc .and. input_obj %% stream == STREAM_SFC)) then</font>
<font color="blue">", struct_deref, var_ptr->super_array);
+ memcpy(super_array, var_ptr->super_array, 1024);
+/* fortprintf(fd, " write(0,*) \'adding input field %s\'</font>
<font color="blue">", var_ptr->super_array); */
+ fortprintf(fd, " call MPAS_streamAddField(input_obj %% io_stream, %s %% %s, nferr)</font>
<font color="blue">", struct_deref, var_ptr->super_array);
+ while (var_list_ptr && strncmp(super_array, var_list_ptr->var->super_array, 1024) == 0) {
+ var_list_ptr = var_list_ptr->next;
+ }
+ }
+ else {
+ fortprintf(fd, " if ((%s %% %s %% ioinfo %% input .and. input_obj %% stream == STREAM_INPUT) .or. &</font>
<font color="blue">", struct_deref, var_ptr->name_in_code);
+ fortprintf(fd, " (%s %% %s %% ioinfo %% restart .and. input_obj %% stream == STREAM_RESTART) .or. &</font>
<font color="blue">", struct_deref, var_ptr->name_in_code);
+ fortprintf(fd, " (%s %% %s %% ioinfo %% sfc .and. input_obj %% stream == STREAM_SFC)) then</font>
<font color="blue">", struct_deref, var_ptr->name_in_code);
+/* fortprintf(fd, " write(0,*) \'adding input field %s\'</font>
<font color="blue">", var_ptr->name_in_code); */
+ fortprintf(fd, " call MPAS_streamAddField(input_obj %% io_stream, %s %% %s, nferr)</font>
<font color="blue">", struct_deref, var_ptr->name_in_code);
+ }
+
+ fortprintf(fd, " end if</font>
<font color="black"></font>
<font color="blue">");
+
+ if (var_list_ptr) var_list_ptr = var_list_ptr->next;
+ }
+ group_ptr = group_ptr->next;
+ }
+
+ fclose(fd);
+
+
+ /*
+ * MGD NEW CODE
+ */
+ fd = fopen("exchange_input_field_halos.inc", "w");
+
+ group_ptr = groups;
+ while (group_ptr) {
+ var_list_ptr = group_ptr->vlist;
+ while (var_list_ptr) {
+ var_ptr = var_list_ptr->var;
+
+ dimlist_ptr = var_ptr->dimlist;
+ i = 1;
+ while (dimlist_ptr) {
+ if (i == var_ptr->ndims) {
+ if (!strncmp(dimlist_ptr->dim->name_in_file, "nCells", 1024) ||
+ !strncmp(dimlist_ptr->dim->name_in_file, "nEdges", 1024) ||
+ !strncmp(dimlist_ptr->dim->name_in_file, "nVertices", 1024)) {
+
+ if (var_ptr->ntime_levs > 1)
+ snprintf(struct_deref, 1024, "domain %% blocklist %% %s %% time_levs(1) %% %s", group_ptr->name, group_ptr->name);
+ else
+ snprintf(struct_deref, 1024, "domain %% blocklist %% %s", group_ptr->name);
+
+ if (strncmp(var_ptr->super_array, "-", 1024) != 0) {
+ fortprintf(fd, " if ((%s %% %s %% ioinfo %% input .and. input_obj %% stream == STREAM_INPUT) .or. &</font>
<font color="blue">", struct_deref, var_ptr->super_array);
+ fortprintf(fd, " (%s %% %s %% ioinfo %% restart .and. input_obj %% stream == STREAM_RESTART) .or. &</font>
<font color="blue">", struct_deref, var_ptr->super_array);
+ fortprintf(fd, " (%s %% %s %% ioinfo %% sfc .and. input_obj %% stream == STREAM_SFC)) then</font>
<font color="blue">", struct_deref, var_ptr->super_array);
+ memcpy(super_array, var_ptr->super_array, 1024);
+/* fortprintf(fd, " write(0,*) \'exchange halo for %s\'</font>
<font color="blue">", var_ptr->super_array); */
+ fortprintf(fd, " call mpas_dmpar_exch_halo_field(%s %% %s)</font>
<font color="blue">", struct_deref, var_ptr->super_array);
+ while (var_list_ptr && strncmp(super_array, var_list_ptr->var->super_array, 1024) == 0) {
+ var_list_ptr = var_list_ptr->next;
+ }
+ }
+ else {
+ fortprintf(fd, " if ((%s %% %s %% ioinfo %% input .and. input_obj %% stream == STREAM_INPUT) .or. &</font>
<font color="blue">", struct_deref, var_ptr->name_in_code);
+ fortprintf(fd, " (%s %% %s %% ioinfo %% restart .and. input_obj %% stream == STREAM_RESTART) .or. &</font>
<font color="blue">", struct_deref, var_ptr->name_in_code);
+ fortprintf(fd, " (%s %% %s %% ioinfo %% sfc .and. input_obj %% stream == STREAM_SFC)) then</font>
<font color="blue">", struct_deref, var_ptr->name_in_code);
+/* fortprintf(fd, " write(0,*) \'exchange halo for %s\'</font>
<font color="blue">", var_ptr->name_in_code); */
+ fortprintf(fd, " call mpas_dmpar_exch_halo_field(%s %% %s)</font>
<font color="blue">", struct_deref, var_ptr->name_in_code);
+ }
+
+ fortprintf(fd, " end if</font>
<font color="black"></font>
<font color="gray">");
+
+ }
+ }
+
+ i++;
+ dimlist_ptr = dimlist_ptr -> next;
+ }
+
+ if (var_list_ptr) var_list_ptr = var_list_ptr->next;
+ }
+ group_ptr = group_ptr->next;
+ }
+
+ fclose(fd);
+
+
+#ifdef LEGACY_CODE
+ /*
* Generate NetCDF reads of dimension and variable IDs
*/
fd = fopen("netcdf_read_ids.inc", "w");
@@ -1657,6 +2076,7 @@
fclose(fd);
}
+#endif
}
@@ -1674,11 +2094,13 @@
char vtype[5];
char fname[32];
char struct_deref[1024];
+ char super_array[1024];
char * cp1, * cp2;
int i, j;
int ivtype;
+#ifdef LEGACY_CODE
/*
* Generate declarations of IDs belonging in io_output_object
*/
@@ -1818,9 +2240,77 @@
}
fclose(fd);
+#endif
+
+
+ /*
+ * MGD NEW CODE
+ */
+ fd = fopen("add_output_fields.inc", "w");
+
+ group_ptr = groups;
+ while (group_ptr) {
+ var_list_ptr = group_ptr->vlist;
+ while (var_list_ptr) {
+ var_ptr = var_list_ptr->var;
+
+ if (group_ptr->vlist->var->ntime_levs > 1)
+ snprintf(struct_deref, 1024, "domain %% blocklist %% %s %% time_levs(1) %% %s", group_ptr->name, group_ptr->name);
+ else
+ snprintf(struct_deref, 1024, "domain %% blocklist %% %s", group_ptr->name);
+
+ if (strncmp(var_ptr->super_array, "-", 1024) != 0) {
+ fortprintf(fd, " if ((%s %% %s %% ioinfo %% output .and. output_obj %% stream == OUTPUT) .or. &</font>
<font color="blue">", struct_deref, var_ptr->super_array);
+ fortprintf(fd, " (%s %% %s %% ioinfo %% restart .and. output_obj %% stream == RESTART) .or. &</font>
<font color="blue">", struct_deref, var_ptr->super_array);
+ fortprintf(fd, " (%s %% %s %% ioinfo %% sfc .and. output_obj %% stream == SFC)) then</font>
<font color="blue">", struct_deref, var_ptr->super_array);
+ memcpy(super_array, var_ptr->super_array, 1024);
+ fortprintf(fd, " call MPAS_streamAddField(output_obj %% io_stream, %s %% %s, ierr)</font>
<font color="blue">", struct_deref, super_array);
+ while (var_list_ptr && strncmp(super_array, var_list_ptr->var->super_array, 1024) == 0) {
+ var_list_ptr = var_list_ptr->next;
+ }
+ }
+ else {
+ fortprintf(fd, " if ((%s %% %s %% ioinfo %% output .and. output_obj %% stream == OUTPUT) .or. &</font>
<font color="blue">", struct_deref, var_ptr->name_in_code);
+ fortprintf(fd, " (%s %% %s %% ioinfo %% restart .and. output_obj %% stream == RESTART) .or. &</font>
<font color="blue">", struct_deref, var_ptr->name_in_code);
+ fortprintf(fd, " (%s %% %s %% ioinfo %% sfc .and. output_obj %% stream == SFC)) then</font>
<font color="blue">", struct_deref, var_ptr->name_in_code);
+ fortprintf(fd, " call MPAS_streamAddField(output_obj %% io_stream, %s %% %s, ierr)</font>
<font color="blue">", struct_deref, var_ptr->name_in_code);
+ }
+ fortprintf(fd, " end if</font>
<font color="black"></font>
<font color="blue">");
+ if (var_list_ptr) var_list_ptr = var_list_ptr->next;
+ }
+ group_ptr = group_ptr->next;
+ }
+
+ fclose(fd);
+
+
/*
+ * MGD NEW CODE
+ */
+ fd = fopen("add_output_atts.inc", "w");
+
+ nl = namelists;
+ while (nl) {
+ if (nl->vtype == LOGICAL) {
+ fortprintf(fd, " if (%s) then</font>
<font color="blue">", nl->name);
+ fortprintf(fd, " call MPAS_writeStreamAtt(output_obj %% io_stream, \'%s\', 'T', ierr)</font>
<font color="blue">", nl->name);
+ fortprintf(fd, " else</font>
<font color="blue">");
+ fortprintf(fd, " call MPAS_writeStreamAtt(output_obj %% io_stream, \'%s\', 'F', ierr)</font>
<font color="blue">", nl->name);
+ fortprintf(fd, " end if</font>
<font color="blue">");
+ }
+ else {
+ fortprintf(fd, " call MPAS_writeStreamAtt(output_obj %% io_stream, \'%s\', %s, ierr)</font>
<font color="gray">", nl->name, nl->name);
+ }
+ nl = nl->next;
+ }
+
+ fclose(fd);
+
+
+#ifdef LEGACY_CODE
+ /*
* Generate collect and write code
*/
fd = fopen("io_output_fields.inc", "w");
@@ -2194,5 +2684,6 @@
fclose(fd);
}
+#endif
}
</font>
</pre>