duda 2012-10-26 18:29:44 -0600 (Fri, 26 Oct 2012)

BRANCH COMMIT

Merging trunk to atmos_physics branch; includes, among other changes, those
for multiple blocks and from the DCMIP branch (which was merged to the trunk).

Index: branches/atmos_physics
===================================================================
--- branches/atmos_physics        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics        2012-10-27 00:29:44 UTC (rev 2281)
Property changes on: branches/atmos_physics
___________________________________________________________________
Modified: svn:mergeinfo
## -3,16 +3,24 ##
/branches/ocean_projects/ale_vert_coord:1225-1383
/branches/ocean_projects/ale_vert_coord_new:1387-1428
/branches/ocean_projects/gmvar:1214-1514,1517-1738
+/branches/ocean_projects/imp_vert_mix_error:1847-1887
/branches/ocean_projects/imp_vert_mix_mrp:754-986
+/branches/ocean_projects/leith_mrp:2182-2241
/branches/ocean_projects/monotonic_advection:1499-1640
+/branches/ocean_projects/monthly_forcing:1810-1867
+/branches/ocean_projects/option3_b4b_test:2201-2231
+/branches/ocean_projects/partial_bottom_cells:2172-2226
+/branches/ocean_projects/restart_reproducibility:2239-2272
/branches/ocean_projects/split_explicit_mrp:1134-1138
/branches/ocean_projects/split_explicit_timestepping:1044-1097
/branches/ocean_projects/vert_adv_mrp:704-745
+/branches/ocean_projects/vol_cons_RK_imp_mix:1965-1992
/branches/ocean_projects/zstar_restart_new:1762-1770
/branches/omp_blocks/block_decomp:1374-1569
/branches/omp_blocks/ddt_reorg:1301-1414
/branches/omp_blocks/halo:1570-1638
/branches/omp_blocks/io:1639-1787
+/branches/omp_blocks/multiple_blocks:1803-2084
/branches/source_renaming:1082-1113
/branches/time_manager:924-962
-/trunk/mpas:1371-1863
+/trunk/mpas:1371-2274
\ No newline at end of property
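
The svn:mergeinfo change above is what Subversion records automatically when
trunk revisions are merged into a branch. As a hedged sketch only, with the
revision range read off the /trunk/mpas line above (previously merged through
r1863, now through r2274) and the working-copy path assumed, a merge like this
is typically produced from the branch working copy by something like:

    svn merge -r 1863:2274 ^/trunk/mpas .

followed by a commit of the merged working copy.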
Modified: branches/atmos_physics/Makefile
===================================================================
--- branches/atmos_physics/Makefile        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/Makefile        2012-10-27 00:29:44 UTC (rev 2281)
@@ -124,7 +124,7 @@
        "FFLAGS_OPT = -O3 -m64 -ffree-line-length-none -fdefault-real-8 -fconvert=big-endian -ffree-form" \
        "CFLAGS_OPT = -O3 -m64" \
        "LDFLAGS_OPT = -O3 -m64" \
-        "FFLAGS_DEBUG = -g -m64 -ffree-line-length-none -fdefault-real-8 -fconvert=big-endian -ffree-form -fbounds-check" \
+        "FFLAGS_DEBUG = -g -m64 -ffree-line-length-none -fdefault-real-8 -fconvert=big-endian -ffree-form -fbounds-check -fbacktrace" \
        "CFLAGS_DEBUG = -g -m64" \
        "LDFLAGS_DEBUG = -g -m64" \
        "CORE = $(CORE)" \
@@ -231,8 +231,9 @@
        DEBUG_MESSAGE="Debug flags are not defined for this compile group. Defaulting to Optimized flags"
else # FFLAGS_DEBUG IF
        FFLAGS=$(FFLAGS_DEBUG)
-        CFLAGS=$(CFLAGS_DEBUG) -DMPAS_DEBUG
+        CFLAGS=$(CFLAGS_DEBUG)
        LDFLAGS=$(LDFLAGS_DEBUG)
+        override CPPFLAGS += -DMPAS_DEBUG
        DEBUG_MESSAGE="Debugging is on."
endif # FFLAGS_DEBUG IF
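
A note on the hunk above: moving -DMPAS_DEBUG from CFLAGS into CPPFLAGS
presumably makes the macro visible when the .F sources are preprocessed rather
than only to the C compiler, and GNU make's "override" directive ensures the
flag is appended even when CPPFLAGS is also supplied on the make command line.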
@@ -267,10 +268,32 @@
        PAPI_MESSAGE="Papi libraries are off."
endif # USE_PAPI IF
+ifeq "$(TAU)" "true"
+        LINKER=tau_f90.sh
+        CPPINCLUDES += -DMPAS_TAU
+        TAU_MESSAGE="TAU Hooks are on."
+else
+        LINKER=$(FC)
+        TAU_MESSAGE="TAU Hooks are off."
+endif
+
ifneq ($(wildcard $(NETCDF)/lib/libnetcdff.*), ) # CHECK FOR NETCDF4
        LIBS += -lnetcdff
endif # CHECK FOR NETCDF4
+####################################################
+# Section for adding external libraries and includes
+####################################################
+ifdef MPAS_EXTERNAL_LIBS
+ LIBS += $(MPAS_EXTERNAL_LIBS)
+endif
+ifdef MPAS_EXTERNAL_INCLUDES
+ CPPINCLUDES += $(MPAS_EXTERNAL_INCLUDES)
+ FCINCLUDES += $(MPAS_EXTERNAL_INCLUDES)
+endif
+####################################################
+
+
all: mpas_main
mpas_main:
@@ -278,6 +301,7 @@
CC="$(CC)" \
SFC="$(SFC)" \
SCC="$(SCC)" \
+ LINKER="$(LINKER)" \
CFLAGS="$(CFLAGS)" \
FFLAGS="$(FFLAGS)" \
LDFLAGS="$(LDFLAGS)" \
@@ -293,6 +317,7 @@
        @echo $(DEBUG_MESSAGE)
        @echo $(SERIAL_MESSAGE)
        @echo $(PAPI_MESSAGE)
+        @echo $(TAU_MESSAGE)
clean:
        cd src; $(MAKE) clean RM="$(RM)" CORE="$(CORE)"
        $(RM) $(CORE)_model.exe
@@ -324,9 +349,10 @@
        @cd src; ls -d core_* | grep ".*" | sed "s/core_/ /g"
        @echo ""
        @echo "Available Options:"
-        @echo " SERIAL=true - builds serial version. Default is parallel version."
-        @echo " DEBUG=true - builds debug version. Default is optimized version."
-        @echo " USE_PAPI=true - builds version using PAPI for timers and hardware counters. Default is off."
+        @echo " SERIAL=true - builds serial version. Default is parallel version."
+        @echo " DEBUG=true - builds debug version. Default is optimized version."
+        @echo " USE_PAPI=true - builds version using PAPI for timers. Default is off."
+        @echo " TAU=true - builds version using TAU hooks for profiling. Default is off."
        @echo ""
        @echo "Ensure that NETCDF (and PAPI if USE_PAPI=true) are environment variables"
        @echo "that point to the absolute paths for the libraries."
Modified: branches/atmos_physics/namelist.input.init_nhyd_atmos
===================================================================
--- branches/atmos_physics/namelist.input.init_nhyd_atmos        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/namelist.input.init_nhyd_atmos        2012-10-27 00:29:44 UTC (rev 2281)
@@ -5,6 +5,12 @@
config_stop_time = '2010-10-23_00:00:00'
/
+&dcmip
+ config_dcmip_case = '2-0-0'
+ config_planet_scale = 1.0
+ config_rotation_rate_scale = 1.0
+/
+
&dimensions
config_nvertlevels = 41
config_nsoillevels = 4
@@ -33,8 +39,8 @@
/
&io
- config_input_name = 'x1.40962.geogrid.nc'
- config_output_name = 'x1.40962.init.2010-10-23.nc'
+ config_input_name = 'x1.40962.grid.nc'
+ config_output_name = 'x1.40962.init.nc'
config_pio_num_iotasks = 0
config_pio_stride = 1
/
@@ -47,7 +53,4 @@
/
&restart
- config_restart_interval = 3000
- config_do_restart = .false.
- config_restart_time = 1036800.0
/
Modified: branches/atmos_physics/namelist.input.nhyd_atmos
===================================================================
--- branches/atmos_physics/namelist.input.nhyd_atmos        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/namelist.input.nhyd_atmos        2012-10-27 00:29:44 UTC (rev 2281)
@@ -4,12 +4,12 @@
config_start_time = '2010-10-23_00:00:00'
config_run_duration = '5_00:00:00'
config_number_of_sub_steps = 6
- config_h_mom_eddy_visc2 = 0000.
- config_h_mom_eddy_visc4 = 0.
- config_v_mom_eddy_visc2 = 00.0
- config_h_theta_eddy_visc2 = 0000.
- config_h_theta_eddy_visc4 = 00.
- config_v_theta_eddy_visc2 = 00.0
+ config_h_mom_eddy_visc2 = 0.0
+ config_h_mom_eddy_visc4 = 0.0
+ config_v_mom_eddy_visc2 = 0.0
+ config_h_theta_eddy_visc2 = 0.0
+ config_h_theta_eddy_visc4 = 0.0
+ config_v_theta_eddy_visc2 = 0.0
config_horiz_mixing = '2d_smagorinsky'
config_len_disp = 120000.0
config_theta_adv_order = 3
@@ -35,12 +35,8 @@
config_xnutr = 0.0
/
-&dimensions
- config_nvertlevels = 41
-/
-
&io
- config_input_name = 'x1.40962.init.2010-10-23.nc'
+ config_input_name = 'x1.40962.init.nc'
config_output_name = 'x1.40962.output.nc'
config_restart_name = 'restart.nc'
config_output_interval = '1_00:00:00'
Modified: branches/atmos_physics/namelist.input.nhyd_atmos_jw
===================================================================
--- branches/atmos_physics/namelist.input.nhyd_atmos_jw        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/namelist.input.nhyd_atmos_jw        2012-10-27 00:29:44 UTC (rev 2281)
@@ -1,18 +1,17 @@
&nhyd_model
- config_test_case = 2
config_time_integration = 'SRK3'
config_dt = 450
- config_ntimesteps = 1920
- config_output_interval = 192
+ config_start_time = '0000-01-01_00:00:00'
+ config_run_duration = '10_00:00:00'
config_number_of_sub_steps = 6
- config_h_mom_eddy_visc2 = 0.0e+04
- config_h_mom_eddy_visc4 = 0.
- config_v_mom_eddy_visc2 = 00.0
- config_h_theta_eddy_visc2 = 0.0e+04
- config_h_theta_eddy_visc4 = 00.
- config_v_theta_eddy_visc2 = 00.0
+ config_h_mom_eddy_visc2 = 0.0
+ config_h_mom_eddy_visc4 = 0.0
+ config_v_mom_eddy_visc2 = 0.0
+ config_h_theta_eddy_visc2 = 0.0
+ config_h_theta_eddy_visc4 = 0.0
+ config_v_theta_eddy_visc2 = 0.0
config_horiz_mixing = '2d_smagorinsky'
- config_len_disp = 60000.
+ config_len_disp = 120000.
config_u_vadv_order = 3
config_w_vadv_order = 3
config_theta_vadv_order = 3
@@ -39,7 +38,7 @@
/
&io
- config_input_name = 'grid.nc'
+ config_input_name = 'x1.40962.init.nc'
config_output_name = 'output.nc'
config_restart_name = 'restart.nc'
config_pio_num_iotasks = 0
@@ -48,15 +47,14 @@
&decomposition
config_number_of_blocks = 0
- config_block_decomp_file_prefix = 'graph.info.part.'
+ config_block_decomp_file_prefix = 'x1.40962.graph.info.part.'
config_explicit_proc_decomp = .false.
config_proc_decomp_file_prefix = 'graph.info.part.'
/
&restart
- config_restart_interval = 3000
+ config_restart_interval = '10_00:00:00'
config_do_restart = .false.
- config_restart_time = 1036800.0
/
&physics
Modified: branches/atmos_physics/namelist.input.ocean
===================================================================
--- branches/atmos_physics/namelist.input.ocean        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/namelist.input.ocean        2012-10-27 00:29:44 UTC (rev 2281)
@@ -9,12 +9,13 @@
/
&io
config_input_name = 'grid.nc'
- config_output_name = 'output..nc'
+ config_output_name = 'output.nc'
config_restart_name = 'restart.nc'
config_output_interval = '1_00:00:00'
config_frames_per_outfile = 1000000
config_pio_num_iotasks = 0
config_pio_stride = 1
+ config_write_output_on_startup = .true.
/
&decomposition
config_number_of_blocks = 0
@@ -31,6 +32,12 @@
config_pressure_type = 'pressure'
config_rho0 = 1014.65
/
+&partial_bottom_cells
+ config_alter_ICs_for_pbcs = 'off'
+ config_min_pbc_fraction = 0.10
+ config_check_ssh_consistency = .true.
+ config_check_zlevel_consistency = .false.
+/
&split_explicit_ts
config_n_ts_iter = 2
config_n_bcl_iter_beg = 1
@@ -55,6 +62,12 @@
config_h_tracer_eddy_diff2 = 1.0e5
config_h_tracer_eddy_diff4 = 0.0
/
+&hmix_leith
+ config_use_leith_del2 = .false.
+ config_leith_parameter = 1.0
+ config_leith_dx = 15000.0
+ config_leith_visc2_max = 2.5e3
+/
&vmix
config_vert_visc_type = 'const'
config_vert_diff_type = 'const'
Modified: branches/atmos_physics/src/Makefile
===================================================================
--- branches/atmos_physics/src/Makefile        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/Makefile        2012-10-27 00:29:44 UTC (rev 2281)
@@ -3,7 +3,7 @@
all: mpas
mpas: reg_includes externals frame ops dycore drver
-        $(FC) $(LDFLAGS) -o $(CORE)_model.exe driver/*.o -L. -ldycore -lops -lframework $(LIBS) -I./external/esmf_time_f90 -L./external/esmf_time_f90 -lesmf_time
+        $(LINKER) $(LDFLAGS) -o $(CORE)_model.exe driver/*.o -L. -ldycore -lops -lframework $(LIBS) -I./external/esmf_time_f90 -L./external/esmf_time_f90 -lesmf_time
reg_includes:
        ( cd registry; $(MAKE) CC="$(SCC)" )
Modified: branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_driver_radiation_sw.F
===================================================================
--- branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_driver_radiation_sw.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_driver_radiation_sw.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -3,7 +3,6 @@
use mpas_grid_types
use mpas_timer
- use mpas_atmphys_manager
use mpas_atmphys_constants
use mpas_atmphys_manager, only: gmt,curr_julday,julday,year
use mpas_atmphys_camrad_init
Modified: branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_todynamics.F
===================================================================
--- branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_todynamics.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_atmos_physics/mpas_atmphys_todynamics.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -222,7 +222,8 @@
!local variables:
!-----------------
type(block_type),pointer :: block
- type (field2DReal):: tempField
+ type (field2DReal), pointer :: tempField
+ type (field2DReal), target :: tempFieldTarget
integer:: iCell,iEdge,k,j,nCells,nCellsSolve,nVertLevels
integer,dimension(:),pointer :: nEdgesOnCell
integer,dimension(:,:),pointer:: edgesOnCell
@@ -244,8 +245,8 @@
nEdgesOnCell => mesh % nEdgesOnCell % array
edge_normal => mesh % edgeNormalVectors % array
- allocate(Ux_tend_halo(nVertLevels,nCells))
- allocate(Uy_tend_halo(nVertLevels,nCells))
+ allocate(Ux_tend_halo(nVertLevels,nCells+1))
+ allocate(Uy_tend_halo(nVertLevels,nCells+1))
Ux_tend_halo(:,:) = 0.
Uy_tend_halo(:,:) = 0.
@@ -256,11 +257,15 @@
enddo
enddo
+ tempField => tempFieldTarget
tempField % block => block
tempField % dimSizes(1) = nVertLevels
tempField % dimSizes(2) = nCellsSolve
tempField % sendList => block % parinfo % cellsToSend
tempField % recvList => block % parinfo % cellsToRecv
+ tempField % copyList => block % parinfo % cellsToCopy
+ tempField % prev => null()
+ tempField % next => null()
tempField % array => Ux_tend_halo
call mpas_dmpar_exch_halo_field(tempField)
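
The tempField rework above (repeated in the other cores below) follows from the
multiple-blocks merge: the halo exchange now expects a field carrying a
copyList for block-to-block copies within a task and prev/next links it can
traverse, so a bare stack-allocated field2DReal with undefined pointer
components is no longer safe. A minimal sketch of the pattern, assuming only
the names visible in the diff:

    type (field2DReal), pointer :: tempField        ! what the exchange routine sees
    type (field2DReal), target  :: tempFieldTarget  ! local storage backing the pointer

    tempField => tempFieldTarget
    tempField % block => block
    tempField % dimSizes(1) = nVertLevels
    tempField % dimSizes(2) = nCellsSolve
    tempField % sendList => block % parinfo % cellsToSend
    tempField % recvList => block % parinfo % cellsToRecv
    tempField % copyList => block % parinfo % cellsToCopy  ! local copies between blocks
    tempField % prev => null()  ! single-field list: nothing to traverse
    tempField % next => null()
    tempField % array => Ux_tend_halo
    call mpas_dmpar_exch_halo_field(tempField)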
Modified: branches/atmos_physics/src/core_hyd_atmos/Registry
===================================================================
--- branches/atmos_physics/src/core_hyd_atmos/Registry        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_hyd_atmos/Registry        2012-10-27 00:29:44 UTC (rev 2281)
@@ -21,6 +21,7 @@
namelist logical sw_model config_monotonic true
namelist integer sw_model config_mp_physics 0
namelist real sw_model config_apvm_upwinding 0.5
+namelist integer sw_model config_num_halos 2
namelist integer dimensions config_nvertlevels 26
namelist character io config_input_name grid.nc
namelist character io config_output_name output.nc
Modified: branches/atmos_physics/src/core_hyd_atmos/mpas_atmh_time_integration.F
===================================================================
--- branches/atmos_physics/src/core_hyd_atmos/mpas_atmh_time_integration.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_hyd_atmos/mpas_atmh_time_integration.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -276,7 +276,7 @@
block % mesh % areaCell % array (iCell) &
- block % state % time_levs(2) % state % pressure % array (block % mesh % nVertLevels + 1, 1) * &
block % mesh % areaCell % array (iCell)
- do k=1, block % mesh % nVertLevelsSolve
+ do k=1, block % mesh % nVertLevels ! Could be nVertLevelsSolve?
scalar_mass = scalar_mass - block % state % time_levs(2) % state % scalars % array (2,k,iCell) * &
block % state % time_levs(2) % state % h % array (k,iCell) * &
block % mesh % dnw % array (k) * &
@@ -1378,7 +1378,7 @@
end do
wdtn(:,nVertLevels+1) = 0.
- do k=1,grid % nVertLevelsSolve
+ do k=1,grid % nVertLevels ! Could be nVertLevelsSolve?
do iScalar=1,num_scalars
scalar_new(iScalar,k,iCell) = ( scalar_old(iScalar,k,iCell)*h_old(k,iCell) &
+ dt*( scalar_tend(iScalar,k,iCell) -rdnw(k)*(wdtn(iScalar,k+1)-wdtn(iScalar,k)) ) )/h_new(k,iCell)
@@ -1420,7 +1420,8 @@
real (kind=RKIND), dimension(:), pointer :: dvEdge, dcEdge, areaCell
integer, dimension(:,:), pointer :: cellsOnEdge
- type (field3DReal) :: tempField
+ type (field3DReal), pointer :: tempField
+ type (field3DReal), target :: tempFieldTarget
real (kind=RKIND), dimension( s_old % num_scalars, grid % nEdges+1) :: h_flux
real (kind=RKIND), dimension(2, s_old % num_scalars, grid % nCells+1) :: v_flux, v_flux_upwind, s_update
@@ -1628,12 +1629,16 @@
end do ! end loop over cells to compute scale factor
+ tempField => tempFieldTarget
tempField % block => block
tempField % dimSizes(1) = 2
tempField % dimSizes(2) = num_scalars
tempField % dimSizes(3) = grid % nCells
tempField % sendList => block % parinfo % cellsToSend
tempField % recvList => block % parinfo % cellsToRecv
+ tempField % copyList => block % parinfo % cellsToCopy
+ tempField % prev => null()
+ tempField % next => null()
tempField % array => scale_in
call mpas_dmpar_exch_halo_field(tempField)
Modified: branches/atmos_physics/src/core_init_nhyd_atmos/Registry
===================================================================
--- branches/atmos_physics/src/core_init_nhyd_atmos/Registry        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_init_nhyd_atmos/Registry        2012-10-27 00:29:44 UTC (rev 2281)
@@ -7,6 +7,10 @@
namelist character nhyd_model config_stop_time none
namelist integer nhyd_model config_theta_adv_order 3
namelist real nhyd_model config_coef_3rd_order 0.25
+namelist integer nhyd_model config_num_halos 2
+namelist character dcmip config_dcmip_case 2-0-0
+namelist real dcmip config_planet_scale 1.0
+namelist real dcmip config_rotation_rate_scale 1.0
namelist integer dimensions config_nvertlevels 26
namelist integer dimensions config_nsoillevels 4
namelist integer dimensions config_nfglevels 27
Modified: branches/atmos_physics/src/core_init_nhyd_atmos/mpas_init_atm_mpas_core.F
===================================================================
--- branches/atmos_physics/src/core_init_nhyd_atmos/mpas_init_atm_mpas_core.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_init_nhyd_atmos/mpas_init_atm_mpas_core.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -22,6 +22,7 @@
block => domain % blocklist
do while (associated(block))
block % state % time_levs(1) % state % xtime % scalar = startTimeStamp
+ block % mesh % sphere_radius = a / config_planet_scale
block => block % next
end do
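
The new sphere_radius assignment above is where config_planet_scale from the
&dcmip group takes effect. As a worked example, assuming MPAS's Earth radius
a = 6.371229e6 m, the X = 125 reduction mentioned in the test-case notes
further below gives sphere_radius = 6.371229e6 / 125, roughly 5.1e4 m, i.e. a
planet of about 51 km radius.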
Modified: branches/atmos_physics/src/core_init_nhyd_atmos/mpas_init_atm_test_cases.F
===================================================================
--- branches/atmos_physics/src/core_init_nhyd_atmos/mpas_init_atm_test_cases.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_init_nhyd_atmos/mpas_init_atm_test_cases.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -117,18 +117,66 @@
write(0,*) 'real-data surface (SST) update test case '
block_ptr => domain % blocklist
do while (associated(block_ptr))
+ ! Defined in mpas_init_atm_surface.F
call init_atm_test_case_sfc(domain, domain % dminfo, block_ptr % mesh,block_ptr % fg, block_ptr % state % time_levs(1) % state)
block_ptr => block_ptr % next
end do
+ else if (config_test_case == 9 ) then
+
+ write(0,*) ' '
+ write(0,*) ' '
+ write(0,*) ' Setting up DCMIP test case '//trim(config_dcmip_case)
+ write(0,*) ' '
+ write(0,*) ' '
+
+ if (trim(config_dcmip_case) == '2-0-0' .or. &
+ trim(config_dcmip_case) == '2-0-1') then
+
+ block_ptr => domain % blocklist
+ do while (associated(block_ptr))
+ call init_atm_test_case_resting_atmosphere(block_ptr % mesh, block_ptr % state % time_levs(1) % state, &
+ block_ptr % diag, config_test_case)
+ block_ptr => block_ptr % next
+ end do
+
+ else if (trim(config_dcmip_case) == '2-1' .or. &
+ trim(config_dcmip_case) == '2-1a' .or. &
+ trim(config_dcmip_case) == '2-2' .or. &
+ trim(config_dcmip_case) == '3-1') then
+
+ block_ptr => domain % blocklist
+ do while (associated(block_ptr))
+ call init_atm_test_case_reduced_radius(block_ptr % mesh, block_ptr % state % time_levs(1) % state, &
+ block_ptr % diag, config_test_case)
+ block_ptr => block_ptr % next
+ end do
+
+ else
+
+ write(0,*) ' '
+ write(0,*) ' *************'
+ write(0,*) ' Unrecognized DCMIP case '//trim(config_dcmip_case)
+ write(0,*) ' Please choose either 2-0-0, 2-0-1, 2-1, 2-1a, 2-2, or 3-1'
+ write(0,*) ' *************'
+ write(0,*) ' '
+ call mpas_dmpar_abort(domain % dminfo)
+
+ end if
+
else
- write(0,*) ' Only test cases 1, 2, 3, 4, 5, 6, 7, and 8 are currently supported for nonhydrostatic core '
- stop
+ write(0,*) ' '
+ write(0,*) ' *************'
+ write(0,*) ' Only test cases 1 through 9 are currently supported for the nonhydrostatic core'
+ write(0,*) ' *************'
+ write(0,*) ' '
+ call mpas_dmpar_abort(domain % dminfo)
end if
+ ! Copy initialized state to all time levels
block_ptr => domain % blocklist
do while (associated(block_ptr))
do i=2,nTimeLevs
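
A hedged sketch of the namelist settings that reach the new DCMIP branch above:
config_test_case = 9, the &dcmip group, and the case names come from the diff,
while the particular values are illustrative (the DCMIP mountain-wave cases are
conventionally run on a reduced-radius, non-rotating sphere):

    &nhyd_model
     config_test_case = 9
    /
    &dcmip
     config_dcmip_case = '2-1'
     config_planet_scale = 125.0
     config_rotation_rate_scale = 0.0
    /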
@@ -232,23 +280,25 @@
real (kind=RKIND) :: z_edge, z_edge3, d2fdx2_cell1, d2fdx2_cell2
logical, parameter :: moisture = .true.
+! logical, parameter :: moisture = .false.
+
!
- ! Scale all distances and areas from a unit sphere to one with radius a
+ ! Scale all distances and areas from a unit sphere to one with radius sphere_radius
!
- grid % xCell % array = grid % xCell % array * a
- grid % yCell % array = grid % yCell % array * a
- grid % zCell % array = grid % zCell % array * a
- grid % xVertex % array = grid % xVertex % array * a
- grid % yVertex % array = grid % yVertex % array * a
- grid % zVertex % array = grid % zVertex % array * a
- grid % xEdge % array = grid % xEdge % array * a
- grid % yEdge % array = grid % yEdge % array * a
- grid % zEdge % array = grid % zEdge % array * a
- grid % dvEdge % array = grid % dvEdge % array * a
- grid % dcEdge % array = grid % dcEdge % array * a
- grid % areaCell % array = grid % areaCell % array * a**2.0
- grid % areaTriangle % array = grid % areaTriangle % array * a**2.0
- grid % kiteAreasOnVertex % array = grid % kiteAreasOnVertex % array * a**2.0
+ grid % xCell % array = grid % xCell % array * grid % sphere_radius
+ grid % yCell % array = grid % yCell % array * grid % sphere_radius
+ grid % zCell % array = grid % zCell % array * grid % sphere_radius
+ grid % xVertex % array = grid % xVertex % array * grid % sphere_radius
+ grid % yVertex % array = grid % yVertex % array * grid % sphere_radius
+ grid % zVertex % array = grid % zVertex % array * grid % sphere_radius
+ grid % xEdge % array = grid % xEdge % array * grid % sphere_radius
+ grid % yEdge % array = grid % yEdge % array * grid % sphere_radius
+ grid % zEdge % array = grid % zEdge % array * grid % sphere_radius
+ grid % dvEdge % array = grid % dvEdge % array * grid % sphere_radius
+ grid % dcEdge % array = grid % dcEdge % array * grid % sphere_radius
+ grid % areaCell % array = grid % areaCell % array * grid % sphere_radius**2.0
+ grid % areaTriangle % array = grid % areaTriangle % array * grid % sphere_radius**2.0
+ grid % kiteAreasOnVertex % array = grid % kiteAreasOnVertex % array * grid % sphere_radius**2.0
weightsOnEdge => grid % weightsOnEdge % array
nEdgesOnEdge => grid % nEdgesOnEdge % array
@@ -314,8 +364,8 @@
znut = eta_t
etavs = (1.-0.252)*pii/2.
- r_earth = a
- omega_e = omega
+ r_earth = grid % sphere_radius
+ omega_e = omega * config_rotation_rate_scale
p0 = 1.e+05
write(0,*) ' point 1 in test case setup '
@@ -559,7 +609,7 @@
end do
call init_atm_recompute_geostrophic_wind(u_2d,rho_2d,pp_2d,qv_2d,lat_2d,zz_2d,zx_2d, &
- cf1,cf2,cf3,fzm,fzp,rdzw,nz1,nlat,dlat)
+ cf1,cf2,cf3,fzm,fzp,rdzw,nz1,nlat,dlat,grid%sphere_radius)
end if
@@ -716,24 +766,24 @@
lat2 = grid%latVertex%array(vtx2)
iCell1 = grid % cellsOnEdge % array(1,iEdge)
iCell2 = grid % cellsOnEdge % array(2,iEdge)
- flux = (0.5*(lat2-lat1) - 0.125*(sin(4.*lat2) - sin(4.*lat1)))*a/grid % dvEdge % array(iEdge)
+ flux = (0.5*(lat2-lat1) - 0.125*(sin(4.*lat2) - sin(4.*lat1))) * grid % sphere_radius / grid % dvEdge % array(iEdge)
if (config_test_case == 2) then
r_pert = sphere_distance( grid % latEdge % array (iEdge), grid % lonEdge % array (iEdge), &
lat_pert, lon_pert, 1.0_RKIND)/(pert_radius)
- u_pert = u_perturbation*exp(-r_pert**2)*(lat2-lat1)*a/grid % dvEdge % array(iEdge)
+ u_pert = u_perturbation*exp(-r_pert**2)*(lat2-lat1) * grid % sphere_radius / grid % dvEdge % array(iEdge)
else if (config_test_case == 3) then
lon_Edge = grid % lonEdge % array(iEdge)
u_pert = u_perturbation*cos(k_x*(lon_Edge - lon_pert)) &
- *(0.5*(lat2-lat1) - 0.125*(sin(4.*lat2) - sin(4.*lat1)))*a/grid % dvEdge % array(iEdge)
+ *(0.5*(lat2-lat1) - 0.125*(sin(4.*lat2) - sin(4.*lat1))) * grid % sphere_radius / grid % dvEdge % array(iEdge)
else
u_pert = 0.0
end if
if (rebalance) then
- call init_atm_calc_flux_zonal(u_2d,etavs_2d,lat_2d,flux_zonal,lat1,lat2,grid % dvEdge % array(iEdge),a,u0,nz1,nlat)
+ call init_atm_calc_flux_zonal(u_2d,etavs_2d,lat_2d,flux_zonal,lat1,lat2,grid % dvEdge % array(iEdge),grid%sphere_radius,u0,nz1,nlat)
do k=1,grid % nVertLevels
fluxk = u0*flux_zonal(k)/(0.5*(rb(k,iCell1)+rb(k,iCell2)+rr(k,iCell1)+rr(k,iCell2)))
state % u % array(k,iEdge) = fluxk + u_pert
@@ -759,14 +809,14 @@
! Generate rotated Coriolis field
!
- grid % fEdge % array(iEdge) = 2.0 * omega * &
+ grid % fEdge % array(iEdge) = 2.0 * omega_e * &
( -cos(grid%lonEdge%array(iEdge)) * cos(grid%latEdge%array(iEdge)) * sin(alpha_grid) + &
sin(grid%latEdge%array(iEdge)) * cos(alpha_grid) &
)
end do
do iVtx=1,grid % nVertices
- grid % fVertex % array(iVtx) = 2.0 * omega * &
+ grid % fVertex % array(iVtx) = 2.0 * omega_e * &
(-cos(grid%lonVertex%array(iVtx)) * cos(grid%latVertex%array(iVtx)) * sin(alpha_grid) + &
sin(grid%latVertex%array(iVtx)) * cos(alpha_grid) &
)
@@ -893,6 +943,7 @@
end subroutine init_atm_test_case_jw
+
subroutine init_atm_calc_flux_zonal(u_2d,etavs_2d,lat_2d,flux_zonal,lat1_in,lat2_in,dvEdge,a,u0,nz1,nlat)
implicit none
@@ -942,9 +993,11 @@
end subroutine init_atm_calc_flux_zonal
+
+
!SHP-balance
subroutine init_atm_recompute_geostrophic_wind(u_2d,rho_2d,pp_2d,qv_2d,lat_2d,zz_2d,zx_2d, &
- cf1,cf2,cf3,fzm,fzp,rdzw,nz1,nlat,dlat)
+ cf1,cf2,cf3,fzm,fzp,rdzw,nz1,nlat,dlat,rad)
implicit none
integer, intent(in) :: nz1,nlat
@@ -953,7 +1006,7 @@
real (kind=RKIND), dimension(nz1,nlat-1), intent(in) :: zx_2d
real (kind=RKIND), dimension(nlat), intent(in) :: lat_2d
real (kind=RKIND), dimension(nz1), intent(in) :: fzm, fzp, rdzw
- real (kind=RKIND), intent(in) :: cf1, cf2, cf3, dlat
+ real (kind=RKIND), intent(in) :: cf1, cf2, cf3, dlat, rad
!local variable
real (kind=RKIND), dimension(nz1,nlat-1) :: pgrad, ru, u
@@ -966,8 +1019,8 @@
real (kind=RKIND) :: rdx, qtot, r_earth, phi
integer :: k,i, itr
- r_earth = a
- omega_e = omega
+ r_earth = rad
+ omega_e = omega * config_rotation_rate_scale
rdx = 1./(dlat*r_earth)
do i=1,nlat-1
@@ -1037,10 +1090,9 @@
u_2d(k,i) = (3.*u_2d(k,i-1)-u_2d(k,i-2))*.5
end do
-
end subroutine init_atm_recompute_geostrophic_wind
-!----------------------------------------------------------------------------------------------------------
+
subroutine init_atm_test_case_squall_line(dminfo, grid, state, diag, test_case)
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! Setup squall line and supercell test case
@@ -2024,7 +2076,7 @@
write(0,*) ' *** sounding for the simulation ***'
write(0,*) ' z theta pres qv rho_m u rr'
do k=1,nz1
- write(6,'(8(f14.9,2x))') .5*(zgrid(k,1)+zgrid(k+1,1))/1000., &
+ write(0,'(8(f14.9,2x))') .5*(zgrid(k,1)+zgrid(k+1,1))/1000., &
t(k,1)/(1.+1.61*scalars(index_qv,k,1)), &
.01*p0*p(k,1)**(1./rcp), &
1000.*scalars(index_qv,k,1), &
@@ -2247,7 +2299,8 @@
real (kind=RKIND), dimension(:,:), pointer :: v
real (kind=RKIND), dimension(:,:), pointer :: sorted_arr
- type (field1DReal):: tempField
+ type (field1DReal), pointer :: tempField
+ type (field1DReal), target :: tempFieldTarget
real(kind=RKIND), dimension(:), pointer :: hs, hs1
real(kind=RKIND) :: hm, zh, dzmin, dzmina, dzmina_global, dzminf, sm
@@ -2365,7 +2418,7 @@
etavs = (1.-0.252)*pii/2.
rcv = rgas/(cp-rgas)
- r_earth = a
+ r_earth = grid % sphere_radius
omega_e = omega
p0 = 1.e+05
@@ -2375,25 +2428,25 @@
!
- ! Scale all distances and areas from a unit sphere to one with radius a
+ ! Scale all distances and areas from a unit sphere to one with radius sphere_radius
!
if (config_static_interp) then
- grid % xCell % array = grid % xCell % array * a
- grid % yCell % array = grid % yCell % array * a
- grid % zCell % array = grid % zCell % array * a
- grid % xVertex % array = grid % xVertex % array * a
- grid % yVertex % array = grid % yVertex % array * a
- grid % zVertex % array = grid % zVertex % array * a
- grid % xEdge % array = grid % xEdge % array * a
- grid % yEdge % array = grid % yEdge % array * a
- grid % zEdge % array = grid % zEdge % array * a
- grid % dvEdge % array = grid % dvEdge % array * a
- grid % dcEdge % array = grid % dcEdge % array * a
- grid % areaCell % array = grid % areaCell % array * a**2.0
- grid % areaTriangle % array = grid % areaTriangle % array * a**2.0
- grid % kiteAreasOnVertex % array = grid % kiteAreasOnVertex % array * a**2.0
+ grid % xCell % array = grid % xCell % array * r_earth
+ grid % yCell % array = grid % yCell % array * r_earth
+ grid % zCell % array = grid % zCell % array * r_earth
+ grid % xVertex % array = grid % xVertex % array * r_earth
+ grid % yVertex % array = grid % yVertex % array * r_earth
+ grid % zVertex % array = grid % zVertex % array * r_earth
+ grid % xEdge % array = grid % xEdge % array * r_earth
+ grid % yEdge % array = grid % yEdge % array * r_earth
+ grid % zEdge % array = grid % zEdge % array * r_earth
+ grid % dvEdge % array = grid % dvEdge % array * r_earth
+ grid % dcEdge % array = grid % dcEdge % array * r_earth
+ grid % areaCell % array = grid % areaCell % array * r_earth**2.0
+ grid % areaTriangle % array = grid % areaTriangle % array * r_earth**2.0
+ grid % kiteAreasOnVertex % array = grid % kiteAreasOnVertex % array * r_earth**2.0
scalars(:,:,:) = 0.
@@ -3227,15 +3280,17 @@
end do
+ tempField => tempFieldTarget
tempField % block => block
tempField % dimSizes(1) = grid % nCells
tempField % sendList => parinfo % cellsToSend
tempField % recvList => parinfo % cellsToRecv
+ tempField % copyList => parinfo % cellsToCopy
tempField % array => hs
+ tempField % prev => null()
+ tempField % next => null()
- call mpas_timer_start("EXCHANGE_1D_REAL")
call mpas_dmpar_exch_halo_field(tempField)
- call mpas_timer_stop("EXCHANGE_1D_REAL")
! dzmina = minval(hs(:)-hx(k-1,:))
dzmina = minval(zw(k)+ah(k)*hs(1:grid%nCellsSolve)-zw(k-1)-ah(k-1)*hx(k-1,1:grid%nCellsSolve))
@@ -4056,7 +4111,7 @@
if (vert_level(k) == 200100.0) sorted_arr(1,k) = 99999.0
sorted_arr(2,k) = fg % t % array(k,iCell)
end do
- call quicksort(config_nfglevels, sorted_arr)
+ call mpas_quicksort(config_nfglevels, sorted_arr)
do k=1,grid%nVertLevels
target_z = 0.5 * (grid % zgrid % array(k,iCell) + grid % zgrid % array(k+1,iCell))
state % theta_m % array(k,iCell) = vertical_interp(target_z, config_nfglevels, sorted_arr, order=1, extrap=1)
@@ -4071,7 +4126,7 @@
if (vert_level(k) == 200100.0) sorted_arr(1,k) = 99999.0
sorted_arr(2,k) = fg % rh % array(k,iCell)
end do
- call quicksort(config_nfglevels, sorted_arr)
+ call mpas_quicksort(config_nfglevels, sorted_arr)
do k=1,grid%nVertLevels
target_z = 0.5 * (grid % zgrid % array(k,iCell) + grid % zgrid % array(k+1,iCell))
state % scalars % array(state % index_qv,k,iCell) = vertical_interp(target_z, config_nfglevels, sorted_arr, order=1, extrap=0)
@@ -4086,7 +4141,7 @@
if (vert_level(k) == 200100.0) sorted_arr(1,k) = 99999.0
sorted_arr(2,k) = fg % z % array(k,iCell)
end do
- call quicksort(config_nfglevels, sorted_arr)
+ call mpas_quicksort(config_nfglevels, sorted_arr)
do k=1,grid%nVertLevels
target_z = 0.5 * (grid % zgrid % array(k,iCell) + grid % zgrid % array(k+1,iCell))
fg % gfs_z % array(k,iCell) = vertical_interp(target_z, config_nfglevels, sorted_arr, order=1, extrap=1)
@@ -4104,7 +4159,7 @@
end if
sorted_arr(2,k) = log(fg % p % array(k,iCell))
end do
- call quicksort(config_nfglevels, sorted_arr)
+ call mpas_quicksort(config_nfglevels, sorted_arr)
do k=1,grid%nVertLevels
target_z = 0.5 * (grid % zgrid % array(k,iCell) + grid % zgrid % array(k+1,iCell))
diag % pressure % array(k,iCell) = exp(vertical_interp(target_z, config_nfglevels, sorted_arr, order=1, extrap=1))
@@ -4122,7 +4177,7 @@
! end if
! sorted_arr(2,k) = log(fg % p % array(k,iCell))
! end do
-! call quicksort(config_nfglevels, sorted_arr)
+! call mpas_quicksort(config_nfglevels, sorted_arr)
! do k=1,grid%nVertLevels+1
! target_z = grid % zgrid % array(k,iCell)
! fg % gfs_p % array(k,iCell) = exp(vertical_interp(target_z, config_nfglevels, sorted_arr, order=1, extrap=1))
@@ -4141,7 +4196,7 @@
if (vert_level(k) == 200100.0) sorted_arr(1,k) = 99999.0
sorted_arr(2,k) = fg % u % array(k,iEdge)
end do
- call quicksort(config_nfglevels, sorted_arr)
+ call mpas_quicksort(config_nfglevels, sorted_arr)
do k=1,grid%nVertLevels
target_z = 0.25 * (grid % zgrid % array(k,cellsOnEdge(1,iEdge)) + grid % zgrid % array(k+1,cellsOnEdge(1,iEdge)) + grid % zgrid % array(k,cellsOnEdge(2,iEdge)) + grid % zgrid % array(k+1,cellsOnEdge(2,iEdge)))
state % u % array(k,iEdge) = vertical_interp(target_z, config_nfglevels, sorted_arr, order=1, extrap=0)
@@ -4182,7 +4237,7 @@
end if
sorted_arr(2,k) = log(fg % p % array(k,iCell))
end do
- call quicksort(config_nfglevels, sorted_arr)
+ call mpas_quicksort(config_nfglevels, sorted_arr)
target_z = grid % zgrid % array(1,iCell)
fg % psfc % array(iCell) = exp(vertical_interp(target_z, config_nfglevels, sorted_arr, order=1, extrap=1))
@@ -4344,6 +4399,1584 @@
end subroutine init_atm_test_case_gfs
+
+!--------------------- TEST CASE 9 -----------------------------------------------
+
+
+ subroutine init_atm_test_case_reduced_radius(grid, state, diag, test_case)
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ! Setup Schar-type mountain wave test case on reduced radius sphere
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ implicit none
+
+ type (mesh_type), intent(inout) :: grid
+ type (state_type), intent(inout) :: state
+ type (diag_type), intent(inout) :: diag
+ integer, intent(in) :: test_case
+
+ real (kind=RKIND), parameter :: t0=300., hm=250., alpha=0.
+! real (kind=RKIND), parameter :: t0=288., hm=0., alpha=0.
+
+ ! Parameters for test case 3-1
+ real (kind=RKIND), parameter :: widthParm = 5000.0, &
+ dTheta = 1.0, &
+ L_z = 20000.0, &
+ theta_c = 0.0, &
+ lambda_c = 2.0 * pii / 3.0
+
+
+ real (kind=RKIND), dimension(:), pointer :: rdzw, dzu, rdzu, fzm, fzp
+ real (kind=RKIND), dimension(:,:), pointer :: zgrid, zx, zz, hx, cqw
+ real (kind=RKIND), dimension(:,:), pointer :: ppb, pb, rho_zz, rb, rr, tb, rtb, p, pp, dss, t, rt, u, ru
+ real (kind=RKIND), dimension(:,:,:), pointer :: scalars, deriv_two, zb, zb3
+
+ !This is temporary variable here. It just need when calculate tangential velocity v.
+ integer :: eoe, j
+ integer, dimension(:), pointer :: nEdgesOnEdge, nEdgesOnCell
+ integer, dimension(:,:), pointer :: edgesOnEdge, CellsOnEdge, edgesOnCell
+ real (kind=RKIND), dimension(:), pointer :: dvEdge, AreaCell, xCell, yCell, dcEdge
+ real (kind=RKIND), dimension(:,:), pointer :: weightsOnEdge
+
+ integer :: iCell, iCell1, iCell2 , iEdge, vtx1, vtx2, ivtx, i, k, kz, nz, nz1, itr, itrp, cell1, cell2, nCellsSolve
+ integer :: index_qv
+
+ real (kind=RKIND) :: ptop, p0, pis, flux, d2fdx2_cell1, d2fdx2_cell2
+
+ real (kind=RKIND) :: ztemp, zd, zt, dz, str
+ real(kind=RKIND), dimension(:), pointer :: hs, hs1
+
+ real (kind=RKIND), dimension(grid % nVertLevels, grid % nCells) :: rh
+ real (kind=RKIND) :: es, qvs, xnutr, ptemp
+ integer :: iter, nsm
+ integer, dimension(:,:), pointer :: cellsOnCell
+
+ type (field1DReal), pointer :: tempField
+ type (field1DReal), target :: tempFieldTarget
+
+ type (block_type), pointer :: block
+ type (parallel_info), pointer :: parinfo
+ type (dm_info), pointer :: dminfo
+
+ real (kind=RKIND), dimension(grid % nVertLevels + 1 ) :: zc, zw, ah
+ real (kind=RKIND), dimension(grid % nVertLevels ) :: zu, dzw, rdzwp, rdzwm
+ real (kind=RKIND), allocatable, dimension(:) :: psiVertex
+
+ real (kind=RKIND) :: d1, d2, d3, cof1, cof2, cf1, cf2, cf3
+ real (kind=RKIND) :: um, us, rcp, rcv
+ real (kind=RKIND) :: xmid, temp, pres, a_scale, xac, xlac, shear, tsurf, usurf
+
+ real (kind=RKIND) :: xi, yi, ri, xa, xc, yc, xla, zinv, xn2, xn2m, xn2l, sm, dzh, dzht, dzmin, dzmina, dzminf, &
+ dzmina_global, z_edge, z_edge3, sm0
+ real (kind=RKIND) :: theta_pert, s
+
+ integer, dimension(grid % nCells, 2) :: next_cell
+ real (kind=RKIND), dimension(grid % nCells) :: hxzt, pitop, ptopb
+ logical, parameter :: terrain_smooth = .false.
+
+ block => grid % block
+ parinfo => block % parinfo
+ dminfo => block % domain % dminfo
+
+
+ !
+ ! Scale all distances
+ !
+ a_scale = grid % sphere_radius
+
+ grid % xCell % array = grid % xCell % array * a_scale
+ grid % yCell % array = grid % yCell % array * a_scale
+ grid % zCell % array = grid % zCell % array * a_scale
+ grid % xVertex % array = grid % xVertex % array * a_scale
+ grid % yVertex % array = grid % yVertex % array * a_scale
+ grid % zVertex % array = grid % zVertex % array * a_scale
+ grid % xEdge % array = grid % xEdge % array * a_scale
+ grid % yEdge % array = grid % yEdge % array * a_scale
+ grid % zEdge % array = grid % zEdge % array * a_scale
+ grid % dvEdge % array = grid % dvEdge % array * a_scale
+ grid % dcEdge % array = grid % dcEdge % array * a_scale
+ grid % areaCell % array = grid % areaCell % array * a_scale**2.0
+ grid % areaTriangle % array = grid % areaTriangle % array * a_scale**2.0
+ grid % kiteAreasOnVertex % array = grid % kiteAreasOnVertex % array * a_scale**2.0
+
+ weightsOnEdge => grid % weightsOnEdge % array
+ nEdgesOnEdge => grid % nEdgesOnEdge % array
+ edgesOnEdge => grid % edgesOnEdge % array
+ edgesOnCell => grid % edgesOnCell % array
+ dvEdge => grid % dvEdge % array
+ dcEdge => grid % dcEdge % array
+ AreaCell => grid % AreaCell % array
+ CellsOnEdge => grid % CellsOnEdge % array
+ cellsOnCell => grid % cellsOnCell % array
+ nEdgesOnCell => grid % nEdgesOnCell % array
+ deriv_two => grid % deriv_two % array
+
+ nz1 = grid % nVertLevels
+ nz = nz1 + 1
+ nCellsSolve = grid % nCellsSolve
+
+ zgrid => grid % zgrid % array
+ zb => grid % zb % array
+ zb3 => grid % zb3 % array
+ rdzw => grid % rdzw % array
+ dzu => grid % dzu % array
+ rdzu => grid % rdzu % array
+ fzm => grid % fzm % array
+ fzp => grid % fzp % array
+ zx => grid % zx % array
+ zz => grid % zz % array
+ hx => grid % hx % array
+ dss => grid % dss % array
+
+ xCell => grid % xCell % array
+ yCell => grid % yCell % array
+
+ ppb => diag % pressure_base % array
+ pb => diag % exner_base % array
+ rb => diag % rho_base % array
+ tb => diag % theta_base % array
+ rtb => diag % rtheta_base % array
+ p => diag % exner % array
+ cqw => diag % cqw % array
+
+ rho_zz => state % rho_zz % array
+
+ pp => diag % pressure_p % array
+ rr => diag % rho_p % array
+ t => state % theta_m % array
+ rt => diag % rtheta_p % array
+ u => state % u % array
+ ru => diag % ru % array
+
+ scalars => state % scalars % array
+
+ index_qv = state % index_qv
+
+ scalars(:,:,:) = 0.
+
+ call atm_initialize_advection_rk(grid)
+ call atm_initialize_deformation_weights(grid)
+
+ if (trim(config_dcmip_case) == '2-1') then
+ zt = 30000.
+ xnutr = 0.1 ! Coefficient for implicit w damping in absorbing layer
+ zd = 20000. ! Bottom of absorbing layer
+ write(0,*) ' test case 2-1, zt, zd, xnutr ', zt,zd,xnutr
+ end if
+
+ if (trim(config_dcmip_case) == '2-1a') then
+ zt = 20000.
+ xnutr = 0.1 ! Coefficient for implicit w damping in absorbing layer
+ zd = 10000. ! Bottom of absorbing layer
+ write(0,*) ' test case 2-1a, zt, zd, xnutr ', zt,zd,xnutr
+ end if
+
+ if (trim(config_dcmip_case) == '2-2') then
+ zt = 30000.
+ xnutr = 0.1 ! Coefficient for implicit w damping in absorbing layer
+ zd = 20000. ! Bottom of absorbing layer
+ write(0,*) ' test case 2-2, zt, zd, xnutr ', zt,zd,xnutr
+ end if
+
+ if (trim(config_dcmip_case) == '3-1') then
+ zt = 10000.
+ xnutr = 0.0 ! Coefficient for implicit w damping in absorbing layer
+ zd = 10000. ! Bottom of absorbing layer
+ write(0,*) ' test case 3-1, zt, zd, xnutr ', zt,zd,xnutr
+ end if
+
+ p0 = 1.e+05
+ rcp = rgas/cp
+ rcv = rgas/(cp-rgas)
+
+ ! metrics for hybrid coordinate and vertical stretching
+ str = 1.0
+
+
+ dz = zt/float(nz1)
+! write(0,*) ' dz = ',dz
+
+ do k=1,nz
+                
+! sh(k) is the stretching specified for height surfaces
+
+ zc(k) = zt*(real(k-1)*dz/zt)**str
+                                
+! to specify specific heights zc(k) for coordinate surfaces,
+! input zc(k)
+! zw(k) is the height of zeta surfaces
+! zw(k) = (k-1)*dz yields constant dzeta
+! and nonconstant dzeta/dz
+! zw(k) = sh(k)*zt yields nonconstant dzeta
+! and nearly constant dzeta/dz
+
+! zw(k) = float(k-1)*dz
+ zw(k) = zc(k)
+!
+! ah(k) governs the transition between terrain-following
+! and pure height coordinates
+! ah(k) = 0 is a terrain-following coordinate
+! ah(k) = 1 is a height coordinate
+
+! ah(k) = 1.-cos(.5*pii*(k-1)*dz/zt)**6
+ ah(k) = 1.
+!         write(0,*) ' k, zc, zw, ah ',k,zc(k),zw(k),ah(k)                        
+ end do
+ do k=1,nz1
+ dzw (k) = zw(k+1)-zw(k)
+ rdzw(k) = 1./dzw(k)
+ zu(k ) = .5*(zw(k)+zw(k+1))
+ end do
+ do k=2,nz1
+ dzu (k) = .5*(dzw(k)+dzw(k-1))
+ rdzu(k) = 1./dzu(k)
+ fzp (k) = .5* dzw(k )/dzu(k)
+ fzm (k) = .5* dzw(k-1)/dzu(k)
+ rdzwp(k) = dzw(k-1)/(dzw(k )*(dzw(k)+dzw(k-1)))
+ rdzwm(k) = dzw(k )/(dzw(k-1)*(dzw(k)+dzw(k-1)))
+ end do
+
+!********** how are we storing cf1, cf2 and cf3?
+
+ d1 = .5*dzw(1)
+ d2 = dzw(1)+.5*dzw(2)
+ d3 = dzw(1)+dzw(2)+.5*dzw(3)
+ !cf1 = d2*d3*(d3-d2)/(d2*d3*(d3-d2)+d1*d3*(d1-d3)+d1*d2*(d2-d1))
+ !cf2 = d1*d3*(d1-d3)/(d2*d3*(d3-d2)+d1*d3*(d1-d3)+d1*d2*(d2-d1))
+ !cf3 = d1*d2*(d2-d1)/(d2*d3*(d3-d2)+d1*d3*(d1-d3)+d1*d2*(d2-d1))
+
+ cof1 = (2.*dzu(2)+dzu(3))/(dzu(2)+dzu(3))*dzw(1)/dzu(2)
+ cof2 = dzu(2) /(dzu(2)+dzu(3))*dzw(1)/dzu(3)
+ cf1 = fzp(2) + cof1
+ cf2 = fzm(2) - cof1 - cof2
+ cf3 = cof2
+
+ grid % cf1 % scalar = cf1
+ grid % cf2 % scalar = cf2
+ grid % cf3 % scalar = cf3
+
+ write(0,*) 'EARTH RADIUS = ', grid % sphere_radius
+
+! setting for terrain
+
+! MGD for both 2-1 and 2-1a (and 2-2)
+ if (trim(config_dcmip_case) == '2-1' .or. &
+ trim(config_dcmip_case) == '2-1a' .or. &
+ trim(config_dcmip_case) == '2-2') then
+ xa = 5000.
+ xla = 4000.
+ end if
+
+ write(0,*) ' hm, xa, xla ',hm,xa,xla
+
+ hx = 0.
+
+ do iCell=1,grid % nCells
+
+ xi = grid % lonCell % array(iCell)
+ yi = grid % latCell % array(iCell)
+ xc = sphere_distance(yi, xi, yi, 0., grid % sphere_radius)
+ yc = sphere_distance(yi, xi, 0., xi, grid % sphere_radius)
+ xac = sphere_distance(yi, xa /grid % sphere_radius, yi, 0., grid % sphere_radius)
+ xlac = sphere_distance(yi, xla/grid % sphere_radius, yi, 0., grid % sphere_radius)
+
+ ri = sphere_distance(yi, xi, 0., 0., grid % sphere_radius)
+
+! MGD BEGIN 2-1
+! Circular mountain with Schar mtn cross section
+ if (trim(config_dcmip_case) == '2-1') then
+ hx(1,iCell) = hm*exp(-(ri/xa)**2)*cos(pii*ri/xla)**2
+ end if
+! MGD END 2-1
+
+! MGD BEGIN 2-2
+! Circular mountain with Schar mtn cross section
+ if (trim(config_dcmip_case) == '2-2') then
+ hx(1,iCell) = hm*exp(-(ri/xa)**2)*cos(pii*ri/xla)**2
+ end if
+! MGD END 2-2
+
+! MGD BEGIN 2-1a
+! proposed to be run with x333 rather than x500
+! Ridge mountain with Schar mtn cross section
+ if (trim(config_dcmip_case) == '2-1a') then
+ hx(1,iCell) = hm*exp(-(xc/xac)**2)*cos(pii*xc/xlac)**2*cos(yc/grid % sphere_radius)
+ end if
+! MGD END 2-1a
+
+ hx(nz,iCell) = zt
+
+
+ enddo
+ write(0,*) ' hx computation complete '
+
+!!! MGD WE NEED TO REPLACE THIS TERRAIN SMOOTHING WITH TC9
+
+ kz = nz
+
+ if (config_smooth_surfaces) then
+
+ write(0,*) ' '
+ write(0,*) ' Smoothing vertical coordinate surfaces'
+ write(0,*) ' '
+
+ allocate(hs (grid % nCells+1))
+ allocate(hs1(grid % nCells+1))
+
+ dzmin = 0.5
+ sm0 = 0.5
+ nsm = 30
+
+ write(6,*) 'dzmin = ',dzmin,' sm0 = ',sm0,' nsm = ',nsm
+
+ do k=2,kz-1
+ hx(k,:) = hx(k-1,:)
+ dzminf = zw(k)-zw(k-1)
+
+! dzmin = max(0.5_RKIND,1.-.5*zw(k)/hm)
+
+ sm = sm0*max( min(.5*zw(k)/hm,1.0_RKIND), .05 )
+
+ do i=1,nsm
+ do iCell=1,grid % nCells
+ hs1(iCell) = 0.
+ do j = 1,nEdgesOnCell(iCell)
+
+ hs1(iCell) = hs1(iCell) + dvEdge(edgesOnCell(j,iCell)) &
+ / dcEdge(edgesOnCell(j,iCell)) &
+ * (hx(k,cellsOnCell(j,iCell))-hx(k,iCell))
+ end do
+ hs1(iCell) = hx(k,iCell) + sm*hs1(iCell)
+
+ hs(iCell) = 0.
+ ! do j = 1,nEdgesOnCell(iCell)
+ ! hs(iCell) = hs(iCell) + dvEdge(edgesOnCell(j,iCell)) &
+ ! / dcEdge(edgesOnCell(j,iCell)) &
+ ! * (hs1(cellsOnCell(j,iCell))-hs1(iCell))
+ ! end do
+ hs(iCell) = hs1(iCell) - 0.*hs(iCell)
+
+ end do
+
+ tempField => tempFieldTarget
+ tempField % block => block
+ tempField % dimSizes(1) = grid % nCells
+ tempField % sendList => parinfo % cellsToSend
+ tempField % recvList => parinfo % cellsToRecv
+ tempField % copyList => parinfo % cellsToCopy
+ tempField % array => hs
+ tempField % prev => null()
+ tempField % next => null()
+
+ call mpas_dmpar_exch_halo_field(tempField)
+
+ ! dzmina = minval(hs(:)-hx(k-1,:))
+ dzmina = minval(zw(k)+ah(k)*hs(1:grid%nCellsSolve)-zw(k-1)-ah(k-1)*hx(k-1,1:grid%nCellsSolve))
+ call mpas_dmpar_min_real(dminfo, dzmina, dzmina_global)
+ ! write(0,*) ' k,i, dzmina, dzmin, zw(k)-zw(k-1) ', k,i, dzmina, dzmin, zw(k)-zw(k-1)
+ if (dzmina_global >= dzmin*(zw(k)-zw(k-1))) then
+ hx(k,:)=hs(:)
+ dzminf = dzmina_global
+ else
+ exit
+ end if
+ end do
+ write(0,*) k,i,sm,dzminf/(zw(k)-zw(k-1)),dzmina/(zw(k)-zw(k-1))
+ end do
+
+ do k=kz,nz
+ hx(k,:) = 0.
+ end do
+
+ deallocate(hs )
+ deallocate(hs1)
+
+ else
+
+ do k=2,nz1
+ dzmina = minval(zw(k)+ah(k)*hx(k,:)-zw(k-1)-ah(k-1)*hx(k-1,:))
+ write(0,*) k,dzmina/(zw(k)-zw(k-1))
+ end do
+
+ end if
+
+
+ do iCell=1,grid % nCells
+ do k=1,nz
+ if (config_smooth_surfaces) then
+ zgrid(k,iCell) = ah(k)*(zc(k)*(1.-hx(k,iCell)/zt)+hx(k,iCell)) &
+ + (1.-ah(k)) * zc(k)
+ else
+ zgrid(k,iCell) = ah(k)*(zc(k)*(1.-hx(1,iCell)/zt)+hx(1,iCell)) &
+ + (1.-ah(k)) * zc(k)
+ end if
+ end do
+ do k=1,nz1
+ zz (k,iCell) = (zw(k+1)-zw(k))/(zgrid(k+1,iCell)-zgrid(k,iCell))
+ end do
+ end do
+
+ do i=1, grid % nEdges
+ iCell1 = grid % CellsOnEdge % array(1,i)
+ iCell2 = grid % CellsOnEdge % array(2,i)
+ do k=1,nz
+ zx (k,i) = (zgrid(k,iCell2)-zgrid(k,iCell1)) / grid % dcEdge % array(i)
+ end do
+ end do
+ do i=1, grid % nCells
+ do k=1,nz1
+ ztemp = .5*(zgrid(k+1,i)+zgrid(k,i))
+ dss(k,i) = 0.
+ ztemp = zgrid(k,i)
+ if(ztemp.gt.zd+.1) then
+ dss(k,i) = dss(k,i)+xnutr*sin(.5*pii*(ztemp-zd)/(zt-zd))**2
+ end if
+ end do
+ enddo
+
+ write(0,*) ' grid metrics setup complete '
+
+!
+! mountain wave initialization
+!
+!MGD BEGIN 3-1
+! Coefficients used to initialize 2 layer sounding based on stability
+ if (trim(config_dcmip_case) == '3-1') then
+ zinv = 3000. ! Height of lower layer
+ xn2 = 0.0001 ! N^2 for upper layer
+ xn2m = 0.0001 ! N^2 for reference sounding
+ xn2l = 0.0001 ! N^2 for lower layer
+ end if
+!MGD END 3-1
+
+ if (trim(config_dcmip_case) == '2-1' .or. &
+ trim(config_dcmip_case) == '2-1a' .or. &
+ trim(config_dcmip_case) == '2-2' .or. &
+ trim(config_dcmip_case) == '3-1') then
+ um = 20. ! base wind for 2-1, 2-1a, 2-2, and 3-1
+ end if
+
+ if (trim(config_dcmip_case) == '2-2') then
+ shear = 0.00025 ! MGD 2-2
+ else
+ shear = 0. ! MGD everything else, 2-1, ...
+ end if
+
+ do i=1,grid % nCells
+
+! Surface temp and Exner function as function of latitude to balance wind fed
+
+ tsurf = t0*exp(-shear*um**2/gravity*sin(grid%latCell%array(i))**2)
+ pis = exp(-um**2*sin(grid%latCell%array(i))**2/(2.*cp*tsurf))
+
+ do k=1,nz1
+ ztemp = .5*(zgrid(k,i)+zgrid(k+1,i))
+
+!MGD FOR 2-1, 2-1a, 2-2
+! Isothermal temperature initialization
+ if (trim(config_dcmip_case) == '2-1' .or. &
+ trim(config_dcmip_case) == '2-1a' .or. &
+ trim(config_dcmip_case) == '2-2') then
+
+ t (k,i) = tsurf/pis*exp(gravity*ztemp/(cp*tsurf))
+ tb (k,i) = t0*exp(gravity*ztemp/(cp*t0))
+!! JBK fix, 20120801
+ !! tb(k,i) = t(k,i)
+
+ end if
+
+!MGD FOR 3-1
+! Initialization based on stability
+ if (trim(config_dcmip_case) == '3-1') then
+ if(ztemp .le. zinv) then
+ t (k,i) = t0*(1.+xn2l/gravity*ztemp)
+ else
+ t (k,i) = t0*(1.+xn2l/gravity*zinv+xn2/gravity*(ztemp-zinv))
+ end if
+ tb(k,i) = t0*(1. + xn2m/gravity*ztemp)
+ end if
+
+ rh(k,i) = 0.
+ end do
+
+
+! MGD ADD CODE HERE FOR 3-1 THERMAL PERT
+ if (trim(config_dcmip_case) == '3-1') then
+ do k=1,nz1
+ s = widthParm**2.0 / (widthParm**2.0 + sphere_distance(theta_c, lambda_c, &
+ grid%latCell%array(i), grid%lonCell%array(i), &
+ grid%sphere_radius)**2.0)
+ theta_pert = dTheta * s * sin((2.0_RKIND * pii * 0.5*(zgrid(k,i)+zgrid(k+1,i))) / L_z)
+ ! diag % theta % array(k,i) = diag % theta % array(k,i) + theta_pert
+ t(k,i) = t(k,i) + theta_pert
+ end do
+ end if
+
+
+
+ end do
+
+ !
+ ! Initialize wind field
+ !
+ allocate(psiVertex(grid % nVertices))
+ do iVtx=1,grid % nVertices
+ psiVertex(iVtx) = -grid % sphere_radius * um * ( &
+ sin(grid%latVertex%array(iVtx)) * cos(alpha) - &
+ cos(grid%lonVertex%array(iVtx)) * cos(grid%latVertex%array(iVtx)) * sin(alpha) &
+ )
+ end do
+ do iEdge=1,grid % nEdges
+ cell1 = grid % CellsOnEdge % array(1,iEdge)
+ cell2 = grid % CellsOnEdge % array(2,iEdge)
+ usurf = -1.0 * ( &
+ psiVertex(grid%verticesOnEdge%array(2,iEdge)) - &
+ psiVertex(grid%verticesOnEdge%array(1,iEdge)) &
+ ) / grid%dvEdge%array(iEdge)
+ do k=1,nz1
+ ztemp = .25*( zgrid(k,cell1)+zgrid(k+1,cell1 ) &
+ +zgrid(k,cell2)+zgrid(k+1,cell2))
+
+! Top of shear layer set at 10 km
+! if(ztemp.lt.10000.) then
+ u(k,iEdge) = usurf * sqrt(1.+2.*shear*ztemp)
+! else
+! u(k,iEdge) = usurf * sqrt(1.+2.*shear*10000.)
+! end if
+ end do
+ end do
+ deallocate(psiVertex)
+
+ do k=1,nz1
+ ztemp = .5*( zw(k)+zw(k+1))
+! if(ztemp.lt.10000.) then
+ grid % u_init % array(k) = um * sqrt(1.+2.*shear*ztemp)
+! else
+! grid % u_init % array(k) = um * sqrt(1.+2.*shear*10000.)
+! end if
+ end do
+
+!
+! reference sounding based on dry atmosphere
+!
+ do i=1, grid % nCells
+
+ tsurf = t0*exp(-shear*um**2/gravity*sin(grid%latCell%array(i))**2)
+
+!! JBK fix 20120801
+!! pis = exp(-um**2*sin(grid%latCell%array(i))**2/(2.*cp*tsurf))
+ pis = 1.
+
+ pitop(i) = pis-.5*dzw(1)*gravity/(cp*tb(1,1)*zz(1,1))
+ do k=2,nz1
+ pitop(i) = pitop(i)-dzu(k)*gravity/(cp*(fzm(k)*tb(k,1)+fzp(k)*tb(k-1,1)) &
+ *(fzm(k)*zz(k,1)+fzp(k)*zz(k-1,1)))
+ end do
+ pitop(i) = pitop(i)-.5*dzw(nz1)*gravity/(cp*tb(nz1,1)*zz(nz1,1))
+ ptopb(i) = p0*pitop(i)**(1./rcp)
+
+ pb(nz1,i) = pitop(i)+.5*dzw(nz1)*gravity/(cp*tb(nz1,i)*zz(nz1,i))
+ p (nz1,i) = pitop(i)+.5*dzw(nz1)*gravity/(cp*t (nz1,i)*zz(nz1,i))
+ do k=nz1-1,1,-1
+ pb(k,i) = pb(k+1,i) + dzu(k+1)*gravity/(cp*.5*(tb(k,i)+tb(k+1,i)) &
+ *.5*(zz(k,i)+zz(k+1,i)))
+ p (k,i) = p (k+1,i) + dzu(k+1)*gravity/(cp*.5*(t (k,i)+t (k+1,i)) &
+ *.5*(zz(k,i)+zz(k+1,i)))
+ end do
+ do k=1,nz1
+ rb (k,i) = pb(k,i)**(1./rcv)/((rgas/p0)*tb(k,i)*zz(k,i))
+ rtb(k,i) = rb(k,i)*tb(k,i)
+ rr (k,i) = p (k,i)**(1./rcv)/((rgas/p0)*t (k,i)*zz(k,i))-rb(k,i)
+ cqw(k,i) = 1.
+ end do
+ end do
+
+ write(0,*) ' ***** base state sounding ***** '
+ write(0,*) 'k pb p rb rtb rr tb t'
+ do k=1,grid%nVertLevels
+ write(0,'(i2,7(2x,f14.9))') k,pb(k,1),p(k,1),rb(k,1),rtb(k,1),rr(k,1),tb(k,1),t(k,1)
+ end do
+
+ scalars(index_qv,:,:) = 0.
+!!!
+!-------------------------------------------------------------------
+! ITERATIONS TO CONVERGE MOIST SOUNDING
+ do itr=1,30
+
+ do i = 1, grid % nCells
+
+ tsurf = t0*exp(-shear*um**2/gravity*sin(grid%latCell%array(i))**2)
+ pis = exp(-um**2*sin(grid%latCell%array(i))**2/(2.*cp*tsurf))
+! pis = 1.
+
+ pitop(i) = pis-.5*dzw(1)*gravity*(1.+scalars(index_qv,1,1))/(cp*t(1,1)*zz(1,1))
+
+ do k=2,nz1
+ pitop(i) = pitop(i)-dzu(k)*gravity/(cp*cqw(k,1)*(fzm(k)*t (k,1)+fzp(k)*t (k-1,1)) &
+ *(fzm(k)*zz(k,1)+fzp(k)*zz(k-1,1)))
+ end do
+ pitop(i) = pitop(i) - .5*dzw(nz1)*gravity*(1.+scalars(index_qv,nz1,1))/(cp*t(nz1,1)*zz(nz1,1))
+ ptop = p0*pitop(i)**(1./rcp)
+
+ pp(nz1,i) = ptop-ptopb(i)+.5*dzw(nz1)*gravity* &
+ (rr(nz1,i)+(rr(nz1,i)+rb(nz1,i))*scalars(index_qv,nz1,i))
+ do k=nz1-1,1,-1
+ pp(k,i) = pp(k+1,i)+dzu(k+1)*gravity* &
+ (fzm(k)*(rr(k ,i)+(rr(k ,i)+rb(k ,i))*scalars(index_qv,k ,i)) &
+ +fzp(k)*(rr(k+1,i)+(rr(k+1,i)+rb(k+1,i))*scalars(index_qv,k+1,i)))
+ end do
+ do k=1,nz1
+ rt(k,i) = (pp(k,i)/(rgas*zz(k,i)) &
+ -rtb(k,i)*(p(k,i)-pb(k,i)))/p(k,i)
+ p (k,i) = (zz(k,i)*(rgas/p0)*(rtb(k,i)+rt(k,i)))**rcv
+ rr(k,i) = (rt(k,i)-rb(k,i)*(t(k,i)-tb(k,i)))/t(k,i)
+ end do
+!
+! update water vapor mixing ratio from humidity profile
+!
+ do k=1,nz1
+ temp = p(k,i)*t(k,i)
+ pres = p0*p(k,i)**(1./rcp)
+ qvs = 380.*exp(17.27*(temp-273.)/(temp-36.))/pres
+ scalars(index_qv,k,i) = min(0.014_RKIND,rh(k,i)*qvs)
+ end do
+
+ do k=1,nz1
+ t (k,i) = t(k,i)*(1.+1.61*scalars(index_qv,k,i))
+ end do
+ do k=2,nz1
+ cqw(k,i) = 1./(1.+.5*( scalars(index_qv,k-1,i) &
+ +scalars(index_qv,k ,i)))
+ end do
+
+ end do ! loop over cells
+
+ end do ! iteration loop
+!----------------------------------------------------------------------
+!
+ write(0,*) ' *** sounding for the simulation ***'
+ write(0,*) ' z theta pres qv rho_m u rr'
+ do k=1,nz1
+ write(0,'(8(f14.9,2x))') .5*(zgrid(k,1)+zgrid(k+1,1))/1000., &
+ t(k,1)/(1.+1.61*scalars(index_qv,k,1)), &
+ .01*p0*p(k,1)**(1./rcp), &
+ 1000.*scalars(index_qv,k,1), &
+ (rb(k,1)+rr(k,1))*(1.+scalars(index_qv,k,1)), &
+ grid % u_init % array(k), rr(k,1)
+ end do
+
+ do i=1,grid % ncells
+ do k=1,nz1
+ rho_zz(k,i) = rb(k,i)+rr(k,i)
+ end do
+
+ do k=1,nz1
+ grid % t_init % array(k,i) = t(k,i)
+ end do
+ end do
+
+ do i=1,grid % nEdges
+ cell1 = grid % CellsOnEdge % array(1,i)
+ cell2 = grid % CellsOnEdge % array(2,i)
+ if(cell1 <= nCellsSolve .or. cell2 <= nCellsSolve) then
+ do k=1,nz1
+ ru (k,i) = 0.5*(rho_zz(k,cell1)+rho_zz(k,cell2))*u(k,i)
+ end do
+ end if
+ end do
+
+!
+! pre-calculation z-metric terms in omega eqn.
+!
+ do iEdge = 1,grid % nEdges
+ cell1 = CellsOnEdge(1,iEdge)
+ cell2 = CellsOnEdge(2,iEdge)
+ if (cell1 <= nCellsSolve .or. cell2 <= nCellsSolve ) then
+
+ do k = 1, grid%nVertLevels
+
+ if (config_theta_adv_order == 2) then
+!! test for metric consistency - forces 2nd order metrics with 4th order advection
+! if (config_theta_adv_order == 4) then
+
+ z_edge = (zgrid(k,cell1)+zgrid(k,cell2))/2.
+
+ else !theta_adv_order == 3 or 4
+
+ d2fdx2_cell1 = deriv_two(1,1,iEdge) * zgrid(k,cell1)
+ d2fdx2_cell2 = deriv_two(1,2,iEdge) * zgrid(k,cell2)
+ do i=1, grid % nEdgesOnCell % array (cell1)
+ if ( grid % CellsOnCell % array (i,cell1) > 0) &
+ d2fdx2_cell1 = d2fdx2_cell1 + deriv_two(i+1,1,iEdge) * zgrid(k,grid % CellsOnCell % array (i,cell1))
+ end do
+ do i=1, grid % nEdgesOnCell % array (cell2)
+ if ( grid % CellsOnCell % array (i,cell2) > 0) &
+ d2fdx2_cell2 = d2fdx2_cell2 + deriv_two(i+1,2,iEdge) * zgrid(k,grid % CellsOnCell % array (i,cell2))
+ end do
+
+ z_edge = 0.5*(zgrid(k,cell1) + zgrid(k,cell2)) &
+ - (grid % dcEdge % array(iEdge) **2) * (d2fdx2_cell1 + d2fdx2_cell2) / 12.
+
+ if (config_theta_adv_order == 3) then
+ z_edge3 = - (grid % dcEdge % array(iEdge) **2) * (d2fdx2_cell1 - d2fdx2_cell2) / 12.
+ else
+ z_edge3 = 0.
+ end if
+
+ end if
+
+ zb(k,1,iEdge) = (z_edge-zgrid(k,cell1))*dvEdge(iEdge)/AreaCell(cell1)
+ zb(k,2,iEdge) = (z_edge-zgrid(k,cell2))*dvEdge(iEdge)/AreaCell(cell2)
+ zb3(k,1,iEdge)= z_edge3*dvEdge(iEdge)/AreaCell(cell1)
+ zb3(k,2,iEdge)= z_edge3*dvEdge(iEdge)/AreaCell(cell2)
+
+! if (k /= 1) then
+! zf(k,1,iEdge) = ( fzm(k)*zz(k,cell1)+fzp(k)*zz(k-1,cell1) )*zb(k,1,iEdge)
+! zf(k,2,iEdge) = ( fzm(k)*zz(k,cell2)+fzp(k)*zz(k-1,cell2) )*zb(k,2,iEdge)
+! zf3(k,1,iEdge)= ( fzm(k)*zz(k,cell1)+fzp(k)*zz(k-1,cell1) )*zb3(k,1,iEdge)
+! zf3(k,2,iEdge)= ( fzm(k)*zz(k,cell2)+fzp(k)*zz(k-1,cell2) )*zb3(k,2,iEdge)
+! end if
+
+ end do
+
+ end if
+ end do
+
+! for including terrain
+ state % w % array(:,:) = 0.0
+ diag % rw % array(:,:) = 0.0
+
+!
+! calculation of omega, rw = zx * ru + zz * rw
+!
+
+! do iEdge = 1,grid % nEdges
+
+! cell1 = CellsOnEdge(1,iEdge)
+! cell2 = CellsOnEdge(2,iEdge)
+
+! if (cell1 <= nCellsSolve .or. cell2 <= nCellsSolve ) then
+! do k = 2, grid%nVertLevels
+! flux = (fzm(k)*ru(k,iEdge)+fzp(k)*ru(k-1,iEdge))
+! diag % rw % array(k,cell2) = diag % rw % array(k,cell2) + zf(k,2,iEdge)*flux
+! diag % rw % array(k,cell1) = diag % rw % array(k,cell1) - zf(k,1,iEdge)*flux
+
+! if (config_theta_adv_order ==3) then
+! diag % rw % array(k,cell2) = diag % rw % array(k,cell2) &
+! - sign(1.0_RKIND,ru(k,iEdge))*config_coef_3rd_order*zf3(k,2,iEdge)*flux
+! diag % rw % array(k,cell1) = diag % rw % array(k,cell1) &
+! + sign(1.0_RKIND,ru(k,iEdge))*config_coef_3rd_order*zf3(k,1,iEdge)*flux
+! end if
+
+! end do
+! end if
+
+! end do
+
+ ! Compute w from rho_zz and rw
+ do iCell=1,grid%nCells
+ do k=2,grid%nVertLevels
+ state % w % array(k,iCell) = diag % rw % array(k,iCell) &
+ / (fzp(k) * state % rho_zz % array(k-1,iCell) + fzm(k) * state % rho_zz % array(k,iCell))
+ end do
+ end do
+
+
+ do iEdge=1,grid % nEdges
+ grid % fEdge % array(iEdge) = 0.
+ end do
+
+ do iVtx=1,grid % nVertices
+ grid % fVertex % array(iVtx) = 0.
+ end do
+
+ !
+ ! Compute mass fluxes tangential to each edge (i.e., through the faces of dual grid cells)
+ !
+ diag % v % array(:,:) = 0.0
+ do iEdge = 1, grid%nEdges
+ do i=1,nEdgesOnEdge(iEdge)
+ eoe = edgesOnEdge(i,iEdge)
+ if (eoe > 0) then
+ do k = 1, grid%nVertLevels
+ diag % v % array(k,iEdge) = diag % v %array(k,iEdge) + weightsOnEdge(i,iEdge) * state % u % array(k, eoe)
+ end do
+ end if
+ end do
+ end do
+
+! do k=1,grid%nVertLevels
+! write(0,*) ' k,u_init, t_init, qv_init ',k,grid % u_init % array(k),grid % t_init% array(k),grid % qv_init % array(k)
+! end do
+
+ ! Compute rho and theta from rho_zz and theta_m
+ do iCell=1,grid%nCells
+ do k=1,grid%nVertLevels
+ diag % rho % array(k,iCell) = state % rho_zz % array(k,iCell) * zz(k,iCell)
+ diag % theta % array(k,iCell) = state % theta_m % array(k,iCell) / (1.0 + 1.61 * scalars(index_qv,k,iCell))
+ end do
+ end do
+
+! MGD FOR 3-1:
+! zt = 10000.0
+! nVertLevels = 10
+! X = 125
+! dt = 12.
+! nso = 8
+! 2nd-order horiz mixing = 50.0
+
+ end subroutine init_atm_test_case_reduced_radius
+
+
+!--------------------- TEST CASE 9 -----------------------------------------------
+
+
+ subroutine init_atm_test_case_resting_atmosphere(grid, state, diag, test_case)
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ! Set up the resting atmosphere test case with terrain
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ implicit none
+
+ type (mesh_type), intent(inout) :: grid
+ type (state_type), intent(inout) :: state
+ type (diag_type), intent(inout) :: diag
+ integer, intent(in) :: test_case
+
+ real (kind=RKIND), parameter :: t0=300., alpha=0.
+ real (kind=RKIND) :: hm
+
+ real (kind=RKIND), dimension(:), pointer :: rdzw, dzu, rdzu, fzm, fzp
+ real (kind=RKIND), dimension(:,:), pointer :: zgrid, zx, zz, hx, cqw
+ real (kind=RKIND), dimension(:,:), pointer :: ppb, pb, rho_zz, rb, rr, tb, rtb, p, pp, dss, t, rt, u, ru
+ real (kind=RKIND), dimension(:,:,:), pointer :: scalars, deriv_two, zb, zb3
+
+ ! Temporary variables, needed only when computing the tangential velocity v.
+ integer :: eoe, j
+ integer, dimension(:), pointer :: nEdgesOnEdge
+ integer, dimension(:,:), pointer :: edgesOnEdge, CellsOnEdge, cellsOnCell, edgesOnCell
+ integer, dimension(:), pointer :: nEdgesOnCell
+ real (kind=RKIND), dimension(:), pointer :: dvEdge, dcedge, AreaCell, xCell, yCell
+ real (kind=RKIND), dimension(:,:), pointer :: weightsOnEdge
+
+ integer :: iCell, iCell1, iCell2 , iEdge, vtx1, vtx2, ivtx, i, k, nz, nz1, itr, itrp, cell1, cell2, nCellsSolve
+ integer :: index_qv
+
+ real (kind=RKIND) :: ptop, p0, pis, flux, d2fdx2_cell1, d2fdx2_cell2
+ real(kind=RKIND), dimension(:), pointer :: hs, hs1
+
+ real (kind=RKIND) :: ztemp, zd, zt, dz, str, zh, hmax
+
+ real (kind=RKIND), dimension(grid % nVertLevels, grid % nCells) :: rh
+ real (kind=RKIND) :: es, qvs, xnutr, ptemp
+ integer :: iter, nsm, kz
+
+ type (field1DReal), pointer :: tempField
+ type (field1DReal), target :: tempFieldTarget
+
+ type (block_type), pointer :: block
+ type (parallel_info), pointer :: parinfo
+ type (dm_info), pointer :: dminfo
+
+ real (kind=RKIND), dimension(grid % nVertLevels + 1 ) :: zc, zw, ah
+ real (kind=RKIND), dimension(grid % nVertLevels ) :: zu, dzw, rdzwp, rdzwm
+ real (kind=RKIND), allocatable, dimension(:) :: psiVertex
+
+ real (kind=RKIND) :: d1, d2, d3, cof1, cof2, cf1, cf2, cf3
+ real (kind=RKIND) :: um, us, rcp, rcv, gamma, xa, zinb, zint, tinv, th_inb, th_int
+ real (kind=RKIND) :: xmid, temp, pres, a_scale, rad, shear, tsurf, usurf, sm0, dzmina, dzmina_global, dzminf
+
+ real (kind=RKIND) :: xi, yi, r1m, r2m, xc, yc, xla, zinv, xn2, xn2m, xn2l, sm, dzh, dzht, dzmin, z_edge, z_edge3
+
+ integer, dimension(grid % nCells, 2) :: next_cell
+ real (kind=RKIND), dimension(grid % nCells) :: pitop, ptopb
+ logical, parameter :: hybrid = .false.
+! logical, parameter :: hybrid = .true.
+
+ block => grid % block
+ parinfo => block % parinfo
+ dminfo => block % domain % dminfo
+
+
+ !
+ ! Scale all distances
+ !
+ a_scale = grid % sphere_radius
+
+ grid % xCell % array = grid % xCell % array * a_scale
+ grid % yCell % array = grid % yCell % array * a_scale
+ grid % zCell % array = grid % zCell % array * a_scale
+ grid % xVertex % array = grid % xVertex % array * a_scale
+ grid % yVertex % array = grid % yVertex % array * a_scale
+ grid % zVertex % array = grid % zVertex % array * a_scale
+ grid % xEdge % array = grid % xEdge % array * a_scale
+ grid % yEdge % array = grid % yEdge % array * a_scale
+ grid % zEdge % array = grid % zEdge % array * a_scale
+ grid % dvEdge % array = grid % dvEdge % array * a_scale
+ grid % dcEdge % array = grid % dcEdge % array * a_scale
+ grid % areaCell % array = grid % areaCell % array * a_scale**2.0
+ grid % areaTriangle % array = grid % areaTriangle % array * a_scale**2.0
+ grid % kiteAreasOnVertex % array = grid % kiteAreasOnVertex % array * a_scale**2.0
+
+ weightsOnEdge => grid % weightsOnEdge % array
+ nEdgesOnEdge => grid % nEdgesOnEdge % array
+ edgesOnEdge => grid % edgesOnEdge % array
+ dvEdge => grid % dvEdge % array
+ dcEdge => grid % dcEdge % array
+ AreaCell => grid % AreaCell % array
+ CellsOnEdge => grid % CellsOnEdge % array
+ cellsOnCell => grid % cellsOnCell % array
+ nEdgesOnCell => grid % nEdgesOnCell % array
+ edgesOnCell => grid % edgesOnCell % array
+ deriv_two => grid % deriv_two % array
+
+ nz1 = grid % nVertLevels
+ nz = nz1 + 1
+ nCellsSolve = grid % nCellsSolve
+
+ zgrid => grid % zgrid % array
+ zb => grid % zb % array
+ zb3 => grid % zb3 % array
+ rdzw => grid % rdzw % array
+ dzu => grid % dzu % array
+ rdzu => grid % rdzu % array
+ fzm => grid % fzm % array
+ fzp => grid % fzp % array
+ zx => grid % zx % array
+ zz => grid % zz % array
+ hx => grid % hx % array
+ dss => grid % dss % array
+
+ xCell => grid % xCell % array
+ yCell => grid % yCell % array
+
+ ppb => diag % pressure_base % array
+ pb => diag % exner_base % array
+ rb => diag % rho_base % array
+ tb => diag % theta_base % array
+ rtb => diag % rtheta_base % array
+ p => diag % exner % array
+ cqw => diag % cqw % array
+
+ rho_zz => state % rho_zz % array
+
+ pp => diag % pressure_p % array
+ rr => diag % rho_p % array
+ t => state % theta_m % array
+ rt => diag % rtheta_p % array
+ u => state % u % array
+ ru => diag % ru % array
+
+ scalars => state % scalars % array
+
+ index_qv = state % index_qv
+
+ scalars(:,:,:) = 0.
+
+ call atm_initialize_advection_rk(grid)
+ call atm_initialize_deformation_weights(grid)
+
+ xnutr = 0.1
+ zd = 12000.
+
+ p0 = 1.e+05
+ rcp = rgas/cp
+ rcv = rgas/(cp-rgas)
+
+ ! metrics for hybrid coordinate and vertical stretching
+ str = 1.0
+
+ zt = 12000.
+
+ dz = zt/float(nz1)
+! write(0,*) ' dz = ',dz
+
+ do k=1,nz
+ zw(k) = (real(k-1)/real(nz1))**str*zt
+ if(k.gt.1) dzw(k-1) = zw(k)-zw(k-1)
+ end do
+
+! ah(k) governs the transition between terrain-following
+! and pure height coordinates
+! ah(k) = 1 is a smoothed terrain-following coordinate
+! ah(k) = 1.-zw(k)/zt is the basic terrain-following coordinate
+! ah(k) = 0 is a height coordinate
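+! In every case the blending is applied below as
+!     zgrid(k,iCell) = zw(k) + ah(k)*hx(k,iCell)
+! so ah(k) = 1 follows the terrain exactly and ah(k) = 0 gives flat height levels.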
+
+ write(6,*) ' hybrid = ',hybrid
+ kz = nz
+
+ if(hybrid) then
+
+ zh = zt
+
+ do k=1,nz
+ if(zw(k).lt.zh) then
+
+! if(k.le.2) then
+! ah(k) = 1.
+! else
+! ah(k) = cos(.5*pii*(zw(k)-zw(2))/zh)**6
+! end if
+
+! ah(k) = cos(.5*pii*zw(k)/zh)**6
+ ah(k) = cos(.5*pii*zw(k)/zh)**2
+!
+! ah(k) = ah(k)*(1.-zw(k)/zt)
+!
+ else
+ ah(k) = 0.
+ kz = min(kz,k)
+ end if
+ end do
+
+ else
+
+ do k=1,nz
+ ah(k) = 1.-zw(k)/zt
+ end do
+
+ end if
+
+
+ do k=1,nz
+ write(6,*) k,zw(k), ah(k)
+ end do
+
+ write(0,*) 'EARTH RADIUS = ', grid % sphere_radius
+
+! MGD 2-0-0, not used in 2-0-1
+ if (trim(config_dcmip_case) == '2-0-0') then
+ ! for hx computation
+ r1m = .75*pii
+ r2m = pii/16.
+ end if
+
+! MGD 2-0-1, not used in 2-0-0
+ if (trim(config_dcmip_case) == '2-0-1') then
+! setting for terrain
+! xa = pii/16. ! for specifying mtn width in degrees
+ xa = pii*grid%sphere_radius/16. ! corresponds to ~11 grid intervals across entire mtn with 2 deg res
+ end if
+
+
+! MGD both 2-0-0 and 2-0-1
+ hm = 2000.0
+
+ do iCell=1,grid % nCells
+
+
+ if (trim(config_dcmip_case) == '2-0-0') then
+! Comb mountain as specified for DCMIP case 2.0
+! MGD BEGIN 2-0-0
+ xi = grid % lonCell % array(iCell)
+ yi = grid % latCell % array(iCell)
+
+ rad = acos(cos(xi)*cos(yi))
+
+ if (rad.lt.r1m) THEN
+ hx(1,iCell) = hm*cos(.5*pii*rad/r1m)**2.*cos(pii*rad/r2m)**2
+ else
+ hx(1,iCell) = 0.
+ end if
+! MGD END 2-0-0
+ end if
+
+ if (trim(config_dcmip_case) == '2-0-1') then
+! cosine**2 ridge
+! MGD BEGIN 2-0-1
+
+ xi = grid % lonCell % array(iCell)
+ yi = grid % latCell % array(iCell)
+ xc = sphere_distance(yi, xi, yi, 0., grid % sphere_radius)
+ yc = sphere_distance(yi, xi, 0., xi, grid % sphere_radius)
+
+ if (abs(xc).ge.xa) then ! for mtn ridge with uniform width in km
+! if (abs(xi).ge.xa.and.abs(2.*pii-xi).ge.xa) then ! for mtn ridge with uniform width in degrees
+ hx(1,iCell) = 0.
+ else
+! for mtn ridge with uniform width in km
+ hx(1,iCell) = hm*cos(.5*pii*xc/xa)**2*cos(yc/grid % sphere_radius)
+! for mtn ridge with uniform width in degrees
+! hx(1,iCell) = hm*cos(.5*pii*xi/xa)**2*cos(yc/grid % sphere_radius)
+ end if
+! MGD END 2-0-1
+ end if
+
+ hx(:,iCell) = hx(1,iCell)
+
+ hx(nz,iCell) = zt
+
+ end do
+
+ hmax = maxval(hx(1,:))
+ write(6,*) "max terrain height = ",hmax
+
+ if (config_smooth_surfaces) then
+
+ write(0,*) ' '
+ write(0,*) ' Smoothing vertical coordinate surfaces'
+ write(0,*) ' '
+
+ allocate(hs (grid % nCells+1))
+ allocate(hs1(grid % nCells+1))
+
+ dzmin = 0.5
+ sm0 = 0.5
+ nsm = 30
+
+ write(6,*) 'dzmin = ',dzmin,' sm0 = ',sm0,' nsm = ',nsm
+
+ do k=2,kz-1
+ hx(k,:) = hx(k-1,:)
+ dzminf = zw(k)-zw(k-1)
+
+! dzmin = max(0.5_RKIND,1.-.5*zw(k)/hm)
+
+ sm = sm0*max( min(.5*zw(k)/hm,1.0_RKIND), .05 )
+
+ do i=1,nsm
+ do iCell=1,grid % nCells
+ hs1(iCell) = 0.
+ do j = 1,nEdgesOnCell(iCell)
+
+ hs1(iCell) = hs1(iCell) + dvEdge(edgesOnCell(j,iCell)) &
+ / dcEdge(edgesOnCell(j,iCell)) &
+ * (hx(k,cellsOnCell(j,iCell))-hx(k,iCell))
+ end do
+ hs1(iCell) = hx(k,iCell) + sm*hs1(iCell)
+
+ hs(iCell) = 0.
+ ! do j = 1,nEdgesOnCell(iCell)
+ ! hs(iCell) = hs(iCell) + dvEdge(edgesOnCell(j,iCell)) &
+ ! / dcEdge(edgesOnCell(j,iCell)) &
+ ! * (hs1(cellsOnCell(j,iCell))-hs1(iCell))
+ ! end do
+ hs(iCell) = hs1(iCell) - 0.*hs(iCell)
+
+ end do
+
+ tempField => tempFieldTarget
+ tempField % block => block
+ tempField % dimSizes(1) = grid % nCells
+ tempField % sendList => parinfo % cellsToSend
+ tempField % recvList => parinfo % cellsToRecv
+ tempField % copyList => parinfo % cellsToCopy
+ tempField % array => hs
+ tempField % prev => null()
+ tempField % next => null()
+
+ call mpas_dmpar_exch_halo_field(tempField)
+
+ ! dzmina = minval(hs(:)-hx(k-1,:))
+ dzmina = minval(zw(k)+ah(k)*hs(1:grid%nCellsSolve)-zw(k-1)-ah(k-1)*hx(k-1,1:grid%nCellsSolve))
+ call mpas_dmpar_min_real(dminfo, dzmina, dzmina_global)
+ ! write(0,*) ' k,i, dzmina, dzmin, zw(k)-zw(k-1) ', k,i, dzmina, dzmin, zw(k)-zw(k-1)
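+ ! accept this smoothing pass only while the minimum layer thickness over
+ ! owned cells (reduced to a global minimum across tasks) stays above
+ ! dzmin times the unsmoothed thickness zw(k)-zw(k-1)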
+ if (dzmina_global >= dzmin*(zw(k)-zw(k-1))) then
+ hx(k,:)=hs(:)
+ dzminf = dzmina_global
+ else
+ exit
+ end if
+ end do
+ write(0,*) k,i,sm,dzminf/(zw(k)-zw(k-1)),dzmina/(zw(k)-zw(k-1))
+ end do
+
+ do k=kz,nz
+ hx(k,:) = 0.
+ end do
+
+ deallocate(hs )
+ deallocate(hs1)
+
+ else
+
+ do k=2,nz1
+ dzmina = minval(zw(k)+ah(k)*hx(k,:)-zw(k-1)-ah(k-1)*hx(k-1,:))
+ write(0,*) k,dzmina/(zw(k)-zw(k-1))
+ end do
+
+ end if
+
+
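+! zgrid holds the physical heights of the coordinate surfaces, and zz is the
+! vertical Jacobian d(zeta)/dz of the transform, evaluated across each layer.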
+ do iCell=1,grid % nCells
+ do k=1,nz
+ zgrid(k,iCell) = zw(k) + ah(k)*hx(k,iCell)
+ end do
+ do k=1,nz1
+ zz (k,iCell) = (zw(k+1)-zw(k))/(zgrid(k+1,iCell)-zgrid(k,iCell))
+ end do
+ end do
+
+ do i=1, grid % nEdges
+ iCell1 = grid % CellsOnEdge % array(1,i)
+ iCell2 = grid % CellsOnEdge % array(2,i)
+ do k=1,nz
+ zx (k,i) = (zgrid(k,iCell2)-zgrid(k,iCell1)) / grid % dcEdge % array(i)
+ end do
+ end do
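+! dss is the damping-layer coefficient profile: zero below zd, increasing
+! as xnutr*sin(0.5*pii*(z-zd)/(zt-zd))**2 between zd and the model top zt.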
+ do i=1, grid % nCells
+ do k=1,nz1
+ ztemp = .5*(zgrid(k+1,i)+zgrid(k,i))
+ dss(k,i) = 0.
+ ztemp = zgrid(k,i)
+ if(ztemp.gt.zd+.1) then
+ dss(k,i) = dss(k,i)+xnutr*sin(.5*pii*(ztemp-zd)/(zt-zd))**2
+ end if
+ end do
+ enddo
+
+ write(0,*) ' grid metrics setup complete '
+
+ do k=1,nz1
+ dzw (k) = zw(k+1)-zw(k)
+ rdzw(k) = 1./dzw(k)
+ zu(k ) = .5*(zw(k)+zw(k+1))
+ end do
+ do k=2,nz1
+ dzu (k) = .5*(dzw(k)+dzw(k-1))
+ rdzu(k) = 1./dzu(k)
+ fzp (k) = .5* dzw(k )/dzu(k)
+ fzm (k) = .5* dzw(k-1)/dzu(k)
+ rdzwp(k) = dzw(k-1)/(dzw(k )*(dzw(k)+dzw(k-1)))
+ rdzwm(k) = dzw(k )/(dzw(k-1)*(dzw(k)+dzw(k-1)))
+ end do
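+! fzm(k) and fzp(k) are the linear interpolation weights from the layer
+! midpoints k and k-1 to w-level k; by construction fzm(k) + fzp(k) = 1.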
+
+! d1 = .5*dzw(1)
+! d2 = dzw(1)+.5*dzw(2)
+! d3 = dzw(1)+dzw(2)+.5*dzw(3)
+! cf1 = d2*d3*(d3-d2)/(d2*d3*(d3-d2)+d1*d3*(d1-d3)+d1*d2*(d2-d1))
+! cf2 = d1*d3*(d1-d3)/(d2*d3*(d3-d2)+d1*d3*(d1-d3)+d1*d2*(d2-d1))
+! cf3 = d1*d2*(d2-d1)/(d2*d3*(d3-d2)+d1*d3*(d1-d3)+d1*d2*(d2-d1))
+
+ cof1 = (2.*dzu(2)+dzu(3))/(dzu(2)+dzu(3))*dzw(1)/dzu(2)
+ cof2 = dzu(2) /(dzu(2)+dzu(3))*dzw(1)/dzu(3)
+ cf1 = fzp(2) + cof1
+ cf2 = fzm(2) - cof1 - cof2
+ cf3 = cof2
+
+ grid % cf1 % scalar = cf1
+ grid % cf2 % scalar = cf2
+ grid % cf3 % scalar = cf3
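+! cf1, cf2 and cf3 combine the three lowest layer-midpoint values to
+! extrapolate a field to the surface; note that cf1 + cf2 + cf3 = 1.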
+
+ um = 0.
+ gamma = .0065 ! temperature lapse rate in K/m (6.5 K/km)
+
+! MGD BEGIN 2-0-0
+ if (trim(config_dcmip_case) == '2-0-0') then
+ zinb = zt ! no inversion layer
+ zint = zt ! no inversion layer
+ end if
+! MGD END 2-0-0
+! MGD BEGIN 2-0-1
+ if (trim(config_dcmip_case) == '2-0-1') then
+ zinb = 3000. ! bottom of inversion layer
+ zint = 5000. ! top of inversion layer
+ end if
+! MGD END 2-0-1
+
+ ! compute the intermediate T and theta used to build a sounding that includes an inversion layer
+ tinv = t0-gamma*zinb
+ th_inb = t0*(1.-gamma*zinb/t0)**(1.-gravity/(cp*gamma))
+ th_int = th_inb*exp((gravity*(zint-zinb))/(cp*tinv))
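+ ! The resulting profile is piecewise: constant lapse rate gamma below zinb,
+ ! an isothermal inversion layer (temperature tinv) between zinb and zint,
+ ! and a constant-lapse-rate layer again above zint.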
+ write(6,*) ' zinb = ',zinb,' zint = ',zint,' tinv = ',tinv,'th_inb = ',th_inb,' th_int = ',th_int
+
+ do i=1,grid % nCells
+
+ pis = 1.
+
+ do k=1,nz1
+ ztemp = .5*(zgrid(k,i)+zgrid(k+1,i))
+
+! Isothermal reference sounding
+
+ tb(k,i) = t0*exp(gravity*ztemp/(cp*t0))
+
+! Low level inversion initial sounding
+
+ if(ztemp.le.zinb) then
+ t (k,i) = t0*(1.-gamma*ztemp/t0)**(1.-gravity/(cp*gamma))
+ else if(ztemp.le.zint) then
+ t (k,i) = th_inb*exp((gravity*(ztemp-zinb))/(cp*tinv))
+ else
+ t (k,i) = th_int*(1.-gamma*(ztemp-zint)/tinv)**(1.-gravity/(cp*gamma))
+ end if
+
+ rh(k,i) = 0.
+ end do
+ end do
+
+ !
+ ! Initialize wind field
+ !
+ do iEdge=1,grid % nEdges
+ do k=1,nz1
+ u(k,iEdge) = um
+ end do
+ end do
+
+ do k=1,nz1
+ grid % u_init % array(k) = um
+ end do
+
+!
+! reference sounding based on dry atmosphere
+!
+ do i=1, grid % nCells
+
+ pis = 1.
+
+ pitop(i) = pis-.5*dzw(1)*gravity/(cp*tb(1,1)*zz(1,1))
+ do k=2,nz1
+ pitop(i) = pitop(i)-dzu(k)*gravity/(cp*(fzm(k)*tb(k,1)+fzp(k)*tb(k-1,1)) &
+ *(fzm(k)*zz(k,1)+fzp(k)*zz(k-1,1)))
+ end do
+ pitop(i) = pitop(i)-.5*dzw(nz1)*gravity/(cp*tb(nz1,1)*zz(nz1,1))
+ ptopb(i) = p0*pitop(i)**(1./rcp)
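+ ! pitop is the Exner function at the model top; since rcp = rgas/cp,
+ ! ptopb = p0*pitop**(1./rcp) recovers the corresponding pressure.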
+
+ pb(nz1,i) = pitop(i)+.5*dzw(nz1)*gravity/(cp*tb(nz1,i)*zz(nz1,i))
+ p (nz1,i) = pitop(i)+.5*dzw(nz1)*gravity/(cp*t (nz1,i)*zz(nz1,i))
+ do k=nz1-1,1,-1
+ pb(k,i) = pb(k+1,i) + dzu(k+1)*gravity/(cp*.5*(tb(k,i)+tb(k+1,i)) &
+ *.5*(zz(k,i)+zz(k+1,i)))
+ p (k,i) = p (k+1,i) + dzu(k+1)*gravity/(cp*.5*(t (k,i)+t (k+1,i)) &
+ *.5*(zz(k,i)+zz(k+1,i)))
+ end do
+ do k=1,nz1
+ rb (k,i) = pb(k,i)**(1./rcv)/((rgas/p0)*tb(k,i)*zz(k,i))
+ rtb(k,i) = rb(k,i)*tb(k,i)
+ rr (k,i) = p (k,i)**(1./rcv)/((rgas/p0)*t (k,i)*zz(k,i))-rb(k,i)
+ cqw(k,i) = 1.
+ end do
+ end do
+
+ write(0,*) ' ***** base state sounding ***** '
+ write(0,*) 'k pb p rb rtb rr tb t'
+ do k=1,grid%nVertLevels
+ write(0,'(i2,7(2x,f14.9))') k,pb(k,1),p(k,1),rb(k,1),rtb(k,1),rr(k,1),tb(k,1),t(k,1)
+ end do
+
+ scalars(index_qv,:,:) = 0.
+!!!
+!-------------------------------------------------------------------
+! ITERATIONS TO CONVERGE MOIST SOUNDING
+ do itr=1,30
+
+ do i = 1, grid % nCells
+
+ pis = 1.
+
+ pitop(i) = pis-.5*dzw(1)*gravity*(1.+scalars(index_qv,1,1))/(cp*t(1,1)*zz(1,1))
+
+ do k=2,nz1
+ pitop(i) = pitop(i)-dzu(k)*gravity/(cp*cqw(k,1)*(fzm(k)*t (k,1)+fzp(k)*t (k-1,1)) &
+ *(fzm(k)*zz(k,1)+fzp(k)*zz(k-1,1)))
+ end do
+ pitop(i) = pitop(i) - .5*dzw(nz1)*gravity*(1.+scalars(index_qv,nz1,1))/(cp*t(nz1,1)*zz(nz1,1))
+ ptop = p0*pitop(i)**(1./rcp)
+
+ pp(nz1,i) = ptop-ptopb(i)+.5*dzw(nz1)*gravity* &
+ (rr(nz1,i)+(rr(nz1,i)+rb(nz1,i))*scalars(index_qv,nz1,i))
+ do k=nz1-1,1,-1
+ pp(k,i) = pp(k+1,i)+dzu(k+1)*gravity* &
+ (fzm(k)*(rr(k ,i)+(rr(k ,i)+rb(k ,i))*scalars(index_qv,k ,i)) &
+ +fzp(k)*(rr(k+1,i)+(rr(k+1,i)+rb(k+1,i))*scalars(index_qv,k+1,i)))
+ end do
+ do k=1,nz1
+ rt(k,i) = (pp(k,i)/(rgas*zz(k,i)) &
+ -rtb(k,i)*(p(k,i)-pb(k,i)))/p(k,i)
+ p (k,i) = (zz(k,i)*(rgas/p0)*(rtb(k,i)+rt(k,i)))**rcv
+ rr(k,i) = (rt(k,i)-rb(k,i)*(t(k,i)-tb(k,i)))/t(k,i)
+ end do
+!
+! update water vapor mixing ratio from humidity profile
+!
+ do k=1,nz1
+ temp = p(k,i)*t(k,i)
+ pres = p0*p(k,i)**(1./rcp)
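+ ! saturation mixing ratio from a Tetens-type formula: the constant 380.
+ ! is approximately 0.622*610.78 Pa, so qvs ~ 0.622*es/pres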
+ qvs = 380.*exp(17.27*(temp-273.)/(temp-36.))/pres
+ scalars(index_qv,k,i) = min(0.014_RKIND,rh(k,i)*qvs)
+ end do
+
+ do k=1,nz1
+ t (k,i) = t(k,i)*(1.+1.61*scalars(index_qv,k,i))
+ end do
+ do k=2,nz1
+ cqw(k,i) = 1./(1.+.5*( scalars(index_qv,k-1,i) &
+ +scalars(index_qv,k ,i)))
+ end do
+
+ end do ! loop over cells
+
+ end do ! iteration loop
+!----------------------------------------------------------------------
+!
+ write(0,*) ' *** sounding for the simulation ***'
+ write(0,*) ' z temp theta pres rho_m u rr'
+ do k=1,nz1
+ write(0,'(8(f14.9,2x))') .5*(zgrid(k,1)+zgrid(k+1,1))/1000., &
+ t(k,1)/(1.+1.61*scalars(index_qv,k,1))*p(k,1), &
+ t(k,1)/(1.+1.61*scalars(index_qv,k,1)), &
+ .01*p0*p(k,1)**(1./rcp), &
+! 1000.*scalars(index_qv,k,1), &
+ (rb(k,1)+rr(k,1))*(1.+scalars(index_qv,k,1)), &
+ grid % u_init % array(k), rr(k,1)
+ end do
+
+ do i=1,grid % ncells
+ do k=1,nz1
+ rho_zz(k,i) = rb(k,i)+rr(k,i)
+ end do
+
+ do k=1,nz1
+ grid % t_init % array(k,i) = t(k,i)
+ end do
+ end do
+
+ do i=1,grid % nEdges
+ cell1 = grid % CellsOnEdge % array(1,i)
+ cell2 = grid % CellsOnEdge % array(2,i)
+ if(cell1 <= nCellsSolve .or. cell2 <= nCellsSolve) then
+ do k=1,nz1
+ ru (k,i) = 0.5*(rho_zz(k,cell1)+rho_zz(k,cell2))*u(k,i)
+ end do
+ end if
+ end do
+
+!
+! pre-calculation of the z-metric terms in the omega eqn.
+!
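+! zb holds the 2nd/4th-order edge z-metric weights and zb3 the 3rd-order
+! (upwinded) correction; they enter the omega (rw) diagnosis, which is
+! currently commented out below.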
+ do iEdge = 1,grid % nEdges
+ cell1 = CellsOnEdge(1,iEdge)
+ cell2 = CellsOnEdge(2,iEdge)
+ if (cell1 <= nCellsSolve .or. cell2 <= nCellsSolve ) then
+
+ do k = 1, grid%nVertLevels
+
+ if (config_theta_adv_order == 2) then
+!! test for metric consistency - forces 2nd order metrics with 4th order advection
+! if (config_theta_adv_order == 4) then
+
+ z_edge = (zgrid(k,cell1)+zgrid(k,cell2))/2.
+
+ else !theta_adv_order == 3 or 4
+
+ d2fdx2_cell1 = deriv_two(1,1,iEdge) * zgrid(k,cell1)
+ d2fdx2_cell2 = deriv_two(1,2,iEdge) * zgrid(k,cell2)
+ do i=1, grid % nEdgesOnCell % array (cell1)
+ if ( grid % CellsOnCell % array (i,cell1) > 0) &
+ d2fdx2_cell1 = d2fdx2_cell1 + deriv_two(i+1,1,iEdge) * zgrid(k,grid % CellsOnCell % array (i,cell1))
+ end do
+ do i=1, grid % nEdgesOnCell % array (cell2)
+ if ( grid % CellsOnCell % array (i,cell2) > 0) &
+ d2fdx2_cell2 = d2fdx2_cell2 + deriv_two(i+1,2,iEdge) * zgrid(k,grid % CellsOnCell % array (i,cell2))
+ end do
+
+ z_edge = 0.5*(zgrid(k,cell1) + zgrid(k,cell2)) &
+ - (grid % dcEdge % array(iEdge) **2) * (d2fdx2_cell1 + d2fdx2_cell2) / 12.
+
+ if (config_theta_adv_order == 3) then
+ z_edge3 = - (grid % dcEdge % array(iEdge) **2) * (d2fdx2_cell1 - d2fdx2_cell2) / 12.
+ else
+ z_edge3 = 0.
+ end if
+
+ end if
+
+ zb(k,1,iEdge) = (z_edge-zgrid(k,cell1))*dvEdge(iEdge)/AreaCell(cell1)
+ zb(k,2,iEdge) = (z_edge-zgrid(k,cell2))*dvEdge(iEdge)/AreaCell(cell2)
+ zb3(k,1,iEdge)= z_edge3*dvEdge(iEdge)/AreaCell(cell1)
+ zb3(k,2,iEdge)= z_edge3*dvEdge(iEdge)/AreaCell(cell2)
+
+! if (k /= 1) then
+! zf(k,1,iEdge) = ( fzm(k)*zz(k,cell1)+fzp(k)*zz(k-1,cell1) )*zb(k,1,iEdge)
+! zf(k,2,iEdge) = ( fzm(k)*zz(k,cell2)+fzp(k)*zz(k-1,cell2) )*zb(k,2,iEdge)
+! zf3(k,1,iEdge)= ( fzm(k)*zz(k,cell1)+fzp(k)*zz(k-1,cell1) )*zb3(k,1,iEdge)
+! zf3(k,2,iEdge)= ( fzm(k)*zz(k,cell2)+fzp(k)*zz(k-1,cell2) )*zb3(k,2,iEdge)
+! end if
+
+ end do
+
+ end if
+ end do
+
+! for including terrain
+ state % w % array(:,:) = 0.0
+ diag % rw % array(:,:) = 0.0
+
+!
+! calculation of omega, rw = zx * ru + zz * rw
+!
+
+! do iEdge = 1,grid % nEdges
+
+! cell1 = CellsOnEdge(1,iEdge)
+! cell2 = CellsOnEdge(2,iEdge)
+
+! if (cell1 <= nCellsSolve .or. cell2 <= nCellsSolve ) then
+! do k = 2, grid%nVertLevels
+! flux = (fzm(k)*ru(k,iEdge)+fzp(k)*ru(k-1,iEdge))
+! diag % rw % array(k,cell2) = diag % rw % array(k,cell2) + zf(k,2,iEdge)*flux
+! diag % rw % array(k,cell1) = diag % rw % array(k,cell1) - zf(k,1,iEdge)*flux
+
+! if (config_theta_adv_order ==3) then
+! diag % rw % array(k,cell2) = diag % rw % array(k,cell2) &
+! - sign(1.0_RKIND,ru(k,iEdge))*config_coef_3rd_order*zf3(k,2,iEdge)*flux
+! diag % rw % array(k,cell1) = diag % rw % array(k,cell1) &
+! + sign(1.0_RKIND,ru(k,iEdge))*config_coef_3rd_order*zf3(k,1,iEdge)*flux
+! end if
+
+! end do
+! end if
+
+! end do
+
+ ! Compute w from rho_zz and rw
+ do iCell=1,grid%nCells
+ do k=2,grid%nVertLevels
+ state % w % array(k,iCell) = diag % rw % array(k,iCell) &
+ / (fzp(k) * state % rho_zz % array(k-1,iCell) + fzm(k) * state % rho_zz % array(k,iCell))
+ end do
+ end do
+
+
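+ ! The Coriolis parameters fEdge and fVertex are zeroed below (nonrotating test case).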
+ do iEdge=1,grid % nEdges
+ grid % fEdge % array(iEdge) = 0.
+ end do
+
+ do iVtx=1,grid % nVertices
+ grid % fVertex % array(iVtx) = 0.
+ end do
+
+ !
+ ! Compute the velocity tangential to each edge (i.e., the flow through the faces of dual grid cells)
+ !
+ diag % v % array(:,:) = 0.0
+ do iEdge = 1, grid%nEdges
+ do i=1,nEdgesOnEdge(iEdge)
+ eoe = edgesOnEdge(i,iEdge)
+ if (eoe > 0) then
+ do k = 1, grid%nVertLevels
+ diag % v % array(k,iEdge) = diag % v %array(k,iEdge) + weightsOnEdge(i,iEdge) * state % u % array(k, eoe)
+ end do
+ end if
+ end do
+ end do
+
+! do k=1,grid%nVertLevels
+! write(0,*) ' k,u_init, t_init, qv_init ',k,grid % u_init % array(k),grid % t_init% array(k),grid % qv_init % array(k)
+! end do
+
+ ! Compute rho and theta from rho_zz and theta_m
+ do iCell=1,grid%nCells
+ do k=1,grid%nVertLevels
+ diag % rho % array(k,iCell) = state % rho_zz % array(k,iCell) * zz(k,iCell)
+ diag % theta % array(k,iCell) = state % theta_m % array(k,iCell) / (1.0 + 1.61 * scalars(index_qv,k,iCell))
+ end do
+ end do
+
+ end subroutine init_atm_test_case_resting_atmosphere
+
+
integer function nearest_cell(target_lat, target_lon, &
start_cell, &
nCells, maxEdges, nEdgesOnCell, cellsOnCell, latCell, lonCell)
Modified: branches/atmos_physics/src/core_nhyd_atmos/Registry
===================================================================
--- branches/atmos_physics/src/core_nhyd_atmos/Registry        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_nhyd_atmos/Registry        2012-10-27 00:29:44 UTC (rev 2281)
@@ -35,9 +35,9 @@
namelist logical nhyd_model config_newpx false
namelist real nhyd_model config_apvm_upwinding 0.5
namelist logical nhyd_model config_h_ScaleWithMesh false
+namelist integer nhyd_model config_num_halos 2
namelist real damping config_zd 22000.0
namelist real damping config_xnutr 0.0
-namelist integer dimensions config_nvertlevels 26
namelist character io config_input_name init.nc
namelist character io config_sfc_update_name sfc_update.nc
namelist character io config_output_name output.nc
@@ -69,7 +69,7 @@
dim FIFTEEN 15
dim TWENTYONE 21
dim R3 3
-dim nVertLevels namelist:config_nvertlevels
+dim nVertLevels nVertLevels
dim nVertLevelsP1 nVertLevels+1
%
Modified: branches/atmos_physics/src/core_nhyd_atmos/mpas_atm_advection.F
===================================================================
--- branches/atmos_physics/src/core_nhyd_atmos/mpas_atm_advection.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_nhyd_atmos/mpas_atm_advection.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -111,9 +111,9 @@
do i=1,n
advCells(i+1,iCell) = cell_list(i)
- xc(i) = grid % xCell % array(advCells(i+1,iCell))/a
- yc(i) = grid % yCell % array(advCells(i+1,iCell))/a
- zc(i) = grid % zCell % array(advCells(i+1,iCell))/a
+ xc(i) = grid % xCell % array(advCells(i+1,iCell))/grid%sphere_radius
+ yc(i) = grid % yCell % array(advCells(i+1,iCell))/grid%sphere_radius
+ zc(i) = grid % zCell % array(advCells(i+1,iCell))/grid%sphere_radius
end do
theta_abs(iCell) = pii/2. - sphere_angle( xc(1), yc(1), zc(1), &
@@ -131,8 +131,8 @@
xc(i+1), yc(i+1), zc(i+1), &
xc(ip2), yc(ip2), zc(ip2) )
- dl_sphere(i) = a*arc_length( xc(1), yc(1), zc(1), &
- xc(i+1), yc(i+1), zc(i+1) )
+ dl_sphere(i) = grid%sphere_radius*arc_length( xc(1), yc(1), zc(1), &
+ xc(i+1), yc(i+1), zc(i+1) )
end do
length_scale = 1.
@@ -262,12 +262,12 @@
if (ip1 > n-1) ip1 = 1
iEdge = grid % EdgesOnCell % array (i,iCell)
- xv1 = grid % xVertex % array(grid % verticesOnEdge % array (1,iedge))/a
- yv1 = grid % yVertex % array(grid % verticesOnEdge % array (1,iedge))/a
- zv1 = grid % zVertex % array(grid % verticesOnEdge % array (1,iedge))/a
- xv2 = grid % xVertex % array(grid % verticesOnEdge % array (2,iedge))/a
- yv2 = grid % yVertex % array(grid % verticesOnEdge % array (2,iedge))/a
- zv2 = grid % zVertex % array(grid % verticesOnEdge % array (2,iedge))/a
+ xv1 = grid % xVertex % array(grid % verticesOnEdge % array (1,iedge))/grid%sphere_radius
+ yv1 = grid % yVertex % array(grid % verticesOnEdge % array (1,iedge))/grid%sphere_radius
+ zv1 = grid % zVertex % array(grid % verticesOnEdge % array (1,iedge))/grid%sphere_radius
+ xv2 = grid % xVertex % array(grid % verticesOnEdge % array (2,iedge))/grid%sphere_radius
+ yv2 = grid % yVertex % array(grid % verticesOnEdge % array (2,iedge))/grid%sphere_radius
+ zv2 = grid % zVertex % array(grid % verticesOnEdge % array (2,iedge))/grid%sphere_radius
if ( grid % on_a_sphere ) then
call arc_bisect( xv1, yv1, zv1, &
@@ -825,16 +825,16 @@
! compute polynomial fit for this cell if all needed neighbors exist
if (grid % on_a_sphere) then
- xc(1) = grid % xCell % array(iCell)/a
- yc(1) = grid % yCell % array(iCell)/a
- zc(1) = grid % zCell % array(iCell)/a
+ xc(1) = grid % xCell % array(iCell)/grid%sphere_radius
+ yc(1) = grid % yCell % array(iCell)/grid%sphere_radius
+ zc(1) = grid % zCell % array(iCell)/grid%sphere_radius
do i=2,n
iv = grid % verticesOnCell % array(i-1,iCell)
- xc(i) = grid % xVertex % array(iv)/a
- yc(i) = grid % yVertex % array(iv)/a
- zc(i) = grid % zVertex % array(iv)/a
+ xc(i) = grid % xVertex % array(iv)/grid%sphere_radius
+ yc(i) = grid % yVertex % array(iv)/grid%sphere_radius
+ zc(i) = grid % zVertex % array(iv)/grid%sphere_radius
end do
theta_abs(iCell) = pii/2. - sphere_angle( xc(1), yc(1), zc(1), &
@@ -852,8 +852,8 @@
xc(i+1), yc(i+1), zc(i+1), &
xc(ip2), yc(ip2), zc(ip2) )
- dl_sphere(i) = a*arc_length( xc(1), yc(1), zc(1), &
- xc(i+1), yc(i+1), zc(i+1) )
+ dl_sphere(i) = grid%sphere_radius*arc_length( xc(1), yc(1), zc(1), &
+ xc(i+1), yc(i+1), zc(i+1) )
end do
length_scale = 1.
Modified: branches/atmos_physics/src/core_nhyd_atmos/mpas_atm_mpas_core.F
===================================================================
--- branches/atmos_physics/src/core_nhyd_atmos/mpas_atm_mpas_core.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_nhyd_atmos/mpas_atm_mpas_core.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -58,6 +58,8 @@
call atm_simulation_clock_init(domain, dt, startTimeStamp)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(1) % state % u)
+
block => domain % blocklist
do while (associated(block))
call atm_mpas_init_block(domain % dminfo, block, block % mesh, dt)
@@ -65,6 +67,10 @@
block => block % next
end do
+ call mpas_dmpar_exch_halo_field(domain % blocklist % diag % pv_edge)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % diag % ru)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % diag % rw)
+
current_outfile_frames = 0
if (config_sfc_update_interval /= "none") then
@@ -172,20 +178,12 @@
type (block_type), intent(inout) :: block
type (mesh_type), intent(inout) :: mesh
real (kind=RKIND), intent(in) :: dt
-
- call mpas_dmpar_exch_halo_field(block % state % time_levs(1) % state % u)
if (.not. config_do_restart .or. (config_do_restart .and. config_do_DAcycling)) then
call atm_init_coupled_diagnostics( block % state % time_levs(1) % state, block % diag, mesh)
end if
call atm_compute_solve_diagnostics(dt, block % state % time_levs(1) % state, block % diag, mesh)
- call mpas_dmpar_exch_halo_field(block % diag % pv_edge)
-
- call mpas_dmpar_exch_halo_field(block % diag % ru)
-
- call mpas_dmpar_exch_halo_field(block % diag % rw)
-
call mpas_rbf_interp_initialize(mesh)
call mpas_init_reconstruct(mesh)
call mpas_reconstruct(mesh, block % state % time_levs(1) % state % u % array, &
Modified: branches/atmos_physics/src/core_nhyd_atmos/mpas_atm_time_integration.F
===================================================================
--- branches/atmos_physics/src/core_nhyd_atmos/mpas_atm_time_integration.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_nhyd_atmos/mpas_atm_time_integration.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -1327,7 +1327,7 @@
wdtn(iScalar,k) = wwAvg(k,iCell)*(fnm(k)*scalar_new(iScalar,k,iCell)+fnp(k)*scalar_new(iScalar,k-1,iCell))
end do
end do
- do k=1,grid % nVertLevelsSolve
+ do k=1,grid % nVertLevels ! Could be nVertLevelsSolve?
do iScalar=1,s_old % num_scalars
scalar_new(iScalar,k,iCell) = ( scalar_old(iScalar,k,iCell)*h_old(k,iCell) &
+ dt*( scalar_tend(iScalar,k,iCell) -rdnw(k)*(wdtn(iScalar,k+1)-wdtn(iScalar,k)) ) )/h_new(k,iCell)
@@ -1356,7 +1356,7 @@
wdtn(iScalar,k) = wwAvg(k,iCell)*(fnm(k)*scalar_new(iScalar,k,iCell)+fnp(k)*scalar_new(iScalar,k-1,iCell))
enddo
- do k=1,grid % nVertLevelsSolve
+ do k=1,grid % nVertLevels ! Could be nVertLevelsSolve?
do iScalar=1,s_old % num_scalars
scalar_new(iScalar,k,iCell) = ( scalar_old(iScalar,k,iCell)*h_old(k,iCell) &
+ dt*( scalar_tend(iScalar,k,iCell) -rdnw(k)*(wdtn(iScalar,k+1)-wdtn(iScalar,k)) ) )/h_new(k,iCell)
@@ -1384,7 +1384,7 @@
wdtn(iScalar,k) = wwAvg(k,iCell)*(fnm(k)*scalar_new(iScalar,k,iCell)+fnp(k)*scalar_new(iScalar,k-1,iCell))
enddo
- do k=1,grid % nVertLevelsSolve
+ do k=1,grid % nVertLevels ! Could be nVertLevelsSolve?
do iScalar=1,s_old % num_scalars
scalar_new(iScalar,k,iCell) = ( scalar_old(iScalar,k,iCell)*h_old(k,iCell) &
+ dt*( scalar_tend(iScalar,k,iCell) -rdnw(k)*(wdtn(iScalar,k+1)-wdtn(iScalar,k)) ) )/h_new(k,iCell)
@@ -1434,7 +1434,8 @@
integer, dimension(:), pointer :: nAdvCellsForEdge
real (kind=RKIND), dimension(:,:), pointer :: adv_coefs, adv_coefs_3rd
- type (field2DReal) :: tempField
+ type (field2DReal), pointer :: tempField
+ type (field2DReal), target :: tempFieldTarget
real (kind=RKIND), dimension( grid % nVertLevels, grid % nCells ) :: scalar_old, scalar_new
real (kind=RKIND), dimension( grid % nVertLevels, grid % nCells ) :: s_max, s_min, s_update
@@ -1700,12 +1701,16 @@
!
! WCS_halo_opt_2 - communicate only first halo row in these next two exchanges
!
+ tempField => tempFieldTarget
tempField % block => block
tempField % dimSizes(1) = grid % nVertLevels
tempField % dimSizes(2) = grid % nCells
tempField % sendList => block % parinfo % cellsToSend
tempField % recvList => block % parinfo % cellsToRecv
+ tempField % copyList => block % parinfo % cellsToCopy
+ tempField % prev => null()
+ tempField % next => null()
tempField % array => scale_in
call mpas_dmpar_exch_halo_field(tempField, (/ 1 /))
@@ -1863,12 +1868,11 @@
!SHP-curvature
logical, parameter :: curvature = .true.
- !real (kind=RKIND), parameter :: omega_e = 7.29212e-05
- !real (kind=RKIND) :: r_earth
+ real (kind=RKIND) :: r_earth
real (kind=RKIND), dimension(:,:), pointer :: ur_cell, vr_cell
real (kind=RKIND), parameter :: c_s = 0.125
-! real (kind=RKIND), parameter :: c_s = 0.25
+! real (kind=RKIND), parameter :: c_s = 0.25
real (kind=RKIND), dimension( grid % nVertLevels ) :: d_diag, d_off_diag, flux_arr
real (kind=RKIND), dimension(:,:), pointer :: defc_a, defc_b
logical :: delsq_horiz_mixing, newpx
@@ -1889,7 +1893,7 @@
!-----------
!SHP-curvature
- !r_earth = a
+ r_earth = grid % sphere_radius
ur_cell => diag % uReconstructZonal % array
vr_cell => diag % uReconstructMeridional % array
@@ -2168,7 +2172,7 @@
- 2.*omega*cos(grid % angleEdge % array(iEdge))*cos(grid % latEdge % array(iEdge)) &
*rho_edge(k,iEdge)*.25*(w(k,cell1)+w(k+1,cell1)+w(k,cell2)+w(k+1,cell2)) &
- u(k,iEdge)*.25*(w(k+1,cell1)+w(k,cell1)+w(k,cell2)+w(k+1,cell2)) &
- *rho_edge(k,iEdge)/a
+ *rho_edge(k,iEdge)/r_earth
!old-err.
!tend_u(k,iEdge) = tend_u(k,iEdge) &
! - 2.*omega_e*cos(grid % angleEdge % array(iEdge))*cos(grid % latEdge % array(iEdge)) &
@@ -2530,7 +2534,7 @@
do k=2,nVertLevels
tend_w(k,iCell) = tend_w(k,iCell) + (rho_zz(k,iCell)*fzm(k)+rho_zz(k-1,iCell)*fzp(k))* &
( (fzm(k)*ur_cell(k,iCell)+fzp(k)*ur_cell(k-1,iCell))**2. &
- +(fzm(k)*vr_cell(k,iCell)+fzp(k)*vr_cell(k-1,iCell))**2. )/a &
+ +(fzm(k)*vr_cell(k,iCell)+fzp(k)*vr_cell(k-1,iCell))**2. )/r_earth &
+ 2.*omega*cos(grid % latCell % array(iCell)) &
*(fzm(k)*ur_cell(k,iCell)+fzp(k)*ur_cell(k-1,iCell)) &
*(rho_zz(k,iCell)*fzm(k)+rho_zz(k-1,iCell)*fzp(k))
Index: branches/atmos_physics/src/core_ocean
===================================================================
--- branches/atmos_physics/src/core_ocean        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_ocean        2012-10-27 00:29:44 UTC (rev 2281)
Property changes on: branches/atmos_physics/src/core_ocean
___________________________________________________________________
Added: svn:mergeinfo
## -0,0 +1,28 ##
+/branches/cam_mpas_nh/src/core_ocean:1260-1270
+/branches/ocean_projects/ale_split_exp/src/core_ocean:1437-1483
+/branches/ocean_projects/ale_vert_coord/src/core_ocean:1225-1383
+/branches/ocean_projects/ale_vert_coord_new/src/core_ocean:1387-1428
+/branches/ocean_projects/gmvar/src/core_ocean:1214-1514,1517-1738
+/branches/ocean_projects/imp_vert_mix_error/src/core_ocean:1847-1887
+/branches/ocean_projects/imp_vert_mix_mrp/src/core_ocean:754-986
+/branches/ocean_projects/leith_mrp/src/core_ocean:2182-2241
+/branches/ocean_projects/monotonic_advection/src/core_ocean:1499-1640
+/branches/ocean_projects/monthly_forcing/src/core_ocean:1810-1867
+/branches/ocean_projects/option3_b4b_test/src/core_ocean:2201-2231
+/branches/ocean_projects/partial_bottom_cells/src/core_ocean:2172-2226
+/branches/ocean_projects/restart_reproducibility/src/core_ocean:2239-2272
+/branches/ocean_projects/split_explicit_mrp/src/core_ocean:1134-1138
+/branches/ocean_projects/split_explicit_timestepping/src/core_ocean:1044-1097
+/branches/ocean_projects/vert_adv_mrp/src/core_ocean:704-745
+/branches/ocean_projects/vol_cons_RK_imp_mix/src/core_ocean:1965-1992
+/branches/ocean_projects/zstar_restart_new/src/core_ocean:1762-1770
+/branches/omp_blocks/block_decomp/src/core_ocean:1374-1569
+/branches/omp_blocks/ddt_reorg/src/core_ocean:1301-1414
+/branches/omp_blocks/halo/src/core_ocean:1570-1638
+/branches/omp_blocks/io/src/core_ocean:1639-1787
+/branches/omp_blocks/multiple_blocks/src/core_ocean:1803-2084
+/branches/omp_blocks/openmp_test/src/core_ocean:2107-2144
+/branches/omp_blocks/openmp_test/src/core_ocean_elements:2161-2201
+/branches/source_renaming/src/core_ocean:1082-1113
+/branches/time_manager/src/core_ocean:924-962
+/trunk/mpas/src/core_ocean:1371-2274
\ No newline at end of property
Modified: branches/atmos_physics/src/core_ocean/Makefile
===================================================================
--- branches/atmos_physics/src/core_ocean/Makefile        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_ocean/Makefile        2012-10-27 00:29:44 UTC (rev 2281)
@@ -10,6 +10,7 @@
         mpas_ocn_vel_vadv.o \
         mpas_ocn_vel_hmix.o \
         mpas_ocn_vel_hmix_del2.o \
+         mpas_ocn_vel_hmix_leith.o \
         mpas_ocn_vel_hmix_del4.o \
         mpas_ocn_vel_forcing.o \
         mpas_ocn_vel_forcing_windstress.o \
@@ -53,7 +54,8 @@
         mpas_ocn_equation_of_state_jm.o \
         mpas_ocn_equation_of_state_linear.o \
mpas_ocn_global_diagnostics.o \
-         mpas_ocn_time_average.o
+         mpas_ocn_time_average.o \
+         mpas_ocn_monthly_forcing.o
all: core_hyd
@@ -86,10 +88,12 @@
mpas_ocn_vel_vadv.o:
-mpas_ocn_vel_hmix.o: mpas_ocn_vel_hmix_del2.o mpas_ocn_vel_hmix_del4.o
+mpas_ocn_vel_hmix.o: mpas_ocn_vel_hmix_del2.o mpas_ocn_vel_hmix_leith.o mpas_ocn_vel_hmix_del4.o
mpas_ocn_vel_hmix_del2.o:
+mpas_ocn_vel_hmix_leith.o:
+
mpas_ocn_vel_hmix_del4.o:
mpas_ocn_vel_forcing.o: mpas_ocn_vel_forcing_windstress.o mpas_ocn_vel_forcing_bottomdrag.o mpas_ocn_vel_forcing_rayleigh.o
@@ -166,16 +170,19 @@
mpas_ocn_equation_of_state_linear.o:
+mpas_ocn_monthly_forcing.o:
+
mpas_ocn_mpas_core.o: mpas_ocn_mpas_core.o \
                         mpas_ocn_test_cases.o \
                                         mpas_ocn_advection.o \
                                         mpas_ocn_thick_hadv.o \
- mpas_ocn_gm.o \
+ mpas_ocn_gm.o \
                                         mpas_ocn_thick_vadv.o \
                                         mpas_ocn_vel_coriolis.o \
                                         mpas_ocn_vel_vadv.o \
                                         mpas_ocn_vel_hmix.o \
                                         mpas_ocn_vel_hmix_del2.o \
+                                         mpas_ocn_vel_hmix_leith.o \
                                         mpas_ocn_vel_hmix_del4.o \
                                         mpas_ocn_vel_forcing.o \
                                         mpas_ocn_vel_forcing_windstress.o \
@@ -218,7 +225,8 @@
                                         mpas_ocn_equation_of_state_jm.o \
                                         mpas_ocn_equation_of_state_linear.o \
                                         mpas_ocn_global_diagnostics.o \
-                                         mpas_ocn_time_average.o
+                                         mpas_ocn_time_average.o \
+                                         mpas_ocn_monthly_forcing.o
clean:
        $(RM) *.o *.mod *.f90 libdycore.a
Modified: branches/atmos_physics/src/core_ocean/Registry
===================================================================
--- branches/atmos_physics/src/core_ocean/Registry        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_ocean/Registry        2012-10-27 00:29:44 UTC (rev 2281)
@@ -13,16 +13,18 @@
namelist logical sw_model config_initial_stats false
namelist logical sw_model config_prescribe_velocity false
namelist logical sw_model config_prescribe_thickness false
+namelist integer sw_model config_num_halos 3
namelist character io config_input_name grid.nc
namelist character io config_output_name output.nc
namelist character io config_restart_name restart.nc
namelist character io config_output_interval 24:00:00
namelist integer io config_frames_per_outfile 0
-namelist integer io config_pio_num_iotasks 0
-namelist integer io config_pio_stride 1
+namelist integer io config_pio_num_iotasks 0
+namelist integer io config_pio_stride 1
+namelist logical io config_write_output_on_startup true
namelist character decomposition config_block_decomp_file_prefix graph.info.part.
namelist integer decomposition config_number_of_blocks 0
-namelist logical decomposition config_explicit_proc_decomp .false.
+namelist logical decomposition config_explicit_proc_decomp false
namelist character decomposition config_proc_decomp_file_prefix graph.info.part.
namelist logical restart config_do_restart false
namelist character restart config_restart_interval none
@@ -30,6 +32,10 @@
namelist character grid config_pressure_type pressure
namelist real grid config_rho0 1028
namelist logical grid config_enforce_zstar_at_restart false
+namelist character partial_bottom_cells config_alter_ICs_for_pbcs zlevel_pbcs_off
+namelist real partial_bottom_cells config_min_pbc_fraction 0.10
+namelist logical partial_bottom_cells config_check_ssh_consistency true
+namelist logical partial_bottom_cells config_check_zlevel_consistency false
namelist integer split_explicit_ts config_n_ts_iter 2
namelist integer split_explicit_ts config_n_bcl_iter_beg 2
namelist integer split_explicit_ts config_n_bcl_iter_mid 2
@@ -57,9 +63,13 @@
namelist logical hmix config_rayleigh_friction false
namelist real hmix config_rayleigh_damping_coeff 0.0
namelist real hmix config_apvm_scale_factor 0.0
+namelist logical hmix_leith config_use_leith_del2 false
+namelist real hmix_leith config_leith_parameter 0.0
+namelist real hmix_leith config_leith_dx 0.0
+namelist real hmix_leith config_leith_visc2_max 1000000.0
namelist character vmix config_vert_visc_type const
namelist character vmix config_vert_diff_type const
-namelist logical vmix config_implicit_vertical_mix .true.
+namelist logical vmix config_implicit_vertical_mix true
namelist real vmix config_convective_visc 1.0
namelist real vmix config_convective_diff 1.0
namelist real vmix config_bottom_drag_coeff 1.0e-3
@@ -85,6 +95,7 @@
namelist logical restore config_restoreTS false
namelist real restore config_restoreT_timescale 90.0
namelist real restore config_restoreS_timescale 90.0
+namelist logical restore config_use_monthly_forcing false
%
% dim type name_in_file name_in_code
@@ -102,6 +113,7 @@
dim vertexDegree vertexDegree
dim nVertLevels nVertLevels
dim nVertLevelsP1 nVertLevels+1
+dim nMonths nMonths
%
% var persistence type name_in_file ( dims ) time_levs iro- name_in_code struct super-array array_class
@@ -159,7 +171,7 @@
var persistent real kiteAreasOnVertex ( vertexDegree nVertices ) 0 iro kiteAreasOnVertex mesh - -
var persistent real fEdge ( nEdges ) 0 iro fEdge mesh - -
var persistent real fVertex ( nVertices ) 0 iro fVertex mesh - -
-var persistent real h_s ( nCells ) 0 iro h_s mesh - -
+var persistent real bottomDepth ( nCells ) 0 iro bottomDepth mesh - -
% Space needed for advection
var persistent real deriv_two ( maxEdges2 TWO nEdges ) 0 - deriv_two mesh - -
@@ -189,21 +201,26 @@
var persistent integer maxLevelEdgeBot ( nEdges ) 0 - maxLevelEdgeBot mesh - -
var persistent integer maxLevelVertexTop ( nVertices ) 0 - maxLevelVertexTop mesh - -
var persistent integer maxLevelVertexBot ( nVertices ) 0 - maxLevelVertexBot mesh - -
-var persistent real referenceBottomDepth ( nVertLevels ) 0 iro referenceBottomDepth mesh - -
-var persistent real referenceBottomDepthTopOfCell ( nVertLevelsP1 ) 0 - referenceBottomDepthTopOfCell mesh - -
+var persistent real refBottomDepth ( nVertLevels ) 0 iro refBottomDepth mesh - -
+var persistent real refBottomDepthTopOfCell ( nVertLevelsP1 ) 0 - refBottomDepthTopOfCell mesh - -
var persistent real hZLevel ( nVertLevels ) 0 iro hZLevel mesh - -
var persistent real zstarWeight ( nVertLevels ) 0 - zstarWeight mesh - -
-% Boundary conditions: read from input, saved in restart and written to output
-var persistent integer boundaryEdge ( nVertLevels nEdges ) 0 iro boundaryEdge mesh - -
-var persistent integer boundaryVertex ( nVertLevels nVertices ) 0 iro boundaryVertex mesh - -
-var persistent integer boundaryCell ( nVertLevels nCells ) 0 iro boundaryCell mesh - -
+% Boundary conditions and masks
+var persistent integer boundaryEdge ( nVertLevels nEdges ) 0 - boundaryEdge mesh - -
+var persistent integer boundaryVertex ( nVertLevels nVertices ) 0 - boundaryVertex mesh - -
+var persistent integer boundaryCell ( nVertLevels nCells ) 0 - boundaryCell mesh - -
var persistent integer edgeMask ( nVertLevels nEdges ) 0 o edgeMask mesh - -
var persistent integer vertexMask ( nVertLevels nVertices ) 0 o vertexMask mesh - -
var persistent integer cellMask ( nVertLevels nCells ) 0 o cellMask mesh - -
+
+% Forcing variables.
var persistent real u_src ( nVertLevels nEdges ) 0 ir u_src mesh - -
var persistent real temperatureRestore ( nCells ) 0 ir temperatureRestore mesh - -
var persistent real salinityRestore ( nCells ) 0 ir salinityRestore mesh - -
+var persistent real windStressMonthly ( nMonths nEdges ) 0 ir windStressMonthly mesh - -
+var persistent real temperatureRestoreMonthly ( nMonths nCells ) 0 ir temperatureRestoreMonthly mesh - -
+var persistent real salinityRestoreMonthly ( nMonths nCells ) 0 ir salinityRestoreMonthly mesh - -
% Prognostic variables: read from input, saved in restart, and written to output
var persistent real u ( nVertLevels nEdges Time ) 2 ir u state - -
@@ -222,7 +239,7 @@
var persistent real tend_tracer1 ( nVertLevels nCells Time ) 1 - tracer1 tend tracers testing
% state variables for Split Explicit timesplitting
-var persistent real uBtr ( nEdges Time ) 2 - uBtr state - -
+var persistent real uBtr ( nEdges Time ) 2 ir uBtr state - -
var persistent real ssh ( nCells Time ) 2 o ssh state - -
var persistent real uBtrSubcycle ( nEdges Time ) 2 - uBtrSubcycle state - -
var persistent real sshSubcycle ( nCells Time ) 2 - sshSubcycle state - -
@@ -231,7 +248,7 @@
var persistent real uBcl ( nVertLevels nEdges Time ) 2 - uBcl state - -
% Diagnostic fields: only written to output
-var persistent real zMid ( nVertLevels nCells Time ) 2 io zMid state - -
+var persistent real zMid ( nVertLevels nCells Time ) 2 - zMid state - -
var persistent real v ( nVertLevels nEdges Time ) 2 - v state - -
var persistent real uTransport ( nVertLevels nEdges Time ) 2 - uTransport state - -
var persistent real uBolusGM ( nVertLevels nEdges Time ) 2 - uBolusGM state - -
@@ -259,10 +276,16 @@
var persistent real uReconstructZ ( nVertLevels nCells Time ) 2 - uReconstructZ state - -
var persistent real uReconstructZonal ( nVertLevels nCells Time ) 2 o uReconstructZonal state - -
var persistent real uReconstructMeridional ( nVertLevels nCells Time ) 2 o uReconstructMeridional state - -
+var persistent real uSrcReconstructX ( nVertLevels nCells Time ) 2 - uSrcReconstructX state - -
+var persistent real uSrcReconstructY ( nVertLevels nCells Time ) 2 - uSrcReconstructY state - -
+var persistent real uSrcReconstructZ ( nVertLevels nCells Time ) 2 - uSrcReconstructZ state - -
+var persistent real uSrcReconstructZonal ( nVertLevels nCells Time ) 2 o uSrcReconstructZonal state - -
+var persistent real uSrcReconstructMeridional ( nVertLevels nCells Time ) 2 o uSrcReconstructMeridional state - -
var persistent real MontPot ( nVertLevels nCells Time ) 2 - MontPot state - -
var persistent real pressure ( nVertLevels nCells Time ) 2 - pressure state - -
var persistent real wTop ( nVertLevelsP1 nCells Time ) 2 - wTop state - -
var persistent real rhoDisplaced ( nVertLevels nCells Time ) 2 - rhoDisplaced state - -
+var persistent real viscosity ( nVertLevels nEdges Time ) 2 o viscosity state - -
% Other diagnostic variables: neither read nor written to any files
var persistent real vh ( nVertLevels nEdges Time ) 2 - vh state - -
@@ -292,5 +315,10 @@
var persistent real acc_uReconstructMeridional ( nVertLevels nCells Time ) 2 o acc_uReconstructMeridional state - -
var persistent real acc_uReconstructZonalVar ( nVertLevels nCells Time ) 2 o acc_uReconstructZonalVar state - -
var persistent real acc_uReconstructMeridionalVar ( nVertLevels nCells Time ) 2 o acc_uReconstructMeridionalVar state - -
-var persistent real         acc_u ( nVertLevels nEdges Time ) 2 o acc_u state - -
-var persistent real         acc_uVar ( nVertLevels nEdges Time ) 2 o acc_uVar state - -
+var persistent real acc_u ( nVertLevels nEdges Time ) 2 o acc_u state - -
+var persistent real acc_uVar ( nVertLevels nEdges Time ) 2 o acc_uVar state - -
+
+% Sign fields, for openmp and bit reproducibility without branching statements.
+var persistent integer edgeSignOnCell ( maxEdges nCells ) 0 - edgeSignOnCell mesh - -
+var persistent integer edgeSignOnVertex ( maxEdges nVertices ) 0 - edgeSignOnVertex mesh - -
+var persistent integer kiteIndexOnCell ( maxEdges nCells ) 0 - kiteIndexOnCell mesh - -
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_advection.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_advection.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_advection.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -274,12 +274,22 @@
if (ip1 > n-1) ip1 = 1
iEdge = grid % EdgesOnCell % array (i,iCell)
- xv1 = grid % xVertex % array(grid % verticesOnEdge % array (1,iedge))/grid % sphere_radius
- yv1 = grid % yVertex % array(grid % verticesOnEdge % array (1,iedge))/grid % sphere_radius
- zv1 = grid % zVertex % array(grid % verticesOnEdge % array (1,iedge))/grid % sphere_radius
- xv2 = grid % xVertex % array(grid % verticesOnEdge % array (2,iedge))/grid % sphere_radius
- yv2 = grid % yVertex % array(grid % verticesOnEdge % array (2,iedge))/grid % sphere_radius
- zv2 = grid % zVertex % array(grid % verticesOnEdge % array (2,iedge))/grid % sphere_radius
+
+ if(grid % on_a_sphere) then
+ xv1 = grid % xVertex % array(grid % verticesOnEdge % array (1,iedge))/grid % sphere_radius
+ yv1 = grid % yVertex % array(grid % verticesOnEdge % array (1,iedge))/grid % sphere_radius
+ zv1 = grid % zVertex % array(grid % verticesOnEdge % array (1,iedge))/grid % sphere_radius
+ xv2 = grid % xVertex % array(grid % verticesOnEdge % array (2,iedge))/grid % sphere_radius
+ yv2 = grid % yVertex % array(grid % verticesOnEdge % array (2,iedge))/grid % sphere_radius
+ zv2 = grid % zVertex % array(grid % verticesOnEdge % array (2,iedge))/grid % sphere_radius
+ else
+ xv1 = grid % xVertex % array(grid % verticesOnEdge % array (1,iedge))
+ yv1 = grid % yVertex % array(grid % verticesOnEdge % array (1,iedge))
+ zv1 = grid % zVertex % array(grid % verticesOnEdge % array (1,iedge))
+ xv2 = grid % xVertex % array(grid % verticesOnEdge % array (2,iedge))
+ yv2 = grid % yVertex % array(grid % verticesOnEdge % array (2,iedge))
+ zv2 = grid % zVertex % array(grid % verticesOnEdge % array (2,iedge))
+ end if
if ( grid % on_a_sphere ) then
call ocn_arc_bisect( xv1, yv1, zv1, &
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_equation_of_state.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_equation_of_state.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_equation_of_state.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -74,10 +74,13 @@
! Input: grid - grid metadata
! s - state: tracers
! k_displaced
- ! If k_displaced<=0, state % rho is returned with no displaced
- ! If k_displaced>0,the state % rhoDisplaced is returned, and is for
+ !
+ ! If k_displaced==0, state % rho is returned with no displacement
+ !
+ ! If k_displaced /= 0, the state % rhoDisplaced is returned, and is for
! a parcel adiabatically displaced from its original level to level
- ! k_displaced. This does not effect the linear EOS.
+ ! k_displaced. When using the linear EOS, state % rhoDisplaced is
+ ! still filled, but depth (i.e. pressure) does not modify the output.
!
! Output: s - state: computed density
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -103,19 +106,19 @@
indexT = s % index_temperature
indexS = s % index_salinity
- if (linearEos) then
+ ! Choose to fill the array rho or rhoDisplaced
+ if (k_displaced == 0) then
rho => s % rho % array
+ else
+ rho => s % rhoDisplaced % array
+ endif
+ if (linearEos) then
+
call ocn_equation_of_state_linear_rho(grid, indexT, indexS, tracers, rho, err)
elseif (jmEos) then
- if(k_displaced == 0) then
- rho => s % rho % array
- else
- rho => s % rhoDisplaced % array
- endif
-
call ocn_equation_of_state_jm_rho(grid, k_displaced, displacement_type, indexT, indexS, tracers, rho, err)
endif
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_equation_of_state_jm.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_equation_of_state_jm.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_equation_of_state_jm.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -96,7 +96,7 @@
real (kind=RKIND), dimension(:), pointer :: &
- referenceBottomDepth, pRefEOS
+ refBottomDepth, pRefEOS
real (kind=RKIND), dimension(:,:), intent(inout) :: &
rho
real (kind=RKIND), dimension(:,:,:), intent(in) :: tracers
@@ -197,7 +197,7 @@
nCells = grid % nCells
maxLevelCell => grid % maxLevelCell % array
nVertLevels = grid % nVertLevels
- referenceBottomDepth => grid % referenceBottomDepth % array
+ refBottomDepth => grid % refBottomDepth % array
! Jackett and McDougall
@@ -214,14 +214,14 @@
allocate(pRefEOS(nVertLevels),p(nVertLevels),p2(nVertLevels))
! This could be put in the init routine.
- ! Note I am using referenceBottomDepth, so pressure on top level does
+ ! Note I am using refBottomDepth, so pressure on top level does
! not include SSH contribution. I am not sure if that matters, but
! POP does it the same way.
- depth = 0.5*referenceBottomDepth(1)
+ depth = 0.5*refBottomDepth(1)
pRefEOS(1) = 0.059808*(exp(-0.025*depth) - 1.0) &
+ 0.100766*depth + 2.28405e-7*depth**2
do k = 2,nVertLevels
- depth = 0.5*(referenceBottomDepth(k)+referenceBottomDepth(k-1))
+ depth = 0.5*(refBottomDepth(k)+refBottomDepth(k-1))
pRefEOS(k) = 0.059808*(exp(-0.025*depth) - 1.0) &
+ 0.100766*depth + 2.28405e-7*depth**2
enddo
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_equation_of_state_linear.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_equation_of_state_linear.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_equation_of_state_linear.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -82,6 +82,13 @@
integer, intent(in) :: indexT, indexS
integer, intent(out) :: err
+ real (kind=RKIND), parameter :: rho_ref = 1025.022 ! kg / m^3
+ real (kind=RKIND), parameter :: alpha = 2.55e-1 ! kg / m^3 / K (-dRho/dT)
+ real (kind=RKIND), parameter :: beta = 7.64e-1 ! kg / m^3 / psu (dRho/dS)
+ real (kind=RKIND), parameter :: T_ref = 19.0 ! degrees C
+ real (kind=RKIND), parameter :: S_ref = 35.0 ! psu
+ real (kind=RKIND), parameter :: rho_prime_ref = rho_ref + alpha * T_ref - beta * S_ref
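+ ! rho_prime_ref folds the reference-state constants together so the loop
+ ! below evaluates rho = rho_ref - alpha*(T - T_ref) + beta*(S - S_ref)
+ ! with a single expression per cell and level.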
+
integer, dimension(:), pointer :: maxLevelCell
integer :: nCells, iCell, k
type (dm_info) :: dminfo
@@ -94,9 +101,8 @@
do iCell=1,nCells
do k=1,maxLevelCell(iCell)
! Linear equation of state
- rho(k,iCell) = 1000.0*( 1.0 &
- - 2.5e-4*tracers(indexT,k,iCell) &
- + 7.6e-4*tracers(indexS,k,iCell))
+ ! rho = rho_ref - alpha * (T - T_ref) + beta * (S - S_ref)
+ rho(k,iCell) = rho_prime_ref - alpha*tracers(indexT,k,iCell) + beta*tracers(indexS,k,iCell)
end do
end do
Copied: branches/atmos_physics/src/core_ocean/mpas_ocn_monthly_forcing.F (from rev 2274, trunk/mpas/src/core_ocean/mpas_ocn_monthly_forcing.F)
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_monthly_forcing.F         (rev 0)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_monthly_forcing.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -0,0 +1,192 @@
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+! ocn_monthly_forcing
+!
+!> \brief MPAS ocean monthly forcing
+!> \author Doug Jacobsen
+!> \date 04/25/12
+!> \version SVN:$Id:$
+!> \details
+!> This module contains routines for building the forcing arrays,
+!> if monthly forcing is used.
+!
+!-----------------------------------------------------------------------
+
+module ocn_monthly_forcing
+
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_timekeeping
+
+ implicit none
+ private
+ save
+
+ !--------------------------------------------------------------------
+ !
+ ! Public parameters
+ !
+ !--------------------------------------------------------------------
+
+ !--------------------------------------------------------------------
+ !
+ ! Public member functions
+ !
+ !--------------------------------------------------------------------
+
+ public :: ocn_build_forcing_arrays, &
+ ocn_monthly_forcing_init
+
+ !--------------------------------------------------------------------
+ !
+ ! Private module variables
+ !
+ !--------------------------------------------------------------------
+
+ logical :: monthlyForcingOn !< Flag to turn monthly forcing on/off
+
+!***********************************************************************
+
+contains
+
+!***********************************************************************
+!
+! routine ocn_build_forcing_arrays
+!
+!> \brief Determines the forcing array used for the monthly forcing.
+!> \author Doug Jacobsen
+!> \date 04/25/12
+!> \version SVN:$Id$
+!> \details
+!> This routine computes the forcing arrays used later in MPAS.
+!
+!-----------------------------------------------------------------------
+
+ subroutine ocn_build_forcing_arrays(timeStamp, grid, err)!{{{
+
+ !-----------------------------------------------------------------
+ !
+ ! input variables
+ !
+ !-----------------------------------------------------------------
+
+ type(MPAS_Time_type), intent(in) :: timeStamp
+
+ !-----------------------------------------------------------------
+ !
+ ! input/output variables
+ !
+ !-----------------------------------------------------------------
+
+ type (mesh_type), intent(inout) :: &
+ grid !< Input: grid information
+
+
+ !-----------------------------------------------------------------
+ !
+ ! output variables
+ !
+ !-----------------------------------------------------------------
+
+ integer, intent(out) :: err !< Output: Error flag
+
+ !-----------------------------------------------------------------
+ !
+ ! local variables
+ !
+ !-----------------------------------------------------------------
+
+
+ real (kind=RKIND), dimension(:,:), pointer :: temperatureRestoreMonthly
+ real (kind=RKIND), dimension(:,:), pointer :: salinityRestoreMonthly
+ real (kind=RKIND), dimension(:,:), pointer :: windStressMonthly
+ real (kind=RKIND), dimension(:), pointer :: temperatureRestore
+ real (kind=RKIND), dimension(:), pointer :: salinityRestore
+ real (kind=RKIND), dimension(:,:), pointer :: u_src
+ integer :: iCell, iEdge, nCells, nEdges, nMonths, k
+ integer :: iMonth, iMonthP1, iDayInMonth, ierr
+ real (kind=RKIND) :: data, dataP1, weight, weightP1
+
+ err = 0
+
+ if(.not.monthlyForcingOn) return
+
+ nCells = grid % nCells
+ nEdges = grid % nEdges
+ nMonths = grid % nMonths
+
+ temperatureRestore => grid % temperatureRestore % array
+ salinityRestore => grid % salinityRestore % array
+ u_src => grid % u_src % array
+
+ temperatureRestoreMonthly => grid % temperatureRestoreMonthly % array
+ salinityRestoreMonthly => grid % salinityRestoreMonthly % array
+ windStressMonthly => grid % windStressMonthly % array
+
+ call mpas_get_time(timeStamp, MM = iMonth, DD = iDayInMonth, ierr = ierr)
+
+ err = ierr
+
+ iMonthP1 = mod(iMonth, nMonths) + 1
+
+ weight = 1.0 - (iDayInMonth-1) / 30.0
+ weightP1 = 1.0 - weight
+
+ do iCell=1,nCells
+ ! Interpolate between iMonth and iMonthP1 records, using iDayInMonth
+ data = temperatureRestoreMonthly(iMonth,iCell)
+ dataP1 = temperatureRestoreMonthly(iMonthP1,iCell)
+ temperatureRestore(iCell) = data * weight + dataP1 * weightP1
+ data = salinityRestoreMonthly(iMonth,iCell)
+ dataP1 = salinityRestoreMonthly(iMonthP1,iCell)
+ salinityRestore(iCell) = data * weight + dataP1 * weightP1
+ end do
+
+ do iEdge=1,nEdges
+ ! Interpolate between iMonth and iMonthP1 records, using iDayInMonth
+ data = windStressMonthly(iMonth,iEdge)
+ dataP1 = windStressMonthly(iMonthP1,iEdge)
+ u_src(1,iEdge) = data * weight + dataP1 * weightP1
+ end do
+
+ !--------------------------------------------------------------------
+
+ end subroutine ocn_build_forcing_arrays!}}}
+
+!***********************************************************************
+!
+! routine ocn_monthly_forcing_init
+!
+!> \brief Initializes monthly forcing module
+!> \author Doug Jacobsen
+!> \date 04/25/12
+!> \version SVN:$Id$
+!> \details
+!> This routine initializes the monthly forcing module.
+!
+!-----------------------------------------------------------------------
+
+ subroutine ocn_monthly_forcing_init(err)!{{{
+
+ integer, intent(out) :: err !< Output: error flag
+
+ err = 0
+
+ monthlyForcingOn = .false.
+
+ if(config_use_monthly_forcing) then
+ monthlyForcingOn = .true.
+
+ write (0,'(a)') " Monthly forcing is on. Make sure the monthly forcing variables have the 'iro' streams in the Registry and are present in your initial condition or restart file."
+ end if
+
+ !--------------------------------------------------------------------
+
+ end subroutine ocn_monthly_forcing_init!}}}
+
+!***********************************************************************
+
+end module ocn_monthly_forcing
+
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+! vim: foldmethod=marker
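
The time interpolation in ocn_build_forcing_arrays assumes idealized 30-day months: weight falls linearly from 1.0 on day 1 of iMonth, weightP1 picks up the following month's record, and mod(iMonth, nMonths) + 1 wraps December back to January. A standalone sketch of the weighting, with invented record values:

   program test_monthly_weights
      implicit none
      integer, parameter :: nMonths = 12
      real, dimension(nMonths) :: monthlyField
      integer :: iMonth, iMonthP1, iDayInMonth
      real :: weight, weightP1

      monthlyField = (/ (real(iMonth), iMonth = 1, nMonths) /) ! dummy records

      iMonth = 12        ! December...
      iDayInMonth = 16   ! ...mid-month
      iMonthP1 = mod(iMonth, nMonths) + 1   ! wraps to January

      ! Same 30-day-month weighting as in ocn_build_forcing_arrays
      weight = 1.0 - (iDayInMonth - 1) / 30.0
      weightP1 = 1.0 - weight

      write (0,*) 'weights: ', weight, weightP1
      write (0,*) 'interpolated: ', monthlyField(iMonth)*weight + monthlyField(iMonthP1)*weightP1
   end program test_monthly_weights
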
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_mpas_core.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_mpas_core.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_mpas_core.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -11,6 +11,8 @@
use ocn_time_integration
use ocn_tendency
+ use ocn_monthly_forcing
+
use ocn_vel_pressure_grad
use ocn_vel_vadv
use ocn_vel_hmix
@@ -93,6 +95,9 @@
call mpas_ocn_tracer_advection_init(err_tmp)
err = ior(err,err_tmp)
+ call ocn_monthly_forcing_init(err_tmp)
+ err = ior(err, err_tmp)
+
call mpas_timer_init(domain)
if(err.eq.1) then
@@ -101,37 +106,37 @@
if (.not. config_do_restart) call setup_sw_test_case(domain)
+ if (config_vert_grid_type.ne.'isopycnal') call ocn_init_vert_coord(domain)
+
call ocn_compute_max_level(domain)
- call ocn_init_z_level(domain)
-
if (config_enforce_zstar_at_restart) then
call ocn_init_h_zstar(domain)
endif
- call ocn_init_split_timestep(domain)
+ if (.not.config_do_restart) call ocn_init_split_timestep(domain)
- print *, ' Vertical grid type is: ',config_vert_grid_type
+ write (0,'(a,a10)') ' Vertical grid type is: ',config_vert_grid_type
if (config_vert_grid_type.ne.'isopycnal'.and. &
config_vert_grid_type.ne.'zlevel'.and. &
config_vert_grid_type.ne.'zstar1'.and. &
config_vert_grid_type.ne.'zstar'.and. &
config_vert_grid_type.ne.'zstarWeights') then
- print *, ' Incorrect choice of config_vert_grid_type.'
+ write (0,*) ' Incorrect choice of config_vert_grid_type.'
call mpas_dmpar_abort(dminfo)
endif
- print *, ' Pressure type is: ',config_pressure_type
+ write (0,'(a,a10)') ' Pressure type is: ',config_pressure_type
if (config_pressure_type.ne.'pressure'.and. &
config_pressure_type.ne.'MontgomeryPotential') then
- print *, ' Incorrect choice of config_pressure_type.'
+ write (0,*) ' Incorrect choice of config_pressure_type.'
call mpas_dmpar_abort(dminfo)
endif
if (config_filter_btr_mode.and. &
config_vert_grid_type.ne.'zlevel')then
- print *, 'filter_btr_mode has only been tested with'// &
+ write (0,*) 'filter_btr_mode has only been tested with'// &
' config_vert_grid_type=zlevel.'
call mpas_dmpar_abort(dminfo)
endif
@@ -244,6 +249,7 @@
integer :: i, iEdge, iCell, k
integer :: err1
+ call ocn_setup_sign_and_index_fields(mesh)
call ocn_initialize_advection_rk(mesh, err)
call mpas_ocn_tracer_advection_coefficients(mesh, err1)
err = ior(err, err1)
@@ -254,13 +260,11 @@
call ocn_diagnostic_solve(dt, block % state % time_levs(1) % state, mesh)
call mpas_timer_stop("diagnostic solve", initDiagSolveTimer)
- ! Compute velocity transport, used in advection terms of h and tracer tendancy
+ ! Compute velocity transport, used in advection terms of h and tracer tendency
block % state % time_levs(1) % state % uTransport % array(:,:) &
= block % state % time_levs(1) % state % u % array(:,:) &
+ block % state % time_levs(1) % state % uBolusGM % array(:,:)
- call ocn_wtop(block % state % time_levs(1) % state,block % state % time_levs(1) % state, mesh)
-
call ocn_compute_mesh_scaling(mesh)
call mpas_rbf_interp_initialize(mesh)
@@ -273,6 +277,16 @@
block % state % time_levs(1) % state % uReconstructMeridional % array &
)
+!TDR
+ call mpas_reconstruct(mesh, mesh % u_src % array, &
+ block % state % time_levs(1) % state % uSrcReconstructX % array, &
+ block % state % time_levs(1) % state % uSrcReconstructY % array, &
+ block % state % time_levs(1) % state % uSrcReconstructZ % array, &
+ block % state % time_levs(1) % state % uSrcReconstructZonal % array, &
+ block % state % time_levs(1) % state % uSrcReconstructMeridional % array &
+ )
+!TDR
+
! initialize velocities and tracers on land to be -1e34
! The reconstructed velocity on land will have values not exactly
! -1e34 due to the interpolation of reconstruction.
@@ -336,7 +350,9 @@
call mpas_get_time(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr)
write(0,*) 'Initial time ', trim(timeStamp)
- call ocn_write_output_frame(output_obj, output_frame, domain)
+ if (config_write_output_on_startup) then
+ call ocn_write_output_frame(output_obj, output_frame, domain)
+ endif
block_ptr => domain % blocklist
do while(associated(block_ptr))
@@ -355,17 +371,28 @@
currTime = mpas_get_clock_time(clock, MPAS_NOW, ierr)
call mpas_get_time(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr)
write(0,*) 'Doing timestep ', trim(timeStamp)
+
+ block_ptr => domain % blocklist
+ do while(associated(block_ptr))
+ call ocn_build_forcing_arrays(currTime, block_ptr % mesh, ierr)
+ block_ptr => block_ptr % next
+ end do
call mpas_timer_start("time integration", .false., timeIntTimer)
call mpas_timestep(domain, itimestep, dt, timeStamp)
call mpas_timer_stop("time integration", timeIntTimer)
! Move time level 2 fields back into time level 1 for next time step
- call mpas_shift_time_levels_state(domain % blocklist % state)
+ block_ptr => domain % blocklist
+ do while(associated(block_ptr))
+ call mpas_shift_time_levels_state(block_ptr % state)
+ block_ptr => block_ptr % next
+ end do
if (mpas_is_alarm_ringing(clock, outputAlarmID, ierr=ierr)) then
call mpas_reset_clock_alarm(clock, outputAlarmID, ierr=ierr)
- ! output_frame will always be > 1 here unless it was reset after the maximum number of frames per outfile was reached
+ ! output_frame will always be > 1 here unless it was reset after the
+ ! maximum number of frames per outfile was reached.
if(output_frame == 1) then
call mpas_output_state_finalize(output_obj, domain % dminfo)
call mpas_output_state_init(output_obj, domain, "OUTPUT", trim(timeStamp))
@@ -506,8 +533,9 @@
end subroutine mpas_timestep!}}}
- subroutine ocn_init_z_level(domain)!{{{
- ! Initialize zlevel-type variables
+ subroutine ocn_init_vert_coord(domain)!{{{
+ ! Initialize zlevel-type variables and adjust initial conditions for
+ ! partial bottom cells.
use mpas_grid_types
use mpas_configure
@@ -515,43 +543,55 @@
implicit none
type (domain_type), intent(inout) :: domain
+ type (dm_info) :: dminfo
- integer :: i, iCell, iEdge, iVertex, k
+ integer :: i, iCell, iEdge, iVertex, k, nCells, num_tracers
type (block_type), pointer :: block
integer :: iTracer, cell, cell1, cell2
- real (kind=RKIND) :: uhSum, hSum, hEdge1
- real (kind=RKIND), dimension(:), pointer :: referenceBottomDepth, &
- referenceBottomDepthTopOfCell, zstarWeight, hZLevel
+ real (kind=RKIND) :: uhSum, hSum, hEdge1, zMidPBC
+
+ integer, dimension(:), pointer :: maxLevelCell
+ real (kind=RKIND), dimension(:), pointer :: refBottomDepth, &
+ refBottomDepthTopOfCell, zstarWeight, hZLevel, bottomDepth
+ real (kind=RKIND), dimension(:), allocatable :: minBottomDepth, minBottomDepthMid, zMidZLevel
real (kind=RKIND), dimension(:,:), pointer :: h
+ real (kind=RKIND), dimension(:,:,:), pointer :: tracers
integer :: nVertLevels
+ logical :: consistentSSH
! Initialize z-level grid variables from h, read in from input file.
block => domain % blocklist
do while (associated(block))
h => block % state % time_levs(1) % state % h % array
- referenceBottomDepth => block % mesh % referenceBottomDepth % array
- referenceBottomDepthTopOfCell => block % mesh % referenceBottomDepthTopOfCell % array
+ tracers => block % state % time_levs(1) % state % tracers % array
+ refBottomDepth => block % mesh % refBottomDepth % array
+ refBottomDepthTopOfCell => block % mesh % refBottomDepthTopOfCell % array
+ bottomDepth => block % mesh % bottomDepth % array
zstarWeight => block % mesh % zstarWeight % array
hZLevel => block % mesh % hZLevel % array
+ maxLevelCell => block % mesh % maxLevelCell % array
+
+ nCells = block % mesh % nCells
nVertLevels = block % mesh % nVertLevels
+ num_tracers = size(tracers, dim=1)
! mrp 120208 right now hZLevel is in the grid.nc file.
- ! We would like to transition to using referenceBottomDepth
+ ! We would like to transition to using refBottomDepth
! as the defining variable instead, and will transition soon.
! When the transition is done, hZLevel can be removed from
! registry and the following four lines deleted.
- referenceBottomDepth(1) = hZLevel(1)
+ refBottomDepth(1) = hZLevel(1)
do k = 2,nVertLevels
- referenceBottomDepth(k) = referenceBottomDepth(k-1) + hZLevel(k)
+ refBottomDepth(k) = refBottomDepth(k-1) + hZLevel(k)
end do
! TopOfCell needed where zero depth for the very top may be referenced.
- referenceBottomDepthTopOfCell(1) = 0.0
+ refBottomDepthTopOfCell(1) = 0.0
do k = 1,nVertLevels
- referenceBottomDepthTopOfCell(k+1) = referenceBottomDepth(k)
+ refBottomDepthTopOfCell(k+1) = refBottomDepth(k)
end do
! Initialization of zstarWeights. This determines how SSH perturbations
@@ -563,9 +603,7 @@
elseif (config_vert_grid_type.eq.'zstar') then
- do k = 1,nVertLevels
- zstarWeight(k) = hZLevel(k)
- enddo
+ zstarWeight = 1.0
elseif (config_vert_grid_type.eq.'zstarWeights') then
@@ -580,10 +618,125 @@
endif
+ ! Initial condition files (ocean.nc, produced by basin) include a realistic
+ ! bottomDepth variable and h,T,S variables for full-thickness cells.
+ ! If running with pbcs, set config_alter_ICs_for_pbcs='zlevel_pbcs_on'. Thin pbc cells
+ ! will then be changed, and h,T,S will be altered to match the pbcs.
+ ! If running without pbcs, set config_alter_ICs_for_pbcs='zlevel_pbcs_off'. Then
+ ! bottomDepth will be altered so that cells are full everywhere.
+ ! If your input file does not include bottomDepth, the 'zlevel_pbcs_off' option will
+ ! initialize bottomDepth correctly for a non-pbc run.
+
+
+ if (.not.config_do_restart) then
+
+ if (config_alter_ICs_for_pbcs.eq.'zlevel_pbcs_on') then
+
+ write (0,'(a)') ' Altering bottomDepth to avoid very thin cells.'
+ write (0,'(a)') ' Altering h and tracer initial conditions to conform with partial bottom cells.'
+
+ allocate(minBottomDepth(nVertLevels),minBottomDepthMid(nVertLevels),zMidZLevel(nVertLevels))
+
+ ! config_min_pbc_fraction restricts pbcs from being too small.
+ ! A typical value is 10%, so pbcs must occupy at least 10% of the cell thickness.
+ ! If config_min_pbc_fraction = 0.0, bottomDepth gives the actual depth for that cell.
+ ! If config_min_pbc_fraction = 1.0, bottomDepth reverts to discrete z-level depths,
+ ! the same as running without partial bottom cells.
+
+ do k=1,nVertLevels
+ minBottomDepth(k) = refBottomDepth(k) - (1.0-config_min_pbc_fraction)*hZLevel(k)
+ minBottomDepthMid(k) = 0.5*(minBottomDepth(k) + refBottomDepthTopOfCell(k))
+ zMidZLevel(k) = - 0.5*(refBottomDepth(k) + refBottomDepthTopOfCell(k))
+ enddo
+
+ do iCell=1,nCells
+ k = maxLevelCell(iCell)
+
+ if (bottomDepth(iCell).lt.minBottomDepthMid(k)) then
+ ! Round up to cell above
+ maxLevelCell(iCell) = maxLevelCell(iCell) - 1
+ bottomDepth(iCell) = refBottomDepth(maxLevelCell(iCell))
+ elseif (bottomDepth(iCell).lt.minBottomDepth(k)) then
+ ! Round the bottom down to the minimum pbc thickness.
+ bottomDepth(iCell) = minBottomDepth(k)
+ endif
+ k = maxLevelCell(iCell)
+
+ ! Alter thickness of bottom level to account for PBC
+ h(k,iCell) = bottomDepth(iCell) - refBottomDepthTopOfCell(k)
+
+ ! Linearly interpolate the initial T&S for new location of bottom cell for PBCs
+ zMidPBC = -0.5*(bottomDepth(iCell) + refBottomDepthTopOfCell(k))
+
+ do iTracer=1,num_tracers
+ tracers(iTracer,k,iCell) = tracers(iTracer,k,iCell) &
+ + (tracers(iTracer,k-1,iCell) - tracers(iTracer,k,iCell)) &
+ /(zMidZLevel(k-1)-zMidZLevel(k)) &
+ *(zMidPBC - zMidZLevel(k))
+ enddo
+
+ enddo
+
+ deallocate(minBottomDepth,minBottomDepthMid,zMidZLevel)
+
+ elseif (config_alter_ICs_for_pbcs.eq.'zlevel_pbcs_off') then
+
+ do iCell = 1,nCells
+ bottomDepth(iCell) = refBottomDepth(maxLevelCell(iCell))
+ enddo
+
+ elseif (config_alter_ICs_for_pbcs.eq.'off') then
+ ! No action taken. This is for isopycnal or sigma coordinates,
+ ! or if ICs were already altered upon start-up.
+
+ else
+
+ write (0,*) ' Incorrect choice of config_alter_ICs_for_pbcs.'
+ call mpas_dmpar_abort(dminfo)
+
+ endif
+ endif
+
+ if (config_check_ssh_consistency) then
+ consistentSSH = .true.
+ do iCell = 1,nCells
+ ! Check if abs(ssh)>2m. If so, print warning.
+ if (abs(sum(h(1:maxLevelCell(iCell),iCell))-bottomDepth(iCell))>2.0) then
+ consistentSSH = .false.
+#ifdef MPAS_DEBUG
+ write (0,'(a)') ' Warning: abs(sum(h)-bottomDepth)>2m. Most likely, initial h does not match bottomDepth.'
+ write (0,*) ' iCell, K=maxLevelCell(iCell), bottomDepth(iCell),sum(h),bottomDepth,hZLevel(K),h(K): ', &
+ iCell, maxLevelCell(iCell), bottomDepth(iCell),sum(h(1:maxLevelCell(iCell),iCell)),bottomDepth(iCell), &
+ hZLevel(maxLevelCell(iCell)), h(maxLevelCell(iCell),iCell)
+#endif
+ endif
+ enddo
+
+ if (.not. consistentSSH) then
+ write(0,*) 'Warning: SSH is not consistent. Most likely, initial h does not match bottomDepth.'
+ end if
+ endif
+
+ if (config_check_zlevel_consistency) then
+ do iCell = 1,nCells
+ ! Check that bottomDepth and maxLevelCell match. Some older grids do not have the bottomDepth variable.
+ if (bottomDepth(iCell) > refBottomDepth(maxLevelCell(iCell)).or. &
+ bottomDepth(iCell) < refBottomDepthTopOfCell(maxLevelCell(iCell))) then
+ write (0,'(a)') ' fatal error: bottomDepth and maxLevelCell do not match:'
+ write (0,'(a,2i5,10f10.2)') ' iCell, maxLevelCell(iCell), bottomDepth(iCell): ', &
+ iCell, maxLevelCell(iCell), bottomDepth(iCell)
+ write (0,'(a,10f10.2)') ' refBottomDepth(maxLevelCell(iCell)), refBottomDepthTopOfCell(maxLevelCell(iCell)): ', &
+ refBottomDepth(maxLevelCell(iCell)), refBottomDepthTopOfCell(maxLevelCell(iCell))
+ call mpas_dmpar_abort(dminfo)
+ endif
+
+ enddo
+ endif
+
block => block % next
end do
- end subroutine ocn_init_z_level!}}}
+ end subroutine ocn_init_vert_coord!}}}
subroutine ocn_init_split_timestep(domain)!{{{
! Initialize splitting variables
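
The partial-bottom-cell rounding in ocn_init_vert_coord applies a simple per-column rule: if the true depth reaches less than halfway into the minimum-allowed partial cell, the column is rounded up to the full cell above; if it falls between the halfway point and the minimum, it is pinned at the minimum thickness. A one-column sketch, assuming uniform 10 m layers and a 10% minimum fraction (both values illustrative):

   program test_pbc_rounding
      implicit none
      integer, parameter :: nVertLevels = 3
      real, parameter :: min_pbc_fraction = 0.1   ! stands in for config_min_pbc_fraction
      real, dimension(nVertLevels) :: hZLevel, refBottomDepth, minBottomDepth, minBottomDepthMid
      real, dimension(nVertLevels+1) :: refBottomDepthTopOfCell
      real :: bottomDepth
      integer :: maxLevelCell, k

      hZLevel = (/ 10.0, 10.0, 10.0 /)
      refBottomDepth(1) = hZLevel(1)
      do k = 2, nVertLevels
         refBottomDepth(k) = refBottomDepth(k-1) + hZLevel(k)
      end do
      refBottomDepthTopOfCell(1) = 0.0
      refBottomDepthTopOfCell(2:nVertLevels+1) = refBottomDepth(1:nVertLevels)

      do k = 1, nVertLevels
         minBottomDepth(k) = refBottomDepth(k) - (1.0 - min_pbc_fraction)*hZLevel(k)
         minBottomDepthMid(k) = 0.5*(minBottomDepth(k) + refBottomDepthTopOfCell(k))
      end do

      ! A column whose true depth barely dips into layer 3:
      bottomDepth = 20.3
      maxLevelCell = 3
      k = maxLevelCell
      if (bottomDepth < minBottomDepthMid(k)) then
         maxLevelCell = maxLevelCell - 1            ! round up to the cell above
         bottomDepth = refBottomDepth(maxLevelCell)
      elseif (bottomDepth < minBottomDepth(k)) then
         bottomDepth = minBottomDepth(k)            ! enforce the minimum pbc thickness
      endif
      write (0,*) 'maxLevelCell, bottomDepth: ', maxLevelCell, bottomDepth  ! 2, 20.0
   end program test_pbc_rounding
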
@@ -600,7 +753,7 @@
integer :: iTracer, cell, cell1, cell2
real (kind=RKIND) :: uhSum, hSum, hEdge1
- real (kind=RKIND), dimension(:), pointer :: referenceBottomDepth
+ real (kind=RKIND), dimension(:), pointer :: refBottomDepth
real (kind=RKIND), dimension(:,:), pointer :: h
integer :: nVertLevels
@@ -610,7 +763,7 @@
do while (associated(block))
h => block % state % time_levs(1) % state % h % array
- referenceBottomDepth => block % mesh % referenceBottomDepth % array
+ refBottomDepth => block % mesh % refBottomDepth % array
nVertLevels = block % mesh % nVertLevels
! Compute barotropic velocity at first timestep
@@ -626,7 +779,7 @@
if (config_filter_btr_mode) then
do iCell=1,block % mesh % nCells
block % state % time_levs(1) % state % h % array(1,iCell) &
- = block % mesh % referenceBottomDepth % array(1)
+ = block % mesh % refBottomDepth % array(1)
enddo
endif
@@ -710,7 +863,7 @@
real (kind=RKIND) :: hSum, sumZstarWeights
real (kind=RKIND), dimension(:), pointer :: hZLevel, zstarWeight, &
- referenceBottomDepth
+ refBottomDepth
real (kind=RKIND), dimension(:,:), pointer :: h
! Initialize z-level grid variables from h, read in from input file.
@@ -722,7 +875,7 @@
hZLevel => block % mesh % hZLevel % array
maxLevelCell => block % mesh % maxLevelCell % array
zstarWeight => block % mesh % zstarWeight % array
- referenceBottomDepth => block % mesh % referenceBottomDepth % array
+ refBottomDepth => block % mesh % refBottomDepth % array
do iCell=1,block % mesh % nCells
! Compute the total column thickness, hSum, and the sum of zstar weights.
@@ -737,7 +890,7 @@
! where zeta is SSH and W_k are weights
do k = 1,maxLevelCell(iCell)
h(k,iCell) = hZLevel(k) &
- + (hSum - referenceBottomDepth(maxLevelCell(iCell))) &
+ + (hSum - refBottomDepth(maxLevelCell(iCell))) &
* zstarWeight(k)/sumZstarWeights
enddo
@@ -762,10 +915,6 @@
integer :: i, iCell, iEdge, iVertex, k
type (block_type), pointer :: block
- real (kind=RKIND), dimension(:,:), pointer :: h, u, u_src, rho
- real (kind=RKIND), dimension(:,:,:), pointer :: tracers
- real (kind=RKIND) :: delta_rho, pi, latCenter, lonCenter, dist
- real (kind=RKIND) :: centerx, centery
integer :: nCells, nEdges, nVertices, nVertLevels, vertexDegree
integer, dimension(:), pointer :: &
@@ -935,6 +1084,72 @@
end subroutine ocn_compute_mesh_scaling!}}}
+ subroutine ocn_setup_sign_and_index_fields(mesh)!{{{
+
+ type (mesh_type), intent(inout) :: mesh
+
+ integer, dimension(:), pointer :: nEdgesOnCell
+ integer, dimension(:,:), pointer :: edgesOnCell, edgesOnVertex, cellsOnVertex, cellsOnEdge, verticesOnCell, verticesOnEdge
+ integer, dimension(:,:), pointer :: edgeSignOnCell, edgeSignOnVertex, kiteIndexOnCell
+
+ integer :: nCells, nEdges, nVertices, vertexDegree
+ integer :: iCell, iEdge, iVertex, i, j, k
+
+ nCells = mesh % nCells
+ nEdges = mesh % nEdges
+ nVertices = mesh % nVertices
+ vertexDegree = mesh % vertexDegree
+
+ nEdgesOnCell => mesh % nEdgesOnCell % array
+ edgesOnCell => mesh % edgesOnCell % array
+ edgesOnVertex => mesh % edgesOnVertex % array
+ cellsOnVertex => mesh % cellsOnVertex % array
+ cellsOnEdge => mesh % cellsOnEdge % array
+ verticesOnCell => mesh % verticesOnCell % array
+ verticesOnEdge => mesh % verticesOnEdge % array
+ edgeSignOnCell => mesh % edgeSignOnCell % array
+ edgeSignOnVertex => mesh % edgeSignOnVertex % array
+ kiteIndexOnCell => mesh % kiteIndexOnCell % array
+
+ edgeSignOnCell = 0
+ edgeSignOnVertex = 0
+ kiteIndexOnCell = 0
+
+ do iCell = 1, nCells
+ do i = 1, nEdgesOnCell(iCell)
+ iEdge = edgesOnCell(i, iCell)
+ iVertex = verticesOnCell(i, iCell)
+
+ ! Vector points from cell 1 to cell 2
+ if(iCell == cellsOnEdge(1, iEdge)) then
+ edgeSignOnCell(i, iCell) = -1
+ else
+ edgeSignOnCell(i, iCell) = 1
+ end if
+
+ do j = 1, vertexDegree
+ if(cellsOnVertex(j, iVertex) == iCell) then
+ kiteIndexOnCell(i, iCell) = j
+ end if
+ end do
+ end do
+ end do
+
+ do iVertex = 1, nVertices
+ do i = 1, vertexDegree
+ iEdge = edgesOnVertex(i, iVertex)
+
+ ! Vector points from vertex 1 to vertex 2
+ if(iVertex == verticesOnEdge(1, iEdge)) then
+ edgeSignOnVertex(i, iVertex) = -1
+ else
+ edgeSignOnVertex(i, iVertex) = 1
+ end if
+ end do
+ end do
+
+ end subroutine ocn_setup_sign_and_index_fields!}}}
+
end module mpas_core
! vim: foldmethod=marker
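
ocn_setup_sign_and_index_fields precomputes, per cell/edge pair, the sign relating the fixed edge normal (oriented from cellsOnEdge(1,:) to cellsOnEdge(2,:)) to each cell: -1 when the cell is cellsOnEdge(1,iEdge), +1 otherwise. The cell-centered loops introduced elsewhere in this commit then accumulate signed edge fluxes instead of scattering updates into cell1/cell2 from an edge loop. A single-cell divergence sketch, with all mesh data invented:

   program test_edge_sign
      implicit none
      integer, parameter :: nEdgesOnCell = 3
      integer, dimension(nEdgesOnCell) :: edgeSignOnCell
      real, dimension(nEdgesOnCell) :: u, dvEdge
      real :: areaCell, divergence
      integer :: i

      ! Invented data: this cell is cellsOnEdge(1,:) for edges 1 and 3
      ! (sign -1, normal points away) and cellsOnEdge(2,:) for edge 2 (sign +1).
      edgeSignOnCell = (/ -1, 1, -1 /)
      u = (/ 0.2, 0.1, -0.3 /)             ! normal velocity on each edge (m/s)
      dvEdge = (/ 1.0e3, 1.0e3, 1.0e3 /)   ! edge lengths (m)
      areaCell = 4.0e5                     ! cell area (m^2)

      ! Same accumulation pattern as the rewritten loop in ocn_diagnostic_solve;
      ! the leading minus recovers outward-positive divergence.
      divergence = 0.0
      do i = 1, nEdgesOnCell
         divergence = divergence - edgeSignOnCell(i) * dvEdge(i) * u(i) / areaCell
      end do
      write (0,*) 'divergence: ', divergence  ! -5.0e-4 for these values
   end program test_edge_sign
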
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_tendency.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tendency.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tendency.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -70,7 +70,9 @@
ocn_diagnostic_solve, &
ocn_wtop, &
ocn_fuperp, &
- ocn_tendency_init
+ ocn_tendency_init, &
+ ocn_filter_btr_mode_u, &
+ ocn_filter_btr_mode_tend_u
!--------------------------------------------------------------------
!
@@ -170,7 +172,7 @@
real (kind=RKIND), dimension(:,:), pointer :: &
h_edge, h, u, rho, zMid, pressure, &
- tend_u, circulation, vorticity, ke, ke_edge, Vor_edge, &
+ tend_u, circulation, vorticity, viscosity, ke, ke_edge, Vor_edge, &
MontPot, wTop, divergence, vertViscTopOfEdge
real (kind=RKIND), dimension(:,:), pointer :: u_src
@@ -184,6 +186,7 @@
wTop => s % wTop % array
zMid => s % zMid % array
h_edge => s % h_edge % array
+ viscosity => s % viscosity % array
vorticity => s % vorticity % array
divergence => s % divergence % array
ke => s % ke % array
@@ -235,7 +238,7 @@
! strictly only valid for config_h_mom_eddy_visc2 == constant
!
call mpas_timer_start("hmix", .false., velHmixTimer)
- call ocn_vel_hmix_tend(grid, divergence, vorticity, tend_u, err)
+ call ocn_vel_hmix_tend(grid, divergence, vorticity, viscosity, tend_u, err)
call mpas_timer_stop("hmix", velHmixTimer)
!
@@ -402,15 +405,14 @@
maxLevelCell, maxLevelEdgeTop, maxLevelEdgeBot, &
maxLevelVertexBot
integer, dimension(:,:), pointer :: cellsOnEdge, cellsOnVertex, &
- verticesOnEdge, edgesOnEdge, edgesOnVertex,boundaryCell
+ verticesOnEdge, edgesOnEdge, edgesOnVertex,boundaryCell, kiteIndexOnCell, verticesOnCell, edgeSignOnVertex, edgeSignOnCell, edgesOnCell
real (kind=RKIND) :: d2fdx2_cell1, d2fdx2_cell2, coef_3rd_order, r_tmp, invAreaCell1, invAreaCell2, invAreaTri1, invAreaTri2, invLength, h_vertex
real (kind=RKIND), dimension(:), allocatable:: pTop
real (kind=RKIND), dimension(:), pointer :: &
- h_s, fVertex, dvEdge, dcEdge, areaCell, areaTriangle, &
- referenceBottomDepth, ssh
+ bottomDepth, fVertex, dvEdge, dcEdge, areaCell, areaTriangle, ssh
real (kind=RKIND), dimension(:,:), pointer :: &
weightsOnEdge, kiteAreasOnVertex, h_edge, h, u, v, pressure,&
circulation, vorticity, ke, ke_edge, MontPot, wTop, zMid, &
@@ -433,11 +435,11 @@
kev => s % kev % array
kevc => s % kevc % array
ke_edge => s % ke_edge % array
- Vor_edge => s % Vor_edge % array
- Vor_vertex => s % Vor_vertex % array
- Vor_cell => s % Vor_cell % array
- gradVor_n => s % gradVor_n % array
- gradVor_t => s % gradVor_t % array
+ Vor_edge => s % Vor_edge % array
+ Vor_vertex => s % Vor_vertex % array
+ Vor_cell => s % Vor_cell % array
+ gradVor_n => s % gradVor_n % array
+ gradVor_t => s % gradVor_t % array
rho => s % rho % array
MontPot => s % MontPot % array
pressure => s % pressure % array
@@ -452,20 +454,22 @@
verticesOnEdge => grid % verticesOnEdge % array
nEdgesOnCell => grid % nEdgesOnCell % array
nEdgesOnEdge => grid % nEdgesOnEdge % array
+ edgesOnCell => grid % edgesOnCell % array
edgesOnEdge => grid % edgesOnEdge % array
edgesOnVertex => grid % edgesOnVertex % array
dcEdge => grid % dcEdge % array
dvEdge => grid % dvEdge % array
areaCell => grid % areaCell % array
areaTriangle => grid % areaTriangle % array
- h_s => grid % h_s % array
+ bottomDepth => grid % bottomDepth % array
fVertex => grid % fVertex % array
- referenceBottomDepth => grid % referenceBottomDepth % array
deriv_two => grid % deriv_two % array
maxLevelCell => grid % maxLevelCell % array
maxLevelEdgeTop => grid % maxLevelEdgeTop % array
maxLevelEdgeBot => grid % maxLevelEdgeBot % array
maxLevelVertexBot => grid % maxLevelVertexBot % array
+ kiteIndexOnCell => grid % kiteIndexOnCell % array
+ verticesOnCell => grid % verticesOnCell % array
nCells = grid % nCells
nEdges = grid % nEdges
@@ -475,7 +479,10 @@
boundaryCell => grid % boundaryCell % array
+ edgeSignOnVertex => grid % edgeSignOnVertex % array
+ edgeSignOnCell => grid % edgeSignOnCell % array
+
!
! Compute height on cell edges at velocity locations
! Namelist options control the order of accuracy of the reconstructed h_edge value
@@ -576,40 +583,33 @@
divergence(:,:) = 0.0
ke(:,:) = 0.0
v(:,:) = 0.0
- do iEdge=1,nEdges
- vertex1 = verticesOnEdge(1,iEdge)
- vertex2 = verticesOnEdge(2,iEdge)
+ do iVertex = 1, nVertices
+ invAreaTri1 = 1.0 / areaTriangle(iVertex)
+ do i = 1, vertexDegree
+ iEdge = edgesOnVertex(i, iVertex)
+ do k = 1, maxLevelVertexBot(iVertex)
+ r_tmp = dcEdge(iEdge) * u(k, iEdge)
- cell1 = cellsOnEdge(1,iEdge)
- cell2 = cellsOnEdge(2,iEdge)
+ circulation(k, iVertex) = circulation(k, iVertex) + edgeSignOnVertex(i, iVertex) * r_tmp
+ vorticity(k, iVertex) = vorticity(k, iVertex) + edgeSignOnVertex(i, iVertex) * r_tmp * invAreaTri1
+ end do
+ end do
+ end do
- invAreaTri1 = 1.0 / areaTriangle(vertex1)
- invAreaTri2 = 1.0 / areaTriangle(vertex2)
+ do iCell = 1, nCells
+ invAreaCell1 = 1.0 / areaCell(iCell)
+ do i = 1, nEdgesOnCell(iCell)
+ iEdge = edgesOnCell(i, iCell)
+ do k = 1, maxLevelCell(iCell)
+ r_tmp = dvEdge(iEdge) * u(k, iEdge) * invAreaCell1
- !dwj: 02/23/12 arraCell(nCells+1) is still 0, this is a temporary fix
- invAreaCell1 = 1.0 / max(areaCell(cell1), 1.0)
- invAreaCell2 = 1.0 / max(areaCell(cell2), 1.0)
-
- do k=1,maxLevelEdgeBot(iEdge)
- ! Compute circulation and relative vorticity at each vertex
- r_tmp = dcEdge(iEdge) * u(k,iEdge)
- circulation(k,vertex1) = circulation(k,vertex1) - r_tmp
- circulation(k,vertex2) = circulation(k,vertex2) + r_tmp
-
- vorticity(k, vertex1) = vorticity(k, vertex1) - r_tmp * invAreaTri1
- vorticity(k, vertex2) = vorticity(k, vertex2) + r_tmp * invAreaTri2
-
- ! Compute the divergence at each cell center
- r_tmp = dvEdge(iEdge) * u(k, iEdge)
- divergence(k,cell1) = divergence(k,cell1) + r_tmp * invAreaCell1
- divergence(k,cell2) = divergence(k,cell2) - r_tmp * invAreaCell2
-
- ! Compute kinetic energy in each cell
- r_tmp = r_tmp * dcEdge(iEdge) * u(k,iEdge)
- ke(k,cell1) = ke(k,cell1) + 0.25 * r_tmp * invAreaCell1
- ke(k,cell2) = ke(k,cell2) + 0.25 * r_tmp * invAreaCell2
+ divergence(k, iCell) = divergence(k, iCell) - edgeSignOnCell(i, iCell) * r_tmp
+ ke(k, iCell) = ke(k, iCell) + 0.25 * r_tmp * dcEdge(iEdge) * u(k,iEdge)
+ end do
end do
+ end do
+ do iEdge=1,nEdges
! Compute v (tangential) velocities
do i=1,nEdgesOnEdge(iEdge)
eoe = edgesOnEdge(i,iEdge)
@@ -626,29 +626,27 @@
! Compute kinetic energy in each vertex
!
kev(:,:) = 0.0; kevc(:,:) = 0.0
- do iEdge=1,nEdges*ke_vertex_flag
- do k=1,nVertLevels
- r_tmp = dcEdge(iEdge) * dvEdge(iEdge) * u(k, iEdge)**2
- kev(k,verticesOnEdge(1,iEdge)) = kev(k,verticesOnEdge(1,iEdge)) + r_tmp
- kev(k,verticesOnEdge(2,iEdge)) = kev(k,verticesOnEdge(2,iEdge)) + r_tmp
- end do
+ do iVertex = 1, nVertices*ke_vertex_flag
+ do i = 1, vertexDegree
+ iEdge = edgesOnVertex(i, iVertex)
+ r_tmp = dcEdge(iEdge) * dvEdge(iEdge) * 0.25 / areaTriangle(iVertex)
+ do k = 1, nVertLevels
+ kev(k, iVertex) = kev(k, iVertex) + r_tmp * u(k, iEdge)**2
+ end do
+ end do
end do
- do iVertex = 1,nVertices*ke_vertex_flag
- do k=1,nVertLevels
- kev(k,iVertex) = kev(k,iVertex) / areaTriangle(iVertex) * 0.25
- enddo
- enddo
- do iVertex = 1, nVertices*ke_vertex_flag
- do i=1,grid % vertexDegree
- iCell = cellsOnVertex(i,iVertex)
- !dwj: 02/23/12 arraCell(nCells+1) is still 0, this is a temporary fix
- invAreaCell1 = 1.0 / max(areaCell(iCell), 1.0)
- do k=1,nVertLevels
- kevc(k,iCell) = kevc(k,iCell) + kiteAreasOnVertex(i, iVertex) * kev(k, iVertex) * invAreaCell1
- enddo
- enddo
- enddo
+ do iCell = 1, nCells*ke_vertex_flag
+ invAreaCell1 = 1.0 / areaCell(iCell)
+ do i = 1, nEdgesOnCell(iCell)
+ j = kiteIndexOnCell(i, iCell)
+ iVertex = verticesOnCell(i, iCell)
+ do k = 1, nVertLevels
+ kevc(k, iCell) = kevc(k, iCell) + kiteAreasOnVertex(j, iVertex) * kev(k, iVertex) * invAreaCell1
+ end do
+ end do
+ end do
+
!
! Compute kinetic energy in each cell by blending ke and kevc
!
@@ -690,30 +688,26 @@
Vor_cell(:,:) = 0.0
Vor_edge(:,:) = 0.0
- do iVertex = 1,nVertices
- do i=1,vertexDegree
- iCell = cellsOnVertex(i,iVertex)
- iEdge = edgesOnVertex(i,iVertex)
+ do iEdge = 1, nEdges
+ vertex1 = verticesOnEdge(1, iEdge)
+ vertex2 = verticesOnEdge(2, iEdge)
+ do k = 1, maxLevelEdgeBot(iEdge)
+ Vor_edge(k, iEdge) = 0.5 * (Vor_vertex(k, vertex1) + Vor_vertex(k, vertex2))
+ end do
+ end do
- !dwj: 02/23/12 arraCell(nCells+1) is still 0, this is a temporary fix
- invAreaCell1 = 1.0 / max(areaCell(iCell), 1.0)
+ do iCell = 1, nCells
+ invAreaCell1 = 1.0 / areaCell(iCell)
- ! Compute pv at cell centers
- ! ( this computes Vor_cell for all real cells and distance-1 ghost cells )
- do k = 1,maxLevelCell(iCell)
- Vor_cell(k,iCell) = Vor_cell(k,iCell) + kiteAreasOnVertex(i, iVertex) * Vor_vertex(k, iVertex) * invAreaCell1
- enddo
+ do i = 1, nEdgesOnCell(iCell)
+ j = kiteIndexOnCell(i, iCell)
+ iVertex = verticesOnCell(i, iCell)
+ do k = 1, maxLevelCell(iCell)
+ Vor_cell(k, iCell) = Vor_cell(k, iCell) + kiteAreasOnVertex(j, iVertex) * Vor_vertex(k, iVertex) * invAreaCell1
+ end do
+ end do
+ end do
- ! Compute pv at the edges
- ! ( this computes Vor_edge at all edges bounding real cells )
- do k=1,maxLevelEdgeBot(iEdge)
- Vor_edge(k,iEdge) = Vor_edge(k,iEdge) + 0.5 * Vor_vertex(k,iVertex)
- enddo
- enddo
- enddo
-
-! gradVor_n(:,:) = 0.0
-! gradVor_t(:,:) = 0.0
do iEdge = 1,nEdges
cell1 = cellsOnEdge(1, iEdge)
cell2 = cellsOnEdge(2, iEdge)
@@ -777,9 +771,9 @@
pTop(1) = 0.0
! For isopycnal mode, p is the Montgomery Potential.
! At top layer it is g*SSH, where SSH may be off by a
- ! constant (ie, h_s can be relative to top or bottom)
+ ! constant (ie, bottomDepth can be relative to top or bottom)
MontPot(1,iCell) = gravity &
- * (h_s(iCell) + sum(h(1:nVertLevels,iCell)))
+ * (bottomDepth(iCell) + sum(h(1:nVertLevels,iCell)))
do k=2,nVertLevels
pTop(k) = pTop(k-1) + rho(k-1,iCell)*gravity* h(k-1,iCell)
@@ -808,10 +802,10 @@
! Compute zMid, the z-coordinate of the middle of the layer.
! This is used for the rho g grad z momentum term.
- ! Note the negative sign, since referenceBottomDepth is positive
+ ! Note the negative sign, since bottomDepth is positive
! and z-coordinates are negative below the surface.
k = maxLevelCell(iCell)
- zMid(k:nVertLevels,iCell) = -referenceBottomDepth(k) + 0.5*h(k,iCell)
+ zMid(k:nVertLevels,iCell) = -bottomDepth(iCell) + 0.5*h(k,iCell)
do k=maxLevelCell(iCell)-1, 1, -1
zMid(k,iCell) = zMid(k+1,iCell) &
@@ -828,13 +822,11 @@
!
do iCell=1,nCells
! Start at the bottom where we know the depth, and go up.
- ! The bottom depth for this cell is
- ! referenceBottomDepth(maxLevelCell(iCell)).
- ! Note the negative sign, since referenceBottomDepth is positive
+ ! The bottom depth for this cell is bottomDepth(iCell).
+ ! Note the negative sign, since bottomDepth is positive
! and z-coordinates are negative below the surface.
- ssh(iCell) = -referenceBottomDepth(maxLevelCell(iCell)) &
- + sum(h(1:maxLevelCell(iCell),iCell))
+ ssh(iCell) = - bottomDepth(iCell) + sum(h(1:maxLevelCell(iCell),iCell))
end do
@@ -862,39 +854,68 @@
!> This routine computes the vertical velocity in the top layer for the ocean
!
!-----------------------------------------------------------------------
- subroutine ocn_wtop(s1,s2, grid)!{{{
- implicit none
+ subroutine ocn_wtop(grid,h,h_edge,u,wTop, err)!{{{
- type (state_type), intent(inout) :: s1 !< Input/Output: State 1 information
- type (state_type), intent(inout) :: s2 !< Input/Output: State 2 information
- type (mesh_type), intent(in) :: grid !< Input: Grid information
+ !-----------------------------------------------------------------
+ !
+ ! input variables
+ !
+ !-----------------------------------------------------------------
+ type (mesh_type), intent(in) :: &
+ grid !< Input: grid information
+
+ real (kind=RKIND), dimension(:,:), intent(in) :: &
+ h !< Input: thickness
+
+ real (kind=RKIND), dimension(:,:), intent(in) :: &
+ h_edge !< Input: h interpolated to an edge
+
+ real (kind=RKIND), dimension(:,:), intent(in) :: &
+ u !< Input: velocity
+
+ !-----------------------------------------------------------------
+ !
+ ! output variables
+ !
+ !-----------------------------------------------------------------
+
+ real (kind=RKIND), dimension(:,:), intent(out) :: &
+ wTop !< Output: vertical transport at top edge
+
+ integer, intent(out) :: err !< Output: error flag
+
+ !-----------------------------------------------------------------
+ !
+ ! local variables
+ !
+ !-----------------------------------------------------------------
+
integer :: iEdge, iCell, iVertex, k, cell1, cell2, vertex1, vertex2, eoe, i, j, cov
- real (kind=RKIND) :: flux, vorticity_abs, h_vertex, workpv, rho0Inv, hSum
+ real (kind=RKIND) :: flux, vorticity_abs, h_vertex, workpv, rho0Inv, hSum, invAreaCell
integer :: nCells, nEdges, nVertices, nVertLevels, vertexDegree
real (kind=RKIND), dimension(:), pointer :: &
- h_s, fVertex, fEdge, dvEdge, dcEdge, areaCell, areaTriangle, zstarWeight
- real (kind=RKIND), dimension(:,:), pointer :: uTransport,h,wTop, h_edge
- real (kind=RKIND), dimension(:,:), allocatable:: div_hu
- real (kind=RKIND), dimension(:), allocatable:: div_hu_btr, h_tend_col
+ dvEdge, areaCell, zstarWeight
+ real (kind=RKIND), dimension(:), allocatable:: div_hu, h_tend_col
+ real (kind=RKIND) :: div_hu_btr
integer, dimension(:,:), pointer :: cellsOnEdge, cellsOnVertex, &
verticesOnEdge, edgesOnCell, edgesOnEdge, edgesOnVertex, &
- boundaryEdge, boundaryCell
+ boundaryEdge, boundaryCell, edgeSignOnCell
integer, dimension(:), pointer :: nEdgesOnCell, nEdgesOnEdge, &
maxLevelCell, maxLevelEdgeTop, maxLevelEdgeBot, &
maxLevelVertexBot, maxLevelVertexTop
- h => s1 % h % array
- h_edge => s1 % h_edge % array
- uTransport => s2 % uTransport % array
- wTop => s2 % wTop % array
+ err = 0
+ nEdgesOnCell => grid % nEdgesOnCell % array
areaCell => grid % areaCell % array
cellsOnEdge => grid % cellsOnEdge % array
+ edgesOnCell => grid % edgesOnCell % array
+ edgeSignOnCell => grid % edgeSignOnCell % array
maxLevelCell => grid % maxLevelCell % array
maxLevelEdgeBot => grid % maxLevelEdgeBot % array
dvEdge => grid % dvEdge % array
@@ -904,64 +925,57 @@
nEdges = grid % nEdges
nVertLevels = grid % nVertLevels
- allocate(div_hu(nVertLevels,nCells+1), div_hu_btr(nCells+1), &
- h_tend_col(nVertLevels))
+ if (config_vert_grid_type.eq.'isopycnal') then
+ ! set vertical velocity to zero in isopycnal case
+ wTop=0.0_RKIND
+ return
+ end if
+
+ allocate(div_hu(nVertLevels), h_tend_col(nVertLevels))
+
!
! Compute div(h^{edge} u) for each cell
! See Ringler et al. (2010) jcp paper, eqn 19, 21, and fig. 3.
!
- div_hu(:,:) = 0.0
- do iEdge=1,nEdges
- cell1 = cellsOnEdge(1,iEdge)
- cell2 = cellsOnEdge(2,iEdge)
- do k=1,maxLevelEdgeBot(iEdge)
- flux = uTransport(k,iEdge) * dvEdge(iEdge) * h_edge(k,iEdge)
- div_hu(k,cell1) = div_hu(k,cell1) + flux
- div_hu(k,cell2) = div_hu(k,cell2) - flux
- end do
- end do
do iCell=1,nCells
- div_hu_btr(iCell) = 0.0
- do k=1,maxLevelCell(iCell)
- div_hu(k,iCell) = div_hu(k,iCell) / areaCell(iCell)
- div_hu_btr(iCell) = div_hu_btr(iCell) + div_hu(k,iCell)
- end do
- end do
+ div_hu(:) = 0.0_RKIND
+ div_hu_btr = 0.0_RKIND
+ hSum = 0.0_RKIND
+ invAreaCell = 1.0_RKIND / areaCell(iCell)
- !
- ! vertical velocity through layer interface
- !
- !dwj: 10/25/2011 - Need to explore isopycnal vs zlevel flags
- if (config_vert_grid_type.eq.'isopycnal') then
- ! set vertical velocity to zero in isopycnal case
- wTop=0.0
+ do i = 1, nEdgesOnCell(iCell)
+ iEdge = edgesOnCell(i, iCell)
- else ! zlevel or zstar type vertical grid
+ do k = 1, maxLevelEdgeBot(iEdge)
+ flux = u(k, iEdge) * dvEdge(iEdge) * h_edge(k, iEdge)
+ flux = edgeSignOnCell(i, iCell) * flux * invAreaCell
+ div_hu(k) = div_hu(k) - flux
+ div_hu_btr = div_hu_btr - flux
+ end do
+ end do
- do iCell=1,nCells
+ do k = 1, maxLevelCell(iCell)
+ h_tend_col(k) = - zstarWeight(k) * h(k, iCell) * div_hu_btr
+ hSum = hSum + zstarWeight(k) * h(k, iCell)
+ end do
- hSum = 0.0
- do k=1,maxLevelCell(iCell)
- h_tend_col(k) = - zstarWeight(k)*h(k,iCell)*div_hu_btr(iCell)
- hSum = hSum + zstarWeight(k)*h(k,iCell)
- end do
+ if(hSum > 0.0) then
h_tend_col = h_tend_col / hSum
+ end if
- ! Vertical velocity through layer interface at top and
- ! bottom is zero.
- wTop(1,iCell) = 0.0
- wTop(maxLevelCell(iCell)+1,iCell) = 0.0
- do k=maxLevelCell(iCell),2,-1
- wTop(k,iCell) = wTop(k+1,iCell) - div_hu(k,iCell) - h_tend_col(k)
- end do
+ ! Vertical velocity through layer interface at top and
+ ! bottom is zero.
+ wTop(1,iCell) = 0.0_RKIND
+ wTop(maxLevelCell(iCell)+1,iCell) = 0.0_RKIND
+ do k=maxLevelCell(iCell),2,-1
+ wTop(k,iCell) = wTop(k+1,iCell) - div_hu(k) - h_tend_col(k)
end do
+ end do
- endif
+ deallocate(div_hu, h_tend_col)
- deallocate(div_hu, div_hu_btr, h_tend_col)
-
end subroutine ocn_wtop!}}}
!***********************************************************************
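
In the rewritten ocn_wtop, the column-integrated divergence div_hu_btr is redistributed over the layers in proportion to zstarWeight*h (zstarWeight = 1 for the z-star option, so SSH perturbations are spread by layer thickness), and wTop is then integrated upward from a zero bottom boundary condition. A one-column sketch with invented layer divergences; note that the top interface comes out to exactly zero, as required:

   program test_wtop_column
      implicit none
      integer, parameter :: nVertLevels = 3
      real, dimension(nVertLevels) :: h, zstarWeight, div_hu, h_tend_col
      real, dimension(nVertLevels+1) :: wTop
      real :: div_hu_btr, hSum
      integer :: k

      h = (/ 10.0, 10.0, 10.0 /)           ! layer thicknesses (m)
      zstarWeight = 1.0                    ! z-star: weight by thickness
      div_hu = (/ 2.0e-4, -1.0e-4, 0.0 /)  ! invented layer divergences (1/s)
      div_hu_btr = sum(div_hu)

      ! Distribute the barotropic divergence over layers, as in ocn_wtop
      hSum = sum(zstarWeight*h)
      do k = 1, nVertLevels
         h_tend_col(k) = - zstarWeight(k)*h(k)*div_hu_btr / hSum
      end do

      ! Integrate upward; w is zero at the top and bottom interfaces
      wTop(1) = 0.0
      wTop(nVertLevels+1) = 0.0
      do k = nVertLevels, 2, -1
         wTop(k) = wTop(k+1) - div_hu(k) - h_tend_col(k)
      end do
      write (0,*) 'wTop: ', wTop
   end program test_wtop_column
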
@@ -1031,9 +1045,116 @@
end subroutine ocn_fuperp!}}}
+!***********************************************************************
+!
+! routine ocn_filter_btr_mode_u
+!
+!> \brief filters barotropic mode out of the velocity variable.
+!> \author Mark Petersen
+!> \date 23 September 2011
+!> \version SVN:$Id$
+!> \details
+!> This routine filters barotropic mode out of the velocity variable.
+!
+!-----------------------------------------------------------------------
+ subroutine ocn_filter_btr_mode_u(s, grid)!{{{
+ implicit none
+ type (state_type), intent(inout) :: s
+ type (mesh_type), intent(in) :: grid
+
+ integer :: iEdge, k, nEdges
+ real (kind=RKIND) :: vertSum, uhSum, hSum
+ real (kind=RKIND), dimension(:,:), pointer :: h_edge, u
+ integer, dimension(:), pointer :: maxLevelEdgeTop
+
+ call mpas_timer_start("ocn_filter_btr_mode_u")
+
+ u => s % u % array
+ h_edge => s % h_edge % array
+ maxLevelEdgeTop => grid % maxLevelEdgeTop % array
+ nEdges = grid % nEdges
+
+ do iEdge=1,nEdges
+
+ ! hSum is initialized outside the loop because on land boundaries
+ ! maxLevelEdgeTop=0, but I want to initialize hSum with a
+ ! nonzero value to avoid a NaN.
+ uhSum = h_edge(1,iEdge) * u(1,iEdge)
+ hSum = h_edge(1,iEdge)
+
+ do k=2,maxLevelEdgeTop(iEdge)
+ uhSum = uhSum + h_edge(k,iEdge) * u(k,iEdge)
+ hSum = hSum + h_edge(k,iEdge)
+ enddo
+
+ vertSum = uhSum/hSum
+ do k=1,maxLevelEdgeTop(iEdge)
+ u(k,iEdge) = u(k,iEdge) - vertSum
+ enddo
+ enddo ! iEdge
+
+ call mpas_timer_stop("ocn_filter_btr_mode_u")
+
+ end subroutine ocn_filter_btr_mode_u!}}}
+
!***********************************************************************
!
+! routine ocn_filter_btr_mode_tend_u
+!
+!> \brief Filters barotropic mode out of the u tendency
+!> \author Mark Petersen
+!> \date 23 September 2011
+!> \version SVN:$Id$
+!> \details
+!> This routine filters barotropic mode out of the u tendency.
+!
+!-----------------------------------------------------------------------
+ subroutine ocn_filter_btr_mode_tend_u(tend, s, grid)!{{{
+ implicit none
+
+ type (tend_type), intent(inout) :: tend
+ type (state_type), intent(in) :: s
+ type (mesh_type), intent(in) :: grid
+
+ integer :: iEdge, k, nEdges
+ real (kind=RKIND) :: vertSum, uhSum, hSum
+ real (kind=RKIND), dimension(:,:), pointer :: h_edge, tend_u
+
+ integer, dimension(:), pointer :: maxLevelEdgeTop
+
+ call mpas_timer_start("ocn_filter_btr_mode_tend_u")
+
+ tend_u => tend % u % array
+ h_edge => s % h_edge % array
+ maxLevelEdgeTop => grid % maxLevelEdgeTop % array
+ nEdges = grid % nEdges
+
+ do iEdge=1,nEdges
+
+ ! hSum is initialized outside the loop because on land boundaries
+ ! maxLevelEdgeTop=0, but I want to initialize hSum with a
+ ! nonzero value to avoid a NaN.
+ uhSum = h_edge(1,iEdge) * tend_u(1,iEdge)
+ hSum = h_edge(1,iEdge)
+
+ do k=2,maxLevelEdgeTop(iEdge)
+ uhSum = uhSum + h_edge(k,iEdge) * tend_u(k,iEdge)
+ hSum = hSum + h_edge(k,iEdge)
+ enddo
+
+ vertSum = uhSum/hSum
+ do k=1,maxLevelEdgeTop(iEdge)
+ tend_u(k,iEdge) = tend_u(k,iEdge) - vertSum
+ enddo
+ enddo ! iEdge
+
+ call mpas_timer_stop("ocn_filter_btr_mode_tend_u")
+
+ end subroutine ocn_filter_btr_mode_tend_u!}}}
+
+!***********************************************************************
+!
! routine ocn_tendency_init
!
!> \brief Initializes flags used within tendency routines.
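
Both new filter routines remove the thickness-weighted vertical mean from each edge column, seeding uhSum and hSum with level 1 so that land edges (where maxLevelEdgeTop = 0) never produce a zero denominator. A single-column sketch with invented values:

   program test_btr_filter
      implicit none
      integer, parameter :: nVertLevels = 4
      real, dimension(nVertLevels) :: u, h_edge
      real :: uhSum, hSum, vertSum
      integer :: k, maxLevelEdgeTop

      maxLevelEdgeTop = nVertLevels
      h_edge = (/ 10.0, 10.0, 20.0, 20.0 /)   ! layer thicknesses at the edge (m)
      u = (/ 0.30, 0.10, -0.05, -0.10 /)      ! normal velocity (m/s)

      ! Seed with level 1, as in ocn_filter_btr_mode_u, so hSum is nonzero
      ! even when maxLevelEdgeTop = 0 on land boundaries.
      uhSum = h_edge(1)*u(1)
      hSum = h_edge(1)
      do k = 2, maxLevelEdgeTop
         uhSum = uhSum + h_edge(k)*u(k)
         hSum = hSum + h_edge(k)
      end do

      vertSum = uhSum/hSum   ! barotropic (depth-mean) velocity
      do k = 1, maxLevelEdgeTop
         u(k) = u(k) - vertSum
      end do
      write (0,*) 'filtered u: ', u
      write (0,*) 'residual depth mean: ', sum(h_edge*u)/sum(h_edge)  ! ~0
   end program test_btr_filter
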
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_test_cases.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_test_cases.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_test_cases.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -342,10 +342,10 @@
do iCell=1,grid % nCells
if (grid % lonCell % array(iCell) < 0.0) grid % lonCell % array(iCell) = grid % lonCell % array(iCell) + 2.0 * pii
r = sqrt(min(rr**2.0, (grid % lonCell % array(iCell) - lambda_c)**2.0 + (grid % latCell % array(iCell) - theta_c)**2.0))
- grid % h_s % array(iCell) = hs0 * (1.0 - r/rr)
+ grid % bottomDepth % array(iCell) = hs0 * (1.0 - r/rr)
end do
! output about mountain
-print *, 'h_s',minval(grid % h_s % array),sum(grid % h_s % array)/grid % nCells, maxval(grid % h_s % array)
+print *, 'bottomDepth',minval(grid % bottomDepth % array),sum(grid % bottomDepth % array)/grid % nCells, maxval(grid % bottomDepth % array)
!
! Initialize tracer fields
@@ -372,7 +372,7 @@
)**2.0 &
) / &
gravity
- state % h % array(1,iCell) = state % h % array(1,iCell) - grid % h_s % array(iCell)
+ state % h % array(1,iCell) = state % h % array(1,iCell) - grid % bottomDepth % array(iCell)
end do
end subroutine sw_test_case_5
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_thick_hadv.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_thick_hadv.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_thick_hadv.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -101,13 +101,13 @@
!
!-----------------------------------------------------------------
- integer :: iEdge, nEdges, cell1, cell2, nVertLevels, k
+ integer :: iEdge, nEdges, cell1, cell2, nVertLevels, k, i
integer :: iCell, nCells
- integer, dimension(:), pointer :: maxLevelEdgeBot, MaxLevelCell
- integer, dimension(:,:), pointer :: cellsOnEdge
+ integer, dimension(:), pointer :: maxLevelEdgeBot, MaxLevelCell, nEdgesOnCell
+ integer, dimension(:,:), pointer :: cellsOnEdge, edgesOnCell, edgeSignOnCell
- real (kind=RKIND) :: flux, invAreaCell1, invAreaCell2
+ real (kind=RKIND) :: flux, invAreaCell, invAreaCell1, invAreaCell2
real (kind=RKIND), dimension(:), pointer :: dvEdge, areaCell
!-----------------------------------------------------------------
@@ -130,20 +130,20 @@
dvEdge => grid % dvEdge % array
areaCell => grid % areaCell % array
- do iEdge=1,nEdges
- cell1 = cellsOnEdge(1,iEdge)
- cell2 = cellsOnEdge(2,iEdge)
- do k=1,maxLevelEdgeBot(iEdge)
- flux = u(k,iEdge) * dvEdge(iEdge) * h_edge(k,iEdge)
- tend(k,cell1) = tend(k,cell1) - flux
- tend(k,cell2) = tend(k,cell2) + flux
- end do
+ nEdgesOnCell => grid % nEdgesOnCell % array
+ edgesOnCell => grid % edgesOnCell % array
+ edgeSignOnCell => grid % edgeSignOnCell % array
+
+ do iCell = 1, nCells
+ invAreaCell = 1.0 / areaCell(iCell)
+ do i = 1, nEdgesOnCell(iCell)
+ iEdge = edgesOnCell(i, iCell)
+ do k = 1, maxLevelEdgeBot(iEdge)
+ flux = u(k, iEdge) * dvEdge(iEdge) * h_edge(k, iEdge)
+ tend(k, iCell) = tend(k, iCell) + edgeSignOnCell(i, iCell) * flux * invAreaCell
+ end do
+ end do
end do
- do iCell=1,nCells
- do k=1,maxLevelCell(iCell)
- tend(k,iCell) = tend(k,iCell) / areaCell(iCell)
- end do
- end do
!--------------------------------------------------------------------
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_time_average.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_time_average.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_time_average.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -116,14 +116,16 @@
acc_u => state % acc_u % array
acc_uVar => state % acc_uVar % array
- acc_ssh = acc_ssh / nAccumulate
- acc_sshVar = acc_sshVar / nAccumulate
- acc_uReconstructZonal = acc_uReconstructZonal / nAccumulate
- acc_uReconstructMeridional = acc_uReconstructMeridional / nAccumulate
- acc_uReconstructZonalVar = acc_uReconstructZonalVar / nAccumulate
- acc_uReconstructMeridionalVar = acc_uReconstructMeridionalVar / nAccumulate
- acc_u = acc_u / nAccumulate
- acc_uVar = acc_uVar / nAccumulate
+ if(nAccumulate > 0) then
+ acc_ssh = acc_ssh / nAccumulate
+ acc_sshVar = acc_sshVar / nAccumulate
+ acc_uReconstructZonal = acc_uReconstructZonal / nAccumulate
+ acc_uReconstructMeridional = acc_uReconstructMeridional / nAccumulate
+ acc_uReconstructZonalVar = acc_uReconstructZonalVar / nAccumulate
+ acc_uReconstructMeridionalVar = acc_uReconstructMeridionalVar / nAccumulate
+ acc_u = acc_u / nAccumulate
+ acc_uVar = acc_uVar / nAccumulate
+ end if
end subroutine ocn_time_average_normalize!}}}
end module ocn_time_average
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_time_integration_rk4.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_time_integration_rk4.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_time_integration_rk4.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -78,10 +78,8 @@
integer :: iCell, k, i, err
type (block_type), pointer :: block
- type (state_type), target :: provis
- type (state_type), pointer :: provis_ptr
- integer :: rk_step, iEdge, cell1, cell2
+ integer :: rk_step
real (kind=RKIND), dimension(4) :: rk_weights, rk_substep_weights
@@ -95,15 +93,8 @@
real (kind=RKIND), dimension(:), allocatable:: A,C,uTemp
real (kind=RKIND), dimension(:,:), allocatable:: tracersTemp
+ call mpas_setup_provis_states(domain % blocklist)
- block => domain % blocklist
- call mpas_allocate_state(block, provis, &
- block % mesh % nCells, block % mesh % nEdges, block % mesh % maxEdges, block % mesh % maxEdges2, &
- block % mesh % nVertices, block % mesh % vertexDegree, block % mesh % nVertLevels )
-
- provis_ptr => provis
- call mpas_create_state_links(provis_ptr)
-
!
! Initialize time_levs(2) with state at current time
! Initialize first RK state
@@ -112,19 +103,18 @@
!
block => domain % blocklist
do while (associated(block))
+ block % state % time_levs(2) % state % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
+ block % state % time_levs(2) % state % h % array(:,:) = block % state % time_levs(1) % state % h % array(:,:)
+ do iCell=1,block % mesh % nCells ! couple tracers to h
+ do k=1,block % mesh % maxLevelCell % array(iCell)
+ block % state % time_levs(2) % state % tracers % array(:,k,iCell) = block % state % time_levs(1) % state % tracers % array(:,k,iCell) &
+ * block % state % time_levs(1) % state % h % array(k,iCell)
+ end do
+ end do
- block % state % time_levs(2) % state % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
- block % state % time_levs(2) % state % h % array(:,:) = block % state % time_levs(1) % state % h % array(:,:)
- do iCell=1,block % mesh % nCells ! couple tracers to h
- do k=1,block % mesh % maxLevelCell % array(iCell)
- block % state % time_levs(2) % state % tracers % array(:,k,iCell) = block % state % time_levs(1) % state % tracers % array(:,k,iCell) &
- * block % state % time_levs(1) % state % h % array(k,iCell)
- end do
- end do
+ call mpas_copy_state(block % provis, block % state % time_levs(1) % state)
- call mpas_copy_state(provis, block % state % time_levs(1) % state)
-
- block => block % next
+ block => block % next
end do
rk_weights(1) = dt/6.
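
These weights implement classical fourth-order Runge-Kutta: the four substep tendencies are accumulated with dt/6, dt/3, dt/3, dt/6, while provisional states are rebuilt from time level 1 using the substep weights. Only rk_weights(1) is visible in this hunk; the substep weights dt/2, dt/2, dt, 0 used below are the standard RK4 assumption. A scalar sketch of the same accumulation pattern for dy/dt = -y (the ODE is purely illustrative):

   program test_rk4_weights
      implicit none
      real :: dt, y0, y, ynew, tend
      real, dimension(4) :: rk_weights, rk_substep_weights
      integer :: rk_step

      dt = 0.1
      y0 = 1.0

      rk_weights = (/ dt/6., dt/3., dt/3., dt/6. /)
      rk_substep_weights = (/ dt/2., dt/2., dt, 0. /)

      ynew = y0   ! accumulated solution (time_levs(2) analogue)
      y = y0      ! provisional state (provis analogue)
      do rk_step = 1, 4
         tend = -y                                    ! compute tendency from provis
         y = y0 + rk_substep_weights(rk_step)*tend    ! next provisional state
         ynew = ynew + rk_weights(rk_step)*tend       ! accumulate update
      end do
      write (0,*) 'RK4 step: ', ynew, ' exact: ', y0*exp(-dt)
   end program test_rk4_weights
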
@@ -146,10 +136,10 @@
! --- update halos for diagnostic variables
call mpas_timer_start("RK4-diagnostic halo update")
- call mpas_dmpar_exch_halo_field(provis % Vor_edge)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % provis % Vor_edge)
if (config_h_mom_eddy_visc4 > 0.0) then
- call mpas_dmpar_exch_halo_field(provis % divergence)
- call mpas_dmpar_exch_halo_field(provis % vorticity)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % provis % divergence)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % provis % vorticity)
end if
call mpas_timer_stop("RK4-diagnostic halo update")
@@ -159,22 +149,24 @@
block => domain % blocklist
do while (associated(block))
- ! mrp 111206 put ocn_wtop call at top for ALE
- call ocn_wtop(provis, provis, block % mesh)
-
if (.not.config_implicit_vertical_mix) then
- call ocn_vmix_coefs(block % mesh, provis, block % diagnostics, err)
+ call ocn_vmix_coefs(block % mesh, block % provis, block % diagnostics, err)
end if
- call ocn_tend_h(block % tend, provis, block % mesh)
- call ocn_tend_u(block % tend, provis, block % diagnostics, block % mesh)
- ! mrp 110718 filter btr mode out of u_tend
- ! still got h perturbations with just this alone. Try to set uBtr=0 after full u computation
+ ! advection of u uses u, while advection of h and tracers use uTransport.
+ call ocn_wtop(block % mesh, block % provis % h % array, block % provis % h_edge % array, &
+ block % provis % u % array, block % provis % wTop % array, err)
+ call ocn_tend_u(block % tend, block % provis, block % diagnostics, block % mesh)
+
+ call ocn_wtop(block % mesh, block % provis % h % array, block % provis % h_edge % array, &
+ block % provis % uTransport % array, block % provis % wTop % array, err)
+ call ocn_tend_h(block % tend, block % provis, block % mesh)
+
if (config_rk_filter_btr_mode) then
- call filter_btr_mode_tend_u(block % tend, provis, block % diagnostics, block % mesh)
+ call ocn_filter_btr_mode_tend_u(block % tend, block % provis, block % mesh)
endif
- call ocn_tend_scalar(block % tend, provis, block % diagnostics, block % mesh, dt)
+ call ocn_tend_scalar(block % tend, block % provis, block % diagnostics, block % mesh, dt)
block => block % next
end do
call mpas_timer_stop("RK4-tendency computations")
@@ -194,47 +186,44 @@
block => domain % blocklist
do while (associated(block))
- provis % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:) &
- + rk_substep_weights(rk_step) * block % tend % u % array(:,:)
+ block % provis % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:) &
+ + rk_substep_weights(rk_step) * block % tend % u % array(:,:)
- provis % h % array(:,:) = block % state % time_levs(1) % state % h % array(:,:) &
- + rk_substep_weights(rk_step) * block % tend % h % array(:,:)
+ block % provis % h % array(:,:) = block % state % time_levs(1) % state % h % array(:,:) &
+ + rk_substep_weights(rk_step) * block % tend % h % array(:,:)
do iCell=1,block % mesh % nCells
do k=1,block % mesh % maxLevelCell % array(iCell)
- provis % tracers % array(:,k,iCell) = ( &
- block % state % time_levs(1) % state % h % array(k,iCell) * &
- block % state % time_levs(1) % state % tracers % array(:,k,iCell) &
- + rk_substep_weights(rk_step) * block % tend % tracers % array(:,k,iCell) &
- ) / provis % h % array(k,iCell)
+ block % provis % tracers % array(:,k,iCell) = ( block % state % time_levs(1) % state % h % array(k,iCell) * &
+ block % state % time_levs(1) % state % tracers % array(:,k,iCell) &
+ + rk_substep_weights(rk_step) * block % tend % tracers % array(:,k,iCell) &
+ ) / block % provis % h % array(k,iCell)
end do
end do
if (config_test_case == 1) then ! For case 1, wind field should be fixed
- provis % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
+ block % provis % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
end if
if (config_prescribe_velocity) then
- provis % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
+ block % provis % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
end if
if (config_prescribe_thickness) then
- provis % h % array(:,:) = block % state % time_levs(1) % state % h % array(:,:)
+ block % provis % h % array(:,:) = block % state % time_levs(1) % state % h % array(:,:)
end if
- call ocn_diagnostic_solve(dt, provis, block % mesh)
+ call ocn_diagnostic_solve(dt, block % provis, block % mesh)
- ! Compute velocity transport, used in advection terms of h and tracer tendancy
- provis % uTransport % array(:,:) &
- = provis % u % array(:,:) &
- + provis % uBolusGM % array(:,:)
+ ! Compute velocity transport, used in advection terms of h and tracer tendency
+ block % provis % uTransport % array(:,:) &
+ = block % provis % u % array(:,:) &
+ + block % provis % uBolusGM % array(:,:)
block => block % next
end do
end if
call mpas_timer_stop("RK4-update diagnostic variables")
-
-
!--- accumulate update (for RK4)
call mpas_timer_start("RK4-RK4 accumulate update")
@@ -249,8 +238,8 @@
do iCell=1,block % mesh % nCells
do k=1,block % mesh % maxLevelCell % array(iCell)
block % state % time_levs(2) % state % tracers % array(:,k,iCell) = &
- block % state % time_levs(2) % state % tracers % array(:,k,iCell) &
- + rk_weights(rk_step) * block % tend % tracers % array(:,k,iCell)
+ block % state % time_levs(2) % state % tracers % array(:,k,iCell) &
+ + rk_weights(rk_step) * block % tend % tracers % array(:,k,iCell)
end do
end do
@@ -268,54 +257,51 @@
! A little clean up at the end: decouple new scalar fields and compute diagnostics for new state
!
call mpas_timer_start("RK4-cleaup phase")
+
+ ! Rescale tracers
block => domain % blocklist
- do while (associated(block))
+ do while(associated(block))
+ do iCell = 1, block % mesh % nCells
+ do k = 1, block % mesh % maxLevelCell % array(iCell)
+ block % state % time_levs(2) % state % tracers % array(:, k, iCell) = block % state % time_levs(2) % state % tracers % array(:, k, iCell) &
+ / block % state % time_levs(2) % state % h % array(k, iCell)
+ end do
+ end do
+ block => block % next
+ end do
- u => block % state % time_levs(2) % state % u % array
- tracers => block % state % time_levs(2) % state % tracers % array
- h => block % state % time_levs(2) % state % h % array
- h_edge => block % state % time_levs(2) % state % h_edge % array
- ke_edge => block % state % time_levs(2) % state % ke_edge % array
- num_tracers = block % state % time_levs(2) % state % num_tracers
- vertViscTopOfEdge => block % diagnostics % vertViscTopOfEdge % array
- vertDiffTopOfCell => block % diagnostics % vertDiffTopOfCell % array
- maxLevelCell => block % mesh % maxLevelCell % array
- maxLevelEdgeTop => block % mesh % maxLevelEdgeTop % array
-
- nCells = block % mesh % nCells
- nEdges = block % mesh % nEdges
- nVertLevels = block % mesh % nVertLevels
- do iCell=1,nCells
- do k=1,maxLevelCell(iCell)
- tracers(:,k,iCell) = tracers(:,k,iCell) / h(k,iCell)
- end do
- end do
+ if (config_implicit_vertical_mix) then
+ call mpas_timer_start("RK4-implicit vert mix")
+ block => domain % blocklist
+ do while(associated(block))
- if (config_implicit_vertical_mix) then
- call mpas_timer_start("RK4-implicit vert mix")
+ ! Call ocean diagnostic solve in preparation for vertical mixing. Note
+ ! it is called again after vertical mixing, because u and tracers change.
+ ! For Richardson vertical mixing, only rho, h_edge, and ke_edge need to
+ ! be computed. For kpp, more variables may be needed. Either way, this
+ ! could be made more efficient by only computing what is needed for the
+ ! implicit vmix routine that follows. mrp 121023.
+ call ocn_diagnostic_solve(dt, block % state % time_levs(2) % state, block % mesh)
- call ocn_vmix_coefs(block % mesh, block % state % time_levs(2) % state, block % diagnostics, err)
+ call ocn_vmix_implicit(dt, block % mesh, block % diagnostics, block % state % time_levs(2) % state, err)
+ block => block % next
+ end do
- !
- ! Implicit vertical solve for momentum
- !
- call ocn_vel_vmix_tend_implicit(block % mesh, dt, ke_edge, vertvisctopofedge, h, h_edge, u, err)
+ ! Update halo on u and tracers, which were just updated for implicit vertical mixing. If not done,
+ ! this leads to lack of volume conservation. It is required because halo updates in RK4 are only
+ ! conducted on tendencies, not on the velocity and tracer fields. So this update is required to
+ ! communicate the change due to implicit vertical mixing across the boundary.
+ call mpas_timer_start("RK4-implicit vert mix halos")
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % u)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % tracers)
+ call mpas_timer_stop("RK4-implicit vert mix halos")
- ! mrp 110718 filter btr mode out of u
- if (config_rk_filter_btr_mode) then
- call filter_btr_mode_u(block % state % time_levs(2) % state, block % mesh)
- !block % tend % h % array(:,:) = 0.0 ! I should not need this
- endif
+ call mpas_timer_stop("RK4-implicit vert mix")
+ end if
- !
- ! Implicit vertical solve for tracers
- !
-
- call ocn_tracer_vmix_tend_implicit(block % mesh, dt, vertdifftopofcell, h, tracers, err)
- call mpas_timer_stop("RK4-implicit vert mix")
- end if
-
+ block => domain % blocklist
+ do while (associated(block))
if (config_test_case == 1) then ! For case 1, wind field should be fixed
block % state % time_levs(2) % state % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
end if
@@ -330,7 +316,7 @@
call ocn_diagnostic_solve(dt, block % state % time_levs(2) % state, block % mesh)
- ! Compute velocity transport, used in advection terms of h and tracer tendancy
+ ! Compute velocity transport, used in advection terms of h and tracer tendency
block % state % time_levs(2) % state % uTransport % array(:,:) &
= block % state % time_levs(2) % state % u % array(:,:) &
+ block % state % time_levs(2) % state % uBolusGM % array(:,:)
@@ -343,244 +329,26 @@
block % state % time_levs(2) % state % uReconstructMeridional % array &
)
+!TDR
+ call mpas_reconstruct(block % mesh, block % mesh % u_src % array, &
+ block % state % time_levs(2) % state % uSrcReconstructX % array, &
+ block % state % time_levs(2) % state % uSrcReconstructY % array, &
+ block % state % time_levs(2) % state % uSrcReconstructZ % array, &
+ block % state % time_levs(2) % state % uSrcReconstructZonal % array, &
+ block % state % time_levs(2) % state % uSrcReconstructMeridional % array &
+ )
+!TDR
+
call ocn_time_average_accumulate(block % state % time_levs(2) % state, block % state % time_levs(1) % state)
block => block % next
end do
call mpas_timer_stop("RK4-cleaup phase")
- call mpas_deallocate_state(provis)
+ call mpas_deallocate_provis_states(domain % blocklist)
end subroutine ocn_time_integrator_rk4!}}}
- subroutine filter_btr_mode_tend_u(tend, s, d, grid)!{{{
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! Filter and remove barotropic mode from the tendencies
- !
- ! Input: s - current model state
- ! grid - grid metadata
- !
- ! Output: tend - computed tendencies for prognostic variables
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- implicit none
-
- type (tend_type), intent(inout) :: tend
- type (state_type), intent(in) :: s
- type (diagnostics_type), intent(in) :: d
- type (mesh_type), intent(in) :: grid
-
-! mrp 110512 I just split compute_tend into compute_tend_u and compute_tend_h.
-! Some of these variables can be removed, but at a later time.
- integer :: iEdge, iCell, iVertex, k, cell1, cell2, &
- vertex1, vertex2, eoe, i, j
-
- integer :: nCells, nEdges, nVertices, nVertLevels, nEdgesSolve
- real (kind=RKIND) :: vertSum, uhSum, hSum, sshEdge
- real (kind=RKIND), dimension(:), pointer :: &
- h_s, dvEdge, dcEdge, areaCell, areaTriangle, &
- meshScalingDel2, meshScalingDel4
- real (kind=RKIND), dimension(:,:), pointer :: &
- weightsOnEdge, kiteAreasOnVertex, h_edge, h, u, v, pressure, &
- tend_u, circulation, vorticity, ke, ke_edge, Vor_edge, &
- MontPot, wTop, divergence, vertViscTopOfEdge
- type (dm_info) :: dminfo
-
- integer, dimension(:), pointer :: nEdgesOnCell, nEdgesOnEdge, &
- maxLevelCell, maxLevelEdgeTop, maxLevelVertexBot
- integer, dimension(:,:), pointer :: &
- cellsOnEdge, cellsOnVertex, verticesOnEdge, edgesOnCell, &
- edgesOnEdge, edgesOnVertex
- real (kind=RKIND) :: u_diffusion
- real (kind=RKIND), dimension(:), allocatable:: fluxVertTop,w_dudzTopEdge
-
- real (kind=RKIND), allocatable, dimension(:,:) :: delsq_divergence
- real (kind=RKIND), allocatable, dimension(:,:) :: delsq_u
- real (kind=RKIND), allocatable, dimension(:,:) :: delsq_circulation, delsq_vorticity
-
-
- real (kind=RKIND), dimension(:,:), pointer :: u_src
- real (kind=RKIND), parameter :: rho_ref = 1000.0
-
- call mpas_timer_start("filter_btr_mode_tend_u")
-
- h => s % h % array
- u => s % u % array
- v => s % v % array
- wTop => s % wTop % array
- h_edge => s % h_edge % array
- circulation => s % circulation % array
- vorticity => s % vorticity % array
- divergence => s % divergence % array
- ke => s % ke % array
- ke_edge => s % ke_edge % array
- Vor_edge => s % Vor_edge % array
- MontPot => s % MontPot % array
- pressure => s % pressure % array
- vertViscTopOfEdge => d % vertViscTopOfEdge % array
-
- weightsOnEdge => grid % weightsOnEdge % array
- kiteAreasOnVertex => grid % kiteAreasOnVertex % array
- cellsOnEdge => grid % cellsOnEdge % array
- cellsOnVertex => grid % cellsOnVertex % array
- verticesOnEdge => grid % verticesOnEdge % array
- nEdgesOnCell => grid % nEdgesOnCell % array
- edgesOnCell => grid % edgesOnCell % array
- nEdgesOnEdge => grid % nEdgesOnEdge % array
- edgesOnEdge => grid % edgesOnEdge % array
- edgesOnVertex => grid % edgesOnVertex % array
- dcEdge => grid % dcEdge % array
- dvEdge => grid % dvEdge % array
- areaCell => grid % areaCell % array
- areaTriangle => grid % areaTriangle % array
- h_s => grid % h_s % array
- maxLevelCell => grid % maxLevelCell % array
- maxLevelEdgeTop => grid % maxLevelEdgeTop % array
- maxLevelVertexBot => grid % maxLevelVertexBot % array
-
- tend_u => tend % u % array
-
- nCells = grid % nCells
- nEdges = grid % nEdges
- nEdgesSolve = grid % nEdgesSolve
- nVertices = grid % nVertices
- nVertLevels = grid % nVertLevels
-
- u_src => grid % u_src % array
-
- do iEdge=1,grid % nEdges
-
- uhSum = (h_edge(1,iEdge)) * tend_u(1,iEdge)
- hSum = h_edge(1,iEdge)
-
- do k=2,grid % maxLevelEdgeTop % array(iEdge)
- uhSum = uhSum + h_edge(k,iEdge) * tend_u(k,iEdge)
- hSum = hSum + h_edge(k,iEdge)
- enddo
-
- vertSum = uhSum/hSum
-
- do k=1,grid % maxLevelEdgeTop % array(iEdge)
- tend_u(k,iEdge) = tend_u(k,iEdge) - vertSum
- enddo
-
- enddo ! iEdge
-
- call mpas_timer_stop("filter_btr_mode_tend_u")
-
- end subroutine filter_btr_mode_tend_u!}}}
-
- subroutine filter_btr_mode_u(s, grid)!{{{
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! Filter and remove barotropic mode.
- !
- ! Input: s - current model state
- ! grid - grid metadata
- !
- ! Output: tend - computed tendencies for prognostic variables
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- implicit none
-
- type (state_type), intent(inout) :: s
- type (mesh_type), intent(in) :: grid
-
-! mrp 110512 I just split compute_tend into compute_tend_u and compute_tend_h.
-! Some of these variables can be removed, but at a later time.
- integer :: iEdge, iCell, iVertex, k, cell1, cell2, &
- vertex1, vertex2, eoe, i, j
-
- integer :: nCells, nEdges, nVertices, nVertLevels, nEdgesSolve
- real (kind=RKIND) :: vertSum, uhSum, hSum, sshEdge
- real (kind=RKIND), dimension(:), pointer :: &
- h_s, dvEdge, dcEdge, areaCell, areaTriangle, &
- meshScalingDel2, meshScalingDel4
- real (kind=RKIND), dimension(:,:), pointer :: &
- weightsOnEdge, kiteAreasOnVertex, h_edge, h, u, v, pressure, &
- tend_u, circulation, vorticity, ke, ke_edge, Vor_edge, &
- MontPot, wTop, divergence, vertViscTopOfEdge
- type (dm_info) :: dminfo
-
- integer, dimension(:), pointer :: nEdgesOnCell, nEdgesOnEdge, &
- maxLevelCell, maxLevelEdgeTop, maxLevelVertexBot
- integer, dimension(:,:), pointer :: &
- cellsOnEdge, cellsOnVertex, verticesOnEdge, edgesOnCell, &
- edgesOnEdge, edgesOnVertex
- real (kind=RKIND) :: u_diffusion
- real (kind=RKIND), dimension(:), allocatable:: fluxVertTop,w_dudzTopEdge
-
- real (kind=RKIND), allocatable, dimension(:,:) :: delsq_divergence
- real (kind=RKIND), allocatable, dimension(:,:) :: delsq_u
- real (kind=RKIND), allocatable, dimension(:,:) :: delsq_circulation, delsq_vorticity
-
-
- real (kind=RKIND), dimension(:,:), pointer :: u_src
- real (kind=RKIND), parameter :: rho_ref = 1000.0
-
- call mpas_timer_start("filter_btr_mode_u")
-
- h => s % h % array
- u => s % u % array
- v => s % v % array
- wTop => s % wTop % array
- h_edge => s % h_edge % array
- circulation => s % circulation % array
- vorticity => s % vorticity % array
- divergence => s % divergence % array
- ke => s % ke % array
- ke_edge => s % ke_edge % array
- Vor_edge => s % Vor_edge % array
- MontPot => s % MontPot % array
- pressure => s % pressure % array
-
- weightsOnEdge => grid % weightsOnEdge % array
- kiteAreasOnVertex => grid % kiteAreasOnVertex % array
- cellsOnEdge => grid % cellsOnEdge % array
- cellsOnVertex => grid % cellsOnVertex % array
- verticesOnEdge => grid % verticesOnEdge % array
- nEdgesOnCell => grid % nEdgesOnCell % array
- edgesOnCell => grid % edgesOnCell % array
- nEdgesOnEdge => grid % nEdgesOnEdge % array
- edgesOnEdge => grid % edgesOnEdge % array
- edgesOnVertex => grid % edgesOnVertex % array
- dcEdge => grid % dcEdge % array
- dvEdge => grid % dvEdge % array
- areaCell => grid % areaCell % array
- areaTriangle => grid % areaTriangle % array
- h_s => grid % h_s % array
- maxLevelCell => grid % maxLevelCell % array
- maxLevelEdgeTop => grid % maxLevelEdgeTop % array
- maxLevelVertexBot => grid % maxLevelVertexBot % array
-
- nCells = grid % nCells
- nEdges = grid % nEdges
- nEdgesSolve = grid % nEdgesSolve
- nVertices = grid % nVertices
- nVertLevels = grid % nVertLevels
-
- u_src => grid % u_src % array
-
- do iEdge=1,grid % nEdges
-
- uhSum = (h_edge(1,iEdge)) * u(1,iEdge)
- hSum = h_edge(1,iEdge)
-
- do k=2,grid % maxLevelEdgeTop % array(iEdge)
- uhSum = uhSum + h_edge(k,iEdge) * u(k,iEdge)
- hSum = hSum + h_edge(k,iEdge)
- enddo
-
- vertSum = uhSum/hSum
- do k=1,grid % maxLevelEdgeTop % array(iEdge)
- u(k,iEdge) = u(k,iEdge) - vertSum
- enddo
-
- enddo ! iEdge
-
- call mpas_timer_stop("filter_btr_mode_u")
-
- end subroutine filter_btr_mode_u!}}}
-
end module ocn_time_integration_rk4
! vim: foldmethod=marker
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_time_integration_split.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_time_integration_split.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_time_integration_split.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -86,9 +86,9 @@
type (dm_info) :: dminfo
integer :: iCell, i,k,j, iEdge, cell1, cell2, split_explicit_step, split, &
eoe, oldBtrSubcycleTime, newBtrSubcycleTime, uPerpTime, BtrCorIter, &
- n_bcl_iter(config_n_ts_iter)
+ n_bcl_iter(config_n_ts_iter), stage1_tend_time
type (block_type), pointer :: block
- real (kind=RKIND) :: uhSum, hSum, flux, sshEdge, &
+ real (kind=RKIND) :: uhSum, hSum, flux, sshEdge, hEdge1, &
CoriolisTerm, uCorr, temp, temp_h, coef, FBtr_coeff, sshCell1, sshCell2
integer :: num_tracers, ucorr_coef, err
real (kind=RKIND), dimension(:,:), pointer :: &
@@ -117,6 +117,9 @@
! The baroclinic velocity needs be recomputed at the beginning of a
! timestep because the implicit vertical mixing is conducted on the
! total u. We keep uBtr from the previous timestep.
+ ! Note that uBcl may now include a barotropic component, because the
+ ! weights h have changed. That is OK, because the GBtrForcing variable
+ ! subtracts out the barotropic component from the baroclinic.
block % state % time_levs(1) % state % uBcl % array(k,iEdge) &
= block % state % time_levs(1) % state % u % array(k,iEdge) &
- block % state % time_levs(1) % state % uBtr % array( iEdge)
@@ -181,10 +184,22 @@
block => domain % blocklist
do while (associated(block))
+
+ stage1_tend_time = min(split_explicit_step,2)
+
if (.not.config_implicit_vertical_mix) then
- call ocn_vmix_coefs(block % mesh, block % state % time_levs(2) % state, block % diagnostics, err)
+ call ocn_vmix_coefs(block % mesh, block % state % time_levs(stage1_tend_time) % state, block % diagnostics, err)
end if
- call ocn_tend_u(block % tend, block % state % time_levs(2) % state, block % diagnostics, block % mesh)
+
+ ! compute wTop. Use u (rather than uTransport) for momentum advection.
+ ! Use the most recent time level available.
+ call ocn_wtop(block % mesh, block % state % time_levs(stage1_tend_time) % state % h % array, &
+ block % state % time_levs(stage1_tend_time) % state % h_edge % array, &
+ block % state % time_levs(stage1_tend_time) % state % u % array, &
+ block % state % time_levs(stage1_tend_time) % state % wTop % array, err)
+
+ call ocn_tend_u(block % tend, block % state % time_levs(stage1_tend_time) % state, block % diagnostics, block % mesh)
+
block => block % next
end do
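The min(split_explicit_step,2) idiom above selects the freshest state available to the stage-1 tendencies: only time level 1 is valid on the first pass through the outer iteration, while time level 2 holds the latest mid-step estimate on later passes. A minimal, self-contained sketch of the selection (hypothetical values, not part of the commit):

   program stage1_time_level_demo
      implicit none
      integer :: split_explicit_step, stage1_tend_time, n_ts_iter
      n_ts_iter = 3                              ! hypothetical iteration count
      do split_explicit_step = 1, n_ts_iter
         ! iteration 1 reads time level 1 (state at the start of the step);
         ! iterations 2..n read time level 2 (the latest mid-step estimate)
         stage1_tend_time = min(split_explicit_step, 2)
         print *, 'iteration', split_explicit_step, 'uses time level', stage1_tend_time
      end do
   end program stage1_time_level_demo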
@@ -246,6 +261,7 @@
= 0.5*( &
block % state % time_levs(1) % state % uBcl % array(k,iEdge) &
+ uTemp(k) - dt * block % state % time_levs(1) % state % GBtrForcing % array(iEdge))
+
enddo
enddo ! iEdge
@@ -293,7 +309,7 @@
! uTransport = uBcl + uBolus
! This is u used in advective terms for h and tracers
- ! in tendancy calls in stage 3.
+ ! in tendency calls in stage 3.
block % state % time_levs(2) % state % uTransport % array(k,iEdge) &
= block % mesh % edgeMask % array(k,iEdge) &
*( block % state % time_levs(2) % state % uBcl % array(k,iEdge) &
@@ -408,21 +424,63 @@
! config_btr_gam1_uWt1=0.5 flux = 1/2*(uBtrNew+uBtrOld)*H
! config_btr_gam1_uWt1= 0 flux = uBtrOld*H
! mrp 120201 efficiency: could we combine the following edge and cell loops?
+
+ do iCell = 1, block % mesh % nCells
+ do i = 1, block % mesh % nEdgesOnCell % array(iCell)
+ iEdge = block % mesh % edgesOnCell % array(i, iCell)
+
+ cell1 = block % mesh % cellsOnEdge % array(1, iEdge)
+ cell2 = block % mesh % cellsOnEdge % array(2, iEdge)
+
+ sshEdge = 0.5 * (block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(cell1) &
+ + block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(cell2) )
+
+ ! method 0: orig, works only without pbc:
+ !hSum = sshEdge + block % mesh % refBottomDepthTopOfCell % array (block % mesh % maxLevelEdgeTop % array(iEdge)+1)
+
+ ! method 1, matches method 0 without pbcs, works with pbcs.
+ hSum = sshEdge + min(block % mesh % bottomDepth % array(cell1), &
+ block % mesh % bottomDepth % array(cell2))
+
+ ! method 2: may be better than method 1.
+ ! Take average of full thickness at two neighboring cells.
+ !hSum = sshEdge + 0.5 *( block % mesh % bottomDepth % array(cell1) &
+ ! + block % mesh % bottomDepth % array(cell2) )
+
+
+ flux = ((1.0-config_btr_gam1_uWt1) * block % state % time_levs(oldBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) &
+ + config_btr_gam1_uWt1 * block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge)) &
+ * hSum
+
+ block % tend % ssh % array(iCell) = block % tend % ssh % array(iCell) + block % mesh % edgeSignOnCell % array(i, iCell) * flux &
+ * block % mesh % dvEdge % array(iEdge)
+
+ end do
+ end do
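The cell loop above replaces the old edge-by-edge scatter (tend(cell1) minus flux, tend(cell2) plus flux) with a gather weighted by edgeSignOnCell, so each cell's tendency is written by exactly one loop iteration, the property the multiple-blocks work needs. A minimal sketch of the sign convention, assuming the +/-1 convention implied by the scatter form it replaces:

   program edge_sign_demo
      implicit none
      integer, parameter :: nCells = 2
      integer :: edgeSignOnCell(1, nCells)   ! one shared edge between two cells
      real :: flux, tendSSH(nCells)
      integer :: iCell
      edgeSignOnCell(1, 1) = -1              ! edge normal points out of cell 1
      edgeSignOnCell(1, 2) =  1              ! and into cell 2
      flux = 3.0
      tendSSH = 0.0
      do iCell = 1, nCells                   ! gather: each cell written once
         tendSSH(iCell) = tendSSH(iCell) &
                        + edgeSignOnCell(1, iCell) * flux
      end do
      print *, tendSSH                       ! -3.0  3.0 : the edge flux cancels
   end program edge_sign_demo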
+
do iEdge=1,block % mesh % nEdges
cell1 = block % mesh % cellsOnEdge % array(1,iEdge)
cell2 = block % mesh % cellsOnEdge % array(2,iEdge)
-
+
sshEdge = 0.5 * (block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(cell1) &
+ block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(cell2) )
- hSum = sshEdge + block % mesh % referenceBottomDepthTopOfCell % array (block % mesh % maxLevelEdgeTop % array(iEdge)+1)
-
+
+ ! method 0: orig, works only without pbc:
+ !hSum = sshEdge + block % mesh % refBottomDepthTopOfCell % array (block % mesh % maxLevelEdgeTop % array(iEdge)+1)
+
+ ! method 1, matches method 0 without pbcs, works with pbcs.
+ hSum = sshEdge + min(block % mesh % bottomDepth % array(cell1), &
+ block % mesh % bottomDepth % array(cell2))
+
+ ! method 2: may be better than method 1.
+ ! take average of full thickness at two neighboring cells
+ !hSum = sshEdge + 0.5 *( block % mesh % bottomDepth % array(cell1) &
+ ! + block % mesh % bottomDepth % array(cell2) )
+
flux = ((1.0-config_btr_gam1_uWt1) * block % state % time_levs(oldBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) &
+ config_btr_gam1_uWt1 * block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge)) &
* hSum
-
- block % tend % ssh % array(cell1) = block % tend % ssh % array(cell1) - flux * block % mesh % dvEdge % array(iEdge)
- block % tend % ssh % array(cell2) = block % tend % ssh % array(cell2) + flux * block % mesh % dvEdge % array(iEdge)
-
+
block % state % time_levs(1) % state % FBtr % array(iEdge) = block % state % time_levs(1) % state % FBtr % array(iEdge) &
+ FBtr_coeff*flux
end do
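The three hSum variants in the comments above differ only in how the resting column depth at an edge is estimated once partial bottom cells make the reference-depth lookup (method 0) wrong. A small self-contained sketch contrasting the two retained candidates, with hypothetical depths (RKIND stands in for the model's working real kind):

   program hsum_methods_demo
      implicit none
      integer, parameter :: RKIND = selected_real_kind(12)
      real (kind=RKIND) :: sshEdge, bottomDepth1, bottomDepth2, hSum1, hSum2
      bottomDepth1 = 100.0_RKIND    ! full-depth column at cell 1
      bottomDepth2 =  80.0_RKIND    ! partial bottom cell shortens cell 2
      sshEdge = 0.5_RKIND
      hSum1 = sshEdge + min(bottomDepth1, bottomDepth2)           ! method 1: 80.5
      hSum2 = sshEdge + 0.5_RKIND*(bottomDepth1 + bottomDepth2)   ! method 2: 90.5
      print *, hSum1, hSum2
   end program hsum_methods_demo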
@@ -452,6 +510,8 @@
block => domain % blocklist
do while (associated(block))
+ allocate(uTemp(block % mesh % nEdges+1))
+ uTemp(:) = block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(:)
do iEdge=1,block % mesh % nEdges
cell1 = block % mesh % cellsOnEdge % array(1,iEdge)
cell2 = block % mesh % cellsOnEdge % array(2,iEdge)
@@ -461,7 +521,8 @@
do i = 1,block % mesh % nEdgesOnEdge % array(iEdge)
eoe = block % mesh % edgesOnEdge % array(i,iEdge)
CoriolisTerm = CoriolisTerm + block % mesh % weightsOnEdge % array(i,iEdge) &
- * block % state % time_levs(uPerpTime) % state % uBtrSubcycle % array(eoe) &
+ !* block % state % time_levs(uPerpTime) % state % uBtrSubcycle % array(eoe) &
+ * uTemp(eoe) &
* block % mesh % fEdge % array(eoe)
end do
@@ -478,6 +539,7 @@
+ dt/config_n_btr_subcycles *(CoriolisTerm - gravity *(sshCell2 - sshCell1) /block % mesh % dcEdge % array(iEdge) &
+ block % state % time_levs(1) % state % GBtrForcing % array(iEdge))) * block % mesh % edgeMask % array(1,iEdge)
end do
+ deallocate(uTemp)
block => block % next
end do ! block
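The uTemp copy above freezes the subcycle velocity before the edge loop, so the Coriolis sum over edgesOnEdge always reads pre-update values rather than a mix of old and new neighbours that would depend on edge ordering. A minimal sketch of this copy-before-update pattern (values hypothetical):

   program copy_before_update
      implicit none
      integer, parameter :: nEdges = 3
      real :: uNew(nEdges), uOld(nEdges)
      integer :: iEdge
      uNew = (/ 1.0, 2.0, 3.0 /)
      uOld = uNew                       ! freeze the pre-update field
      do iEdge = 1, nEdges
         ! each update reads only uOld, so the loop order cannot leak
         ! partially updated neighbours into the stencil
         uNew(iEdge) = uOld(iEdge) + 0.1 * sum(uOld)  ! stand-in for the Coriolis sum
      end do
      print *, uNew
   end program copy_before_update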
@@ -502,6 +564,45 @@
! config_btr_gam3_uWt2=0.5 flux = 1/2*(uBtrNew+uBtrOld)*H
! config_btr_gam3_uWt2= 0 flux = uBtrOld*H
! mrp 120201 efficiency: could we combine the following edge and cell loops?
+
+ do iCell = 1, block % mesh % nCells
+ do i = 1, block % mesh % nEdgesOnCell % array(iCell)
+ iEdge = block % mesh % edgesOnCell % array(i, iCell)
+
+ cell1 = block % mesh % cellsOnEdge % array(1,iEdge)
+ cell2 = block % mesh % cellsOnEdge % array(2,iEdge)
+
+ ! SSH is a linear combination of SSHold and SSHnew.
+ sshCell1 = (1-config_btr_gam2_SSHWt1)*block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(cell1) &
+ + config_btr_gam2_SSHWt1 *block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle % array(cell1)
+ sshCell2 = (1-config_btr_gam2_SSHWt1)*block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(cell2) &
+ + config_btr_gam2_SSHWt1 *block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle % array(cell2)
+
+ sshEdge = 0.5 * (sshCell1 + sshCell2)
+
+ ! method 0: orig, works only without pbc:
+ !hSum = sshEdge + block % mesh % refBottomDepthTopOfCell % array (block % mesh % maxLevelEdgeTop % array(iEdge)+1)
+
+ ! method 1, matches method 0 without pbcs, works with pbcs.
+ hSum = sshEdge + min(block % mesh % bottomDepth % array(cell1), &
+ block % mesh % bottomDepth % array(cell2))
+
+ ! method 2: may be better than method 1.
+ ! take average of full thickness at two neighboring cells
+ !hSum = sshEdge + 0.5 *( block % mesh % bottomDepth % array(cell1) &
+ ! + block % mesh % bottomDepth % array(cell2) )
+
+
+ flux = ((1.0-config_btr_gam3_uWt2) * block % state % time_levs(oldBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) &
+ + config_btr_gam3_uWt2 * block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge)) &
+ * hSum
+
+ block % tend % ssh % array(iCell) = block % tend % ssh % array(iCell) + block % mesh % edgeSignOnCell % array(i, iCell) * flux &
+ * block % mesh % dvEdge % array(iEdge)
+
+ end do
+ end do
+
do iEdge=1,block % mesh % nEdges
cell1 = block % mesh % cellsOnEdge % array(1,iEdge)
cell2 = block % mesh % cellsOnEdge % array(2,iEdge)
@@ -511,17 +612,24 @@
+ config_btr_gam2_SSHWt1 *block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle % array(cell1)
sshCell2 = (1-config_btr_gam2_SSHWt1)*block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(cell2) &
+ config_btr_gam2_SSHWt1 *block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle % array(cell2)
+ sshEdge = 0.5 * (sshCell1 + sshCell2)
- sshEdge = 0.5 * (sshCell1 + sshCell2)
- hSum = sshEdge + block % mesh % referenceBottomDepthTopOfCell % array (block % mesh % maxLevelEdgeTop % array(iEdge)+1)
+ ! method 0: orig, works only without pbc:
+ !hSum = sshEdge + block % mesh % refBottomDepthTopOfCell % array (block % mesh % maxLevelEdgeTop % array(iEdge)+1)
+
+ ! method 1, matches method 0 without pbcs, works with pbcs.
+ hSum = sshEdge + min(block % mesh % bottomDepth % array(cell1), &
+ block % mesh % bottomDepth % array(cell2))
+
+ ! method 2: may be better than method 1.
+ ! take average of full thickness at two neighboring cells
+ !hSum = sshEdge + 0.5 *( block % mesh % bottomDepth % array(cell1) &
+ ! + block % mesh % bottomDepth % array(cell2) )
flux = ((1.0-config_btr_gam3_uWt2) * block % state % time_levs(oldBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) &
+ config_btr_gam3_uWt2 * block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge)) &
* hSum
- block % tend % ssh % array(cell1) = block % tend % ssh % array(cell1) - flux * block % mesh % dvEdge % array(iEdge)
- block % tend % ssh % array(cell2) = block % tend % ssh % array(cell2) + flux * block % mesh % dvEdge % array(iEdge)
-
block % state % time_levs(1) % state % FBtr % array(iEdge) = block % state % time_levs(1) % state % FBtr % array(iEdge) + flux
end do
@@ -639,7 +747,7 @@
! uTransport = uBtr + uBcl + uBolus + uCorrection
! This is u used in advective terms for h and tracers
- ! in tendancy calls in stage 3.
+ ! in tendency calls in stage 3.
block % state % time_levs(2) % state % uTransport % array(k,iEdge) &
= block % mesh % edgeMask % array(k,iEdge) &
*( block % state % time_levs(2) % state % uBtr % array( iEdge) &
@@ -675,8 +783,14 @@
! dwj: 02/22/12 splitting thickness and tracer tendency computations and halo updates to allow monotonic advection.
block => domain % blocklist
do while (associated(block))
- call ocn_wtop(block % state % time_levs(1) % state,block % state % time_levs(2) % state, block % mesh)
+ ! compute wTop. Use uTransport for advection of h and tracers.
+ ! Use time level 1 values of h and h_edge because h has not yet been computed for time level 2.
+ call ocn_wtop(block % mesh, block % state % time_levs(1) % state % h % array, &
+ block % state % time_levs(1) % state % h_edge % array, &
+ block % state % time_levs(2) % state % uTransport % array, &
+ block % state % time_levs(2) % state % wTop % array, err)
+
call ocn_tend_h(block % tend, block % state % time_levs(2) % state, block % mesh)
block => block % next
end do
@@ -825,37 +939,39 @@
! END large iteration loop
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- block => domain % blocklist
- do while (associated(block))
+ if (config_implicit_vertical_mix) then
+ call mpas_timer_start("se implicit vert mix")
+ block => domain % blocklist
+ do while(associated(block))
+ ! Call ocean diagnostic solve in preparation for vertical mixing. Note
+ ! it is called again after vertical mixing, because u and tracers change.
+ ! For Richardson vertical mixing, only rho, h_edge, and ke_edge need to
+ ! be computed. For kpp, more variables may be needed. Either way, this
+ ! could be made more efficient by only computing what is needed for the
+ ! implicit vmix routine that follows. mrp 121023.
+ call ocn_diagnostic_solve(dt, block % state % time_levs(2) % state, block % mesh)
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !
- ! Implicit vertical mixing, done after timestep is complete
- !
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ call ocn_vmix_implicit(dt, block % mesh, block % diagnostics, block % state % time_levs(2) % state, err)
- u => block % state % time_levs(2) % state % u % array
- tracers => block % state % time_levs(2) % state % tracers % array
- h => block % state % time_levs(2) % state % h % array
- h_edge => block % state % time_levs(2) % state % h_edge % array
- ke_edge => block % state % time_levs(2) % state % ke_edge % array
- num_tracers = block % state % time_levs(2) % state % num_tracers
- vertViscTopOfEdge => block % diagnostics % vertViscTopOfEdge % array
- vertDiffTopOfCell => block % diagnostics % vertDiffTopOfCell % array
- maxLevelCell => block % mesh % maxLevelCell % array
- maxLevelEdgeTop => block % mesh % maxLevelEdgeTop % array
+ block => block % next
+ end do
- if (config_implicit_vertical_mix) then
- call ocn_vmix_coefs(block % mesh, block % state % time_levs(2) % state, block % diagnostics, err)
+ ! Update halo on u and tracers, which were just updated for implicit vertical mixing. If not done,
+ ! this leads to lack of volume conservation. It is required because halo updates in stage 3 are only
+ ! conducted on tendencies, not on the velocity and tracer fields. So this update is required to
+ ! communicate the change due to implicit vertical mixing across the boundary.
+ call mpas_timer_start("se implicit vert mix halos")
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % u)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % tracers)
+ call mpas_timer_stop("se implicit vert mix halos")
- ! Implicit vertical solve for momentum
- call ocn_vel_vmix_tend_implicit(block % mesh, dt, ke_edge, vertvisctopofedge, h, h_edge, u, err)
-
- ! Implicit vertical solve for tracers
- call ocn_tracer_vmix_tend_implicit(block % mesh, dt, vertdifftopofcell, h, tracers, err)
- end if
+ call mpas_timer_stop("se implicit vert mix")
+ end if
+ block => domain % blocklist
+ do while (associated(block))
+
if (config_test_case == 1) then ! For case 1, wind field should be fixed
block % state % time_levs(2) % state % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
end if
@@ -869,147 +985,45 @@
end if
call ocn_diagnostic_solve(dt, block % state % time_levs(2) % state, block % mesh)
+
+ ! Compute velocity transport, used in advection terms of h and tracer tendency
+ block % state % time_levs(2) % state % uTransport % array(:,:) &
+ = block % state % time_levs(2) % state % u % array(:,:) &
+ + block % state % time_levs(2) % state % uBolusGM % array(:,:)
+
+!!!!!!!!!!!!!!!!!!!!!!!!!! mrp
+ ! Recompute wTop with all updated information. The wTop in stage 3 used
+ ! the previous h and u before implicit vertical mixing.
+!!!!!!
+! remove: call ocn_wtop(block % state % time_levs(2) % state,block % state % time_levs(2) % state, block % mesh)
+
call mpas_reconstruct(block % mesh, block % state % time_levs(2) % state % u % array, &
- block % state % time_levs(2) % state % uReconstructX % array, &
- block % state % time_levs(2) % state % uReconstructY % array, &
- block % state % time_levs(2) % state % uReconstructZ % array, &
- block % state % time_levs(2) % state % uReconstructZonal % array, &
- block % state % time_levs(2) % state % uReconstructMeridional % array)
+ block % state % time_levs(2) % state % uReconstructX % array, &
+ block % state % time_levs(2) % state % uReconstructY % array, &
+ block % state % time_levs(2) % state % uReconstructZ % array, &
+ block % state % time_levs(2) % state % uReconstructZonal % array, &
+ block % state % time_levs(2) % state % uReconstructMeridional % array &
+ )
+!TDR
+ call mpas_reconstruct(block % mesh, block % mesh % u_src % array, &
+ block % state % time_levs(2) % state % uSrcReconstructX % array, &
+ block % state % time_levs(2) % state % uSrcReconstructY % array, &
+ block % state % time_levs(2) % state % uSrcReconstructZ % array, &
+ block % state % time_levs(2) % state % uSrcReconstructZonal % array, &
+ block % state % time_levs(2) % state % uSrcReconstructMeridional % array &
+ )
+!TDR
+
call ocn_time_average_accumulate(block % state % time_levs(2) % state, block % state % time_levs(1) % state)
-
block => block % next
end do
+
call mpas_timer_stop("se timestep", timer_main)
-
end subroutine ocn_time_integrator_split!}}}
- subroutine filter_btr_mode_tend_u(tend, s, d, grid)!{{{
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! Filter and remove barotropic mode from the tendencies
- !
- ! Input: s - current model state
- ! grid - grid metadata
- !
- ! Output: tend - computed tendencies for prognostic variables
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- implicit none
-
- type (tend_type), intent(inout) :: tend
- type (state_type), intent(in) :: s
- type (diagnostics_type), intent(in) :: d
- type (mesh_type), intent(in) :: grid
-
- integer :: iEdge, k
-
- integer :: nCells, nEdges, nVertices, nVertLevels, nEdgesSolve
- real (kind=RKIND) :: vertSum, uhSum, hSum
- real (kind=RKIND), dimension(:,:), pointer :: &
- h_edge, h, u,tend_u
- type (dm_info) :: dminfo
-
- integer, dimension(:), pointer :: maxLevelEdgeTop
-
- call mpas_timer_start("filter_btr_mode_tend_u")
-
- h => s % h % array
- u => s % u % array
- h_edge => s % h_edge % array
-
- maxLevelEdgeTop => grid % maxLevelEdgeTop % array
-
- tend_u => tend % u % array
-
- nCells = grid % nCells
- nEdges = grid % nEdges
- nVertLevels = grid % nVertLevels
-
- do iEdge=1,nEdges
-
- ! hSum is initialized outside the loop because on land boundaries
- ! maxLevelEdgeTop=0, but I want to initialize hSum with a
- ! nonzero value to avoid a NaN.
- uhSum = h_edge(1,iEdge) * tend_u(1,iEdge)
- hSum = h_edge(1,iEdge)
-
- do k=2,maxLevelEdgeTop(iEdge)
- uhSum = uhSum + h_edge(k,iEdge) * tend_u(k,iEdge)
- hSum = hSum + h_edge(k,iEdge)
- enddo
-
- vertSum = uhSum/hSum
- do k=1,maxLevelEdgeTop(iEdge)
- tend_u(k,iEdge) = tend_u(k,iEdge) - vertSum
- enddo
- enddo ! iEdge
-
- call mpas_timer_stop("filter_btr_mode_tend_u")
-
- end subroutine filter_btr_mode_tend_u!}}}
-
- subroutine filter_btr_mode_u(s, grid)!{{{
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! Filter and remove barotropic mode.
- !
- ! Input: s - current model state
- ! grid - grid metadata
- !
- ! Output: tend - computed tendencies for prognostic variables
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- implicit none
-
- type (state_type), intent(inout) :: s
- type (mesh_type), intent(in) :: grid
-
- integer :: iEdge, k
-
- integer :: nCells, nEdges, nVertices, nVertLevels, nEdgesSolve
- real (kind=RKIND) :: vertSum, uhSum, hSum
- real (kind=RKIND), dimension(:,:), pointer :: &
- h_edge, h, u
- type (dm_info) :: dminfo
-
- integer, dimension(:), pointer :: maxLevelEdgeTop
-
- call mpas_timer_start("filter_btr_mode_u")
-
- h => s % h % array
- u => s % u % array
- h_edge => s % h_edge % array
-
- maxLevelEdgeTop => grid % maxLevelEdgeTop % array
-
- nCells = grid % nCells
- nEdges = grid % nEdges
- nVertLevels = grid % nVertLevels
-
- do iEdge=1,nEdges
-
- ! hSum is initialized outside the loop because on land boundaries
- ! maxLevelEdgeTop=0, but I want to initialize hSum with a
- ! nonzero value to avoid a NaN.
- uhSum = h_edge(1,iEdge) * u(1,iEdge)
- hSum = h_edge(1,iEdge)
-
- do k=2,maxLevelEdgeTop(iEdge)
- uhSum = uhSum + h_edge(k,iEdge) * u(k,iEdge)
- hSum = hSum + h_edge(k,iEdge)
- enddo
-
- vertSum = uhSum/hSum
- do k=1,maxLevelEdgeTop(iEdge)
- u(k,iEdge) = u(k,iEdge) - vertSum
- enddo
- enddo ! iEdge
-
- call mpas_timer_stop("filter_btr_mode_u")
-
- end subroutine filter_btr_mode_u!}}}
-
end module ocn_time_integration_split
! vim: foldmethod=marker
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -18,6 +18,8 @@
use mpas_kind_types
use mpas_grid_types
use mpas_configure
+ use mpas_sort
+ use mpas_hash
use mpas_ocn_tracer_advection_std
use mpas_ocn_tracer_advection_mono
@@ -58,10 +60,13 @@
integer, dimension(:,:), pointer :: cellsOnCell, cellsOnEdge, advCellsForEdge, highOrderAdvectionMask, lowOrderAdvectionMask, boundaryCell
integer, dimension(:), pointer :: nEdgesOnCell, nAdvCellsForEdge, maxLevelCell
- integer, dimension(:), pointer :: cell_list, ordered_cell_list
- integer :: cell1, cell2, iEdge, n, i, j, j_in, iCell, k, nVertLevels
+ integer, dimension(:), pointer :: cell_indices
+ integer, dimension(:,:), pointer :: sorted_cell_indices
+ integer :: cell1, cell2, iEdge, n, i, j, j_in, iCell, k, nVertLevels, nCells
logical :: addcell, highOrderAdvection
+ type (hashtable) :: cell_hash
+
deriv_two => grid % deriv_two % array
adv_coefs => grid % adv_coefs % array
adv_coefs_2nd => grid % adv_coefs_2nd % array
@@ -76,24 +81,21 @@
maxLevelCell => grid % maxLevelCell % array
nAdvCellsForEdge => grid % nAdvCellsForEdge % array
+ nCells = grid % nCells
nVertLevels = grid % nVertLevels
- allocate(cell_list(grid % maxEdges2 + 2))
- allocate(ordered_cell_list(grid % maxEdges2 + 2))
+ allocate(cell_indices(grid % maxEdges2 + 2))
+ allocate(sorted_cell_indices(2, grid % maxEdges2 + 2))
err = 0
highOrderAdvectionMask = 0
lowOrderAdvectionMask = 0
- if(config_horiz_tracer_adv_order == 2) then
-
- end if
do iEdge = 1, grid % nEdges
nAdvCellsForEdge(iEdge) = 0
cell1 = cellsOnEdge(1,iEdge)
cell2 = cellsOnEdge(2,iEdge)
-
do k = 1, nVertLevels
if (boundaryCell(k, cell1) == 1 .or. boundaryCell(k, cell2) == 1) then
@@ -108,131 +110,108 @@
!
! do only if this edge flux is needed to update owned cells
!
- if (cell1 <= grid%nCells .or. cell2 <= grid%nCells) then
+ if (cell1 <= grid % nCells .and. cell2 <= grid % nCells) then
+ ! Insert cellsOnEdge to list of advection cells
+ call mpas_hash_init(cell_hash)
+ call mpas_hash_insert(cell_hash, cell1)
+ call mpas_hash_insert(cell_hash, cell2)
+ cell_indices(1) = cell1
+ cell_indices(2) = cell2
+ sorted_cell_indices(1, 1) = grid % indexToCellID % array(cell1)
+ sorted_cell_indices(2, 1) = cell1
+ sorted_cell_indices(1, 2) = grid % indexToCellID % array(cell2)
+ sorted_cell_indices(2, 2) = cell2
+ n = 2
- cell_list(1) = cell1
- cell_list(2) = cell2
- n = 2
+ ! Build unique list of cells used for advection on edge
+ do i = 1, nEdgesOnCell(cell1)
+ if(.not. mpas_hash_search(cell_hash, cellsOnCell(i, cell1))) then
+ n = n + 1
+ cell_indices(n) = cellsOnCell(i, cell1)
+ sorted_cell_indices(1, n) = grid % indexToCellID % array(cellsOnCell(i, cell1))
+ sorted_cell_indices(2, n) = cellsOnCell(i, cell1)
+ call mpas_hash_insert(cell_hash, cellsOnCell(i, cell1))
+ end if
+ end do ! loop over i
- ! add cells surrounding cell 1. n is number of cells currently in list
- do i = 1, nEdgesOnCell(cell1)
- if(cellsOnCell(i,cell1) /= cell2) then
- n = n + 1
- cell_list(n) = cellsOnCell(i,cell1)
- end if
- end do
+ do i = 1, nEdgesOnCell(cell2)
+ if(.not. mpas_hash_search(cell_hash, cellsOnCell(i, cell2))) then
+ n = n + 1
+ cell_indices(n) = cellsOnCell(i, cell2)
+ sorted_cell_indices(1, n) = grid % indexToCellID % array(cellsOnCell(i, cell2))
+ sorted_cell_indices(2, n) = cellsOnCell(i, cell2)
+ call mpas_hash_insert(cell_hash, cellsOnCell(i, cell2))
+ end if
+ end do ! loop over i
- ! add cells surrounding cell 2 (brute force approach)
- do iCell = 1, nEdgesOnCell(cell2)
- addcell = .true.
- do i=1,n
- if(cell_list(i) == cellsOnCell(iCell,cell2)) addcell = .false.
- end do
- if(addcell) then
- n = n+1
- cell_list(n) = cellsOnCell(iCell,cell2)
- end if
- end do
+ call mpas_hash_destroy(cell_hash)
- ! order the list by increasing cell number (brute force approach)
+ call mpas_quicksort(n, sorted_cell_indices)
- do i=1,n
- ordered_cell_list(i) = grid % nCells + 2
- j_in = 1
- do j=1,n
- if(ordered_cell_list(i) > cell_list(j) ) then
- j_in = j
- ordered_cell_list(i) = cell_list(j)
- end if
- end do
-! ordered_cell_list(i) = cell_list(j_in)
- cell_list(j_in) = grid % nCells + 3
- end do
+ nAdvCellsForEdge(iEdge) = n
+ do iCell = 1, nAdvCellsForEdge(iEdge)
+ advCellsForEdge(iCell, iEdge) = sorted_cell_indices(2, iCell)
+ end do ! loop over iCell
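The hash table replaces the old brute-force duplicate scan when assembling the advection-cell list, and the (global ID, local index) pairs are then quicksorted so the coefficient pass below can use binary search. The mpas_hash_* calls are taken as used in this hunk; a self-contained sketch of the same one-pass de-duplication idea, using a logical array as a stand-in for the hash:

   program unique_list_demo
      implicit none
      integer, parameter :: maxID = 10
      logical :: seen(maxID)
      integer :: candidates(7), uniq(7), n, i
      candidates = (/ 4, 2, 4, 7, 2, 9, 7 /)
      seen = .false.
      n = 0
      do i = 1, size(candidates)       ! hash-insert stand-in: O(1) membership test
         if (.not. seen(candidates(i))) then
            seen(candidates(i)) = .true.
            n = n + 1
            uniq(n) = candidates(i)
         end if
      end do
      print *, uniq(1:n)               ! 4 2 7 9 : duplicates removed in one pass
   end program unique_list_demo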
- nAdvCellsForEdge(iEdge) = n
- do iCell = 1, nAdvCellsForEdge(iEdge)
- advCellsForEdge(iCell,iEdge) = ordered_cell_list(iCell)
- end do
+ adv_coefs(:,iEdge) = 0.
+ adv_coefs_2nd(:,iEdge) = 0.
+ adv_coefs_3rd(:,iEdge) = 0.
- ! we have the ordered list, now construct coefficients
+ k = mpas_binary_search(sorted_cell_indices, 2, 1, nAdvCellsForEdge(iEdge), grid % indexToCellID % array(cell1))
+ if(k <= nAdvCellsForEdge(iEdge)) then
+ adv_coefs(k, iEdge) = adv_coefs(k, iEdge) + deriv_two(1,1,iEdge)
+ adv_coefs_3rd(k, iEdge) = adv_coefs_3rd(k, iEdge) + deriv_two(1,1,iEdge)
+ end if
- adv_coefs(:,iEdge) = 0.
- adv_coefs_2nd(:,iEdge) = 0.
- adv_coefs_3rd(:,iEdge) = 0.
-
- ! pull together third and fourth order contributions to the flux
- ! first from cell1
+ do iCell = 1, nEdgesOnCell(cell1)
+ k = mpas_binary_search(sorted_cell_indices, 2, 1, nAdvCellsForEdge(iEdge), grid % indexToCellID % array(cellsOnCell(iCell,cell1)))
+ if(k <= nAdvCellsForEdge(iEdge)) then
+ adv_coefs(k, iEdge) = adv_coefs(k, iEdge) + deriv_two(iCell+1, 1, iEdge)
+ adv_coefs_3rd(k, iEdge) = adv_coefs_3rd(k, iEdge) + deriv_two(iCell+1, 1, iEdge)
+ end if
+ end do ! loop over iCell
- j_in = 0
- do j=1, n
- if( ordered_cell_list(j) == cell1 ) j_in = j
- end do
- adv_coefs (j_in,iEdge) = adv_coefs (j_in,iEdge) + deriv_two(1,1,iEdge)
- adv_coefs_3rd(j_in,iEdge) = adv_coefs_3rd(j_in,iEdge) + deriv_two(1,1,iEdge)
+ k = mpas_binary_search(sorted_cell_indices, 2, 1, nAdvCellsForEdge(iEdge), grid % indexToCellID % array(cell2))
+ if(k <= nAdvCellsForEdge(iEdge)) then
+ adv_coefs(k, iEdge) = adv_coefs(k, iEdge) + deriv_two(1,2,iEdge)
+ adv_coefs_3rd(k, iEdge) = adv_coefs_3rd(k, iEdge) + deriv_two(1,2,iEdge)
+ end if
- do iCell = 1, nEdgesOnCell(cell1)
- j_in = 0
- do j=1, n
- if( ordered_cell_list(j) == cellsOnCell(iCell,cell1) ) j_in = j
- end do
- adv_coefs (j_in,iEdge) = adv_coefs (j_in,iEdge) + deriv_two(iCell+1,1,iEdge)
- adv_coefs_3rd(j_in,iEdge) = adv_coefs_3rd(j_in,iEdge) + deriv_two(iCell+1,1,iEdge)
- end do
+ do iCell = 1, nEdgesOnCell(cell2)
+ k = mpas_binary_search(sorted_cell_indices, 2, 1, nAdvCellsForEdge(iEdge), grid % indexToCellID % array(cellsOnCell(iCell,cell2)))
+ if(k <= nAdvCellsForEdge(iEdge)) then
+ adv_coefs(k, iEdge) = adv_coefs(k, iEdge) + deriv_two(iCell+1, 2, iEdge)
+ adv_coefs_3rd(k, iEdge) = adv_coefs_3rd(k, iEdge) + deriv_two(iCell+1, 2, iEdge)
+ end if
+ end do ! loop over iCell
- ! pull together third and fourth order contributions to the flux
- ! now from cell2
+ do iCell = 1,nAdvCellsForEdge(iEdge)
+ adv_coefs (iCell,iEdge) = - (grid % dcEdge % array (iEdge) **2) * adv_coefs (iCell,iEdge) / 12.
+ adv_coefs_3rd(iCell,iEdge) = - (grid % dcEdge % array (iEdge) **2) * adv_coefs_3rd(iCell,iEdge) / 12.
+ end do ! loop over iCell
- j_in = 0
- do j=1, n
- if( ordered_cell_list(j) == cell2 ) j_in = j
- enddo
- adv_coefs (j_in,iEdge) = adv_coefs (j_in,iEdge) + deriv_two(1,2,iEdge)
- adv_coefs_3rd(j_in,iEdge) = adv_coefs_3rd(j_in,iEdge) - deriv_two(1,2,iEdge)
+ k = mpas_binary_search(sorted_cell_indices, 2, 1, nAdvCellsForEdge(iEdge), grid % indexToCellID % array(cell1))
+ if(k <= nAdvCellsForEdge(iEdge)) then
+ adv_coefs(k, iEdge) = adv_coefs(k, iEdge) + 0.5
+ adv_coefs_2nd(k, iEdge) = adv_coefs_2nd(k, iEdge) + 0.5
+ end if
- do iCell = 1, nEdgesOnCell(cell2)
- j_in = 0
- do j=1, n
- if( ordered_cell_list(j) == cellsOnCell(iCell,cell2) ) j_in = j
- enddo
- adv_coefs (j_in,iEdge) = adv_coefs (j_in,iEdge) + deriv_two(iCell+1,2,iEdge)
- adv_coefs_3rd(j_in,iEdge) = adv_coefs_3rd(j_in,iEdge) - deriv_two(iCell+1,2,iEdge)
- end do
+ k = mpas_binary_search(sorted_cell_indices, 2, 1, nAdvCellsForEdge(iEdge), grid % indexToCellID % array(cell2))
+ if(k <= nAdvCellsForEdge(iEdge)) then
+ adv_coefs(k, iEdge) = adv_coefs(k, iEdge) + 0.5
+ adv_coefs_2nd(k, iEdge) = adv_coefs_2nd(k, iEdge) + 0.5
+ end if
- do j = 1,n
- adv_coefs (j,iEdge) = - (grid % dcEdge % array (iEdge) **2) * adv_coefs (j,iEdge) / 12.
- adv_coefs_3rd(j,iEdge) = - (grid % dcEdge % array (iEdge) **2) * adv_coefs_3rd(j,iEdge) / 12.
- end do
-
- ! 2nd order centered contribution - place this in the main flux weights
-
- j_in = 0
- do j=1, n
- if( ordered_cell_list(j) == cell1 ) j_in = j
- enddo
- adv_coefs(j_in,iEdge) = adv_coefs(j_in,iEdge) + 0.5
- adv_coefs_2nd(j_in,iEdge) = adv_coefs_2nd(j_in,iEdge) + 0.5
-
- j_in = 0
- do j=1, n
- if( ordered_cell_list(j) == cell2 ) j_in = j
- enddo
- adv_coefs(j_in,iEdge) = adv_coefs(j_in,iEdge) + 0.5
- adv_coefs_2nd(j_in,iEdge) = adv_coefs_2nd(j_in,iEdge) + 0.5
-
- ! multiply by edge length - thus the flux is just dt*ru times the results of the vector-vector multiply
-
- do j=1,n
- adv_coefs (j,iEdge) = grid % dvEdge % array(iEdge) * adv_coefs (j,iEdge)
- adv_coefs_2nd(j,iEdge) = grid % dvEdge % array(iEdge) * adv_coefs_2nd(j,iEdge)
- adv_coefs_3rd(j,iEdge) = grid % dvEdge % array(iEdge) * adv_coefs_3rd(j,iEdge)
- end do
-
- end if ! only do for edges of owned-cells
-
+ do iCell=1,nAdvCellsForEdge(iEdge)
+ adv_coefs (iCell,iEdge) = grid % dvEdge % array(iEdge) * adv_coefs (iCell,iEdge)
+ adv_coefs_2nd(iCell,iEdge) = grid % dvEdge % array(iEdge) * adv_coefs_2nd(iCell,iEdge)
+ adv_coefs_3rd(iCell,iEdge) = grid % dvEdge % array(iEdge) * adv_coefs_3rd(iCell,iEdge)
+ end do ! loop over iCell
+ end if
end do ! end loop over edges
- deallocate(cell_list)
- deallocate(ordered_cell_list)
+ deallocate(cell_indices)
+ deallocate(sorted_cell_indices)
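Each deriv_two contribution above is routed to its slot in adv_coefs by mpas_binary_search over the list sorted by global cell ID (row 1 holding the key, row 2 the local-index payload; the exact signature is assumed from its use here). A self-contained sketch of the lookup, where a result past the end of the list signals "not found":

   program binary_search_demo
      implicit none
      integer :: keys(5), lo, hi, mid, target, slot
      keys = (/ 3, 8, 15, 21, 42 /)    ! sorted global cell IDs
      target = 21
      lo = 1; hi = size(keys); slot = hi + 1
      do while (lo <= hi)              ! classic binary search; slot > n means absent
         mid = (lo + hi) / 2
         if (keys(mid) == target) then
            slot = mid; exit
         else if (keys(mid) < target) then
            lo = mid + 1
         else
            hi = mid - 1
         end if
      end do
      print *, 'coefficient slot:', slot   ! prints 4
   end program binary_search_demo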
! If 2nd order advection, set masks appropriately.
if(config_horiz_tracer_adv_order == 2) then
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection_mono.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection_mono.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_advection_mono.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -64,7 +64,7 @@
integer :: i, iCell, iEdge, k, iTracer, cell1, cell2
integer :: nVertLevels, num_tracers, nCells, nEdges, nCellsSolve
integer, dimension(:), pointer :: nAdvCellsForEdge, maxLevelCell, maxLevelEdgeTop, nEdgesOnCell
- integer, dimension(:,:), pointer :: cellsOnEdge, cellsOnCell, advCellsForEdge, highOrderAdvectionMask, lowOrderAdvectionMask
+ integer, dimension(:,:), pointer :: cellsOnEdge, cellsOnCell, advCellsForEdge, highOrderAdvectionMask, lowOrderAdvectionMask, edgeSignOnCell, edgesOnCell
real (kind=RKIND) :: flux_upwind, tracer_min_new, tracer_max_new, tracer_upwind_new, scale_factor
real (kind=RKIND) :: flux, tracer_weight, invDvEdge, invAreaCell1, invAreaCell2
@@ -77,6 +77,8 @@
real (kind=RKIND), parameter :: eps = 1.e-10
+ type (field2dReal), pointer :: high_order_horiz_flux_field
+
! Initialize pointers
dvEdge => grid % dvEdge % array
cellsOnEdge => grid % cellsOnEdge % array
@@ -93,6 +95,8 @@
maxLevelEdgeTop => grid % maxLevelEdgeTop % array
highOrderAdvectionMask => grid % highOrderAdvectionMask % array
lowOrderAdvectionMask => grid % lowOrderAdvectionMask % array
+ edgeSignOnCell => grid % edgeSignOnCell % array
+ edgesOnCell => grid % edgesOnCell % array
nCells = grid % nCells
nCellsSolve = grid % nCellsSolve
@@ -100,19 +104,30 @@
nVertLevels = grid % nVertLevels
num_tracers = size(tracers,dim=1)
+ allocate(high_order_horiz_flux_field)
+ nullify(high_order_horiz_flux_field % next)
+ high_order_horiz_flux_field % block => grid % block
+ high_order_horiz_flux_field % sendList => grid % xEdge % sendList
+ high_order_horiz_flux_field % recvList => grid % xEdge % recvList
+ high_order_horiz_flux_field % copyList => grid % xEdge % copyList
+ high_order_horiz_flux_field % dimSizes(1) = nVertLevels
+ high_order_horiz_flux_field % dimSizes(2) = nEdges+1
+ allocate(high_order_horiz_flux_field % array(high_order_horiz_flux_field % dimSizes(1), high_order_horiz_flux_field % dimSizes(2)))
+ high_order_horiz_flux => high_order_horiz_flux_field % array
+
! allocate nCells arrays
- allocate(tracer_new(nVertLevels, nCells))
- allocate(tracer_cur(nVertLevels, nCells))
- allocate(upwind_tendency(nVertLevels, nCells))
- allocate(inv_h_new(nVertLevels, nCells))
- allocate(tracer_max(nVertLevels, nCells))
- allocate(tracer_min(nVertLevels, nCells))
- allocate(flux_incoming(nVertLevels, nCells))
- allocate(flux_outgoing(nVertLevels, nCells))
+ allocate(tracer_new(nVertLevels, nCells+1))
+ allocate(tracer_cur(nVertLevels, nCells+1))
+ allocate(upwind_tendency(nVertLevels, nCells+1))
+ allocate(inv_h_new(nVertLevels, nCells+1))
+ allocate(tracer_max(nVertLevels, nCells+1))
+ allocate(tracer_min(nVertLevels, nCells+1))
+ allocate(flux_incoming(nVertLevels, nCells+1))
+ allocate(flux_outgoing(nVertLevels, nCells+1))
! allocate nEdges arrays
- allocate(high_order_horiz_flux(nVertLevels, nEdges))
+! allocate(high_order_horiz_flux(nVertLevels, nEdges))
! allocate nVertLevels+1 and nCells arrays
allocate(high_order_vert_flux(nVertLevels+1, nCells))
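The switch from nCells to nCells+1 sizing gives every cell-indexed work array one spare element: boundary edges can point at cell nCells+1, so unconditional writes through cellsOnEdge land in that garbage slot instead of out of bounds. A sketch of the convention as inferred from the sizing change:

   program garbage_cell_demo
      implicit none
      integer, parameter :: nCells = 4
      real :: work(nCells+1)           ! one spare slot: the "garbage" cell
      integer :: neighbour
      neighbour = nCells + 1           ! a boundary edge's missing neighbour
      work = 0.0
      work(neighbour) = 99.0           ! write lands safely outside the real range
      print *, work(1:nCells)          ! owned cells untouched: all zeros
   end program garbage_cell_demo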
@@ -192,6 +207,8 @@
end do ! i loop over nAdvCellsForEdge
end do ! iEdge loop
+ call mpas_dmpar_exch_halo_field(high_order_horiz_flux_field)
+
! low order upwind vertical flux (monotonic and diffused)
! Remove low order flux from the high order flux.
! Store left over high order flux in high_order_vert_flux array.
@@ -215,8 +232,8 @@
! flux_incoming (k,iCell) = -(min(0.,high_order_vert_flux(k+1,iCell))-max(0.,high_order_vert_flux(k,iCell)))
! flux_outgoing(k,iCell) = -(max(0.,high_order_vert_flux(k+1,iCell))-min(0.,high_order_vert_flux(k,iCell)))
- flux_incoming (k, iCell) = max(0.0, high_order_vert_flux(k+1, iCell)) - min(0.0, high_order_vert_flux(k, iCell))
- flux_outgoing(k, iCell) = min(0.0, high_order_vert_flux(k+1, iCell)) - max(0.0, high_order_vert_flux(k, iCell))
+ flux_incoming (k, iCell) = max(0.0_RKIND, high_order_vert_flux(k+1, iCell)) - min(0.0_RKIND, high_order_vert_flux(k, iCell))
+ flux_outgoing(k, iCell) = min(0.0_RKIND, high_order_vert_flux(k+1, iCell)) - max(0.0_RKIND, high_order_vert_flux(k, iCell))
end do ! k Loop
end do ! iCell Loop
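The 0.0_RKIND literals above replace default-real zeros in max/min calls, whose arguments must agree in kind once RKIND names a non-default real; a minimal sketch of why the kind suffix matters:

   program kind_literal_demo
      implicit none
      integer, parameter :: RKIND = selected_real_kind(12)  ! stand-in for the model kind
      real (kind=RKIND) :: x
      x = -1.0e-20_RKIND
      ! max(0., x) would mix a default-real zero with a kind-RKIND x; strict
      ! compilers reject the mismatched kinds, so the literal carries the kind:
      print *, max(0.0_RKIND, x)       ! 0.0 at full working precision
   end program kind_literal_demo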
@@ -234,18 +251,27 @@
do k = 1, maxLevelEdgeTop(iEdge)
flux_upwind = dvEdge(iEdge) * (max(0.,uh(k,iEdge))*tracer_cur(k,cell1) + min(0.,uh(k,iEdge))*tracer_cur(k,cell2))
high_order_horiz_flux(k,iEdge) = high_order_horiz_flux(k,iEdge) - flux_upwind
-
- upwind_tendency(k,cell1) = upwind_tendency(k,cell1) - flux_upwind * invAreaCell1
- upwind_tendency(k,cell2) = upwind_tendency(k,cell2) + flux_upwind * invAreaCell2
-
- ! Accumulate remaining high order fluxes
- flux_outgoing(k,cell1) = flux_outgoing(k,cell1) - max(0.,high_order_horiz_flux(k,iEdge)) * invAreaCell1
- flux_incoming (k,cell1) = flux_incoming (k,cell1) - min(0.,high_order_horiz_flux(k,iEdge)) * invAreaCell1
- flux_outgoing(k,cell2) = flux_outgoing(k,cell2) + min(0.,high_order_horiz_flux(k,iEdge)) * invAreaCell2
- flux_incoming (k,cell2) = flux_incoming (k,cell2) + max(0.,high_order_horiz_flux(k,iEdge)) * invAreaCell2
end do ! k loop
end do ! iEdge loop
+ do iCell = 1, nCells
+ invAreaCell1 = 1.0 / areaCell(iCell)
+ do i = 1, nEdgesOnCell(iCell)
+ iEdge = edgesOnCell(i, iCell)
+ cell1 = cellsOnEdge(1,iEdge)
+ cell2 = cellsOnEdge(2,iEdge)
+ do k = 1, maxLevelEdgeTop(iEdge)
+ flux_upwind = dvEdge(iEdge) * (max(0.0_RKIND,uh(k,iEdge))*tracer_cur(k,cell1) + min(0.0_RKIND,uh(k,iEdge))*tracer_cur(k,cell2))
+
+ upwind_tendency(k,iCell) = upwind_tendency(k,iCell) + edgeSignOnCell(i, iCell) * flux_upwind * invAreaCell1
+
+ ! Accumulate remaining high order fluxes
+ flux_outgoing(k,iCell) = flux_outgoing(k,iCell) + edgeSignOnCell(i, iCell) * high_order_horiz_flux(k, iEdge) * invAreaCell1
+ flux_incoming(k,iCell) = flux_incoming(k,iCell) - edgeSignOnCell(i, iCell) * high_order_horiz_flux(k, iEdge) * invAreaCell1
+ end do
+ end do
+ end do
+
! Build the factors for the FCT
! Computed using the bounds that were computed previously, and the bounds on the newly updated value
! Factors are placed in the flux_incoming and flux_outgoing arrays
@@ -289,24 +315,20 @@
end do ! iCell loop
! Accumulate the scaled high order horizontal tendencies
- do iEdge = 1, nEdges
- cell1 = cellsOnEdge(1,iEdge)
- cell2 = cellsOnEdge(2,iEdge)
+ do iCell = 1, nCells
+ invAreaCell1 = 1.0 / areaCell(iCell)
+ do i = 1, nEdgesOnCell(iCell)
+ iEdge = edgesOnCell(i, iCell)
+ do k = 1, maxLevelEdgeTop(iEdge)
+ tend(iTracer, k, iCell) = tend(iTracer, k, iCell) + edgeSignOnCell(i, iCell) * high_order_horiz_flux(k, iEdge) * invAreaCell1
- invAreaCell1 = 1.0 / areaCell(cell1)
- invAreaCell2 = 1.0 / areaCell(cell2)
- do k=1,maxLevelEdgeTop(iEdge)
- tend(iTracer, k, cell1) = tend(iTracer, k, cell1) - high_order_horiz_flux(k, iEdge) * invAreaCell1
- tend(iTracer, k, cell2) = tend(iTracer, k, cell2) + high_order_horiz_flux(k, iEdge) * invAreaCell2
+ if(config_check_monotonicity) then
+ tracer_new(k, iCell) = tracer_new(k, iCell) + edgeSignOnCell(i, iCell) * high_order_horiz_flux(k, iEdge) * invAreaCell1
+ end if
+ end do
+ end do
+ end do
- if (config_check_monotonicity) then
- !tracer_new holds a tendency for now.
- tracer_new(k, cell1) = tracer_new(k, cell1) - high_order_horiz_flux(k, iEdge) * invAreaCell1
- tracer_new(k, cell2) = tracer_new(k, cell2) + high_order_horiz_flux(k, iEdge) * invAreaCell2
- end if
- end do ! k loop
- end do ! iEdge loop
-
! Accumulate the scaled high order vertical tendencies, and the upwind tendencies
do iCell = 1, nCellsSolve
do k = 1,maxLevelCell(iCell)
@@ -348,6 +370,7 @@
deallocate(flux_outgoing)
deallocate(high_order_horiz_flux)
deallocate(high_order_vert_flux)
+ deallocate(high_order_horiz_flux_field)
end subroutine mpas_ocn_tracer_advection_mono_tend!}}}
!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hmix_del2.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hmix_del2.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hmix_del2.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -107,13 +107,13 @@
!
!-----------------------------------------------------------------
- integer :: iEdge, nEdges, nVertLevels, cell1, cell2
- integer :: k, iTracer, num_tracers
+ integer :: iCell, iEdge, nCells, nEdges, nVertLevels, cell1, cell2
+ integer :: i, k, iTracer, num_tracers
integer, dimension(:,:), allocatable :: boundaryMask
- integer, dimension(:), pointer :: maxLevelEdgeTop
- integer, dimension(:,:), pointer :: cellsOnEdge, edgeMask
+ integer, dimension(:), pointer :: maxLevelEdgeTop, nEdgesOnCell
+ integer, dimension(:,:), pointer :: cellsOnEdge, edgeMask, edgesOnCell, edgeSignOnCell
real (kind=RKIND) :: invAreaCell1, invAreaCell2
real (kind=RKIND) :: tracer_turb_flux, flux, r_tmp
@@ -134,6 +134,7 @@
if (.not.del2On) return
nEdges = grid % nEdges
+ nCells = grid % nCells
nVertLevels = grid % nVertLevels
num_tracers = size(tracers, dim=1)
@@ -145,31 +146,37 @@
dcEdge => grid % dcEdge % array
meshScalingDel2 => grid % meshScalingDel2 % array
+ nEdgesOnCell => grid % nEdgesOnCell % array
+ edgesOnCell => grid % edgesOnCell % array
+ edgeSignOnCell => grid % edgeSignOnCell % array
+
!
! compute a boundary mask to enforce insulating boundary conditions in the horizontal
!
- do iEdge=1,nEdges
- cell1 = cellsOnEdge(1,iEdge)
- cell2 = cellsOnEdge(2,iEdge)
- invAreaCell1 = 1.0/areaCell(cell1)
- invAreaCell2 = 1.0/areaCell(cell2)
+ do iCell = 1, nCells
+ invAreaCell1 = 1.0 / areaCell(iCell)
+ do i = 1, nEdgesOnCell(iCell)
+ iEdge = edgesOnCell(i, iCell)
+ cell1 = cellsOnEdge(1,iEdge)
+ cell2 = cellsOnEdge(2,iEdge)
- r_tmp = meshScalingDel2(iEdge) * eddyDiff2 * dvEdge(iEdge) / dcEdge(iEdge)
-
- do k=1,maxLevelEdgeTop(iEdge)
- do iTracer=1,num_tracers
+ r_tmp = meshScalingDel2(iEdge) * eddyDiff2 * dvEdge(iEdge) / dcEdge(iEdge)
+
+ do k = 1, maxLevelEdgeTop(iEdge)
+ do iTracer = 1, num_tracers
! \kappa_2 \nabla \phi on edge
- tracer_turb_flux = tracers(iTracer,k,cell2) - tracers(iTracer,k,cell1)
+ tracer_turb_flux = tracers(iTracer, k, cell2) - tracers(iTracer, k, cell1)
! div(h \kappa_2 \nabla \phi) at cell center
- flux = h_edge(k,iEdge) * tracer_turb_flux * edgeMask(k, iEdge) * r_tmp
+ flux = h_edge(k, iEdge) * tracer_turb_flux * edgeMask(k, iEdge) * r_tmp
- tend(iTracer,k,cell1) = tend(iTracer,k,cell1) + flux * invAreaCell1
- tend(iTracer,k,cell2) = tend(iTracer,k,cell2) - flux * invAreaCell2
- end do
- end do
+ tend(iTracer, k, iCell) = tend(iTracer, k, iCell) - edgeSignOnCell(i, iCell) * flux * invAreaCell1
+ end do
+ end do
+ end do
end do
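For reference, the discrete operator the rewritten gather loop implements (informal notation; s_{i,e} is edgeSignOnCell):

   ! tend_i += (1/A_i) * sum over edges e of cell i of
   !           -s_{i,e} * meshScalingDel2 * kappa_2 * (dv_e/dc_e)
   !            * h_e * ( phi(cell2) - phi(cell1) )
   !
   ! i.e. the discrete div( h \kappa_2 \nabla \phi ). Because s_{i,e} takes
   ! opposite signs in the two cells adjacent to an edge, each edge flux
   ! enters those cells with opposite sign and the operator conserves the
   ! tracer exactly, matching the scatter form it replaces.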
+
!--------------------------------------------------------------------
end subroutine ocn_tracer_hmix_del2_tend!}}}
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hmix_del4.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hmix_del4.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hmix_del4.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -108,10 +108,10 @@
!-----------------------------------------------------------------
integer :: iEdge, nEdges, num_tracers, nVertLevels, nCells
- integer :: iTracer, k, iCell, cell1, cell2
+ integer :: iTracer, k, iCell, cell1, cell2, i
- integer, dimension(:), pointer :: maxLevelEdgeTop, maxLevelCell
- integer, dimension(:,:), pointer :: edgeMask, cellsOnEdge
+ integer, dimension(:), pointer :: maxLevelEdgeTop, maxLevelCell, nEdgesOnCell
+ integer, dimension(:,:), pointer :: edgeMask, cellsOnEdge, edgesOnCell, edgeSignOnCell
real (kind=RKIND) :: invAreaCell1, invAreaCell2, tracer_turb_flux, flux, invdcEdge, r_tmp1, r_tmp2
@@ -148,56 +148,55 @@
edgeMask => grid % edgeMask % array
+ nEdgesOnCell => grid % nEdgesOnCell % array
+ edgesOnCell => grid % edgesOnCell % array
+ edgeSignOnCell => grid % edgeSignOnCell % array
+
allocate(delsq_tracer(num_tracers,nVertLevels, nCells+1))
delsq_tracer(:,:,:) = 0.0
! first del2: div(h \nabla \phi) at cell center
- do iEdge=1,nEdges
- cell1 = cellsOnEdge(1,iEdge)
- cell2 = cellsOnEdge(2,iEdge)
+ do iCell = 1, nCells
+ invAreaCell1 = 1.0 / areaCell(iCell)
+ do i = 1, nEdgesOnCell(iCell)
+ iEdge = edgesOnCell(i, iCell)
+ invdcEdge = dvEdge(iEdge) / dcEdge(iEdge)
+ cell1 = cellsOnEdge(1,iEdge)
+ cell2 = cellsOnEdge(2,iEdge)
- invdcEdge = 1.0 / dcEdge(iEdge)
+ do k = 1, maxLevelEdgeTop(iEdge)
+ do iTracer = 1, num_tracers * edgeMask(k, iEdge)
- invAreaCell1 = 1.0 / areaCell(cell1)
- invAreaCell2 = 1.0 / areaCell(cell2)
+ r_tmp1 = invdcEdge * h_edge(k, iEdge) * tracers(iTracer, k, cell1)
+ r_tmp2 = invdcEdge * h_edge(k, iEdge) * tracers(iTracer, k, cell2)
- do k=1,maxLevelEdgeTop(iEdge)
- do iTracer=1,num_tracers * edgeMask(k, iEdge)
-
- r_tmp1 = dvEdge(iEdge) * h_edge(k,iEdge) * invdcEdge
-
- r_tmp2 = r_tmp1 * tracers(iTracer,k,cell2)
- r_tmp1 = r_tmp1 * tracers(iTracer,k,cell1)
-
- delsq_tracer(iTracer,k,cell1) = delsq_tracer(iTracer,k,cell1) + (r_tmp2 - r_tmp1) * invAreaCell1
- delsq_tracer(iTracer,k,cell2) = delsq_tracer(iTracer,k,cell2) - (r_tmp2 - r_tmp1) * invAreaCell2
- end do
- end do
+ delsq_tracer(iTracer, k, iCell) = delsq_tracer(iTracer, k, iCell) - edgeSignOnCell(i, iCell) * (r_tmp2 - r_tmp1) * invAreaCell1
+ end do
+ end do
+ end do
end do
! second del2: div(h \nabla [delsq_tracer]) at cell center
- do iEdge=1,grid % nEdges
- cell1 = grid % cellsOnEdge % array(1,iEdge)
- cell2 = grid % cellsOnEdge % array(2,iEdge)
+ do iCell = 1, nCells
+ invAreaCell1 = 1.0 / areaCell(iCell)
+ do i = 1, nEdgesOnCell(iCell)
+ iEdge = edgesOnCell(i, iCell)
+ cell1 = cellsOnEdge(1, iEdge)
+ cell2 = cellsOnedge(2, iEdge)
- invAreaCell1 = 1.0 / areaCell(cell1)
- invAreaCell2 = 1.0 / areaCell(cell2)
+ invdcEdge = meshScalingDel4(iEdge) * dvEdge(iEdge) * eddyDiff4 / dcEdge(iEdge)
- invdcEdge = 1.0 / dcEdge(iEdge)
+ do k = 1, maxLevelEdgeTop(iEdge)
+ do iTracer = 1, num_tracers * edgeMask(k, iEdge)
+ tracer_turb_flux = (delsq_tracer(iTracer, k, cell2) - delsq_tracer(iTracer, k, cell1))
+
+ flux = tracer_turb_flux * invdcEdge
- do k=1,maxLevelEdgeTop(iEdge)
- do iTracer=1,num_tracers * edgeMask(k,iEdge)
- tracer_turb_flux = meshScalingDel4(iEdge) * eddyDiff4 &
- * (delsq_tracer(iTracer,k,cell2) - delsq_tracer(iTracer,k,cell1)) &
- * invdcEdge
-
- flux = dvEdge (iEdge) * tracer_turb_flux
-
- tend(iTracer,k,cell1) = tend(iTracer,k,cell1) - flux * invAreaCell1
- tend(iTracer,k,cell2) = tend(iTracer,k,cell2) + flux * invAreaCell2
- enddo
- enddo
+ tend(iTracer, k, iCell) = tend(iTracer, k, iCell) + edgeSignOnCell(i, iCell) * flux * invAreaCell1
+ end do
+ end do
+ end do
end do
deallocate(delsq_tracer)
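
For reference, the hunk above replaces an edge-loop scatter (each edge updating both adjacent cells) with a cell-loop gather over edgesOnCell, using edgeSignOnCell to orient each edge relative to the cell. A minimal, self-contained sketch of the gather pattern, with hypothetical sizes and data rather than MPAS structures:

    program edge_sign_gather
       implicit none
       integer, parameter :: nCells = 2, nEdges = 1, maxEdges = 1
       integer :: nEdgesOnCell(nCells), edgesOnCell(maxEdges, nCells)
       integer :: edgeSignOnCell(maxEdges, nCells)
       real    :: flux(nEdges), areaCell(nCells), div(nCells)
       integer :: iCell, i, iEdge

       ! two cells sharing one edge; the sign is -1 on the cell the
       ! edge normal points out of and +1 on the cell it points into
       nEdgesOnCell         = (/ 1, 1 /)
       edgesOnCell(1, :)    = (/ 1, 1 /)
       edgeSignOnCell(1, :) = (/ -1, 1 /)
       flux     = (/ 2.0 /)
       areaCell = (/ 1.0, 1.0 /)

       div = 0.0
       do iCell = 1, nCells
          do i = 1, nEdgesOnCell(iCell)
             iEdge = edgesOnCell(i, iCell)
             div(iCell) = div(iCell) - edgeSignOnCell(i, iCell) * flux(iEdge) / areaCell(iCell)
          end do
       end do
       print *, div    ! equal-and-opposite contributions: 2.0, -2.0
    end program edge_sign_gather

Each cell now touches only its own row of the output array, which is what makes the loop safe for block- and thread-parallel execution.
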
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_vel_forcing_windstress.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_vel_forcing_windstress.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_vel_forcing_windstress.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -43,9 +43,7 @@
!--------------------------------------------------------------------
logical :: windStressOn
- real (kind=RKIND) :: rho_ref
-
!***********************************************************************
contains
@@ -125,7 +123,6 @@
edgeMask => grid % edgeMask % array
do iEdge=1,nEdgesSolve
-
! efficiency note: it would be nice to avoid this
! if within a do. This could be done with
! k = max(maxLevelEdgeTop(iEdge),1)
@@ -133,7 +130,7 @@
do k = 1,min(maxLevelEdgeTop(iEdge),1)
! forcing in top layer only
- tend(k,iEdge) = tend(k,iEdge) + edgeMask(k, iEdge) * (u_src(k,iEdge)/rho_ref/h_edge(k,iEdge))
+ tend(k,iEdge) = tend(k,iEdge) + edgeMask(k, iEdge) * (u_src(k,iEdge) / config_rho0 / h_edge(k,iEdge))
enddo
enddo
@@ -170,7 +167,6 @@
integer, intent(out) :: err !< Output: error flag
windStressOn = .true.
- rho_ref = 1000.0
err = 0
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_vel_hmix.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_vel_hmix.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_vel_hmix.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -20,6 +20,7 @@
use mpas_configure
use mpas_timer
use ocn_vel_hmix_del2
+ use ocn_vel_hmix_leith
use ocn_vel_hmix_del4
implicit none
@@ -47,7 +48,7 @@
!
!--------------------------------------------------------------------
- type (timer_node), pointer :: del2Timer, del4Timer
+ type (timer_node), pointer :: del2Timer, leithTimer, del4Timer
!***********************************************************************
@@ -72,7 +73,7 @@
!
!-----------------------------------------------------------------------
- subroutine ocn_vel_hmix_tend(grid, divergence, vorticity, tend, err)!{{{
+ subroutine ocn_vel_hmix_tend(grid, divergence, vorticity, viscosity, tend, err)!{{{
!-----------------------------------------------------------------
!
@@ -98,6 +99,9 @@
real (kind=RKIND), dimension(:,:), intent(inout) :: &
tend !< Input/Output: velocity tendency
+ real (kind=RKIND), dimension(:,:), intent(inout) :: &
+ viscosity !< Input/Output: viscosity
+
!-----------------------------------------------------------------
!
! output variables
@@ -112,7 +116,7 @@
!
!-----------------------------------------------------------------
- integer :: err1, err2
+ integer :: err1, err2, err3
!-----------------------------------------------------------------
!
@@ -122,14 +126,21 @@
!
!-----------------------------------------------------------------
+ viscosity = 0.0
+
call mpas_timer_start("del2", .false., del2Timer)
- call ocn_vel_hmix_del2_tend(grid, divergence, vorticity, tend, err1)
+ call ocn_vel_hmix_del2_tend(grid, divergence, vorticity, viscosity, tend, err1)
call mpas_timer_stop("del2", del2Timer)
+
+ call mpas_timer_start("leith", .false., leithTimer)
+ call ocn_vel_hmix_leith_tend(grid, divergence, vorticity, viscosity, tend, err2)
+ call mpas_timer_stop("leith", leithTimer)
+
call mpas_timer_start("del4", .false., del4Timer)
- call ocn_vel_hmix_del4_tend(grid, divergence, vorticity, tend, err2)
+ call ocn_vel_hmix_del4_tend(grid, divergence, vorticity, tend, err3)
call mpas_timer_stop("del4", del4Timer)
- err = ior(err1, err2)
+ err = ior(ior(err1, err2),err3)
!--------------------------------------------------------------------
@@ -163,12 +174,13 @@
integer, intent(out) :: err !< Output: error flag
- integer :: err1, err2
+ integer :: err1, err2, err3
call ocn_vel_hmix_del2_init(err1)
- call ocn_vel_hmix_del4_init(err2)
+ call ocn_vel_hmix_leith_init(err2)
+ call ocn_vel_hmix_del4_init(err3)
- err = ior(err1, err2)
+ err = ior(ior(err1, err2),err3)
!--------------------------------------------------------------------
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_vel_hmix_del2.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_vel_hmix_del2.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_vel_hmix_del2.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -70,7 +70,7 @@
!
!-----------------------------------------------------------------------
- subroutine ocn_vel_hmix_del2_tend(grid, divergence, vorticity, tend, err)!{{{
+ subroutine ocn_vel_hmix_del2_tend(grid, divergence, vorticity, viscosity, tend, err)!{{{
!-----------------------------------------------------------------
!
@@ -96,6 +96,8 @@
real (kind=RKIND), dimension(:,:), intent(inout) :: &
tend !< Input/Output: velocity tendency
+ real (kind=RKIND), dimension(:,:), intent(inout) :: &
+ viscosity !< Input/Output: viscosity
!-----------------------------------------------------------------
!
@@ -111,12 +113,11 @@
!
!-----------------------------------------------------------------
- integer :: iEdge, nEdgesSolve, cell1, cell2, vertex1, vertex2
- integer :: k
+ integer :: iEdge, nEdgesSolve, cell1, cell2, vertex1, vertex2, k
integer, dimension(:), pointer :: maxLevelEdgeTop
integer, dimension(:,:), pointer :: cellsOnEdge, verticesOnEdge, edgeMask
- real (kind=RKIND) :: u_diffusion, invLength1, invLength2
+ real (kind=RKIND) :: u_diffusion, invLength1, invLength2, visc2
real (kind=RKIND), dimension(:), pointer :: meshScalingDel2, &
dcEdge, dvEdge
@@ -158,10 +159,12 @@
-viscVortCoef &
*( vorticity(k,vertex2) - vorticity(k,vertex1) ) * invLength2
- u_diffusion = meshScalingDel2(iEdge) * eddyVisc2 * u_diffusion
+ visc2 = meshScalingDel2(iEdge) * eddyVisc2
- tend(k,iEdge) = tend(k,iEdge) + edgeMask(k, iEdge) * u_diffusion
+ tend(k,iEdge) = tend(k,iEdge) + edgeMask(k, iEdge) * visc2 * u_diffusion
+ viscosity(k,iEdge) = viscosity(k,iEdge) + visc2
+
end do
end do
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_vel_hmix_del4.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_vel_hmix_del4.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_vel_hmix_del4.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -112,22 +112,22 @@
!
!-----------------------------------------------------------------
- integer :: iEdge, cell1, cell2, vertex1, vertex2, k
+ integer :: iEdge, cell1, cell2, vertex1, vertex2, k, i
integer :: iCell, iVertex
- integer :: nVertices, nVertLevels, nCells, nEdges, nEdgesSolve
+ integer :: nVertices, nVertLevels, nCells, nEdges, nEdgesSolve, vertexDegree
integer, dimension(:), pointer :: maxLevelEdgeTop, maxLevelVertexBot, &
- maxLevelCell
- integer, dimension(:,:), pointer :: cellsOnEdge, verticesOnEdge, edgeMask
+ maxLevelCell, nEdgesOnCell
+ integer, dimension(:,:), pointer :: cellsOnEdge, verticesOnEdge, edgeMask, edgesOnVertex, edgesOnCell, edgeSignOnVertex, edgeSignOnCell
real (kind=RKIND) :: u_diffusion, invAreaCell1, invAreaCell2, invAreaTri1, &
- invAreaTri2, invDcEdge, invDvEdge, r_tmp, delsq_u
+ invAreaTri2, invDcEdge, invDvEdge, r_tmp
real (kind=RKIND), dimension(:), pointer :: dcEdge, dvEdge, areaTriangle, &
meshScalingDel4, areaCell
real (kind=RKIND), dimension(:,:), allocatable :: delsq_divergence, &
- delsq_circulation, delsq_vorticity
+ delsq_circulation, delsq_vorticity, delsq_u
err = 0
@@ -138,6 +138,8 @@
nEdgesSolve = grid % nEdgesSolve
nVertices = grid % nVertices
nVertLevels = grid % nVertLevels
+ vertexDegree = grid % vertexDegree
+
maxLevelEdgeTop => grid % maxLevelEdgeTop % array
maxLevelVertexBot => grid % maxLevelVertexBot % array
maxLevelCell => grid % maxLevelCell % array
@@ -149,43 +151,57 @@
areaCell => grid % areaCell % array
meshScalingDel4 => grid % meshScalingDel4 % array
edgeMask => grid % edgeMask % array
+ nEdgesOnCell => grid % nEdgesOnCell % array
+ edgesOnVertex => grid % edgesOnVertex % array
+ edgesOnCell => grid % edgesOnCell % array
+ edgeSignOnVertex => grid % edgeSignOnVertex % array
+ edgeSignOnCell => grid % edgeSignOnCell % array
+ allocate(delsq_u(nVertLevels, nEdges+1))
allocate(delsq_divergence(nVertLevels, nCells+1))
allocate(delsq_vorticity(nVertLevels, nVertices+1))
+ delsq_u(:,:) = 0.0
delsq_vorticity(:,:) = 0.0
delsq_divergence(:,:) = 0.0
- do iEdge=1,nEdges
+ !Compute delsq_u
+ do iEdge = 1, nEdges
cell1 = cellsOnEdge(1,iEdge)
cell2 = cellsOnEdge(2,iEdge)
vertex1 = verticesOnEdge(1,iEdge)
vertex2 = verticesOnEdge(2,iEdge)
- invAreaTri1 = 1.0 / areaTriangle(vertex1)
- invAreaTri2 = 1.0 / areaTriangle(vertex2)
-
- invAreaCell1 = 1.0 / areaCell(cell1)
- invAreaCell2 = 1.0 / areaCell(cell2)
-
invDcEdge = 1.0 / dcEdge(iEdge)
invDvEdge = 1.0 / dvEdge(iEdge)
do k=1,maxLevelEdgeTop(iEdge)
! Compute \nabla^2 u = \nabla divergence + k \times \nabla vorticity
- delsq_u = ( divergence(k,cell2) - divergence(k,cell1) ) * invDcEdge &
+ delsq_u(k, iEdge) = ( divergence(k,cell2) - divergence(k,cell1) ) * invDcEdge &
-viscVortCoef *( vorticity(k,vertex2) - vorticity(k,vertex1)) * invDcEdge * sqrt(3.0) ! TDR
+ end do
+ end do
- ! vorticity using \nabla^2 u
- r_tmp = dcEdge(iEdge) * delsq_u
- delsq_vorticity(k,vertex1) = delsq_vorticity(k,vertex1) - r_tmp * invAreaTri1
- delsq_vorticity(k,vertex2) = delsq_vorticity(k,vertex2) + r_tmp * invAreaTri2
+ ! Compute delsq_vorticity
+ do iVertex = 1, nVertices
+ invAreaTri1 = 1.0 / areaTriangle(iVertex)
+ do i = 1, vertexDegree
+ iEdge = edgesOnVertex(i, iVertex)
+ do k = 1, maxLevelVertexBot(iVertex)
+ delsq_vorticity(k, iVertex) = delsq_vorticity(k, iVertex) + edgeSignOnVertex(i, iVertex) * dcEdge(iEdge) * delsq_u(k, iEdge) * invAreaTri1
+ end do
+ end do
+ end do
- ! Divergence using \nabla^2 u
- r_tmp = dvEdge(iEdge) * delsq_u
- delsq_divergence(k, cell1) = delsq_divergence(k,cell1) + r_tmp * invAreaCell1
- delsq_divergence(k, cell2) = delsq_divergence(k,cell2) - r_tmp * invAreaCell2
+ ! Compute delsq_divergence
+ do iCell = 1, nCells
+ invAreaCell1 = 1.0 / areaCell(iCell)
+ do i = 1, nEdgesOnCell(iCell)
+ iEdge = edgesOnCell(i, iCell)
+ do k = 1, maxLevelCell(iCell)
+ delsq_divergence(k, iCell) = delsq_divergence(k, iCell) - edgeSignOnCell(i, iCell) * dvEdge(iEdge) * delsq_u(k, iEdge) * invAreaCell1
+ end do
end do
end do
@@ -209,6 +225,7 @@
end do
end do
+ deallocate(delsq_u)
deallocate(delsq_divergence)
deallocate(delsq_vorticity)
Copied: branches/atmos_physics/src/core_ocean/mpas_ocn_vel_hmix_leith.F (from rev 2274, trunk/mpas/src/core_ocean/mpas_ocn_vel_hmix_leith.F)
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_vel_hmix_leith.F         (rev 0)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_vel_hmix_leith.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -0,0 +1,235 @@
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+! ocn_vel_hmix_leith
+!
+!> \brief Ocean horizontal mixing - Leith parameterization
+!> \author Mark Petersen
+!> \date 22 October 2012
+!> \version SVN:$Id:$
+!> \details
+!> This module contains routines for computing horizontal mixing
+!> tendencies using the Leith parameterization.
+!
+!-----------------------------------------------------------------------
+
+module ocn_vel_hmix_leith
+
+ use mpas_grid_types
+ use mpas_configure
+
+ implicit none
+ private
+ save
+
+ !--------------------------------------------------------------------
+ !
+ ! Public parameters
+ !
+ !--------------------------------------------------------------------
+
+ !--------------------------------------------------------------------
+ !
+ ! Public member functions
+ !
+ !--------------------------------------------------------------------
+
+ public :: ocn_vel_hmix_leith_tend, &
+ ocn_vel_hmix_leith_init
+
+ !-------------------------------------------------------------------
+ !
+ ! Private module variables
+ !
+ !--------------------------------------------------------------------
+
+ logical :: hmixLeithOn !< logical flag to determine whether the Leith closure is chosen
+
+ real (kind=RKIND) :: &
+ viscVortCoef
+
+!***********************************************************************
+
+contains
+
+!***********************************************************************
+!
+! routine ocn_vel_hmix_leith_tend
+!
+!> \brief Computes tendency term for horizontal momentum mixing with Leith parameterization
+!> \author Mark Petersen, Todd Ringler
+!> \date 22 October 2012
+!> \version SVN:$Id$
+!> \details
+!> This routine computes the horizontal mixing tendency for momentum
+!> based on the Leith closure. The Leith closure is the
+!> enstrophy-cascade analogy to the Smagorinsky (1963) energy-cascade
+!> closure, i.e. Leith (1996) assumes an inertial range of enstrophy flux
+!> moving toward the grid scale. The assumption of an enstrophy cascade
+!> and dimensional analysis produces right-hand-side dissipation,
+!> $\bf{D}$, of velocity of the form
+!> $ {\bf D} = \nabla \cdot \left( \nu_\ast \nabla {\bf u} \right)
+!> = \nabla \cdot \left( \gamma \left| \nabla \omega \right|
+!> \left( \Delta x \right)^3 \nabla \bf{u} \right) $
+!> where $\omega$ is the relative vorticity and $\gamma$ is a non-dimensional,
+!> $O(1)$ parameter. We set $\gamma=1$.
+
+!
+!-----------------------------------------------------------------------
+
+ subroutine ocn_vel_hmix_leith_tend(grid, divergence, vorticity, viscosity, tend, err)!{{{
+
+ !-----------------------------------------------------------------
+ !
+ ! input variables
+ !
+ !-----------------------------------------------------------------
+
+ real (kind=RKIND), dimension(:,:), intent(in) :: &
+ divergence !< Input: velocity divergence
+
+ real (kind=RKIND), dimension(:,:), intent(in) :: &
+ vorticity !< Input: vorticity
+
+ type (mesh_type), intent(in) :: &
+ grid !< Input: grid information
+
+ !-----------------------------------------------------------------
+ !
+ ! input/output variables
+ !
+ !-----------------------------------------------------------------
+
+ real (kind=RKIND), dimension(:,:), intent(inout) :: &
+ tend !< Input/Output: velocity tendency
+
+ real (kind=RKIND), dimension(:,:), intent(inout) :: &
+ viscosity !< Input/Output: viscosity
+
+ !-----------------------------------------------------------------
+ !
+ ! output variables
+ !
+ !-----------------------------------------------------------------
+
+ integer, intent(out) :: err !< Output: error flag
+
+ !-----------------------------------------------------------------
+ !
+ ! local variables
+ !
+ !-----------------------------------------------------------------
+
+ integer :: iEdge, nEdgesSolve, cell1, cell2, vertex1, vertex2, k
+ integer, dimension(:), pointer :: maxLevelEdgeTop
+ integer, dimension(:,:), pointer :: cellsOnEdge, verticesOnEdge, edgeMask
+
+ real (kind=RKIND) :: u_diffusion, invLength1, invLength2, visc2
+ real (kind=RKIND), dimension(:), pointer :: meshScaling, &
+ dcEdge, dvEdge
+
+ !-----------------------------------------------------------------
+ !
+ ! exit if this mixing is not selected
+ !
+ !-----------------------------------------------------------------
+
+ err = 0
+
+ if(.not.hmixLeithOn) return
+
+ nEdgesSolve = grid % nEdgesSolve
+ maxLevelEdgeTop => grid % maxLevelEdgeTop % array
+ cellsOnEdge => grid % cellsOnEdge % array
+ verticesOnEdge => grid % verticesOnEdge % array
+ meshScaling => grid % meshScaling % array
+ edgeMask => grid % edgeMask % array
+ dcEdge => grid % dcEdge % array
+ dvEdge => grid % dvEdge % array
+
+ do iEdge=1,nEdgesSolve
+ cell1 = cellsOnEdge(1,iEdge)
+ cell2 = cellsOnEdge(2,iEdge)
+ vertex1 = verticesOnEdge(1,iEdge)
+ vertex2 = verticesOnEdge(2,iEdge)
+
+ invLength1 = 1.0 / dcEdge(iEdge)
+ invLength2 = 1.0 / dvEdge(iEdge)
+
+ do k=1,maxLevelEdgeTop(iEdge)
+
+ ! Here -( vorticity(k,vertex2) - vorticity(k,vertex1) ) / dvEdge(iEdge)
+ ! is - \nabla vorticity pointing from vertex 2 to vertex 1, or equivalently
+ ! + k \times \nabla vorticity pointing from cell1 to cell2.
+
+ u_diffusion = ( divergence(k,cell2) - divergence(k,cell1) ) * invLength1 &
+ -viscVortCoef &
+ *( vorticity(k,vertex2) - vorticity(k,vertex1) ) * invLength2
+
+ ! Here the first line is (\Delta x)^3
+ ! the second line is |\nabla \omega|
+ ! and u_diffusion is \nabla^2 u (see formula for $\bf{D}$ above).
+ visc2 = ( config_leith_parameter * config_leith_dx * meshScaling(iEdge) / 3.14)**3 &
+ * abs( vorticity(k,vertex2) - vorticity(k,vertex1) ) * invLength1 * sqrt(3.0)
+ visc2 = min(visc2, config_leith_visc2_max)
+
+ tend(k,iEdge) = tend(k,iEdge) + edgeMask(k, iEdge) * visc2 * u_diffusion
+
+ viscosity(k,iEdge) = viscosity(k,iEdge) + visc2
+
+ end do
+ end do
+
+ !--------------------------------------------------------------------
+
+ end subroutine ocn_vel_hmix_leith_tend!}}}
+
+!***********************************************************************
+!
+! routine ocn_vel_hmix_leith_init
+!
+!> \brief Initializes ocean momentum horizontal mixing with Leith parameterization
+!> \author Mark Petersen
+!> \date 22 October 2012
+!> \version SVN:$Id$
+!> \details
+!> This routine initializes a variety of quantities related to
+!> Leith parameterization for horizontal momentum mixing in the ocean.
+!
+!-----------------------------------------------------------------------
+
+ subroutine ocn_vel_hmix_leith_init(err)!{{{
+
+
+ integer, intent(out) :: err !< Output: error flag
+
+ !--------------------------------------------------------------------
+ !
+ ! set some local module variables based on input config choices
+ !
+ !--------------------------------------------------------------------
+
+ err = 0
+
+ hmixLeithOn = .false.
+
+ if (config_use_leith_del2) then
+ hmixLeithOn = .true.
+
+ if (config_visc_vorticity_term) then
+ viscVortCoef = config_visc_vorticity_visc2_scale
+ else
+ viscVortCoef = 0.0
+ endif
+
+ endif
+
+ !--------------------------------------------------------------------
+
+ end subroutine ocn_vel_hmix_leith_init!}}}
+
+!***********************************************************************
+
+end module ocn_vel_hmix_leith
+
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+! vim: foldmethod=marker
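
In compact form, the viscosity assembled in ocn_vel_hmix_leith_tend above is, as a sketch (the /3.14 and sqrt(3.0) factors in the code approximate the hexagonal cell geometry):

    \nu_{\mathrm{Leith}} \;=\; \left( \gamma \, \Delta x \right)^{3} \left| \nabla \omega \right|,
    \qquad \nu_{\mathrm{Leith}} \;\le\; \nu_{\max}

with \gamma = config_leith_parameter, \Delta x = config_leith_dx * meshScaling(iEdge), and \nu_{\max} = config_leith_visc2_max capping the viscosity for numerical stability.
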
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_vmix.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_vmix.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_vmix.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -47,7 +47,8 @@
ocn_tracer_vmix_tend_explicit, &
ocn_vel_vmix_tend_implicit, &
ocn_tracer_vmix_tend_implicit, &
- ocn_vmix_init
+ ocn_vmix_init, &
+ ocn_vmix_implicit
!--------------------------------------------------------------------
!
@@ -288,13 +289,13 @@
!
!-----------------------------------------------------------------
- integer :: iEdge, nEdges, k, cell1, cell2, nVertLevels
+ integer :: iEdge, nEdges, k, cell1, cell2, nVertLevels, N
integer, dimension(:), pointer :: maxLevelEdgeTop
integer, dimension(:,:), pointer :: cellsOnEdge
- real (kind=RKIND), dimension(:), allocatable :: A, C, uTemp
+ real (kind=RKIND), dimension(:), allocatable :: A, B, C, uTemp
err = 0
@@ -305,43 +306,55 @@
maxLevelEdgeTop => grid % maxLevelEdgeTop % array
cellsOnEdge => grid % cellsOnEdge % array
- allocate(A(nVertLevels),C(nVertLevels),uTemp(nVertLevels))
+ allocate(A(nVertLevels),B(nVertLevels),C(nVertLevels),uTemp(nVertLevels))
+ A(1)=0
do iEdge=1,nEdges
- if (maxLevelEdgeTop(iEdge).gt.0) then
+ N=maxLevelEdgeTop(iEdge)
+ if (N.gt.0) then
- ! Compute A(k), C(k) for momentum
- ! mrp 110315 efficiency note: for z-level, could precompute
- ! -2.0*dt/(h(k)_h(k+1))/h(k) in setup
- ! h_edge is computed in compute_solve_diag, and is not available yet.
+ ! Compute A(k), B(k), C(k)
+ ! h_edge is computed in compute_solve_diag, and is not available yet,
+ ! so recompute h_edge here.
cell1 = cellsOnEdge(1,iEdge)
cell2 = cellsOnEdge(2,iEdge)
- do k=1,maxLevelEdgeTop(iEdge)
+ do k=1,N
h_edge(k,iEdge) = 0.5 * (h(k,cell1) + h(k,cell2))
end do
- do k=1,maxLevelEdgeTop(iEdge)-1
- A(k) = -2.0*dt*vertViscTopOfEdge(k+1,iEdge) &
+ ! A is lower diagonal term
+ do k=2,N
+ A(k) = -2.0*dt*vertViscTopOfEdge(k,iEdge) &
+ / (h_edge(k-1,iEdge) + h_edge(k,iEdge)) &
+ / h_edge(k,iEdge)
+ enddo
+
+ ! C is upper diagonal term
+ do k=1,N-1
+ C(k) = -2.0*dt*vertViscTopOfEdge(k+1,iEdge) &
/ (h_edge(k,iEdge) + h_edge(k+1,iEdge)) &
/ h_edge(k,iEdge)
enddo
- A(maxLevelEdgeTop(iEdge)) = -dt*config_bottom_drag_coeff &
- *sqrt(2.0*ke_edge(k,iEdge))/h_edge(k,iEdge)
- C(1) = 1 - A(1)
- do k=2,maxLevelEdgeTop(iEdge)
- C(k) = 1 - A(k) - A(k-1)
+ ! B is diagonal term
+ B(1) = 1 - C(1)
+ do k=2,N-1
+ B(k) = 1 - A(k) - C(k)
enddo
- call tridiagonal_solve(A,C,A,u(:,iEdge),uTemp,maxLevelEdgeTop(iEdge))
+ ! Apply bottom drag boundary condition on the viscous term
+ B(N) = 1 - A(N) + dt*config_bottom_drag_coeff &
+ *sqrt(2.0*ke_edge(k,iEdge))/h_edge(k,iEdge)
- u(1:maxLevelEdgeTop(iEdge),iEdge) = uTemp(1:maxLevelEdgeTop(iEdge))
- u(maxLevelEdgeTop(iEdge)+1:nVertLevels,iEdge) = 0.0
+ call tridiagonal_solve(A(2:N),B,C(1:N-1),u(:,iEdge),uTemp,N)
+ u(1:N,iEdge) = uTemp(1:N)
+ u(N+1:nVertLevels,iEdge) = 0.0
+
end if
end do
- deallocate(A,C,uTemp)
+ deallocate(A,B,C,uTemp)
!--------------------------------------------------------------------
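
The restructured momentum solve above now passes the three diagonals explicitly: A is the lower diagonal, B the main diagonal, and C the upper diagonal. A standalone sketch of a tridiagonal (Thomas) solve using that storage convention; the interface is hypothetical and is not the MPAS tridiagonal_solve routine:

    ! Solves a tridiagonal system: a(2:n) lower diagonal, b(1:n) main
    ! diagonal, c(1:n-1) upper diagonal, r(1:n) right-hand side.
    subroutine thomas_solve(a, b, c, r, x, n)
       implicit none
       integer, intent(in) :: n
       real, intent(in)    :: a(n), b(n), c(n), r(n)
       real, intent(out)   :: x(n)
       real :: bp(n), rp(n), m
       integer :: k
       bp(1) = b(1)
       rp(1) = r(1)
       do k = 2, n                    ! forward elimination
          m = a(k) / bp(k-1)
          bp(k) = b(k) - m * c(k-1)
          rp(k) = r(k) - m * rp(k-1)
       end do
       x(n) = rp(n) / bp(n)
       do k = n-1, 1, -1              ! back substitution
          x(k) = (rp(k) - c(k) * x(k+1)) / bp(k)
       end do
    end subroutine thomas_solve

The elimination is stable here because the systems assembled above are strictly diagonally dominant: B(k) = 1 - A(k) - C(k) with A(k), C(k) <= 0.
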
@@ -444,8 +457,7 @@
+ fluxVertTop(iTracer,k) - fluxVertTop(iTracer,k+1)
enddo
enddo
-!print '(a,50e12.2)', 'fluxVertTop',fluxVertTop(3,1:maxLevelCell(iCell)+1)
-!print '(a,50e12.2)', 'tend_tr ',tend_tr(3,1,1:maxLevelCell(iCell))
+
enddo ! iCell loop
deallocate(fluxVertTop)
!--------------------------------------------------------------------
@@ -509,11 +521,11 @@
!
!-----------------------------------------------------------------
- integer :: iCell, nCells, k, nVertLevels, num_tracers
+ integer :: iCell, nCells, k, nVertLevels, num_tracers, N
integer, dimension(:), pointer :: maxLevelCell
- real (kind=RKIND), dimension(:), allocatable :: A, C
+ real (kind=RKIND), dimension(:), allocatable :: A,B,C
real (kind=RKIND), dimension(:,:), allocatable :: tracersTemp
err = 0
@@ -525,32 +537,39 @@
num_tracers = size(tracers, dim=1)
maxLevelCell => grid % maxLevelCell % array
- allocate(A(nVertLevels),C(nVertLevels), tracersTemp(num_tracers,nVertLevels))
+ allocate(A(nVertLevels),B(nVertLevels),C(nVertLevels),tracersTemp(num_tracers,nVertLevels))
do iCell=1,nCells
- ! Compute A(k), C(k) for tracers
- ! mrp 110315 efficiency note: for z-level, could precompute
- ! -2.0*dt/(h(k)_h(k+1))/h(k) in setup
- do k=1,maxLevelCell(iCell)-1
- A(k) = -2.0*dt*vertDiffTopOfCell(k+1,iCell) &
+ ! Compute A(k), B(k), C(k) for tracers
+ N = maxLevelCell(iCell)
+
+ ! A is lower diagonal term
+ A(1)=0
+ do k=2,N
+ A(k) = -2.0*dt*vertDiffTopOfCell(k,iCell) &
+ / (h(k-1,iCell) + h(k,iCell)) / h(k,iCell)
+ enddo
+
+ ! C is upper diagonal term
+ do k=1,N-1
+ C(k) = -2.0*dt*vertDiffTopOfCell(k+1,iCell) &
/ (h(k,iCell) + h(k+1,iCell)) / h(k,iCell)
enddo
+ C(N) = 0.0
- A(maxLevelCell(iCell)) = 0.0
-
- C(1) = 1 - A(1)
- do k=2,maxLevelCell(iCell)
- C(k) = 1 - A(k) - A(k-1)
+ ! B is diagonal term
+ do k=1,N
+ B(k) = 1 - A(k) - C(k)
enddo
- call tridiagonal_solve_mult(A,C,A,tracers(:,:,iCell), &
- tracersTemp, maxLevelCell(iCell), nVertLevels,num_tracers)
+ call tridiagonal_solve_mult(A(2:N),B,C(1:N-1),tracers(:,:,iCell), &
+ tracersTemp, N, nVertLevels,num_tracers)
- tracers(:,1:maxLevelCell(iCell),iCell) = tracersTemp(:,1:maxLevelCell(iCell))
- tracers(:,maxLevelCell(iCell)+1:nVertLevels,iCell) = -1e34
+ tracers(:,1:N,iCell) = tracersTemp(:,1:N)
+ tracers(:,N+1:nVertLevels,iCell) = -1e34
end do
- deallocate(A,C,tracersTemp)
+ deallocate(A,B,C,tracersTemp)
!--------------------------------------------------------------------
@@ -558,6 +577,61 @@
!***********************************************************************
!
+! routine ocn_vmix_implicit
+!
+!> \brief Driver for implicit vertical mixing
+!> \author Doug Jacobsen
+!> \date 19 September 2011
+!> \version SVN:$Id$
+!> \details
+!> This routine is a driver for handling implicit vertical mixing
+!> of both momentum and tracers for a block. It's intended to reduce
+!> redundant code.
+!
+!-----------------------------------------------------------------------
+
+ subroutine ocn_vmix_implicit(dt, grid, diagnostics, state, err)!{{{
+ real (kind=RKIND), intent(in) :: dt
+ type (mesh_type), intent(in) :: grid
+ type (diagnostics_type), intent(inout) :: diagnostics
+ type (state_type), intent(inout) :: state
+ integer, intent(out) :: err
+
+ integer :: nCells
+ real (kind=RKIND), dimension(:,:), pointer :: u, h, h_edge, vertViscTopOfEdge, vertDiffTopOfCell, ke_edge
+ real (kind=RKIND), dimension(:,:,:), pointer :: tracers
+ integer, dimension(:), pointer :: maxLevelCell
+
+ err = 0
+
+ u => state % u % array
+ tracers => state % tracers % array
+ h => state % h % array
+ h_edge => state % h_edge % array
+ ke_edge => state % ke_edge % array
+ vertViscTopOfEdge => diagnostics % vertViscTopOfEdge % array
+ vertDiffTopOfCell => diagnostics % vertDiffTopOfCell % array
+ maxLevelCell => grid % maxLevelCell % array
+
+ nCells = grid % nCells
+
+ call ocn_vmix_coefs(grid, state, diagnostics, err)
+
+ !
+ ! Implicit vertical solve for momentum
+ !
+ call ocn_vel_vmix_tend_implicit(grid, dt, ke_edge, vertViscTopOfEdge, h, h_edge, u, err)
+
+ !
+ ! Implicit vertical solve for tracers
+ !
+
+ call ocn_tracer_vmix_tend_implicit(grid, dt, vertDiffTopOfCell, h, tracers, err)
+
+ end subroutine ocn_vmix_implicit!}}}
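
For reference, each of the two solves wrapped by this driver is, per vertical column, a backward-Euler step of a vertical diffusion equation; in continuous form (a sketch):

    \frac{u^{n+1} - u^{n}}{\Delta t} \;=\;
    \frac{\partial}{\partial z}\!\left( \nu \, \frac{\partial u^{n+1}}{\partial z} \right)

whose layer discretization yields exactly the tridiagonal systems with diagonals A, B, C assembled above (and the analogous system with vertDiffTopOfCell for tracers).
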
+
+!***********************************************************************
+!
! routine ocn_vmix_init
!
!> \brief Initializes ocean vertical mixing quantities
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_vmix_coefs_rich.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_vmix_coefs_rich.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_vmix_coefs_rich.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -327,7 +327,7 @@
maxLevelCell => grid % maxLevelCell % array
vertDiffTopOfCell = 0.0
- coef = -gravity/1000.0/2.0
+ coef = -gravity/config_rho0/2.0
do iCell = 1,nCells
do k = 2,maxLevelCell(iCell)
! mrp 110324 efficiency note: this if is inside iCell and k loops.
@@ -427,13 +427,13 @@
!
!-----------------------------------------------------------------
- integer :: nVertLevels, nCells, nEdges, iCell, iEdge, k
+ integer :: nVertLevels, nCells, nEdges, iCell, iEdge, k, i
integer :: cell1, cell2
- integer, dimension(:), pointer :: maxLevelCell, maxLevelEdgeTop, maxLevelEdgeBot
- integer, dimension(:,:), pointer :: cellsOnEdge
+ integer, dimension(:), pointer :: maxLevelCell, maxLevelEdgeTop, maxLevelEdgeBot, nEdgesOnCell
+ integer, dimension(:,:), pointer :: cellsOnEdge, edgesOncell, edgeSignOnCell
- real (kind=RKIND) :: coef
+ real (kind=RKIND) :: coef, invAreaCell
real (kind=RKIND), dimension(:), pointer :: dcEdge, dvEdge, areaCell
real (kind=RKIND), dimension(:,:), allocatable :: drhoTopOfCell, du2TopOfCell, &
drhoTopOfEdge, du2TopOfEdge
@@ -453,6 +453,9 @@
dvEdge => grid % dvEdge % array
dcEdge => grid % dcEdge % array
areaCell => grid % areaCell % array
+ nEdgesOnCell => grid % nEdgesOnCell % array
+ edgesOnCell => grid % edgesOnCell % array
+ edgeSignOnCell => grid % edgeSignOnCell % array
allocate( &
drhoTopOfCell(nVertLevels+1,nCells+1), drhoTopOfEdge(nVertLevels+1,nEdges), &
@@ -472,7 +475,7 @@
drhoTopOfCell = 0.0
do iCell=1,nCells
do k=2,maxLevelCell(iCell)
- drhoTopOfCell(k,iCell) = rho(k-1,iCell) - rhoDisplaced(k-1,iCell)
+ drhoTopOfCell(k,iCell) = rhoDisplaced(k-1,iCell) - rhoDisplaced(k,iCell)
end do
end do
@@ -498,26 +501,21 @@
! interpolate du2TopOfEdge to du2TopOfCell
du2TopOfCell = 0.0
- do iEdge=1,nEdges
- cell1 = cellsOnEdge(1,iEdge)
- cell2 = cellsOnEdge(2,iEdge)
- do k=2,maxLevelEdgeBot(iEdge)
- du2TopOfCell(k,cell1) = du2TopOfCell(k,cell1) &
- + 0.5 * dcEdge(iEdge) * dvEdge(iEdge) * du2TopOfEdge(k,iEdge)
- du2TopOfCell(k,cell2) = du2TopOfCell(k,cell2) &
- + 0.5 * dcEdge(iEdge) * dvEdge(iEdge) * du2TopOfEdge(k,iEdge)
- end do
+ do iCell = 1, nCells
+ invAreaCell = 1.0 / areaCell(iCell)
+ do i = 1, nEdgesOnCell(iCell)
+ iEdge = edgesOnCell(i, iCell)
+
+ do k = 2, maxLevelEdgeBot(iEdge)
+ du2TopOfCell(k, iCell) = du2TopOfCell(k, iCell) + 0.5 * dcEdge(iEdge) * dvEdge(iEdge) * du2TopOfEdge(k, iEdge) * invAreaCell
+ end do
+ end do
end do
- do iCell = 1,nCells
- do k = 2,maxLevelCell(iCell)
- du2TopOfCell(k,iCell) = du2TopOfCell(k,iCell) / areaCell(iCell)
- end do
- end do
! compute RiTopOfEdge using drhoTopOfEdge and du2TopOfEdge
! coef = -g/rho_0/2
RiTopOfEdge = 0.0
- coef = -gravity/1000.0/2.0
+ coef = -gravity/config_rho0/2.0
do iEdge = 1,nEdges
do k = 2,maxLevelEdgeTop(iEdge)
RiTopOfEdge(k,iEdge) = coef*drhoTopOfEdge(k,iEdge) &
@@ -529,7 +527,6 @@
! compute RiTopOfCell using drhoTopOfCell and du2TopOfCell
! coef = -g/rho_0/2
RiTopOfCell = 0.0
- coef = -gravity/1000.0/2.0
do iCell = 1,nCells
do k = 2,maxLevelCell(iCell)
RiTopOfCell(k,iCell) = coef*drhoTopOfCell(k,iCell) &
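
For reference, the quantity assembled from drhoTopOfEdge and du2TopOfEdge in this routine is the gradient Richardson number; with coef = -g/(2 \rho_0) now built from config_rho0 instead of a hardcoded 1000.0, the intended continuous form is (a sketch; the factor 1/2 enters through the discrete layer-thickness averaging):

    \mathrm{Ri} \;=\; \frac{N^2}{\left( \partial u / \partial z \right)^2}
    \;\approx\; \frac{-\,(g/\rho_0)\,\Delta\rho\,\Delta z}{\left( \Delta u \right)^2}
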
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_vmix_coefs_tanh.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_vmix_coefs_tanh.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_vmix_coefs_tanh.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -177,22 +177,22 @@
integer :: k, nVertLevels
- real (kind=RKIND), dimension(:), pointer :: referenceBottomDepth
+ real (kind=RKIND), dimension(:), pointer :: refBottomDepth
err = 0
if(.not.tanhViscOn) return
nVertLevels = grid % nVertLevels
- referenceBottomDepth => grid % referenceBottomDepth % array
+ refBottomDepth => grid % refBottomDepth % array
- ! referenceBottomDepth is used here for simplicity. Using zMid and h, which
+ ! refBottomDepth is used here for simplicity. Using zMid and h, which
! vary in time, would give the exact location of the top, but it
! would only change the diffusion value very slightly.
vertViscTopOfEdge = 0.0
do k=2,nVertLevels
vertViscTopOfEdge(k,:) = -(config_max_visc_tanh-config_min_visc_tanh)/2.0 &
- *tanh((referenceBottomDepth(k-1)+config_ZMid_tanh) &
+ *tanh((refBottomDepth(k-1)+config_ZMid_tanh) &
/config_zWidth_tanh) &
+ (config_max_visc_tanh+config_min_visc_tanh)/2
end do
@@ -250,22 +250,22 @@
integer :: k, nVertLevels
- real (kind=RKIND), dimension(:), pointer :: referenceBottomDepth
+ real (kind=RKIND), dimension(:), pointer :: refBottomDepth
err = 0
if(.not.tanhDiffOn) return
nVertLevels = grid % nVertLevels
- referenceBottomDepth => grid % referenceBottomDepth % array
+ refBottomDepth => grid % refBottomDepth % array
- ! referenceBottomDepth is used here for simplicity. Using zMid and h, which
+ ! refBottomDepth is used here for simplicity. Using zMid and h, which
! vary in time, would give the exact location of the top, but it
! would only change the diffusion value very slightly.
vertDiffTopOfCell = 0.0
do k=2,nVertLevels
vertDiffTopOfCell(k,:) = -(config_max_diff_tanh-config_min_diff_tanh)/2.0 &
- *tanh((referenceBottomDepth(k-1)+config_ZMid_tanh) &
+ *tanh((refBottomDepth(k-1)+config_ZMid_tanh) &
/config_zWidth_tanh) &
+ (config_max_diff_tanh+config_min_diff_tanh)/2
end do
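
Both hunks in this file evaluate the same analytic profile under the renamed refBottomDepth; at interface depth z the viscosity or diffusivity is

    \kappa(z) \;=\; -\,\frac{\kappa_{\max}-\kappa_{\min}}{2}\,
    \tanh\!\left( \frac{z + z_{\mathrm{mid}}}{z_{\mathrm{width}}} \right)
    \;+\; \frac{\kappa_{\max}+\kappa_{\min}}{2}

with the limits taken from config_max_visc_tanh/config_min_visc_tanh (or the _diff_ counterparts), z_mid = config_ZMid_tanh, and z_width = config_zWidth_tanh, giving a smooth transition between the two limiting values.
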
Modified: branches/atmos_physics/src/core_sw/Registry
===================================================================
--- branches/atmos_physics/src/core_sw/Registry        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_sw/Registry        2012-10-27 00:29:44 UTC (rev 2281)
@@ -21,6 +21,7 @@
namelist logical sw_model config_wind_stress false
namelist logical sw_model config_bottom_drag false
namelist real sw_model config_apvm_upwinding 0.5
+namelist integer sw_model config_num_halos 2
namelist character io config_input_name grid.nc
namelist character io config_output_name output.nc
namelist character io config_restart_name restart.nc
Modified: branches/atmos_physics/src/core_sw/mpas_sw_mpas_core.F
===================================================================
--- branches/atmos_physics/src/core_sw/mpas_sw_mpas_core.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_sw/mpas_sw_mpas_core.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -187,7 +187,11 @@
call mpas_timer_stop("time integration")
! Move time level 2 fields back into time level 1 for next time step
- call mpas_shift_time_levels_state(domain % blocklist % state)
+ block_ptr => domain % blocklist
+ do while(associated(block_ptr))
+ call mpas_shift_time_levels_state(block_ptr % state)
+ block_ptr => block_ptr % next
+ end do
!TODO: mpas_get_clock_ringing_alarms is probably faster than multiple mpas_is_alarm_ringing...
Modified: branches/atmos_physics/src/core_sw/mpas_sw_time_integration.F
===================================================================
--- branches/atmos_physics/src/core_sw/mpas_sw_time_integration.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/core_sw/mpas_sw_time_integration.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -70,122 +70,114 @@
real (kind=RKIND), dimension(4) :: rk_weights, rk_substep_weights
- block => domain % blocklist
- call mpas_allocate_state(block, provis, &
- block % mesh % nCells, block % mesh % nEdges, block % mesh % maxEdges, block % mesh % maxEdges2, &
- block % mesh % nVertices, block % mesh % vertexDegree, block % mesh % nVertLevels, &
- block % mesh % nTracers)
-
- provis_ptr => provis
- call mpas_create_state_links(provis_ptr)
+ call mpas_setup_provis_states(domain % blocklist)
+
+ !
+ ! Initialize time_levs(2) with state at current time
+ ! Initialize first RK state
+ ! Couple tracers time_levs(2) with h in time-levels
+ ! Initialize RK weights
+ !
+ block => domain % blocklist
+ do while (associated(block))
- !
- ! Initialize time_levs(2) with state at current time
- ! Initialize first RK state
- ! Couple tracers time_levs(2) with h in time-levels
- ! Initialize RK weights
- !
- block => domain % blocklist
- do while (associated(block))
+ block % state % time_levs(2) % state % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
+ block % state % time_levs(2) % state % h % array(:,:) = block % state % time_levs(1) % state % h % array(:,:)
+ do iCell=1,block % mesh % nCells ! couple tracers to h
+ do k=1,block % mesh % nVertLevels
+ block % state % time_levs(2) % state % tracers % array(:,k,iCell) = block % state % time_levs(1) % state % tracers % array(:,k,iCell) &
+ * block % state % time_levs(1) % state % h % array(k,iCell)
+ end do
+ end do
- block % state % time_levs(2) % state % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
- block % state % time_levs(2) % state % h % array(:,:) = block % state % time_levs(1) % state % h % array(:,:)
- do iCell=1,block % mesh % nCells ! couple tracers to h
- do k=1,block % mesh % nVertLevels
- block % state % time_levs(2) % state % tracers % array(:,k,iCell) = block % state % time_levs(1) % state % tracers % array(:,k,iCell) &
- * block % state % time_levs(1) % state % h % array(k,iCell)
- end do
- end do
+ call mpas_copy_state(block % provis, block % state % time_levs(1) % state)
- call mpas_copy_state(provis, block % state % time_levs(1) % state)
+ block => block % next
+ end do
- block => block % next
- end do
+ rk_weights(1) = dt/6.
+ rk_weights(2) = dt/3.
+ rk_weights(3) = dt/3.
+ rk_weights(4) = dt/6.
- rk_weights(1) = dt/6.
- rk_weights(2) = dt/3.
- rk_weights(3) = dt/3.
- rk_weights(4) = dt/6.
+ rk_substep_weights(1) = dt/2.
+ rk_substep_weights(2) = dt/2.
+ rk_substep_weights(3) = dt
+ rk_substep_weights(4) = 0.
- rk_substep_weights(1) = dt/2.
- rk_substep_weights(2) = dt/2.
- rk_substep_weights(3) = dt
- rk_substep_weights(4) = 0.
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ! BEGIN RK loop
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ do rk_step = 1, 4
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! BEGIN RK loop
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- do rk_step = 1, 4
+! --- update halos for diagnostic variables
-! --- update halos for diagnostic variables
+ call mpas_dmpar_exch_halo_field(domain % blocklist % provis % pv_edge)
- call mpas_dmpar_exch_halo_field(provis % pv_edge)
-
if (config_h_mom_eddy_visc4 > 0.0) then
- call mpas_dmpar_exch_halo_field(provis % divergence)
- call mpas_dmpar_exch_halo_field(provis % vorticity)
- end if
+ call mpas_dmpar_exch_halo_field(domain % blocklist % provis % divergence)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % provis % vorticity)
+ end if
-! --- compute tendencies
+! --- compute tendencies
- block => domain % blocklist
- do while (associated(block))
- call sw_compute_tend(block % tend, provis, block % mesh)
- call sw_compute_scalar_tend(block % tend, provis, block % mesh)
- call sw_enforce_boundary_edge(block % tend, block % mesh)
- block => block % next
- end do
+ block => domain % blocklist
+ do while (associated(block))
+ call sw_compute_tend(block % tend, block % provis, block % mesh)
+ call sw_compute_scalar_tend(block % tend, block % provis, block % mesh)
+ call sw_enforce_boundary_edge(block % tend, block % mesh)
+ block => block % next
+ end do
-! --- update halos for prognostic variables
+! --- update halos for prognostic variables
- call mpas_dmpar_exch_halo_field(domain % blocklist % tend % u)
- call mpas_dmpar_exch_halo_field(domain % blocklist % tend % h)
- call mpas_dmpar_exch_halo_field(domain % blocklist % tend % tracers)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % tend % u)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % tend % h)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % tend % tracers)
-! --- compute next substep state
+! --- compute next substep state
- if (rk_step < 4) then
- block => domain % blocklist
- do while (associated(block))
- provis % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:) &
- + rk_substep_weights(rk_step) * block % tend % u % array(:,:)
- provis % h % array(:,:) = block % state % time_levs(1) % state % h % array(:,:) &
- + rk_substep_weights(rk_step) * block % tend % h % array(:,:)
- do iCell=1,block % mesh % nCells
- do k=1,block % mesh % nVertLevels
- provis % tracers % array(:,k,iCell) = ( &
- block % state % time_levs(1) % state % h % array(k,iCell) * &
- block % state % time_levs(1) % state % tracers % array(:,k,iCell) &
- + rk_substep_weights(rk_step) * block % tend % tracers % array(:,k,iCell) &
- ) / provis % h % array(k,iCell)
- end do
- end do
- if (config_test_case == 1) then ! For case 1, wind field should be fixed
- provis % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
- end if
- call sw_compute_solve_diagnostics(dt, provis, block % mesh)
- block => block % next
- end do
- end if
+ if (rk_step < 4) then
+ block => domain % blocklist
+ do while (associated(block))
+ block % provis % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:) &
+ + rk_substep_weights(rk_step) * block % tend % u % array(:,:)
+ block % provis % h % array(:,:) = block % state % time_levs(1) % state % h % array(:,:) &
+ + rk_substep_weights(rk_step) * block % tend % h % array(:,:)
+ do iCell=1,block % mesh % nCells
+ do k=1,block % mesh % nVertLevels
+ block % provis % tracers % array(:,k,iCell) = ( block % state % time_levs(1) % state % h % array(k,iCell) * &
+ block % state % time_levs(1) % state % tracers % array(:,k,iCell) &
+ + rk_substep_weights(rk_step) * block % tend % tracers % array(:,k,iCell) &
+ ) / block % provis % h % array(k,iCell)
+ end do
+ end do
+ if (config_test_case == 1) then ! For case 1, wind field should be fixed
+ block % provis % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
+ end if
+ call sw_compute_solve_diagnostics(dt, block % provis, block % mesh)
+ block => block % next
+ end do
+ end if
!--- accumulate update (for RK4)
- block => domain % blocklist
- do while (associated(block))
- block % state % time_levs(2) % state % u % array(:,:) = block % state % time_levs(2) % state % u % array(:,:) &
- + rk_weights(rk_step) * block % tend % u % array(:,:)
- block % state % time_levs(2) % state % h % array(:,:) = block % state % time_levs(2) % state % h % array(:,:) &
- + rk_weights(rk_step) * block % tend % h % array(:,:)
- do iCell=1,block % mesh % nCells
- do k=1,block % mesh % nVertLevels
- block % state % time_levs(2) % state % tracers % array(:,k,iCell) = &
- block % state % time_levs(2) % state % tracers % array(:,k,iCell) &
- + rk_weights(rk_step) * block % tend % tracers % array(:,k,iCell)
- end do
- end do
- block => block % next
- end do
+ block => domain % blocklist
+ do while (associated(block))
+ block % state % time_levs(2) % state % u % array(:,:) = block % state % time_levs(2) % state % u % array(:,:) &
+ + rk_weights(rk_step) * block % tend % u % array(:,:)
+ block % state % time_levs(2) % state % h % array(:,:) = block % state % time_levs(2) % state % h % array(:,:) &
+ + rk_weights(rk_step) * block % tend % h % array(:,:)
+ do iCell=1,block % mesh % nCells
+ do k=1,block % mesh % nVertLevels
+ block % state % time_levs(2) % state % tracers % array(:,k,iCell) = &
+ block % state % time_levs(2) % state % tracers % array(:,k,iCell) &
+ + rk_weights(rk_step) * block % tend % tracers % array(:,k,iCell)
+ end do
+ end do
+ block => block % next
+ end do
end do
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -223,7 +215,7 @@
block => block % next
end do
- call mpas_deallocate_state(provis)
+ call mpas_deallocate_provis_states(domain % blocklist)
end subroutine sw_rk4
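
For reference, the weights above are the classical fourth-order Runge-Kutta coefficients: rk_weights holds dt/6, dt/3, dt/3, dt/6 for the accumulation, and rk_substep_weights holds dt/2, dt/2, dt, 0 for the provisional states. With f the tendency operator, the scheme the restructured loop implements is

    u^{n+1} \;=\; u^{n} + \frac{\Delta t}{6}\bigl( k_1 + 2k_2 + 2k_3 + k_4 \bigr),
    \quad
    k_1 = f(u^n), \;\;
    k_2 = f\bigl(u^n + \tfrac{\Delta t}{2} k_1\bigr), \;\;
    k_3 = f\bigl(u^n + \tfrac{\Delta t}{2} k_2\bigr), \;\;
    k_4 = f\bigl(u^n + \Delta t \, k_3\bigr)

the only structural change being that the provisional state now lives in block % provis, so multiple blocks can be advanced through the stages together.
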
Modified: branches/atmos_physics/src/framework/Makefile
===================================================================
--- branches/atmos_physics/src/framework/Makefile        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/framework/Makefile        2012-10-27 00:29:44 UTC (rev 2281)
@@ -16,6 +16,7 @@
mpas_hash.o \
mpas_sort.o \
mpas_block_decomp.o \
+         mpas_block_creator.o \
mpas_dmpar.o \
mpas_io.o \
mpas_io_streams.o \
@@ -41,7 +42,7 @@
mpas_grid_types.o: mpas_kind_types.o mpas_dmpar_types.o mpas_attlist.o
-mpas_dmpar.o: mpas_sort.o streams.o mpas_kind_types.o mpas_grid_types.o
+mpas_dmpar.o: mpas_sort.o streams.o mpas_kind_types.o mpas_grid_types.o mpas_hash.o
mpas_sort.o: mpas_kind_types.o
@@ -51,11 +52,13 @@
mpas_block_decomp.o: mpas_grid_types.o mpas_hash.o mpas_configure.o
+mpas_block_creator.o: mpas_dmpar.o mpas_hash.o mpas_sort.o mpas_configure.o
+
mpas_io.o: mpas_dmpar_types.o
mpas_io_streams.o: mpas_attlist.o mpas_grid_types.o mpas_timekeeping.o mpas_io.o
-mpas_io_input.o: mpas_grid_types.o mpas_dmpar.o mpas_block_decomp.o mpas_sort.o mpas_configure.o mpas_timekeeping.o mpas_io_streams.o $(ZOLTANOBJ)
+mpas_io_input.o: mpas_grid_types.o mpas_dmpar.o mpas_block_decomp.o mpas_block_creator.o mpas_sort.o mpas_configure.o mpas_timekeeping.o mpas_io_streams.o $(ZOLTANOBJ)
mpas_io_output.o: mpas_grid_types.o mpas_dmpar.o mpas_sort.o mpas_configure.o mpas_io_streams.o
Copied: branches/atmos_physics/src/framework/mpas_block_creator.F (from rev 2274, trunk/mpas/src/framework/mpas_block_creator.F)
===================================================================
--- branches/atmos_physics/src/framework/mpas_block_creator.F         (rev 0)
+++ branches/atmos_physics/src/framework/mpas_block_creator.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -0,0 +1,1243 @@
+!***********************************************************************
+!
+! mpas_block_creator
+!
+!> \brief This module is responsible for the initial creation and setup of the block data structures.
+!> \author Doug Jacobsen
+!> \date 05/31/12
+!> \version SVN:$Id$
+!> \details
+!> This module provides routines for the creation of blocks, with both an
+!> arbitrary number of blocks per processor and an arbitrary number of halos for
+!> each block. The provided routines also setup the exchange lists for each
+!> block.
+!
+!-----------------------------------------------------------------------
+
+module mpas_block_creator
+
+ use mpas_dmpar
+ use mpas_dmpar_types
+ use mpas_block_decomp
+ use mpas_hash
+ use mpas_sort
+ use mpas_grid_types
+ use mpas_configure
+
+ contains
+
+!***********************************************************************
+!
+! routine mpas_block_creator_setup_blocks_and_0halo_cells
+!
+!> \brief Initializes the list of blocks, and determines 0 halo cell indices.
+!> \author Doug Jacobsen
+!> \date 05/31/12
+!> \version SVN:$Id$
+!> \details
+!> This routine sets up the linked list of blocks, and creates the
+!> indexToCellID field for the 0 halo. The information required to setup these
+!> structures is provided as input in cellList, blockID, blockStart, and
+!> blockCount.
+!
+!-----------------------------------------------------------------------
+
+ subroutine mpas_block_creator_setup_blocks_and_0halo_cells(domain, indexToCellID, cellList, blockID, blockStart, blockCount)!{{{
+ type (domain_type), pointer :: domain !< Input: Domain information
+ type (field1dInteger), pointer :: indexToCellID !< Input/Output: indexToCellID field
+ integer, dimension(:), intent(in) :: cellList !< Input: List of cell indices owned by this processor
+ integer, dimension(:), intent(in) :: blockID !< Input: List of block indices owned by this processor
+ integer, dimension(:), intent(in) :: blockStart !< Input: Indices of starting cell id in cellList for each block
+ integer, dimension(:), intent(in) :: blockCount !< Input: Number of cells from cellList owned by each block.
+
+ integer :: nHalos
+ type (block_type), pointer :: blockCursor
+ type (field1dInteger), pointer :: fieldCursor
+
+ integer :: i, iHalo
+ integer :: nBlocks
+
+ nBlocks = size(blockID)
+ nHalos = config_num_halos
+
+ ! Setup first block
+ allocate(domain % blocklist)
+ nullify(domain % blocklist % prev)
+ nullify(domain % blocklist % next)
+
+ ! Setup first block field
+ allocate(indexToCellID)
+ nullify(indexToCellID % next)
+
+ ! Loop over blocks
+ blockCursor => domain % blocklist
+ fieldCursor => indexToCellID
+ do i = 1, nBlocks
+ ! Initialize block information
+ blockCursor % blockID = blockID(i)
+ blockCursor % localBlockID = i - 1
+ blockCursor % domain => domain
+
+ ! Link to block, and setup array size
+ fieldCursor % block => blockCursor
+ fieldCursor % dimSizes(1) = blockCount(i)
+ nullify(fieldCursor % ioinfo)
+
+ ! Initialize exchange lists
+ call mpas_dmpar_init_mulithalo_exchange_list(fieldCursor % sendList, nHalos)
+ call mpas_dmpar_init_mulithalo_exchange_list(fieldCursor % recvList, nHalos)
+ call mpas_dmpar_init_mulithalo_exchange_list(fieldCursor % copyList, nHalos)
+
+ ! Allocate array, and copy indices into array
+ allocate(fieldCursor % array(fieldCursor % dimSizes(1)))
+ fieldCursor % array(:) = cellList(blockStart(i)+1:blockStart(i)+blockCount(i))
+ call mpas_quicksort(fieldCursor % dimSizes(1), fieldCursor % array)
+
+ ! Advance cursors, and create new blocks as needed
+ if(i < nBlocks) then
+ allocate(blockCursor % next)
+ allocate(fieldCursor % next)
+
+ blockCursor % next % prev => blockCursor
+
+ blockCursor => blockCursor % next
+ fieldCursor => fieldCursor % next
+ end if
+
+ ! Nullify next pointers
+ nullify(blockCursor % next)
+ nullify(fieldCursor % next)
+ end do
+ end subroutine mpas_block_creator_setup_blocks_and_0halo_cells!}}}
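
A self-contained sketch (hypothetical types, not the MPAS field types) of the structure this routine builds: blocks and their per-block fields form parallel singly linked lists, grown and later walked cursor-style as above:

    program lockstep_lists
       implicit none
       type node
          integer :: blockID
          type (node), pointer :: next => null()
       end type node
       type (node), pointer :: head, cursor
       integer :: i
       ! build a three-block list, as the loop over nBlocks does above
       allocate(head)
       cursor => head
       do i = 1, 3
          cursor % blockID = i - 1      ! local block IDs are 0-based, as above
          if (i < 3) then
             allocate(cursor % next)
             cursor => cursor % next
          end if
       end do
       nullify(cursor % next)
       ! walk it, as later routines walk domain % blocklist
       cursor => head
       do while (associated(cursor))
          print *, 'block', cursor % blockID
          cursor => cursor % next
       end do
    end program lockstep_lists

In the real routine a second cursor over the indexToCellID field list advances in lockstep with the block cursor, so each field node stays attached to its owning block.
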
+
+!***********************************************************************
+!
+! routine mpas_block_creator_build_0halo_cell_fields
+!
+!> \brief Initializes 0 halo cell based fields required to work out halos
+!> \author Doug Jacobsen
+!> \date 05/31/12
+!> \version SVN:$Id$
+!> \details
+!> This routine uses the previously setup 0 halo cell field, and the blocks of
+!> data read in by other routhers to determine all of the connectivity for the 0
+!> halo cell fields on all blocks on a processor.
+!
+!-----------------------------------------------------------------------
+
+ subroutine mpas_block_creator_build_0halo_cell_fields(indexToCellIDBlock, nEdgesOnCellBlock, cellsOnCellBlock, verticesOnCellBlock, edgesOnCellBlock, indexToCellID_0Halo, nEdgesOnCell_0Halo, cellsOnCell_0Halo, verticesOnCell_0Halo, edgesOnCell_0Halo)!{{{
+ type(field1dInteger), pointer :: indexToCellIDBlock !< Input: Block of read in indexToCellID field
+ type(field1dInteger), pointer :: nEdgesOnCellBlock !< Input: Block of read in nEdgesOnCell field
+ type(field2dInteger), pointer :: cellsOnCellBlock !< Input: Block of read in cellsOnCell field
+ type(field2dInteger), pointer :: verticesOnCellBlock !< Input: Block of read in verticesOnCell field
+ type(field2dInteger), pointer :: edgesOnCellBlock !< Input: Block of read in edgesOnCellField
+
+ type(field1dInteger), pointer :: indexToCellID_0Halo !< Input: 0-Halo indices for indexToCellID field
+ type(field1dInteger), pointer :: nEdgesOnCell_0Halo !< Output: nEdgesOnCell field for 0-Halo cells
+ type(field2dInteger), pointer :: cellsOnCell_0Halo !< Output: cellsOnCell field for 0-Halo cells
+ type(field2dInteger), pointer :: verticesOnCell_0Halo !< Output: verticesOnCell field for 0-Halo cells
+ type(field2dInteger), pointer :: edgesOnCell_0Halo !< Output: edgesOnCell field for 0-Halo cells
+
+ type(field1dInteger), pointer :: indexCursor, nEdgesCursor
+ type(field2dInteger), pointer :: cellsOnCellCursor, verticesOnCellCursor, edgesOnCellCursor
+
+ integer, dimension(:), pointer :: sendingHaloLayers
+
+ type (mpas_exchange_list), pointer :: exchListPtr
+
+ integer :: nCellsInBlock, maxEdges, nHalos
+ integer :: i, iHalo
+
+ nHalos = config_num_halos
+
+ ! Only sending from halo layer 1 for setup
+ allocate(sendingHaloLayers(1))
+ sendingHaloLayers(1) = 1
+
+ maxEdges = cellsOnCellBlock % dimSizes(1)
+
+ ! Build exchange list from the block of read in data to each block's index fields.
+ call mpas_dmpar_get_exch_list(1, indexToCellIDBlock, indexToCellID_0Halo)
+
+ ! Setup header fields if at least 1 block exists
+ allocate(nEdgesOnCell_0Halo)
+ nullify(nEdgesOncell_0Halo % next)
+
+ allocate(cellsOnCell_0Halo)
+ nullify(cellsOnCell_0Halo % next)
+
+ allocate(verticesOnCell_0Halo)
+ nullify(verticesOnCell_0Halo % next)
+
+ allocate(edgesOnCell_0Halo)
+ nullify(edgesOnCell_0Halo % next)
+
+ ! Loop over blocks
+ indexCursor => indexToCellID_0Halo
+ nEdgesCursor => nEdgesOnCell_0Halo
+ cellsOnCellCursor => cellsOnCell_0Halo
+ verticesOnCellCursor => verticesOnCell_0Halo
+ edgesOnCellCursor => edgesOnCell_0Halo
+ do while(associated(indexCursor))
+ nCellsInBlock = indexCursor % dimSizes(1)
+
+ ! Link to block structure
+ nEdgesCursor % block => indexCursor % block
+ cellsOnCellCursor % block => indexCursor % block
+ verticesOnCellCursor % block => indexCursor % block
+ edgesOnCellCursor % block => indexCursor % block
+
+ ! Nullify ioinfo, since this data is not read in
+ nullify(nEdgesCursor % ioinfo)
+ nullify(cellsOnCellCursor % ioinfo)
+ nullify(verticesOnCellCursor % ioinfo)
+ nullify(edgesOnCellCursor % ioinfo)
+
+ ! Setup array sizes
+ nEdgesCursor % dimSizes(1) = nCellsInBlock
+ cellsOnCellCursor % dimSizes(1) = maxEdges
+ cellsOnCellCursor % dimSizes(2) = nCellsInBlock
+ verticesOnCellCursor % dimSizes(1) = maxEdges
+ verticesOnCellCursor % dimSizes(2) = nCellsInBlock
+ edgesOnCellCursor % dimSizes(1) = maxEdges
+ edgesOnCellCursor % dimSizes(2) = nCellsInBlock
+
+ ! Link exchange lists
+ nEdgesCursor % sendList => indexCursor % sendList
+ nEdgesCursor % recvList => indexCursor % recvList
+ nEdgesCursor % copyList => indexCursor % copyList
+ cellsOnCellCursor % sendList => indexCursor % sendList
+ cellsOnCellCursor % recvList => indexCursor % recvList
+ cellsOnCellCursor % copyList => indexCursor % copyList
+ verticesOnCellCursor % sendList => indexCursor % sendList
+ verticesOnCellCursor % recvList => indexCursor % recvList
+ verticesOnCellCursor % copyList => indexCursor % copyList
+ edgesOnCellCursor % sendList => indexCursor % sendList
+ edgesOnCellCursor % recvList => indexCursor % recvList
+ edgesOnCellCursor % copyList => indexCursor % copyList
+
+ ! Allocate arrays
+ allocate(nEdgesCursor % array(nEdgesCursor % dimSizes(1)))
+ allocate(cellsOnCellCursor % array(cellsOnCellCursor % dimSizes(1), cellsOnCellCursor % dimSizes(2)))
+ allocate(verticesOnCellCursor % array(verticesOnCellCursor % dimSizes(1), verticesOnCellCursor % dimSizes(2)))
+ allocate(edgesOnCellCursor % array(edgesOnCellCursor % dimSizes(1), edgesOnCellCursor % dimSizes(2)))
+
+ ! Create new blocks and advance cursors as needed
+ indexCursor => indexCursor % next
+ if(associated(indexCursor)) then
+ allocate(nEdgesCursor % next)
+ allocate(cellsOnCellCursor % next)
+ allocate(verticesOnCellCursor % next)
+ allocate(edgesOnCellCursor % next)
+
+ nEdgesCursor => nEdgesCursor % next
+ cellsOnCellCursor => cellsOnCellCursor % next
+ verticesOnCellCursor => verticesOnCellCursor % next
+ edgesOnCellCursor => edgesOnCellCursor % next
+
+ end if
+
+ ! Nullify next pointers
+ nullify(nEdgesCursor % next)
+ nullify(cellsOnCellCursor % next)
+ nullify(verticesOnCellCursor % next)
+ nullify(edgesOnCellCursor % next)
+ end do ! indexCursor loop over blocks
+
+ ! Communicate data from read in blocks to each block's fields
+ call mpas_dmpar_alltoall_field(nEdgesOnCellBlock, nEdgesOnCell_0Halo, sendingHaloLayers)
+ call mpas_dmpar_alltoall_field(cellsOnCellBlock, cellsOnCell_0Halo, sendingHaloLayers)
+ call mpas_dmpar_alltoall_field(verticesOnCellBlock, verticesOnCell_0Halo, sendingHaloLayers)
+ call mpas_dmpar_alltoall_field(edgesOnCellBlock, edgesOnCell_0Halo, sendingHaloLayers)
+ end subroutine mpas_block_creator_build_0halo_cell_fields!}}}
+
+!***********************************************************************
+!
+! routine mpas_block_creator_build_0_and_1halo_edge_fields
+!
+!> \brief Initializes 0 and 1 halo edge based fields required to work out halos
+!> \author Doug Jacobsen
+!> \date 05/31/12
+!> \version SVN:$Id$
+!> \details
+!> This routine uses the previously set up 0 halo cell fields, and the blocks of
+!> data read in by other routines to determine which edges are in a block's
+!> 0 and 1 halo for all blocks on a processor.
+!> NOTE: This routine can be used on either edges or vertices.
+!
+!-----------------------------------------------------------------------
+
+ subroutine mpas_block_creator_build_0_and_1halo_edge_fields(indexToEdgeIDBlock, cellsOnEdgeBlock, indexToCellID_0Halo, nEdgesOnCell_0Halo, edgesOnCell_0Halo, indexToEdgeID_0Halo, cellsOnEdge_0Halo, nEdgesSolve)!{{{
+ type (field1dInteger), pointer :: indexToEdgeIDBlock !< Input: indexToEdgeID read in field
+ type (field2dInteger), pointer :: cellsOnEdgeBlock !< Input: cellsOnEdge read in field
+ type (field1dInteger), pointer :: indexToCellID_0Halo !< Input: indexToCellID field on 0 halo
+ type (field1dInteger), pointer :: nEdgesOnCell_0Halo !< Input: nEdgesOnCell field on 0 halo
+ type (field2dInteger), pointer :: edgesOnCell_0Halo !< Input: edgesOnCell field on 0 and 1 halos
+ type (field1dInteger), pointer :: indexToEdgeID_0Halo !< Output: indexToEdgeID field on 0 and 1 halos
+ type (field2dInteger), pointer :: cellsOnEdge_0Halo !< Output: CellsOnEdge field on 0 and 1 halos
+ type (field1dInteger), pointer :: nEdgesSolve !< Output: Array with max index to edges in halos
+
+ type (field0dInteger), pointer :: offSetField, edgeLimitField
+ type (field1dInteger), pointer :: haloIndices
+
+ type (field0dInteger), pointer :: offSetCursor, edgeLimitCursor
+ type (field1dInteger), pointer :: indexToCellCursor, indexToEdgeCursor, nEdgesCursor, haloCursor, nEdgesSolveCursor
+ type (field2dInteger), pointer :: edgesOnCellCursor, cellsOnEdgeCursor, cellsOnCellCursor
+
+ type (mpas_exchange_list), pointer :: exchListPtr
+
+ integer, dimension(:), pointer :: localEdgeList
+ integer, dimension(:), pointer :: sendingHaloLayers
+ integer :: nEdgesLocal, nCellsInBlock, maxEdges, edgeDegree, nHalos
+ integer :: haloStart
+ integer :: iBlock, i, j, k
+
+ ! Setup sendingHaloLayers
+ allocate(sendingHaloLayers(1))
+ sendingHaloLayers(1) = 1
+
+ ! Get dimension information
+ maxEdges = edgesOnCell_0Halo % dimSizes(1)
+ edgeDegree = cellsOnEdgeBlock % dimSizes(1)
+ nHalos = config_num_halos
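+ ! (For reference: edgeDegree is the leading dimension of cellsOnEdge -- the
+ ! number of cells adjacent to each element, which is 2 when this routine
+ ! operates on edges.)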
+
+ ! Setup initial block for each field
+ allocate(cellsOnEdge_0Halo)
+ allocate(indexToEdgeID_0Halo)
+
+ nullify(cellsOnEdge_0Halo % next)
+ nullify(indexToEdgeID_0Halo % next)
+
+ ! Loop over blocks
+ indexToCellCursor => indexToCellID_0Halo
+ edgesOnCellCursor => edgesOnCell_0Halo
+ nEdgesCursor => nEdgesOnCell_0Halo
+ indexToEdgeCursor => indexToEdgeID_0Halo
+ cellsOnEdgeCursor => cellsOnEdge_0Halo
+ do while(associated(indexToCellCursor))
+ ! Determine number of cells in block
+ nCellsInBlock = indexToCellCursor % dimSizes(1)
+
+ ! Determine all edges in block
+ call mpas_block_decomp_all_edges_in_block(maxEdges, nCellsInBlock, nEdgesCursor % array, edgesOnCellCursor % array, nEdgesLocal, localEdgeList)
+
+ ! Setup indexToEdge block
+ indexToEdgeCursor % block => indexToCellCursor % block
+ nullify(indexToEdgeCursor % ioinfo)
+ indexToEdgeCursor % dimSizes(1) = nEdgesLocal
+ allocate(indexToEdgeCursor % array(indexToEdgeCursor % dimSizes(1)))
+ indexToEdgeCursor % array(:) = localEdgeList(:)
+
+ ! Setup cellsOnEdge block
+ cellsOnEdgeCursor % block => indexToCellCursor % block
+ nullify(cellsOnEdgeCursor % ioinfo)
+ cellsOnEdgeCursor % dimSizes(1) = edgeDegree
+ cellsOnEdgeCursor % dimSizes(2) = nEdgesLocal
+ allocate(cellsOnEdgeCursor % array(cellsOnEdgeCursor % dimSizes(1), cellsOnEdgeCursor % dimSizes(2)))
+
+ ! Setup exchange lists
+ call mpas_dmpar_init_multihalo_exchange_list(indexToEdgeCursor % sendList, nHalos+1)
+ call mpas_dmpar_init_multihalo_exchange_list(indexToEdgeCursor % recvList, nHalos+1)
+ call mpas_dmpar_init_multihalo_exchange_list(indexToEdgeCursor % copyList, nHalos+1)
+
+ ! Link exchange lists
+ cellsOnEdgeCursor % sendList => indexToEdgeCursor % sendList
+ cellsOnEdgeCursor % recvList => indexToEdgeCursor % recvList
+ cellsOnEdgeCursor % copyList => indexToEdgeCursor % copyList
+
+ ! Remove localEdgeList array
+ deallocate(localEdgeList)
+
+ ! Advance cursors, and create new blocks if needed
+ indexToCellCursor => indexToCellCursor % next
+ edgesOnCellCursor => edgesOnCellCursor % next
+ nEdgesCursor => nEdgesCursor % next
+ if(associated(indexToCellCursor)) then
+ allocate(indexToEdgeCursor % next)
+ indexToEdgeCursor => indexToEdgeCursor % next
+
+ allocate(cellsOnEdgeCursor % next)
+ cellsOnEdgeCursor => cellsOnEdgeCursor % next
+ end if
+
+ ! Nullify next pointers
+ nullify(indexToEdgeCursor % next)
+ nullify(cellsOnEdgeCursor % next)
+ end do ! indexToCellCursor loop over blocks
+
+ ! Build exchange lists from the read-in blocks to the owned blocks.
+ call mpas_dmpar_get_exch_list(1, indexToEdgeIDBlock, indexToEdgeID_0Halo)
+
+ ! Perform all to all to get owned block data
+ call mpas_dmpar_alltoall_field(cellsOnEdgeBlock, cellsOnEdge_0Halo, sendingHaloLayers)
+
+ ! Setup first block's fields if there is at least 1 block.
+ if(associated(indexToEdgeID_0Halo)) then
+ allocate(haloIndices)
+ allocate(offSetField)
+ allocate(edgeLimitField)
+ allocate(nEdgesSolve)
+ else
+ nullify(haloIndices)
+ nullify(offSetField)
+ nullify(edgeLimitField)
+ nullify(nEdgesSolve)
+ end if
+
+ ! Loop over blocks
+ indexToEdgeCursor => indexToEdgeID_0Halo
+ cellsOnEdgeCursor => cellsOnEdge_0Halo
+ indexToCellCursor => indexToCellID_0Halo
+ haloCursor => haloIndices
+ offSetCursor => offSetField
+ edgeLimitCursor => edgeLimitField
+ nEdgesSolveCursor => nEdgesSolve
+ do while(associated(indexToEdgeCursor))
+ ! Determine 0 and 1 halo edges
+ call mpas_block_decomp_partitioned_edge_list(indexToCellCursor % dimSizes(1), indexToCellCursor % array, &
+ edgeDegree, indexToEdgeCursor % dimSizes(1), cellsOnEdgeCursor % array, &
+ indexToEdgeCursor % array, haloStart)
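+ ! (After this call, indexToEdgeCursor % array is partitioned: entries
+ ! 1:haloStart-1 are the 0 halo (owned) edges and entries
+ ! haloStart:dimSizes(1) are the 1 halo edges; haloStart is used below to
+ ! split the two groups.)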
+
+ ! Link blocks
+ haloCursor % block => indexToEdgeCursor % block
+ offSetCursor % block => indexToEdgeCursor % block
+ edgeLimitCursor % block => indexToEdgeCursor % block
+ nEdgesSolveCursor % block => indexToEdgeCursor % block
+
+ ! Nullify io info
+ nullify(haloCursor % ioinfo)
+ nullify(offSetCursor % ioinfo)
+ nullify(edgeLimitCursor % ioinfo)
+ nullify(nEdgesSolveCursor % ioinfo)
+
+ ! Setup haloIndices
+ haloCursor % dimSizes(1) = indexToEdgeCursor % dimSizes(1) - (haloStart-1)
+ allocate(haloCursor % array(haloCursor % dimSizes(1)))
+ haloCursor % array(:) = indexToEdgeCursor % array(haloStart:indexToEdgeCursor % dimSizes(1))
+
+ ! Link exchange lists
+ haloCursor % sendList => indexToEdgeCursor % sendList
+ haloCursor % recvList => indexToEdgeCursor % recvList
+ haloCursor % copyList => indexToEdgeCursor % copyList
+
+ ! Determine offSet and limit on 0 halo edges for exchange list creation
+ offSetCursor % scalar = haloStart - 1
+ edgeLimitCursor % scalar = haloStart - 1
+
+ ! Setup nEdgesSolve
+ nEdgesSolveCursor % dimSizes(1) = nHalos+2
+ allocate(nEdgesSolveCursor % array(nEdgesSolveCursor % dimSizes(1)))
+ nEdgesSolveCursor % array = -1
+ nEdgesSolveCursor % array(1) = haloStart - 1
+ nEdgesSolveCursor % array(2) = indexToEdgeCursor % dimSizes(1)
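+ ! (nEdgesSolve % array(1) holds the owned (0 halo) edge count and array(2)
+ ! the cumulative count through the 1 halo; the remaining entries stay -1
+ ! here and are filled as higher halo layers are built later.)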
+
+ ! Advance cursors, and create new blocks if needed
+ indexToEdgeCursor => indexToEdgeCursor % next
+ cellsOnEdgeCursor => cellsOnEdgeCursor % next
+ indexToCellCursor => indexToCellCursor % next
+ if(associated(indexToEdgeCursor)) then
+ allocate(haloCursor % next)
+ haloCursor => haloCursor % next
+
+ allocate(offSetCursor % next)
+ offSetCursor => offSetCursor % next
+
+ allocate(edgeLimitCursor % next)
+ edgeLimitCursor => edgeLimitCursor % next
+
+ allocate(nEdgesSolveCursor % next)
+ nEdgesSolveCursor => nEdgesSolveCursor % next
+ end if
+
+ ! Nullify next pointers
+ nullify(haloCursor % next)
+ nullify(offSetCursor % next)
+ nullify(edgeLimitCursor % next)
+ nullify(nEdgesSolveCursor % next)
+ end do
+
+ ! Create exchange lists from 0 halo to 1 halo edges
+ call mpas_dmpar_get_exch_list(1, indexToEdgeID_0Halo, haloIndices, offSetField, edgeLimitField)
+
+ ! Deallocate fields that are not needed anymore.
+ call mpas_deallocate_field(haloIndices)
+ call mpas_deallocate_field(offSetField)
+ call mpas_deallocate_field(edgeLimitField)
+ deallocate(sendingHaloLayers)
+
+ end subroutine mpas_block_creator_build_0_and_1halo_edge_fields!}}}
+
+!***********************************************************************
+!
+! routine mpas_block_creator_build_cell_halos
+!
+!> \brief Builds cell halos
+!> \author Doug Jacobsen
+!> \date 05/31/12
+!> \version SVN:$Id$
+!> \details
+!> This routine uses the previously set up 0 halo cell fields to determine
+!> which cells fall in each halo layer for a block. During this process, each
+!> halo's exchange lists are created. This process is performed for all blocks on
+!> a processor.
+!
+!-----------------------------------------------------------------------
+
+ subroutine mpas_block_creator_build_cell_halos(indexToCellID, nEdgesOnCell, cellsOnCell, verticesOnCell, edgesOnCell, nCellsSolve)!{{{
+ type (field1dInteger), pointer :: indexToCellID !< Input/Output: indexToCellID field for all halos
+ type (field1dInteger), pointer :: nEdgesOnCell !< Input/Output: nEdgesOnCell field for all halos
+ type (field2dInteger), pointer :: cellsOnCell !< Input/Output: cellsOnCell field for all halos
+ type (field2dInteger), pointer :: verticesOnCell !< Input/Output: verticesOnCell field for all halos
+ type (field2dInteger), pointer :: edgesOnCell !< Input/Output: edgesOnCell field for all halos
+ type (field1dInteger), pointer :: nCellsSolve !< Output: Field with indices to end of each halo
+
+ type (dm_info), pointer :: dminfo
+
+ type (field1dInteger), pointer :: haloIndices
+
+ type (field0dInteger), pointer :: offSetCursor, cellLimitCursor
+ type (field1dInteger), pointer :: indexCursor, nEdgesCursor, haloCursor, nCellsSolveCursor
+ type (field2dInteger), pointer :: cellsOnCellCursor, verticesOnCellCursor, edgesOnCellCursor
+
+ type (field0dInteger), pointer :: offSetField
+ type (field0dInteger), pointer :: cellLimitField
+
+ integer, dimension(:), pointer :: sendingHaloLayers
+ integer, dimension(:), pointer :: field1dArrayHolder
+ integer, dimension(:,:), pointer :: field2dArrayHolder
+
+ type (graph), pointer :: blockGraph, blockGraphWithHalo
+
+ type (mpas_exchange_list), pointer :: exchListPtr
+
+ integer :: nHalos, nCellsInBlock, nCellsInHalo, maxEdges
+ integer :: iHalo, iBlock, i
+
+ nHalos = config_num_halos
+ dminfo => indexToCellID % block % domain % dminfo
+ allocate(sendingHaloLayers(1))
+
+ ! Setup header fields
+ allocate(nCellsSolve)
+ allocate(cellLimitField)
+ allocate(offSetField)
+
+ nullify(nCellsSolve % next)
+ nullify(cellLimitField % next)
+ nullify(offSetField % next)
+
+ ! Loop over blocks
+ offSetCursor => offSetField
+ cellLimitCursor => cellLimitField
+ indexCursor => indexToCellID
+ nCellsSolveCursor => nCellsSolve
+ do while (associated(indexCursor))
+ ! Setup offset
+ offSetCursor % scalar = indexCursor % dimSizes(1)
+ offSetCursor % block => indexCursor % block
+ nullify(offSetCursor % ioinfo)
+
+ ! Setup nCellsSolve
+ nCellsSolveCursor % dimSizes(1) = nHalos+1
+ allocate(nCellsSolveCursor % array(nCellsSolveCursor % dimSizes(1)))
+ nCellsSolveCursor % array(1) = indexCursor % dimSizes(1)
+ nCellsSolveCursor % block => indexCursor % block
+ nullify(nCellsSolveCursor % ioinfo)
+
+ ! Setup owned cellLimit
+ cellLimitCursor % scalar = indexCursor % dimSizes(1)
+ cellLimitCursor % block => indexCursor % block
+ nullify(cellLimitCursor % ioinfo)
+
+ ! Advance cursors and create new blocks if needed
+ indexCursor => indexCursor % next
+ if(associated(indexCursor)) then
+ allocate(offSetCursor % next)
+ offSetCursor => offSetCursor % next
+
+ allocate(nCellsSolveCursor % next)
+ nCellsSolveCursor => nCellsSolveCursor % next
+
+ allocate(cellLimitCursor % next)
+ cellLimitCursor => cellLimitCursor % next
+ end if
+
+ ! Nullify next pointers
+ nullify(offSetCursor % next)
+ nullify(nCellsSolveCursor % next)
+ nullify(cellLimitCursor % next)
+ end do
+
+ ! Loop over halos
+ do iHalo = 1, nHalos
+ ! Sending halo layer is the current halo
+ sendingHaloLayers(1) = iHalo
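+ ! (Each pass of this loop builds halo layer iHalo: the block's cell graph
+ ! is extended by one ring of cells, exchange lists are created for that
+ ! ring, and the per-cell fields below are grown to include it.)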
+
+ if(associated(indexToCellID)) then
+ allocate(haloIndices)
+ nullify(haloIndices % next)
+ else
+ nullify(haloIndices)
+ end if
+
+ ! Loop over blocks
+ indexCursor => indexToCellID
+ nEdgesCursor => nEdgesOnCell
+ cellsOnCellCursor => cellsOnCell
+ verticesOnCellCursor => verticesOnCell
+ edgesOnCellCursor => edgesOnCell
+ haloCursor => haloIndices
+ offSetCursor => offSetField
+ do while(associated(indexCursor))
+ ! Determine block dimensions
+ nCellsInBlock = indexCursor % dimSizes(1)
+ maxEdges = cellsOnCellCursor % dimSizes(1)
+
+ ! Setup offSet
+ offSetCursor % scalar = nCellsInBlock
+
+ ! Setup block graphs
+ allocate(blockGraphWithHalo)
+ allocate(blockGraph)
+ allocate(blockGraph % vertexID(nCellsInBlock))
+ allocate(blockGraph % nAdjacent(nCellsInBlock))
+ allocate(blockGraph % adjacencyList(maxEdges, nCellsInBlock))
+
+ blockGraph % nVertices = nCellsInBlock
+ blockGraph % nVerticesTotal = nCellsInBlock
+ blockGraph % maxDegree = maxEdges
+ blockGraph % ghostStart = nCellsInBlock + 1
+
+ blockGraph % vertexID(:) = indexCursor % array(:)
+ blockGraph % nAdjacent(:) = nEdgesCursor % array(:)
+ blockGraph % adjacencyList(:,:) = cellsOnCellCursor % array(:,:)
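+ ! (blockGraph holds the cells currently in the block, so nVerticesTotal
+ ! equals nVertices here; blockGraphWithHalo, filled by the call below,
+ ! appends the next ring of halo cells after position nVertices.)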
+
+ ! Determine all cell IDs with the next halo added
+ call mpas_block_decomp_add_halo(dminfo, blockGraph, blockGraphWithHalo)
+
+ ! Setup haloIndices
+ haloCursor % dimSizes(1) = blockGraphWithHalo % nVerticesTotal - blockGraphWithHalo % nVertices
+ allocate(haloCursor % array(haloCursor % dimSizes(1)))
+ haloCursor % array(:) = blockGraphWithHalo % vertexID(blockGraphWithHalo % nVertices+1:blockGraphWithHalo % nVerticesTotal)
+ call mpas_quicksort(haloCursor % dimSizes(1), haloCursor % array)
+ haloCursor % sendList => indexCursor % sendList
+ haloCursor % recvList => indexCursor % recvList
+ haloCursor % copyList => indexCursor % copyList
+ haloCursor % block => indexCursor % block
+ nullify(haloCursor % ioinfo)
+
+ ! Deallocate block graphs
+ deallocate(blockGraphWithHalo % vertexID)
+ deallocate(blockGraphWithHalo % nAdjacent)
+ deallocate(blockGraphWithHalo % adjacencyList)
+ deallocate(blockGraphWithHalo)
+
+ deallocate(blockGraph % vertexID)
+ deallocate(blockGraph % nAdjacent)
+ deallocate(blockGraph % adjacencyList)
+ deallocate(blockGraph)
+
+ ! Advance cursors and create new block if needed
+ indexCursor => indexCursor % next
+ nEdgesCursor => nEdgesCursor % next
+ cellsOnCellCursor => cellsOnCellCursor % next
+ verticesOnCellCursor => verticesOnCellCursor % next
+ edgesOnCellCursor => edgesOnCellCursor % next
+ offSetCursor => offSetCursor % next
+ if(associated(indexCursor)) then
+ allocate(haloCursor % next)
+ haloCursor => haloCursor % next
+ end if
+ ! Nullify next pointer
+ nullify(haloCursor % next)
+ end do ! indexCursor loop over blocks
+
+ ! Create exchange lists for current halo layer
+ call mpas_dmpar_get_exch_list(iHalo, indexToCellID, haloIndices, offSetField, cellLimitField)
+
+ ! Loop over blocks
+ indexCursor => indexToCellID
+ nEdgesCursor => nEdgesOnCell
+ cellsOnCellCursor => cellsOnCell
+ verticesOnCellCursor => verticesOnCell
+ edgesOnCellCursor => edgesOnCell
+ haloCursor => haloIndices
+ nCellsSolveCursor => nCellsSolve
+ do while(associated(indexCursor))
+ ! Determine block dimensions
+ nCellsInBlock = indexCursor % dimSizes(1)
+ nCellsInHalo = haloCursor % dimSizes(1)
+
+ ! Setup new layer's nCellsSolve
+ nCellsSolveCursor % array(iHalo+1) = nCellsInBlock + nCellsInHalo
+
+ ! Copy cell indices into indexToCellID field
+ field1dArrayHolder => indexCursor % array
+ indexCursor % dimSizes(1) = nCellsSolveCursor % array(iHalo+1)
+ allocate(indexCursor % array(indexCursor % dimSizes(1)))
+ indexCursor % array(1:nCellsInBlock) = field1dArrayHolder(:)
+ indexCursor % array(nCellsInBlock+1:nCellsSolveCursor % array(iHalo+1)) = haloCursor % array(1:nCellsInHalo)
+ deallocate(field1dArrayHolder)
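+ ! (The same grow-in-place pattern is used for each field below: save the
+ ! old array pointer, reallocate at the new halo-inclusive size, copy the
+ ! old entries back, and leave the new halo entries as -1 until the
+ ! all-to-all communications below fill them.)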
+
+ ! Allocate space in nEdgesOnCell
+ field1dArrayHolder => nEdgesCursor % array
+ nEdgesCursor % dimSizes(1) = nCellsSolveCursor % array(iHalo+1)
+ allocate(nEdgesCursor % array(nEdgesCursor % dimSizes(1)))
+ nEdgesCursor % array = -1
+ nEdgesCursor % array(1:nCellsInBlock) = field1dArrayHolder(:)
+ deallocate(field1dArrayHolder)
+
+ ! Allocate space in cellsOnCell
+ field2dArrayHolder => cellsOnCellCursor % array
+ cellsOnCellCursor % dimSizes(2) = nCellsSolveCursor % array(iHalo+1)
+ allocate(cellsOnCellCursor % array(cellsOnCellCursor % dimSizes(1), cellsOnCellCursor % dimSizes(2)))
+ cellsOnCellCursor % array = -1
+ cellsOnCellCursor % array(:,1:nCellsInBlock) = field2dArrayHolder(:,:)
+ deallocate(field2dArrayHolder)
+
+ ! Allocate space in verticesOnCell
+ field2dArrayHolder => verticesOnCellCursor % array
+ verticesOnCellCursor % dimSizes(2) = nCellsSolveCursor % array(iHalo+1)
+ allocate(verticesOnCellCursor % array(verticesOnCellCursor % dimSizes(1), verticesOnCellCursor % dimSizes(2)))
+ verticesOnCellCursor % array = -1
+ verticesOnCellCursor % array(:,1:nCellsInBlock) = field2dArrayHolder(:,:)
+ deallocate(field2dArrayHolder)
+
+ ! Allocate space in edgesOnCell
+ field2dArrayHolder => edgesOnCellCursor % array
+ edgesOnCellCursor % dimSizes(2) = nCellsSolveCursor % array(iHalo+1)
+ allocate(edgesOnCellCursor % array(edgesOnCellCursor % dimSizes(1), edgesOnCellCursor % dimSizes(2)))
+ edgesOnCellCursor % array = -1
+ edgesOnCellCursor % array(:,1:nCellsInBlock) = field2dArrayHolder(:,:)
+ deallocate(field2dArrayHolder)
+
+ indexCursor => indexCursor % next
+ nEdgesCursor => nEdgesCursor % next
+ cellsOnCellCursor => cellsOnCellCursor % next
+ verticesOnCellCursor => verticesOnCellCursor % next
+ edgesOnCellCursor => edgesOnCellCursor % next
+ haloCursor => haloCursor % next
+ nCellsSolveCursor => nCellsSolveCursor % next
+ end do
+
+ ! Perform allToAll communications
+ call mpas_dmpar_alltoall_field(indexToCellID, indexToCellID, sendingHaloLayers)
+ call mpas_dmpar_alltoall_field(nEdgesOnCell, nEdgesOnCell, sendingHaloLayers)
+ call mpas_dmpar_alltoall_field(cellsOnCell, cellsOnCell, sendingHaloLayers)
+ call mpas_dmpar_alltoall_field(verticesOnCell, verticesOnCell, sendingHaloLayers)
+ call mpas_dmpar_alltoall_field(edgesOnCell, edgesOnCell, sendingHaloLayers)
+
+ ! Deallocate haloIndices field
+ call mpas_deallocate_field(haloIndices)
+ end do ! iHalo loop over nHalos
+
+ ! Deallocate array and field.
+ deallocate(sendingHaloLayers)
+ call mpas_deallocate_field(offSetField)
+
+ end subroutine mpas_block_creator_build_cell_halos!}}}
+
+!***********************************************************************
+!
+! routine mpas_block_creator_build_edge_halos
+!
+!> \brief Builds edge halos
+!> \author Doug Jacobsen
+!> \date 05/31/12
+!> \version SVN:$Id$
+!> \details
+!> This routine uses the previously set up 0 and 1 halo edge fields and 0 halo cell fields to determine
+!> which edges fall in each halo layer for a block. During this process, each
+!> halo's exchange lists are created. This process is performed for all blocks on
+!> a processor.
+!> NOTE: This routine can be used on either edges or vertices
+!
+!-----------------------------------------------------------------------
+
+ subroutine mpas_block_creator_build_edge_halos(indexToCellID, nEdgesOnCell, nCellsSolve, edgesOnCell, indexToEdgeID, cellsOnEdge, nEdgesSolve)!{{{
+ type (field1dInteger), pointer :: indexToCellID !< Input: indexToCellID field for all halos
+ type (field1dInteger), pointer :: nEdgesOnCell !< Input: nEdgesOnCell field for all halos
+ type (field1dInteger), pointer :: nCellsSolve !< Input: nCellsSolve field for all halos
+ type (field2dInteger), pointer :: edgesOnCell !< Input/Output: edgesOnCell field for all halos
+ type (field1dInteger), pointer :: indexToEdgeID !< Input/Output: indexToEdgeID field for halos 0 and 1, but output for all halos
+ type (field2dInteger), pointer :: cellsOnEdge !< Output: cellsOnEdge field for all halos
+ type (field1dInteger), pointer :: nEdgesSolve !< Input/Output: nEdgesSolve field for halos 0 and 1, but output for all halos
+
+ type (field0dInteger), pointer :: offSetField, edgeLimitField
+ type (field1dInteger), pointer :: haloIndices
+
+ type (field0dInteger), pointer :: offSetCursor, edgeLimitCursor
+ type (field1dInteger), pointer :: indexToCellCursor, nEdgesCursor, nCellsSolveCursor, indexToEdgeCursor, nEdgesSolveCursor, haloCursor
+ type (field2dInteger), pointer :: edgesOnCellCursor, cellsOnEdgeCursor
+
+ integer, dimension(:), pointer :: sendingHaloLayers
+ integer, dimension(:), pointer :: array1dHolder, localEdgeList
+ integer, dimension(:,:), pointer :: array2dHolder
+
+ integer :: iHalo, iBlock, i, j, k
+ integer :: nHalos, nBlocks, nCellsInBlock, nEdgesLocal, haloStart, haloEnd, haloSize
+ integer :: maxEdges, edgeDegree
+
+ type (hashtable), dimension(:), pointer :: edgeList
+
+ ! Determine dimensions
+ nHalos = config_num_halos
+ maxEdges = edgesOnCell % dimSizes(1)
+ edgeDegree = cellsOnEdge % dimSizes(1)
+
+ ! Allocate some needed arrays and fields
+ allocate(sendingHaloLayers(1))
+
+ allocate(haloIndices)
+ allocate(offSetField)
+ allocate(edgeLimitField)
+
+ nullify(haloIndices % next)
+ nullify(offSetField % next)
+ nullify(edgeLimitField % next)
+
+ ! Determine the number of blocks, and set up field lists
+ ! Loop over blocks
+ nBlocks = 0
+ indexToEdgeCursor => indexToEdgeID
+ haloCursor => haloIndices
+ offSetCursor => offSetField
+ edgeLimitCursor => edgeLimitField
+ nEdgesSolveCursor => nEdgesSolve
+ do while(associated(indexToEdgeCursor))
+ nBlocks = nBlocks + 1
+
+ ! Setup edgeLimit and offSet
+ edgeLimitCursor % scalar = nEdgesSolveCursor % array(1)
+ offSetCursor % scalar = nEdgesSolveCursor % array(2)
+
+ ! Link blocks
+ edgeLimitCursor % block => indexToEdgeCursor % block
+ offSetCursor % block => indexToEdgeCursor % block
+ haloCursor % block => indexToEdgeCursor % block
+
+ ! Nullify ioinfo
+ nullify(edgeLimitCursor % ioinfo)
+ nullify(offSetCursor % ioinfo)
+ nullify(haloCursor % ioinfo)
+
+ ! Link exchange lists
+ haloCursor % sendList => indexToEdgeCursor % sendList
+ haloCursor % recvList => indexToEdgeCursor % recvList
+ haloCursor % copyList => indexToEdgeCursor % copyList
+
+ ! Advance cursors and create new blocks if needed
+ indexToEdgeCursor => indexToEdgeCursor % next
+ nEdgesSolveCursor => nEdgesSolveCursor % next
+ if(associated(indexToEdgeCursor)) then
+ allocate(haloCursor % next)
+ haloCursor => haloCursor % next
+
+ allocate(offSetCursor % next)
+ offSetCursor => offSetCursor % next
+
+ allocate(edgeLimitCursor % next)
+ edgeLimitCursor => edgeLimitCursor % next
+ end if
+
+ ! Nullify next pointers
+ nullify(haloCursor % next)
+ nullify(offSetCursor % next)
+ nullify(edgeLimitCursor % next)
+ end do
+
+ ! Allocate and initialize hashtables
+ allocate(edgeList(nBlocks))
+ do iBlock = 1, nBlocks
+ call mpas_hash_init(edgeList(iBlock))
+ end do
+
+ ! Build unique 0 and 1 halo list for each block
+ indexToEdgeCursor => indexToEdgeID
+ do while(associated(indexToEdgeCursor))
+ iBlock = indexToEdgeCursor % block % localBlockID + 1
+
+ do i = 1, indexToEdgeCursor % dimSizes(1)
+ if(.not. mpas_hash_search(edgeList(iBlock), indexToEdgeCursor % array(i))) then
+ call mpas_hash_insert(edgeList(iBlock), indexToEdgeCursor % array(i))
+ end if
+ end do
+
+ indexToEdgeCursor => indexToEdgeCursor % next
+ end do
+
+ ! Append new unique edge IDs to the indexToEdgeID field.
+ do iHalo = 3, nHalos+2
+ sendingHaloLayers(1) = iHalo-1
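+ ! (Indexing note: nEdgesSolve % array(1:2) were filled by the 0 and 1 halo
+ ! edge routine, so this loop runs iHalo = 3, nHalos+2 and stores the
+ ! cumulative edge count through edge halo layer iHalo-1 in array(iHalo).)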
+
+ ! Loop over blocks
+ indexToEdgeCursor => indexToEdgeID
+ nEdgesCursor => nEdgesOnCell
+ nCellsSolveCursor => nCellsSolve
+ edgesOnCellCursor => edgesOnCell
+ nEdgesSolveCursor => nEdgesSolve
+ haloCursor => haloIndices
+ offSetCursor => offSetField
+ do while(associated(indexToEdgeCursor))
+ iBlock = indexToEdgeCursor % block % localBlockID+1
+ nCellsInBlock = nCellsSolveCursor % array(iHalo-1)
+ offSetCursor % scalar = nEdgesSolveCursor % array(iHalo-1)
+
+ ! Determine all edges in block
+ call mpas_block_decomp_all_edges_in_block(maxEdges, nCellsInBlock, nEdgesCursor % array, edgesOnCellCursor % array, nEdgesLocal, localEdgeList)
+
+ nEdgesSolveCursor % array(iHalo) = nEdgesLocal
+ haloSize = nEdgesLocal - nEdgesSolveCursor % array(iHalo-1)
+ haloCursor % dimSizes(1) = haloSize
+
+ allocate(haloCursor % array(haloCursor % dimSizes(1)))
+
+ ! Add all edges to the block's hash table, and record which edges are new, meaning they belong to the new halo layer
+ j = 1
+ do i = 1, nEdgesLocal
+ if(.not. mpas_hash_search(edgeList(iBlock), localEdgeList(i))) then
+ call mpas_hash_insert(edgeList(iBlock), localEdgeList(i))
+ haloCursor % array(j) = localEdgeList(i)
+ j = j + 1
+ end if
+ end do
+
+ deallocate(localEdgeList)
+
+ ! Advance Cursors
+ indexToEdgeCursor => indexToEdgeCursor % next
+ nEdgesCursor => nEdgesCursor % next
+ nCellsSolveCursor => nCellsSolveCursor % next
+ edgesOnCellCursor => edgesOnCellCursor % next
+ nEdgesSolveCursor => nEdgesSolveCursor % next
+ haloCursor => haloCursor % next
+ offSetCursor => offSetCursor % next
+ end do
+
+ ! Build the current layer's exchange list
+ call mpas_dmpar_get_exch_list(iHalo-1, indexToEdgeID, haloIndices, offSetField, edgeLimitField)
+
+ ! Loop over blocks
+ indexToEdgeCursor => indexToEdgeID
+ cellsOnEdgeCursor => cellsOnEdge
+ nEdgesSolveCursor => nEdgesSolve
+ haloCursor => haloIndices
+ do while(associated(indexToEdgeCursor))
+ ! Copy in new halo indices
+ array1dHolder => indexToEdgeCursor % array
+ indexToEdgeCursor % dimSizes(1) = nEdgesSolveCursor % array(iHalo)
+ allocate(indexToEdgeCursor % array(indexToEdgeCursor % dimSizes(1)))
+ indexToEdgeCursor % array(1:nEdgesSolveCursor % array(iHalo-1)) = array1dHolder(:)
+ indexToEdgeCursor % array(nEdgesSolveCursor % array(iHalo-1)+1:nEdgesSolveCursor % array(iHalo)) = haloCursor % array(:)
+ deallocate(array1dHolder)
+
+ ! Allocate space in cellsOnEdge
+ array2dHolder => cellsOnEdgeCursor % array
+ cellsOnEdgeCursor % dimSizes(2) = nEdgesSolveCursor % array(iHalo)
+ allocate(cellsOnEdgeCursor % array(cellsOnEdgeCursor % dimSizes(1), cellsOnEdgeCursor % dimSizes(2)))
+ cellsOnEdgeCursor % array(:,1:nEdgesSolveCursor % array(iHalo-1)) = array2dHolder(:,:)
+ deallocate(array2dHolder)
+
+ ! Deallocate haloCursor array
+ deallocate(haloCursor % array)
+
+ ! Advance cursors
+ indexToEdgeCursor => indexToEdgeCursor % next
+ cellsOnEdgeCursor => cellsOnEdgeCursor % next
+ nEdgesSolveCursor => nEdgesSolveCursor % next
+ haloCursor => haloCursor % next
+ end do
+
+ ! Perform allToAll communication
+ call mpas_dmpar_alltoall_field(cellsOnEdge, cellsOnEdge, sendingHaloLayers)
+ end do
+
+ ! Deallocate fields, hashtables, and arrays
+ call mpas_deallocate_field(haloIndices)
+ call mpas_deallocate_field(edgeLimitField)
+ call mpas_deallocate_field(offSetField)
+ do iBlock=1,nBlocks
+ call mpas_hash_destroy(edgeList(iBlock))
+ end do
+ deallocate(edgeList)
+ deallocate(sendingHaloLayers)
+
+
+ end subroutine mpas_block_creator_build_edge_halos!}}}
+
+!***********************************************************************
+!
+! routine mpas_block_creator_finalize_block_init
+!
+!> \brief Finalize block creation
+!> \author Doug Jacobsen
+!> \date 05/31/12
+!> \version SVN:$Id$
+!> \details
+!> This routine finalizes the block initialization process. It calls
+!> mpas_allocate_block to allocate space for all fields in a block. Then the 0
+!> halo indices for each element and the exchange lists are copied into the
+!> appropriate block. A halo update is required after this routine is called
+!> to make sure all data in a block is valid.
+!
+!-----------------------------------------------------------------------
+
+ subroutine mpas_block_creator_finalize_block_init(blocklist, & !{{{
+#include "dim_dummy_args.inc"
+ , nCellsSolve, nEdgesSolve, nVerticesSolve, indexToCellID, indexToEdgeID, indexToVertexID)
+ type (block_type), pointer :: blocklist !< Input/Output: Linked List of blocks
+#include "dim_dummy_decls_inout.inc"
+ type (field1dInteger), pointer :: nCellsSolve !< Input: nCellsSolve field information
+ type (field1dInteger), pointer :: nEdgesSolve !< Input: nEdgesSolve field information
+ type (field1dInteger), pointer :: nVerticesSolve !< Input: nVerticesSolve field information
+ type (field1dInteger), pointer :: indexToCellID !< Input: indexToCellID field information
+ type (field1dInteger), pointer :: indexToEdgeID !< Input: indexToEdgeID field information
+ type (field1dInteger), pointer :: indexToVertexID !< Input: indexToVertexID field information
+
+ type (domain_type), pointer :: domain
+
+ type (block_type), pointer :: block_ptr
+ type (field1dInteger), pointer :: nCellsCursor, nEdgesCursor, nVerticesCursor
+ type (field1dInteger), pointer :: indexToCellCursor, indexToEdgeCursor, indexToVertexCursor
+
+ integer :: nHalos
+ integer :: nCellsSolve_0Halo, nVerticesSolve_0Halo, nEdgesSolve_0Halo
+ integer :: blockID, localBlockID
+
+ nHalos = config_num_halos
+ domain => blocklist % domain
+
+ ! Loop over blocks
+ block_ptr => blocklist
+ nCellsCursor => nCellsSolve
+ nEdgesCursor => nEdgesSolve
+ nVerticesCursor => nVerticesSolve
+ indexToCellCursor => indexToCellID
+ indexToEdgeCursor => indexToEdgeID
+ indexToVertexCursor => indexToVertexID
+ do while(associated(block_ptr))
+ ! Determine block dimensions
+ nCells = nCellsCursor % array(nHalos+1)
+ nEdges = nEdgesCursor % array(nHalos+2)
+ nVertices = nVerticesCursor % array(nHalos+2)
+
+ nCellsSolve_0Halo = nCellsCursor % array(1)
+ nEdgesSolve_0Halo = nEdgesCursor % array(1)
+ nVerticesSolve_0Halo = nVerticesCursor % array(1)
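+ ! (Cells carry nHalos halo layers, giving nHalos+1 cumulative counts, while
+ ! edges and vertices each carry one extra layer, giving nHalos+2 counts --
+ ! hence the differing array bounds here and in the allocations below.)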
+
+ ! Determine block IDs
+ blockID = block_ptr % blockID
+ localBlockID = block_ptr % localBlockID
+
+ ! Allocate fields in block
+ call mpas_allocate_block(nHalos, block_ptr, domain, blockID, &
+#include "dim_dummy_args.inc"
+ )
+
+ allocate(block_ptr % mesh % nCellsArray(0:nHalos))
+ allocate(block_ptr % mesh % nEdgesArray(0:nHalos+1))
+ allocate(block_ptr % mesh % nVerticesArray(0:nHalos+1))
+
+ block_ptr % mesh % nCellsArray(:) = nCellsCursor % array(:)
+ block_ptr % mesh % nEdgesArray(:) = nEdgesCursor % array(:)
+ block_ptr % mesh % nVerticesArray(:) = nVerticesCursor % array(:)
+
+ ! Set block's local id
+ block_ptr % localBlockID = localBlockID
+
+ ! Set block's *Solve dimensions
+ block_ptr % mesh % nCellsSolve = nCellsSolve_0Halo
+ block_ptr % mesh % nEdgesSolve = nEdgesSolve_0Halo
+ block_ptr % mesh % nVerticesSolve = nVerticesSolve_0Halo
+
+ ! Set block's 0 halo indices
+ block_ptr % mesh % indexToCellID % array(1:nCellsSolve_0Halo) = indexToCellCursor % array(1:nCellsSolve_0Halo)
+ block_ptr % mesh % indexToEdgeID % array(1:nEdgesSolve_0Halo) = indexToEdgeCursor % array(1:nEdgesSolve_0Halo)
+ block_ptr % mesh % indexToVertexID % array(1:nVerticesSolve_0Halo) = indexToVertexCursor % array(1:nVerticesSolve_0Halo)
+
+ ! Set block's exchange lists and nullify unneeded exchange lists
+ block_ptr % parinfo % cellsToSend => indexToCellCursor % sendList
+ block_ptr % parinfo % cellsToRecv => indexToCellCursor % recvList
+ block_ptr % parinfo % cellsToCopy => indexToCellCursor % copyList
+ nullify(indexToCellCursor % sendList)
+ nullify(indexToCellCursor % recvList)
+ nullify(indexToCellCursor % copyList)
+
+ block_ptr % parinfo % edgesToSend => indexToEdgeCursor % sendList
+ block_ptr % parinfo % edgesToRecv => indexToEdgeCursor % recvList
+ block_ptr % parinfo % edgesToCopy => indexToEdgeCursor % copyList
+ nullify(indexToEdgeCursor % sendList)
+ nullify(indexToEdgeCursor % recvList)
+ nullify(indexToEdgeCursor % copyList)
+
+ block_ptr % parinfo % verticesToSend => indexToVertexCursor % sendList
+ block_ptr % parinfo % verticesToRecv => indexToVertexCursor % recvList
+ block_ptr % parinfo % verticesToCopy => indexToVertexCursor % copyList
+ nullify(indexToVertexCursor % sendList)
+ nullify(indexToVertexCursor % recvList)
+ nullify(indexToVertexCursor % copyList)
+
+ ! Advance cursors
+ block_ptr => block_ptr % next
+ nCellsCursor => nCellsCursor % next
+ nEdgesCursor => nEdgesCursor % next
+ nVerticesCursor => nVerticesCursor % next
+ indexToCellCursor => indexToCellCursor % next
+ indexToEdgeCursor => indexToEdgeCursor % next
+ indexToVertexCursor => indexToVertexCursor % next
+ end do
+
+ ! Link fields between blocks
+ block_ptr => blocklist
+ do while(associated(block_ptr))
+ call mpas_create_field_links(block_ptr)
+
+ block_ptr => block_ptr % next
+ end do
+ end subroutine mpas_block_creator_finalize_block_init!}}}
+
+!***********************************************************************
+!
+! routine mpas_block_creator_reindex_block_fields
+!
+!> \brief Reindex mesh connectivity arrays
+!> \author Doug Jacobsen
+!> \date 05/31/12
+!> \version SVN:$Id$
+!> \details
+!> This routine re-indexes the connectivity arrays for the mesh data
+!> structure. Prior to this routine, all indices are global indices (which
+!> can afterwards be recovered from the indexTo* arrays). After this routine is
+!> called, indices are local indices (1:nCells+1 ... etc).
+!
+!-----------------------------------------------------------------------
+
+ subroutine mpas_block_creator_reindex_block_fields(blocklist)!{{{
+ type (block_type), pointer :: blocklist !< Input/Output: Linked list of blocks
+
+ type (block_type), pointer :: block_ptr
+
+ integer :: i, j, k
+ integer, dimension(:,:), pointer :: cellIDSorted, edgeIDSorted, vertexIDSorted
+
+ ! Loop over blocks
+ block_ptr => blocklist
+ do while(associated(block_ptr))
+ !
+ ! Rename vertices in cellsOnCell, edgesOnCell, etc. to local indices
+ !
+ allocate(cellIDSorted(2, block_ptr % mesh % nCells))
+ allocate(edgeIDSorted(2, block_ptr % mesh % nEdges))
+ allocate(vertexIDSorted(2, block_ptr % mesh % nVertices))
+
+ do i=1,block_ptr % mesh % nCells
+ cellIDSorted(1,i) = block_ptr % mesh % indexToCellID % array(i)
+ cellIDSorted(2,i) = i
+ end do
+ call mpas_quicksort(block_ptr % mesh % nCells, cellIDSorted)
+
+ do i=1,block_ptr % mesh % nEdges
+ edgeIDSorted(1,i) = block_ptr % mesh % indexToEdgeID % array(i)
+ edgeIDSorted(2,i) = i
+ end do
+ call mpas_quicksort(block_ptr % mesh % nEdges, edgeIDSorted)
+
+ do i=1,block_ptr % mesh % nVertices
+ vertexIDSorted(1,i) = block_ptr % mesh % indexToVertexID % array(i)
+ vertexIDSorted(2,i) = i
+ end do
+ call mpas_quicksort(block_ptr % mesh % nVertices, vertexIDSorted)
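+ ! (Each *IDSorted array holds (global ID, local index) pairs sorted by
+ ! global ID, so the binary searches below can look up a global ID and read
+ ! the local index from column 2; IDs not present in the block map to the
+ ! extra "garbage" index nCells+1, nEdges+1, or nVertices+1.)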
+
+
+ do i=1,block_ptr % mesh % nCells
+ do j=1,block_ptr % mesh % nEdgesOnCell % array(i)
+ k = mpas_binary_search(cellIDSorted, 2, 1, block_ptr % mesh % nCells, &
+ block_ptr % mesh % cellsOnCell % array(j,i))
+ if (k <= block_ptr % mesh % nCells) then
+ block_ptr % mesh % cellsOnCell % array(j,i) = cellIDSorted(2,k)
+ else
+ block_ptr % mesh % cellsOnCell % array(j,i) = block_ptr % mesh % nCells + 1
+ end if
+
+ k = mpas_binary_search(edgeIDSorted, 2, 1, block_ptr % mesh % nEdges, &
+ block_ptr % mesh % edgesOnCell % array(j,i))
+ if (k <= block_ptr % mesh % nEdges) then
+ block_ptr % mesh % edgesOnCell % array(j,i) = edgeIDSorted(2,k)
+ else
+ block_ptr % mesh % edgesOnCell % array(j,i) = block_ptr % mesh % nEdges + 1
+ end if
+
+ k = mpas_binary_search(vertexIDSorted, 2, 1, block_ptr % mesh % nVertices, &
+ block_ptr % mesh % verticesOnCell % array(j,i))
+ if (k <= block_ptr % mesh % nVertices) then
+ block_ptr % mesh % verticesOnCell % array(j,i) = vertexIDSorted(2,k)
+ else
+ block_ptr % mesh % verticesOnCell % array(j,i) = block_ptr % mesh % nVertices + 1
+ end if
+ end do
+ end do
+
+ do i=1,block_ptr % mesh % nEdges
+ do j=1,2
+
+ k = mpas_binary_search(cellIDSorted, 2, 1, block_ptr % mesh % nCells, &
+ block_ptr % mesh % cellsOnEdge % array(j,i))
+ if (k <= block_ptr % mesh % nCells) then
+ block_ptr % mesh % cellsOnEdge % array(j,i) = cellIDSorted(2,k)
+ else
+ block_ptr % mesh % cellsOnEdge % array(j,i) = block_ptr % mesh % nCells + 1
+ end if
+
+ k = mpas_binary_search(vertexIDSorted, 2, 1, block_ptr % mesh % nVertices, &
+ block_ptr % mesh % verticesOnEdge % array(j,i))
+ if (k <= block_ptr % mesh % nVertices) then
+ block_ptr % mesh % verticesOnEdge % array(j,i) = vertexIDSorted(2,k)
+ else
+ block_ptr % mesh % verticesOnEdge % array(j,i) = block_ptr % mesh % nVertices + 1
+ end if
+
+ end do
+
+ do j=1,block_ptr % mesh % nEdgesOnEdge % array(i)
+
+ k = mpas_binary_search(edgeIDSorted, 2, 1, block_ptr % mesh % nEdges, &
+ block_ptr % mesh % edgesOnEdge % array(j,i))
+ if (k <= block_ptr % mesh % nEdges) then
+ block_ptr % mesh % edgesOnEdge % array(j,i) = edgeIDSorted(2,k)
+ else
+ block_ptr % mesh % edgesOnEdge % array(j,i) = block_ptr % mesh % nEdges + 1
+ end if
+ end do
+ end do
+
+ do i=1,block_ptr % mesh % nVertices
+ do j=1,block_ptr % mesh % vertexDegree
+
+ k = mpas_binary_search(cellIDSorted, 2, 1, block_ptr % mesh % nCells, &
+ block_ptr % mesh % cellsOnVertex % array(j,i))
+ if (k <= block_ptr % mesh % nCells) then
+ block_ptr % mesh % cellsOnVertex % array(j,i) = cellIDSorted(2,k)
+ else
+ block_ptr % mesh % cellsOnVertex % array(j,i) = block_ptr % mesh % nCells + 1
+ end if
+
+ k = mpas_binary_search(edgeIDSorted, 2, 1, block_ptr % mesh % nEdges, &
+ block_ptr % mesh % edgesOnVertex % array(j,i))
+ if (k <= block_ptr % mesh % nEdges) then
+ block_ptr % mesh % edgesOnVertex % array(j,i) = edgeIDSorted(2,k)
+ else
+ block_ptr % mesh % edgesOnVertex % array(j,i) = block_ptr % mesh % nEdges + 1
+ end if
+ end do
+ end do
+
+ deallocate(cellIDSorted)
+ deallocate(edgeIDSorted)
+ deallocate(vertexIDSorted)
+
+ block_ptr => block_ptr % next
+ end do
+
+ end subroutine mpas_block_creator_reindex_block_fields!}}}
+
+end module mpas_block_creator
Modified: branches/atmos_physics/src/framework/mpas_block_decomp.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_block_decomp.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/framework/mpas_block_decomp.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -1,8 +1,11 @@
module mpas_block_decomp
use mpas_dmpar
+ use mpas_dmpar_types
use mpas_hash
use mpas_sort
+ use mpas_grid_types
+ use mpas_configure
type graph
integer :: nVerticesTotal
@@ -45,6 +48,10 @@
integer, dimension(:), pointer :: local_nvertices
character (len=StrKIND) :: filename
+ logical :: no_blocks
+
+ no_blocks = .false.
+
if(config_number_of_blocks == 0) then
total_blocks = dminfo % nProcs
else
@@ -139,72 +146,84 @@
call mpas_dmpar_scatter_ints(dminfo, dminfo % nprocs, local_nvertices(dminfo % my_proc_id + 1), &
global_start, local_nvertices, global_block_list, local_block_list)
end if
-
- allocate(sorted_local_cell_list(2, local_nvertices(dminfo % my_proc_id + 1)))
- allocate(block_id(blocks_per_proc))
- allocate(block_start(blocks_per_proc))
- allocate(block_count(blocks_per_proc))
- do i = 1, blocks_per_proc
- block_start = 0
- block_count = 0
- end do
+ if(blocks_per_proc == 0) then
+ no_blocks = .true.
+ blocks_per_proc = 1
+ end if
- do i = 1,local_nvertices(dminfo % my_proc_id +1)
- call mpas_get_local_block_id(dminfo, local_block_list(i), local_block_id)
-
- block_id(local_block_id+1) = local_block_list(i)
-
- sorted_local_cell_list(1, i) = local_block_list(i)
- sorted_local_cell_list(2, i) = local_cell_list(i)
-
- block_count(local_block_id+1) = block_count(local_block_id+1) + 1
- end do
+ if(no_blocks) then
+ allocate(block_id(blocks_per_proc))
+ allocate(block_start(blocks_per_proc))
+ allocate(block_count(blocks_per_proc))
- call quicksort(local_nvertices(dminfo % my_proc_id + 1), sorted_local_cell_list)
+ block_id(1) = config_number_of_blocks + 1
+ block_start(1) = 0
+ block_count(1) = 0
+ else
+ allocate(sorted_local_cell_list(2, local_nvertices(dminfo % my_proc_id + 1)))
+ allocate(block_id(blocks_per_proc))
+ allocate(block_start(blocks_per_proc))
+ allocate(block_count(blocks_per_proc))
+
+ do i = 1, blocks_per_proc
+ block_start = 0
+ block_count = 0
+ end do
+
+ do i = 1,local_nvertices(dminfo % my_proc_id +1)
+ call mpas_get_local_block_id(dminfo, local_block_list(i), local_block_id)
+
+ block_id(local_block_id+1) = local_block_list(i)
+
+ sorted_local_cell_list(1, i) = local_block_list(i)
+ sorted_local_cell_list(2, i) = local_cell_list(i)
+
+ block_count(local_block_id+1) = block_count(local_block_id+1) + 1
+ end do
+
+ call mpas_quicksort(local_nvertices(dminfo % my_proc_id + 1), sorted_local_cell_list)
+
+ do i = 1, local_nvertices(dminfo % my_proc_id+1)
+ local_cell_list(i) = sorted_local_cell_list(2, i)
+ end do
+
+ do i = 2,blocks_per_proc
+ block_start(i) = block_start(i-1) + block_count(i-1)
+ end do
- do i = 1, local_nvertices(dminfo % my_proc_id+1)
- local_cell_list(i) = sorted_local_cell_list(2, i)
- end do
+ deallocate(sorted_local_cell_list)
+ deallocate(local_block_list)
+ deallocate(local_nvertices)
+ deallocate(global_start)
+ deallocate(global_cell_list)
+ deallocate(global_block_list)
+ end if
+ else
- do i = 2,blocks_per_proc
- block_start(i) = block_start(i-1) + block_count(i-1)
- end do
-
- !dwj 01/31/12 debugging multiple blocks
-! do i=1,local_nvertices(dminfo % my_proc_id +1)
-! call mpas_get_local_block_id(dminfo, sorted_local_cell_list(1, i), local_block_id)
-! write(*,*) sorted_local_cell_list(1, i), local_block_id, sorted_local_cell_list(2,i)
-! end do
-
- deallocate(sorted_local_cell_list)
- deallocate(local_block_list)
- deallocate(local_nvertices)
- deallocate(global_start)
- deallocate(global_cell_list)
- deallocate(global_block_list)
- else
- allocate(local_cell_list(partial_global_graph_info % nVerticesTotal))
- allocate(block_id(1))
- allocate(block_start(1))
- allocate(block_count(1))
- block_id(1) = 0
- block_start(1) = 0
- block_count(1) = size(local_cell_list)
- do i=1,size(local_cell_list)
- local_cell_list(i) = i
- end do
+ if (dminfo % my_proc_id == IO_NODE) then
+ allocate(local_cell_list(partial_global_graph_info % nVerticesTotal))
+ allocate(block_id(1))
+ allocate(block_start(1))
+ allocate(block_count(1))
+ block_id(1) = 0
+ block_start(1) = 0
+ block_count(1) = size(local_cell_list)
+ do i=1,size(local_cell_list)
+ local_cell_list(i) = i
+ end do
+ else
+ allocate(local_cell_list(1))
+ allocate(block_id(1))
+ allocate(block_start(1))
+ allocate(block_count(1))
+ local_cell_list(1) = 0
+ block_id(1) = config_number_of_blocks + 1
+ block_start(1) = 0
+ block_count(1) = 0
+ end if
end if
- !dwj 01/31/12 debugging multiple blocks
-! write(*,*) 'Blocks per proc = ', blocks_per_proc, 'total_blocks = ', total_blocks
-
-! do i=1,blocks_per_proc
-! write(*,*) block_id(i), block_start(i), block_count(i)
-! end do
-
-! call mpas_dmpar_abort(dminfo)
-
end subroutine mpas_block_decomp_cells_for_proc!}}}
subroutine mpas_block_decomp_partitioned_edge_list(nCells, cellIDList, maxCells, nEdges, cellsOnEdge, edgeIDList, ghostEdgeStart)!{{{
@@ -395,17 +414,37 @@
integer, intent(out) :: blocks_per_proc !< Output: Number of blocks proc_number computes on
integer :: blocks_per_proc_min, even_blocks, remaining_blocks
+ integer :: i, owning_proc, local_block_id
- blocks_per_proc_min = total_blocks / dminfo % nProcs
- remaining_blocks = total_blocks - (blocks_per_proc_min * dminfo % nProcs)
- even_blocks = total_blocks - remaining_blocks
+ if(.not. explicitDecomp) then
+ if(total_blocks > dminfo % nProcs) then
+ blocks_per_proc_min = total_blocks / dminfo % nProcs
+ remaining_blocks = total_blocks - (blocks_per_proc_min * dminfo % nProcs)
+ even_blocks = total_blocks - remaining_blocks
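+ ! (Illustrative example: total_blocks = 10 on 4 tasks gives
+ ! blocks_per_proc_min = 2 and remaining_blocks = 2, so tasks 0 and 1
+ ! receive 3 blocks each while tasks 2 and 3 receive 2 each.)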
+
+ blocks_per_proc = blocks_per_proc_min
+
+ if(proc_number < remaining_blocks) then
+ blocks_per_proc = blocks_per_proc + 1
+ end if
+ else
+ if(dminfo % my_proc_id < total_blocks) then
+ blocks_per_proc = 1
+ else
+ blocks_per_proc = 0
+ end if
+ end if
+ else
+ blocks_per_proc = 0
+ do i = 1, total_blocks
+ call mpas_get_owning_proc(dminfo, i, owning_proc)
+ if(owning_proc == proc_number) then
+ call mpas_get_local_block_id(dminfo, i, local_block_id)
+ blocks_per_proc = max(blocks_per_proc, local_block_id)
+ end if
+ end do
+ end if
- blocks_per_proc = blocks_per_proc_min
-
- if(proc_number .le. remaining_blocks) then
- block_per_proc = blocks_per_proc + 1
- endif
-
end subroutine mpas_get_blocks_per_proc!}}}
subroutine mpas_get_local_block_id(dminfo, global_block_number, local_block_number)!{{{
@@ -416,14 +455,18 @@
integer :: blocks_per_proc_min, even_blocks, remaining_blocks
if(.not.explicitDecomp) then
- blocks_per_proc_min = total_blocks / dminfo % nProcs
- remaining_blocks = total_blocks - (blocks_per_proc_min * dminfo % nProcs)
- even_blocks = total_blocks - remaining_blocks
-
- if(global_block_number > even_blocks) then
- local_block_number = blocks_per_proc_min
+ if(total_blocks > dminfo % nProcs) then
+ blocks_per_proc_min = total_blocks / dminfo % nProcs
+ remaining_blocks = total_blocks - (blocks_per_proc_min * dminfo % nProcs)
+ even_blocks = total_blocks - remaining_blocks
+
+ if(global_block_number > even_blocks) then
+ local_block_number = blocks_per_proc_min
+ else
+ local_block_number = mod(global_block_number, blocks_per_proc_min)
+ end if
else
- local_block_number = mod(global_block_number, blocks_per_proc_min)
+ local_block_number = 0
end if
else
local_block_number = block_local_id_list(global_block_number+1)
@@ -438,14 +481,18 @@
integer :: blocks_per_proc_min, even_blocks, remaining_blocks
if(.not.explicitDecomp) then
- blocks_per_proc_min = total_blocks / dminfo % nProcs
- remaining_blocks = total_blocks - (blocks_per_proc_min * dminfo % nProcs)
- even_blocks = total_blocks - remaining_blocks
+ if(total_blocks >= dminfo % nProcs) then
+ blocks_per_proc_min = total_blocks / dminfo % nProcs
+ remaining_blocks = total_blocks - (blocks_per_proc_min * dminfo % nProcs)
+ even_blocks = total_blocks - remaining_blocks
- if(global_block_number > even_blocks) then
- owning_proc = global_block_number - even_blocks
+ if(global_block_number > even_blocks) then
+ owning_proc = global_block_number - even_blocks
+ else
+ owning_proc = global_block_number / blocks_per_proc_min
+ end if
else
- owning_proc = global_block_number / blocks_per_proc_min
+ owning_proc = global_block_number
end if
else
owning_proc = block_proc_list(global_block_number+1)
Modified: branches/atmos_physics/src/framework/mpas_dmpar.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_dmpar.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/framework/mpas_dmpar.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -3,6 +3,7 @@
use mpas_dmpar_types
use mpas_grid_types
use mpas_sort
+ use mpas_hash
#ifdef _MPI
include 'mpif.h'
@@ -18,7 +19,6 @@
integer, parameter :: IO_NODE = 0
integer, parameter :: BUFSIZE = 6000
-
interface mpas_dmpar_alltoall_field
module procedure mpas_dmpar_alltoall_field1d_integer
module procedure mpas_dmpar_alltoall_field2d_integer
@@ -50,12 +50,26 @@
private :: mpas_dmpar_exch_halo_field2d_real
private :: mpas_dmpar_exch_halo_field3d_real
+ interface mpas_dmpar_copy_field
+ module procedure mpas_dmpar_copy_field1d_integer
+ module procedure mpas_dmpar_copy_field2d_integer
+ module procedure mpas_dmpar_copy_field3d_integer
+ module procedure mpas_dmpar_copy_field1d_real
+ module procedure mpas_dmpar_copy_field2d_real
+ module procedure mpas_dmpar_copy_field3d_real
+ end interface
+ private :: mpas_dmpar_copy_field1d_integer
+ private :: mpas_dmpar_copy_field2d_integer
+ private :: mpas_dmpar_copy_field3d_integer
+ private :: mpas_dmpar_copy_field1d_real
+ private :: mpas_dmpar_copy_field2d_real
+ private :: mpas_dmpar_copy_field3d_real
+
contains
+ subroutine mpas_dmpar_init(dminfo, mpi_comm)!{{{
- subroutine mpas_dmpar_init(dminfo, mpi_comm)
-
implicit none
type (dm_info), intent(inout) :: dminfo
@@ -94,11 +108,10 @@
dminfo % using_external_comm = .false.
#endif
- end subroutine mpas_dmpar_init
+ end subroutine mpas_dmpar_init!}}}
+ subroutine mpas_dmpar_finalize(dminfo)!{{{
- subroutine mpas_dmpar_finalize(dminfo)
-
implicit none
type (dm_info), intent(inout) :: dminfo
@@ -111,11 +124,10 @@
end if
#endif
- end subroutine mpas_dmpar_finalize
+ end subroutine mpas_dmpar_finalize!}}}
+ subroutine mpas_dmpar_abort(dminfo)!{{{
- subroutine mpas_dmpar_abort(dminfo)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -128,11 +140,10 @@
stop
- end subroutine mpas_dmpar_abort
+ end subroutine mpas_dmpar_abort!}}}
+ subroutine mpas_dmpar_global_abort(mesg)!{{{
- subroutine mpas_dmpar_global_abort(mesg)
-
implicit none
character (len=*), intent(in) :: mesg
@@ -147,11 +158,10 @@
write(0,*) trim(mesg)
stop
- end subroutine mpas_dmpar_global_abort
+ end subroutine mpas_dmpar_global_abort!}}}
+ subroutine mpas_dmpar_bcast_int(dminfo, i)!{{{
- subroutine mpas_dmpar_bcast_int(dminfo, i)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -163,11 +173,10 @@
call MPI_Bcast(i, 1, MPI_INTEGERKIND, IO_NODE, dminfo % comm, mpi_ierr)
#endif
- end subroutine mpas_dmpar_bcast_int
+ end subroutine mpas_dmpar_bcast_int!}}}
+ subroutine mpas_dmpar_bcast_ints(dminfo, n, iarray)!{{{
- subroutine mpas_dmpar_bcast_ints(dminfo, n, iarray)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -180,11 +189,10 @@
call MPI_Bcast(iarray, n, MPI_INTEGERKIND, IO_NODE, dminfo % comm, mpi_ierr)
#endif
- end subroutine mpas_dmpar_bcast_ints
+ end subroutine mpas_dmpar_bcast_ints!}}}
+ subroutine mpas_dmpar_bcast_real(dminfo, r)!{{{
- subroutine mpas_dmpar_bcast_real(dminfo, r)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -196,11 +204,10 @@
call MPI_Bcast(r, 1, MPI_REALKIND, IO_NODE, dminfo % comm, mpi_ierr)
#endif
- end subroutine mpas_dmpar_bcast_real
+ end subroutine mpas_dmpar_bcast_real!}}}
+ subroutine mpas_dmpar_bcast_reals(dminfo, n, rarray)!{{{
- subroutine mpas_dmpar_bcast_reals(dminfo, n, rarray)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -213,11 +220,10 @@
call MPI_Bcast(rarray, n, MPI_REALKIND, IO_NODE, dminfo % comm, mpi_ierr)
#endif
- end subroutine mpas_dmpar_bcast_reals
+ end subroutine mpas_dmpar_bcast_reals!}}}
+ subroutine mpas_dmpar_bcast_logical(dminfo, l)!{{{
- subroutine mpas_dmpar_bcast_logical(dminfo, l)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -244,11 +250,10 @@
end if
#endif
- end subroutine mpas_dmpar_bcast_logical
+ end subroutine mpas_dmpar_bcast_logical!}}}
+ subroutine mpas_dmpar_bcast_char(dminfo, c)!{{{
- subroutine mpas_dmpar_bcast_char(dminfo, c)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -260,11 +265,10 @@
call MPI_Bcast(c, len(c), MPI_CHARACTER, IO_NODE, dminfo % comm, mpi_ierr)
#endif
- end subroutine mpas_dmpar_bcast_char
+ end subroutine mpas_dmpar_bcast_char!}}}
+ subroutine mpas_dmpar_sum_int(dminfo, i, isum)!{{{
- subroutine mpas_dmpar_sum_int(dminfo, i, isum)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -279,11 +283,10 @@
isum = i
#endif
- end subroutine mpas_dmpar_sum_int
+ end subroutine mpas_dmpar_sum_int!}}}
+ subroutine mpas_dmpar_sum_real(dminfo, r, rsum)!{{{
- subroutine mpas_dmpar_sum_real(dminfo, r, rsum)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -298,11 +301,10 @@
rsum = r
#endif
- end subroutine mpas_dmpar_sum_real
+ end subroutine mpas_dmpar_sum_real!}}}
+ subroutine mpas_dmpar_min_int(dminfo, i, imin)!{{{
- subroutine mpas_dmpar_min_int(dminfo, i, imin)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -317,11 +319,10 @@
imin = i
#endif
- end subroutine mpas_dmpar_min_int
+ end subroutine mpas_dmpar_min_int!}}}
+ subroutine mpas_dmpar_min_real(dminfo, r, rmin)!{{{
- subroutine mpas_dmpar_min_real(dminfo, r, rmin)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -336,11 +337,10 @@
rmin = r
#endif
- end subroutine mpas_dmpar_min_real
+ end subroutine mpas_dmpar_min_real!}}}
+ subroutine mpas_dmpar_max_int(dminfo, i, imax)!{{{
- subroutine mpas_dmpar_max_int(dminfo, i, imax)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -355,11 +355,10 @@
imax = i
#endif
- end subroutine mpas_dmpar_max_int
+ end subroutine mpas_dmpar_max_int!}}}
+ subroutine mpas_dmpar_max_real(dminfo, r, rmax)!{{{
- subroutine mpas_dmpar_max_real(dminfo, r, rmax)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -374,11 +373,10 @@
rmax = r
#endif
- end subroutine mpas_dmpar_max_real
+ end subroutine mpas_dmpar_max_real!}}}
+ subroutine mpas_dmpar_sum_int_array(dminfo, nElements, inArray, outArray)!{{{
- subroutine mpas_dmpar_sum_int_array(dminfo, nElements, inArray, outArray)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -394,10 +392,9 @@
outArray = inArray
#endif
- end subroutine mpas_dmpar_sum_int_array
+ end subroutine mpas_dmpar_sum_int_array!}}}
-
- subroutine mpas_dmpar_min_int_array(dminfo, nElements, inArray, outArray)
+ subroutine mpas_dmpar_min_int_array(dminfo, nElements, inArray, outArray)!{{{
implicit none
@@ -414,11 +411,10 @@
outArray = inArray
#endif
- end subroutine mpas_dmpar_min_int_array
+ end subroutine mpas_dmpar_min_int_array!}}}
+ subroutine mpas_dmpar_max_int_array(dminfo, nElements, inArray, outArray)!{{{
- subroutine mpas_dmpar_max_int_array(dminfo, nElements, inArray, outArray)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -434,11 +430,10 @@
outArray = inArray
#endif
- end subroutine mpas_dmpar_max_int_array
+ end subroutine mpas_dmpar_max_int_array!}}}
+ subroutine mpas_dmpar_sum_real_array(dminfo, nElements, inArray, outArray)!{{{
- subroutine mpas_dmpar_sum_real_array(dminfo, nElements, inArray, outArray)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -454,11 +449,10 @@
outArray = inArray
#endif
- end subroutine mpas_dmpar_sum_real_array
+ end subroutine mpas_dmpar_sum_real_array!}}}
+ subroutine mpas_dmpar_min_real_array(dminfo, nElements, inArray, outArray)!{{{
- subroutine mpas_dmpar_min_real_array(dminfo, nElements, inArray, outArray)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -474,11 +468,10 @@
outArray = inArray
#endif
- end subroutine mpas_dmpar_min_real_array
+ end subroutine mpas_dmpar_min_real_array!}}}
+ subroutine mpas_dmpar_max_real_array(dminfo, nElements, inArray, outArray)!{{{
- subroutine mpas_dmpar_max_real_array(dminfo, nElements, inArray, outArray)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -494,11 +487,10 @@
outArray = inArray
#endif
- end subroutine mpas_dmpar_max_real_array
+ end subroutine mpas_dmpar_max_real_array!}}}
+ subroutine mpas_dmpar_scatter_ints(dminfo, nprocs, noutlist, displs, counts, inlist, outlist)!{{{
- subroutine mpas_dmpar_scatter_ints(dminfo, nprocs, noutlist, displs, counts, inlist, outlist)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -513,10 +505,9 @@
call MPI_Scatterv(inlist, counts, displs, MPI_INTEGERKIND, outlist, noutlist, MPI_INTEGERKIND, IO_NODE, dminfo % comm, mpi_ierr)
#endif
- end subroutine mpas_dmpar_scatter_ints
+ end subroutine mpas_dmpar_scatter_ints!}}}
-
- subroutine mpas_dmpar_get_index_range(dminfo, &
+ subroutine mpas_dmpar_get_index_range(dminfo, &!{{{
global_start, global_end, &
local_start, local_end)
@@ -529,10 +520,9 @@
local_start = nint(real(dminfo % my_proc_id) * real(global_end - global_start + 1) / real(dminfo % nprocs)) + 1
local_end = nint(real(dminfo % my_proc_id + 1) * real(global_end - global_start + 1) / real(dminfo % nprocs))
- end subroutine mpas_dmpar_get_index_range
+ end subroutine mpas_dmpar_get_index_range!}}}
-
- subroutine mpas_dmpar_compute_index_range(dminfo, &
+ subroutine mpas_dmpar_compute_index_range(dminfo, &!{{{
local_start, local_end, &
global_start, global_end)
@@ -566,1630 +556,4114 @@
end if
- end subroutine mpas_dmpar_compute_index_range
+ end subroutine mpas_dmpar_compute_index_range!}}}
+ ! ----- NEW ROUTINES BELOW ----- !
- subroutine mpas_dmpar_get_owner_list(dminfo, &
- nOwnedList, nNeededList, &
- ownedList, neededList, &
- sendList, recvList, inOffset)
+subroutine mpas_dmpar_get_exch_list(haloLayer, ownedListField, neededListField, offsetListField, ownedLimitField)!{{{
implicit none
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: nOwnedList, nNeededList
- integer, dimension(nOwnedList), intent(in) :: ownedList
- integer, dimension(nNeededList), intent(in) :: neededList
- type (exchange_list), pointer :: sendList
- type (exchange_list), pointer :: recvList
- integer, optional :: inOffset
+ integer, intent(in) :: haloLayer
+ type (field1dInteger), pointer :: ownedListField, neededListField
+ type (field0dInteger), pointer, optional :: offsetListField
+ type (field0dInteger), pointer, optional :: ownedLimitField
- integer :: i, j, k, kk
+ type (dm_info), pointer :: dminfo
+
+ integer :: i, j, k, kk, iBlock
integer :: totalSize, nMesgRecv, nMesgSend, recvNeighbor, sendNeighbor, currentProc, offset
- integer :: numToSend, numToRecv
- integer, dimension(nOwnedList) :: recipientList
- integer, dimension(2,nOwnedList) :: ownedListSorted
+ integer :: totalSent, totalRecv
+ integer, allocatable, dimension(:) :: numToSend, numToRecv
+ integer, allocatable, dimension(:) :: ownedList, ownedListIndex, ownedBlock, neededList, neededListIndex, neededBlock
+ integer, allocatable, dimension(:) :: offsetList, ownedLimitList
+ integer, allocatable, dimension(:,:) :: ownedListSorted, ownedBlockSorted, recipientList
integer, allocatable, dimension(:) :: ownerListIn, ownerListOut
- type (exchange_list), pointer :: sendListPtr, recvListPtr
+ integer, allocatable, dimension(:) :: packingOrder
+ type (mpas_exchange_list), pointer :: exchListPtr, exchListPtr2
+ type (field1dInteger), pointer :: fieldCursor, fieldCursor2
+ type (field0dInteger), pointer :: offsetCursor, ownedLimitCursor
+ integer :: nOwnedBlocks, nNeededBlocks
+ integer :: nOwnedList, nNeededList
integer :: mpi_ierr, mpi_rreq, mpi_sreq
+ type (hashtable) :: neededHash
+ integer :: nUniqueNeededList
+ integer, dimension(:,:), pointer :: uniqueSortedNeededList
+
+ !
+ ! *** NB: This code assumes that block % blockID values are local block IDs and are in the range [1, numBlocks]
+ ! where numBlocks is the number of blocks owned by each task
+ !
+
+
+ ! For the ownedListField:
+ ! - ownedList contains a list of the global indices owned by all blocks
+ ! - ownedListIndex contains a list of the block-local indices of the global indices owned by all blocks
+ ! - ownedBlock contains the local block ID associated with each index
+ !
+ ! Example:
+ ! ownedList := ( 21 13 15 01 05 06 33 42 44 45 ) ! Global indices from all blocks on this task
+ ! ownedListIndex := ( 1 2 3 4 1 2 3 4 5 6 ) ! Local indices of global indices on each block
+ ! ownedBlock := ( 1 1 1 1 2 2 2 2 2 2 ) ! Local block ID owning each global index
+ !
+
+ ! For the neededListField:
+ ! similar to the ownedListField...
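+ !
+ ! Example (illustrative):
+ ! neededList := ( 13 15 05 06 31 ... ) ! Global indices needed by all blocks on this task
+ ! neededBlock := ( 1 1 2 2 2 ... ) ! Local block ID needing each global index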
+
+ dminfo => ownedListField % block % domain % dminfo
+
+ !
+ ! Determine total number of owned blocks on this task
+ !
+ nOwnedBlocks = 0
+ fieldCursor => ownedListField
+ do while (associated(fieldCursor))
+ nOwnedBlocks = nOwnedBlocks + 1
+ if(associated(fieldCursor % sendList % halos(haloLayer) % exchList)) then
+ call mpas_dmpar_destroy_exchange_list(fieldCursor % sendList % halos(haloLayer) % exchList)
+ end if
+
+ if(associated(fieldCursor % copyList % halos(haloLayer) % exchList)) then
+ call mpas_dmpar_destroy_exchange_list(fieldCursor % copyList % halos(haloLayer) % exchList)
+ end if
+ fieldCursor => fieldCursor % next
+ end do
+
+ !
+ ! Determine total number of needed indices on this task
+ !
+ nNeededList = 0
+ nNeededBlocks = 0
+ fieldCursor => neededListField
+ do while (associated(fieldCursor))
+ nNeededBlocks = nNeededBlocks + 1
+ nNeededList = nNeededList + fieldCursor % dimSizes(1)
+ if(associated(fieldCursor % recvList % halos(haloLayer) % exchList)) then
+ call mpas_dmpar_destroy_exchange_list(fieldCursor % recvList % halos(haloLayer) % exchList)
+ end if
+
+ fieldCursor => fieldCursor % next
+ end do
+
+ !
+ ! Determine unique list of needed elements.
+ !
+ nUniqueNeededList = 0
+ call mpas_hash_init(neededHash)
+ fieldCursor => neededListField
+ do while (associated(fieldCursor))
+ do i = 1, fieldCursor % dimSizes(1)
+ if(.not. mpas_hash_search(neededHash, fieldCursor % array(i))) then
+ nUniqueNeededList = nUniqueNeededList + 1
+ call mpas_hash_insert(neededHash, fieldCursor % array(i))
+ end if
+ end do
+ fieldCursor => fieldCursor % next
+ end do
+
+ kk = mpas_hash_size(neededHash)
+
+ nUniqueNeededList = mpas_hash_size(neededHash)
+ allocate(uniqueSortedNeededList(2,nUniqueNeededList))
+ allocate(packingOrder(nUniqueNeededList))
+ call mpas_hash_destroy(neededHash)
+ call mpas_hash_init(neededHash)
+
+ j = 0
+ fieldCursor => neededListField
+ do while (associated(fieldCursor))
+ do i = 1, fieldCursor % dimSizes(1)
+ if(.not. mpas_hash_search(neededHash, fieldCursor % array(i))) then
+ j = j +1
+ uniqueSortedNeededList(1, j) = fieldCursor % array(i)
+ uniqueSortedNeededList(2, j) = fieldCursor % block % localBlockID
+ call mpas_hash_insert(neededHash, fieldCursor % array(i))
+ end if
+ end do
+ fieldCursor => fieldCursor % next
+ end do
+
+ kk = mpas_hash_size(neededHash)
+
+ call mpas_hash_destroy(neededHash)
+ call mpas_quicksort(nUniqueNeededList, uniqueSortedNeededList)
+
+ !
+ ! Get list of index offsets for all blocks
+ !
+ allocate(offsetList(nNeededBlocks))
+ if (present(offsetListField)) then
+ offsetCursor => offsetListField
+ do while (associated(offsetCursor))
+ offsetList(offsetCursor % block % localBlockID+1) = offsetCursor % scalar
+ offsetCursor => offsetCursor % next
+ end do
+ else
+ offsetList(:) = 0
+ end if
+
+ !
+ ! Get list of bounds limit for owned elements
+ !
+ allocate(ownedLimitList(nOwnedBlocks))
+ if(present(ownedLimitField)) then
+ ownedLimitCursor => ownedLimitField
+ do while(associated(ownedLimitCursor))
+ ownedLimitList(ownedLimitCursor % block % localBlockID+1) = ownedLimitCursor % scalar
+ ownedLimitCursor => ownedLimitCursor % next
+ end do
+ else
+ fieldCursor => ownedListField
+ do while(associated(fieldCursor))
+ ownedLimitList(fieldCursor % block % localBlockID+1) = fieldCursor % dimSizes(1)
+ fieldCursor => fieldCursor % next
+ end do
+ end if
+
+ !
+ ! Determine total number of owned indices on this task, and
+ ! initialize output send and recv lists for ownedListField
+ !
+ nOwnedList = 0
+ fieldCursor => ownedListField
+ do while (associated(fieldCursor))
+ iBlock = fieldcursor % block % localBlockID + 1
+ nOwnedList = nOwnedList + ownedLimitList(iBlock)
+ fieldCursor => fieldCursor % next
+ end do
+
#ifdef _MPI
- allocate(sendList)
- allocate(recvList)
- nullify(sendList % next)
- nullify(recvList % next)
- sendListPtr => sendList
- recvListPtr => recvList
+ !
+ ! Gather list of all owned indices and their associated blocks on this task
+ !
+ allocate(ownedList(nOwnedList))
+ allocate(ownedBlock(nOwnedList))
+ ownedBlock = -1
+ ownedList = -1
+ fieldCursor => ownedListField
+ i = 1
+ do while (associated(fieldCursor))
+ iBlock = fieldCursor % block % localBlockID + 1
+ ownedList(i:i+ownedLimitList(iBlock)-1) = fieldCursor % array(1:ownedLimitList(iBlock))
+ ownedBlock(i:i+ownedLimitList(iBlock)-1) = fieldCursor % block % localBlockID
+ i = i + ownedLimitList(iBlock)
+ fieldCursor => fieldCursor % next
+ end do
- offset = 0
- if(present(inOffset)) then
- offset = inOffset
- end if
-
+ !
+ ! Gather list of all needed indices and their associated blocks on this task
+ !
+ allocate(neededList(nNeededList))
+ allocate(neededBlock(nNeededList))
+ fieldCursor => neededListField
+ i = 1
+ do while (associated(fieldCursor))
+ neededList(i:i+fieldCursor % dimSizes(1)-1) = fieldCursor % array(:)
+ neededBlock(i:i+fieldCursor % dimSizes(1)-1) = fieldCursor % block % localBlockID
+ i = i + fieldCursor % dimSizes(1)
+ fieldCursor => fieldCursor % next
+ end do
+
+ !
+ ! Obtain sorted list of global indices owned by this task and the associated local indices and block IDs
+ !
+ allocate(ownedListIndex(nOwnedList))
+ allocate(ownedListSorted(2,nOwnedList))
+ allocate(recipientList(2,nOwnedList))
+ j = 1
+ k = 1
do i=1,nOwnedList
- ownedListSorted(1,i) = ownedList(i)
- ownedListSorted(2,i) = i
+ ownedListSorted(1,i) = ownedList(i)
+ if (i > 1) then
+ if(ownedBlock(i) /= ownedBlock(i-1)) k = 1
+ end if
+ ownedListIndex(i) = k
+ ownedListSorted(2,i) = j
+ j = j + 1
+ k = k + 1
end do
- call quicksort(nOwnedList, ownedListSorted)
+ call mpas_quicksort(nOwnedList, ownedListSorted)
- call MPI_Allreduce(nNeededList, totalSize, 1, MPI_INTEGERKIND, MPI_MAX, dminfo % comm, mpi_ierr)
+ allocate(ownedBlockSorted(2,nOwnedList))
+ do i=1,nOwnedList
+ ownedBlockSorted(1,i) = ownedList(i)
+ ownedBlockSorted(2,i) = ownedBlock(i)
+ end do
+ call mpas_quicksort(nOwnedList, ownedBlockSorted)
+
+ allocate(neededListIndex(nNeededList))
+ j = 1
+ do i=1,nNeededList
+ if (i > 1) then
+ if(neededBlock(i) /= neededBlock(i-1)) j = 1
+ end if
+ neededListIndex(i) = j
+ j = j + 1
+ end do
+
+ !
+ ! Set totalSize to the maximum number of items in any task's needed list
+ !
+ call MPI_Allreduce(nUniqueNeededList, totalSize, 1, MPI_INTEGERKIND, MPI_MAX, dminfo % comm, mpi_ierr)
+
allocate(ownerListIn(totalSize))
allocate(ownerListOut(totalSize))
- nMesgRecv = nNeededList
- ownerListIn(1:nNeededList) = neededList(1:nNeededList)
+ nMesgSend = nUniqueNeededList
+ nMesgRecv = nUniqueNeededList
+ ownerListOut(1:nUniqueNeededList) = uniqueSortedNeededList(1,1:nUniqueNeededList)
recvNeighbor = mod(dminfo % my_proc_id + dminfo % nprocs - 1, dminfo % nprocs)
sendNeighbor = mod(dminfo % my_proc_id + 1, dminfo % nprocs)
- do i=1, dminfo % nprocs
+ allocate(numToSend(nOwnedBlocks))
+ allocate(numToRecv(nNeededBlocks))
- recipientList(:) = -1
- numToSend = 0
+ ! Initial send of data to neighbors.
+ if(dminfo % nProcs == 1) then
+ ownerListIn = ownerListOut
+ else
+ call MPI_Irecv(nMesgRecv, 1, MPI_INTEGERKIND, recvNeighbor, recvNeighbor, dminfo % comm, mpi_rreq, mpi_ierr)
+ call MPI_Isend(nMesgSend, 1, MPI_INTEGERKIND, sendNeighbor, dminfo % my_proc_id, dminfo % comm, mpi_sreq, mpi_ierr)
+ call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr)
+ call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr)
+ call MPI_Irecv(ownerListIn, nMesgRecv, MPI_INTEGERKIND, recvNeighbor, recvNeighbor, dminfo % comm, mpi_rreq, mpi_ierr)
+ call MPI_Isend(ownerListOut, nMesgSend, MPI_INTEGERKIND, sendNeighbor, dminfo % my_proc_id, dminfo % comm, mpi_sreq, mpi_ierr)
+ call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr)
+ call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr)
+ end if
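+
+ ! The needed lists now circulate around a ring of tasks: each round receives
+ ! from rank mod(my_proc_id + nprocs - 1, nprocs) and forwards to rank
+ ! mod(my_proc_id + 1, nprocs), so after nprocs - 1 rounds every task has
+ ! examined every other task's needed list exactly once.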
- currentProc = mod(dminfo % my_proc_id + dminfo % nprocs - i + 1, dminfo % nprocs)
- do j=1,nMesgRecv
- if (ownerListIn(j) > 0) then
- k = mpas_binary_search(ownedListSorted, 2, 1, nOwnedList, ownerListIn(j))
- if (k <= nOwnedList) then
- ownerListOut(j) = -1 * dminfo % my_proc_id
- numToSend = numToSend + 1
- recipientList(ownedListSorted(2,k)) = numToSend
- else
- ownerListOut(j) = ownerListIn(j)
- end if
+ !
+ ! For each processor (excluding ourselves), mark the indices that we will
+ ! provide to that processor in ownerListOut, and build a send list for that
+ ! processor if we need to send any indices
+ !
+ do i=2, dminfo % nprocs
+ recipientList = -1
+ numToSend(:) = 0
+ totalSent = 0
+
+ currentProc = mod(dminfo % my_proc_id + dminfo % nprocs - i + 1, dminfo % nprocs)
+ do j=1,nMesgRecv
+ if (ownerListIn(j) > 0) then
+ k = mpas_binary_search(ownedListSorted, 2, 1, nOwnedList, ownerListIn(j))
+ if (k <= nOwnedList) then
+ iBlock = ownedBlock(ownedListSorted(2,k)) + 1
+ numToSend(iBlock) = numToSend(iBlock) + 1
+ totalSent = totalSent + 1
+
+ ! recipientList(1,:) represents the index in the srcList to place this data
+ recipientList(1,ownedListSorted(2,k)) = numToSend(iBlock)
+ ! recipientList(2,:) represents the index in the buffer to place this data
+ recipientList(2,ownedListSorted(2,k)) = totalSent
+
+ ownerListOut(j) = -1 * dminfo % my_proc_id
else
- ownerListOut(j) = ownerListIn(j)
+ ownerListOut(j) = ownerListIn(j)
end if
- end do
+ else
+ ownerListOut(j) = ownerListIn(j)
+ end if
+ end do
- if (numToSend > 0) then
- allocate(sendListPtr % next)
- sendListPtr => sendListPtr % next
- sendListPtr % procID = currentProc
- sendListPtr % blockID = currentProc ! Currently, we have just one block per task, so blockID = procID
- sendListPtr % nlist = numToSend
- allocate(sendListPtr % list(numToSend))
- nullify(sendListPtr % next)
+ fieldCursor => ownedListField
+ do while (associated(fieldCursor))
+ iBlock = fieldCursor % block % localBlockID + 1
+
+ if (numToSend(iBlock) > 0) then
+ ! Find end of send list
+ if(.not.associated(fieldCursor % sendList % halos(haloLayer) % exchList)) then
+ allocate(fieldCursor % sendList % halos(haloLayer) % exchList)
+ exchListPtr => fieldCursor % sendList % halos(haloLayer) % exchList
+ nullify(exchListPtr % next)
+ else
+ exchListPtr => fieldCursor % sendList % halos(haloLayer) % exchList
+ exchListPtr2 => fieldCursor % sendList % halos(haloLayer) % exchList % next
+ do while(associated(exchListPtr2))
+ exchListPtr => exchListPtr % next
+ exchListPtr2 => exchListPtr % next
+ end do
+
+ allocate(exchListPtr % next)
+ exchListPtr => exchListPtr % next
+ nullify(exchListPtr % next)
+ end if
+
+ exchListPtr % endPointID = currentProc
+ exchListPtr % nlist = numToSend(iBlock)
+ allocate(exchListPtr % srcList(numToSend(iBlock)))
+ allocate(exchListPtr % destList(numToSend(iBlock)))
+ exchListPtr % srcList = -1
+ exchListPtr % destList = -1
+
kk = 1
do j=1,nOwnedList
- if (recipientList(j) /= -1) then
- sendListPtr % list(recipientList(j)) = j
+ if (recipientList(1,j) /= -1) then
+ if(ownedBlock(j) == fieldCursor % block % localBlockID) then
+ exchListPtr % srcList(recipientList(1,j)) = ownedListIndex(j)
+ exchListPtr % destList(recipientList(1,j)) = recipientList(2,j)
kk = kk + 1
- end if
+ end if
+ end if
end do
- end if
+ end if
- nMesgSend = nMesgRecv
- call MPI_Irecv(nMesgRecv, 1, MPI_INTEGERKIND, recvNeighbor, i, dminfo % comm, mpi_rreq, mpi_ierr)
- call MPI_Isend(nMesgSend, 1, MPI_INTEGERKIND, sendNeighbor, i, dminfo % comm, mpi_sreq, mpi_ierr)
- call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr)
- call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr)
- call MPI_Irecv(ownerListIn, nMesgRecv, MPI_INTEGERKIND, recvNeighbor, i, dminfo % comm, mpi_rreq, mpi_ierr)
- call MPI_Isend(ownerListOut, nMesgSend, MPI_INTEGERKIND, sendNeighbor, i, dminfo % comm, mpi_sreq, mpi_ierr)
- call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr)
- call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr)
+ fieldCursor => fieldCursor % next
+ end do
+
+ nMesgSend = nMesgRecv
+ call MPI_Irecv(nMesgRecv, 1, MPI_INTEGERKIND, recvNeighbor, recvNeighbor, dminfo % comm, mpi_rreq, mpi_ierr)
+ call MPI_Isend(nMesgSend, 1, MPI_INTEGERKIND, sendNeighbor, dminfo % my_proc_id, dminfo % comm, mpi_sreq, mpi_ierr)
+ call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr)
+ call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr)
+ call MPI_Irecv(ownerListIn, nMesgRecv, MPI_INTEGERKIND, recvNeighbor, recvNeighbor, dminfo % comm, mpi_rreq, mpi_ierr)
+ call MPI_Isend(ownerListOut, nMesgSend, MPI_INTEGERKIND, sendNeighbor, dminfo % my_proc_id, dminfo % comm, mpi_sreq, mpi_ierr)
+ call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr)
+ call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr)
end do
+ !
+ ! With our needed list returned to us, build receive lists based on which indices were
+ ! marked by other tasks
+ !
do i=0, dminfo % nprocs - 1
+ if(i == dminfo % my_proc_id) cycle
- numToRecv = 0
- do j=1,nNeededList
- if (ownerListIn(j) == -i) numToRecv = numToRecv + 1
- end do
- if (numToRecv > 0) then
- allocate(recvListPtr % next)
- recvListPtr => recvListPtr % next
- recvListPtr % procID = i
- recvListPtr % blockID = i ! Currently, we have just one block per task, so blockID = procID
- recvListPtr % nlist = numToRecv
- allocate(recvListPtr % list(numToRecv))
- nullify(recvListPtr % next)
- kk = 1
- do j=1,nNeededList
- if (ownerListIn(j) == -i) then
- recvListPtr % list(kk) = j + offset
+ numToRecv(:) = 0
+ packingOrder = 0
+
+ k = 0
+ do j=1,nUniqueNeededList
+ if (ownerListIn(j) == -i) then
+ k = k + 1
+ packingOrder(j) = k
+ end if
+ end do
+
+ fieldCursor => neededListField
+ do while (associated(fieldCursor))
+ do j = 1, fieldCursor % dimSizes(1)
+ k = mpas_binary_search(uniqueSortedNeededList, 2, 1, nUniqueNeededList, fieldCursor % array(j))
+ if(k <= nUniqueNeededList) then
+ if(ownerListIn(k) == -i) then
+ iBlock = fieldCursor % block % localBlockID + 1
+ numToRecv(iBlock) = numToRecv(iBlock) + 1
+ end if
+ end if
+ end do
+ fieldCursor => fieldCursor % next
+ end do
+
+ fieldCursor => neededListField
+ do while (associated(fieldCursor))
+ iBlock = fieldCursor % block % localBlockID + 1
+
+ if (numToRecv(iBlock) > 0) then
+ if(.not.associated(fieldCursor % recvList % halos(haloLayer) % exchList)) then
+ allocate(fieldCursor % recvList % halos(haloLayer) % exchList)
+ exchListPtr => fieldCursor % recvList % halos(haloLayer) % exchList
+ nullify(exchListPtr % next)
+ else
+ ! Find end of recv list
+ exchListPtr => fieldCursor % recvList % halos(haloLayer) % exchList
+ exchListPtr2 => fieldCursor % recvList % halos(haloLayer) % exchList % next
+ do while(associated(exchListPtr2))
+ exchListPtr => exchListPtr % next
+ exchListPtr2 => exchListPtr % next
+ end do
+
+ allocate(exchListPtr % next)
+ exchListPtr => exchListPtr % next
+ nullify(exchListPtr % next)
+ end if
+
+ exchListPtr % endPointID = i
+ exchListPtr % nlist = numToRecv(iBlock)
+ allocate(exchListPtr % srcList(exchListPtr % nList))
+ allocate(exchListPtr % destList(exchListPtr % nList))
+ exchListPtr % srcList = -1
+ exchListPtr % destList = -1
+
+ kk = 0
+ do j=1,fieldCursor % dimSizes(1)
+ k = mpas_binary_search(uniqueSortedNeededList, 2, 1, nUniqueNeededList, fieldCursor % array(j))
+ if(k <= nUniqueNeededList) then
+ if (ownerListIn(k) == -i) then
kk = kk + 1
- end if
+ exchListPtr % srcList(kk) = packingOrder(k)
+ exchListPtr % destList(kk) = j + offsetList(iBlock)
+ end if
+ end if
end do
- end if
+ end if
+ fieldCursor => fieldCursor % next
+ end do
end do
+ !
+ ! Free up memory
+ !
+ deallocate(numToSend)
+ deallocate(numToRecv)
+ deallocate(ownedList)
+ deallocate(ownedListIndex)
+ deallocate(ownedBlock)
+ deallocate(neededList)
+ deallocate(neededListIndex)
+ deallocate(neededBlock)
+ deallocate(ownedListSorted)
+ deallocate(ownedBlockSorted)
+ deallocate(recipientList)
deallocate(ownerListIn)
deallocate(ownerListOut)
+ deallocate(uniqueSortedNeededList)
+ deallocate(packingOrder)
+#endif
- sendListPtr => sendList
- sendList => sendList % next
- deallocate(sendListPtr)
+ ! Build copy lists: direct block-to-block copies between blocks owned by the same task (no MPI required)
+ allocate(numToSend(1))
+ fieldCursor => ownedListField
+ do while (associated(fieldCursor))
+ iBlock = fieldCursor % block % localBlockID + 1
+ nOwnedList = ownedLimitList(iBlock)
+ allocate(ownedListSorted(2, nOwnedList))
+ allocate(recipientList(2, nOwnedList))
- recvListPtr => recvList
- recvList => recvList % next
- deallocate(recvListPtr)
+ do i = 1, nOwnedList
+ ownedListSorted(1, i) = fieldCursor % array(i)
+ ownedListSorted(2, i) = i
+ end do
-#else
- allocate(recvList)
- recvList % procID = dminfo % my_proc_id
- recvList % blockID = dminfo % my_proc_id ! Currently, we have just one block per task, so blockID = procID
- recvList % nlist = nNeededList
- allocate(recvList % list(nNeededList))
- nullify(recvList % next)
- do j=1,nNeededList
- recvList % list(j) = j
- end do
+ call mpas_quicksort(nOwnedList, ownedListSorted)
- allocate(sendList)
- sendList % procID = dminfo % my_proc_id
- sendList % blockID = dminfo % my_proc_id ! Currently, we have just one block per task, so blockID = procID
- sendList % nlist = nOwnedList
- allocate(sendList % list(nOwnedList))
- nullify(sendList % next)
- do j=1,nOwnedList
- sendList % list(j) = j
+ fieldCursor2 => neededListField
+ do while(associated(fieldCursor2))
+ if(associated(fieldCursor, fieldCursor2)) then
+ fieldCursor2 => fieldCursor2 % next
+ cycle
+ end if
+
+ numToSend = 0
+ recipientList = -1
+
+ do i = 1, fieldCursor2 % dimSizes(1)
+ k = mpas_binary_search(ownedListSorted, 2, 1, nOwnedList, fieldCursor2 % array(i))
+ if (k <= nOwnedList) then
+ numToSend(1) = numToSend(1) + 1
+ ! recipientList(1,:) represents the needed block id
+ recipientList(1,ownedListSorted(2,k)) = fieldCursor2 % block % localBlockID
+ ! recipientList(2,:) represents the index in the buffer to place this data
+ recipientList(2,ownedListSorted(2,k)) = i
+ end if
+ end do
+
+ if(numToSend(1) > 0) then
+ if(.not.associated(fieldCursor % copyList % halos(haloLayer) % exchList)) then
+ allocate(fieldCursor % copyList % halos(haloLayer) % exchList)
+ exchListPtr => fieldCursor % copyList % halos(haloLayer) % exchList
+ nullify(exchListPtr % next)
+ else
+ ! Find end of copy list
+ exchListPtr => fieldCursor % copyList % halos(haloLayer) % exchList
+ exchListPtr2 => fieldCursor % copyList % halos(haloLayer) % exchList % next
+ do while(associated(exchListPtr2))
+ exchListPtr => exchListPtr % next
+ exchListPtr2 => exchListPtr % next
+ end do
+
+ allocate(exchListPtr % next)
+ exchListPtr => exchListPtr % next
+ nullify(exchListPtr % next)
+ end if
+
+ exchListPtr % endPointID = fieldCursor2 % block % localBlockID
+ exchListPtr % nlist = numToSend(1)
+ allocate(exchListPtr % srcList(numToSend(1)))
+ allocate(exchListPtr % destList(numToSend(1)))
+ exchListPtr % srcList = -1
+ exchListPtr % destList = -1
+
+ kk = 1
+ do j=1,nOwnedList
+ if(recipientList(1,j) == fieldCursor2 % block % localBlockID) then
+ exchListPtr % srcList(kk) = j
+ exchListPtr % destList(kk) = recipientList(2,j) + offsetList(fieldCursor2 % block % localBlockID+1)
+ kk = kk + 1
+ end if
+ end do
+ end if
+ fieldCursor2 => fieldCursor2 % next
+ end do
+
+ deallocate(recipientList)
+ deallocate(ownedListSorted)
+ fieldCursor => fieldCursor % next
end do
-#endif
+ deallocate(numToSend)
+ deallocate(offsetList)
- end subroutine mpas_dmpar_get_owner_list
+ end subroutine mpas_dmpar_get_exch_list!}}}
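
A hedged usage sketch of the new interface (the wrapper and field names below are illustrative, not from this commit): the halo layer is now passed explicitly, and the resulting send, receive, and copy exchange lists are stored on the fields themselves rather than returned through arguments.

subroutine build_cell_halo_lists(ownedCells, neededCells)
   use mpas_dmpar
   implicit none
   type (field1dInteger), pointer :: ownedCells    ! global IDs owned, per block
   type (field1dInteger), pointer :: neededCells   ! global IDs needed, per block

   ! Populates ownedCells % sendList, ownedCells % copyList, and
   ! neededCells % recvList for halo layer 1; the optional offsetListField
   ! and ownedLimitField arguments are omitted here.
   call mpas_dmpar_get_exch_list(1, ownedCells, neededCells)
end subroutine build_cell_halo_lists
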
- subroutine mpas_dmpar_alltoall_field1d_integer(dminfo, arrayIn, arrayOut, nOwnedList, nNeededList, sendList, recvList)
+ subroutine mpas_dmpar_alltoall_field1d_integer(fieldIn, fieldout, haloLayersIn)!{{{
- implicit none
+ implicit none
- type (dm_info), intent(in) :: dminfo
- integer, dimension(*), intent(in) :: arrayIn
- integer, dimension(*), intent(inout) :: arrayOut
- integer, intent(in) :: nOwnedList, nNeededList
- type (exchange_list), pointer :: sendList, recvList
+ type (field1dInteger), pointer :: fieldIn
+ type (field1dInteger), pointer :: fieldOut
+ integer, dimension(:), pointer, optional :: haloLayersIn
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer :: i
+ type (field1dInteger), pointer :: fieldInPtr, fieldOutPtr
+ type (mpas_exchange_list), pointer :: exchListPtr, exchListPtr2
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
+ type (dm_info), pointer :: dminfo
+ logical :: comm_list_found
+
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: nAdded, bufferOffset
+ integer :: mpi_ierr
+ integer :: iHalo, iBuffer, i
+ integer :: nHaloLayers
+ integer, dimension(:), pointer :: haloLayers
+
+ dminfo => fieldIn % block % domain % dminfo
+
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
+ else
+ nHaloLayers = size(fieldIn % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
+ end if
+
#ifdef _MPI
+ nullify(sendList)
+ nullify(recvList)
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID == dminfo % my_proc_id) exit
- sendListPtr => sendListPtr % next
- end do
+ ! Set up receive lists.
+ do iHalo = 1, nHaloLayers
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(recvList)) then
+ allocate(recvList)
+ nullify(recvList % next)
+ commListPtr => recvList
+ else
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID == dminfo % my_proc_id) exit
- recvListPtr => recvListPtr % next
- end do
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = 0
+ end if
- if (associated(recvListPtr) .and. associated(sendListPtr)) then
- do i=1,recvListPtr % nlist
- arrayOut(recvListPtr % list(i)) = arrayIn(sendListPtr % list(i))
+ exchListPtr => exchListPtr % next
end do
- end if
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- allocate(recvListPtr % ibuffer(recvListPtr % nlist))
- call MPI_Irecv(recvListPtr % ibuffer, recvListPtr % nlist, MPI_INTEGERKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
- end do
+ ! Determine size of receive list buffers.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = bufferOffset ! total buffer size accumulated across all halo layers
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- allocate(sendListPtr % ibuffer(sendListPtr % nlist))
- call mpas_pack_send_buf1d_integer(nOwnedList, arrayIn, sendListPtr, 1, sendListPtr % nlist, &
- sendListPtr % ibuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % ibuffer, sendListPtr % nlist, MPI_INTEGERKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
- end do
+ commListPtr => commListPtr % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- call mpas_unpack_recv_buf1d_integer(nNeededList, arrayOut, recvListPtr, 1, recvListPtr % nlist, &
- recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % ibuffer)
- end if
- recvListPtr => recvListPtr % next
- end do
+ ! Allocate buffers for receives, and initiate mpi_irecv calls.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % ibuffer(commListPtr % nList))
+ nullify(commListPtr % rbuffer)
+ commListPtr % ibuffer = 0
+ call MPI_Irecv(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % ibuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
+ ! Set up send lists, and determine the size of their buffers.
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(sendList)) then
+ allocate(sendList)
+ nullify(sendList % next)
+ commListPtr => sendList
+ else
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
-#else
- if (nOwnedList /= nNeededList) then
- write(0,*) 'Error in dmpar_alltoall_field: For non-dmpar, ', &
- 'arrayIn and arrayOut dims must match.'
- call mpas_dmpar_abort(dminfo)
- else
- arrayOut(1:nNeededList) = arrayIn(1:nOwnedList)
- end if
+ ! Allocate send buffers, copy data into them, and initiate mpi_isend calls
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % ibuffer(commListPtr % nList))
+ nullify(commListPtr % rbuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ iBuffer = exchListPtr % destList(i) + bufferOffset
+ commListPtr % ibuffer(iBuffer) = fieldInPtr % array(exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+
+ call MPI_Isend(commListPtr % ibuffer, commListPtr % nlist, MPI_INTEGERKIND, &
+ commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
+
+ commListPtr => commListPtr % next
+ end do
+
+#endif
+
+ ! Handle local copies; without MPI, these are the only exchanges performed.
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldOutPtr % array(exchListPtr % destList(i)) = fieldInPtr % array(exchListPtr % srcList(i))
+ end do
+ end if
+ fieldOutPtr => fieldOutPtr % next
+ end do
+
+ exchListPtr => exchListPtr % next
+ end do
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
+
+#ifdef _MPI
+ ! Wait for MPI_Irecv's to finish, and unpack data.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ iBuffer = exchListPtr % srcList(i) + bufferOffset
+ fieldOutPtr % array(exchListPtr % destList(i)) = commListPtr % ibuffer(iBuffer)
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! Wait for MPI_Isend's to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
+
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
#endif
- end subroutine mpas_dmpar_alltoall_field1d_integer
+ deallocate(haloLayers)
+ end subroutine mpas_dmpar_alltoall_field1d_integer!}}}
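
A short sketch of calling the rewritten all-to-all (illustrative, assuming the surrounding module; haloLayersIn is optional and defaults to all halo layers when omitted):

subroutine exchange_second_halo(fieldIn, fieldOut)
   use mpas_dmpar
   implicit none
   type (field1dInteger), pointer :: fieldIn, fieldOut
   integer, dimension(:), pointer :: halos

   allocate(halos(1))
   halos(1) = 2    ! exchange only halo layer 2
   call mpas_dmpar_alltoall_field1d_integer(fieldIn, fieldOut, halos)
   deallocate(halos)
end subroutine exchange_second_halo
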
- subroutine mpas_dmpar_alltoall_field2d_integer(dminfo, arrayIn, arrayOut, dim1, nOwnedList, nNeededList, sendList, recvList)
+ subroutine mpas_dmpar_alltoall_field2d_integer(fieldIn, fieldout, haloLayersIn)!{{{
- implicit none
+ implicit none
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1, nOwnedList, nNeededList
- integer, dimension(dim1,*), intent(in) :: arrayIn
- integer, dimension(dim1,*), intent(inout) :: arrayOut
- type (exchange_list), pointer :: sendList, recvList
+ type (field2dInteger), pointer :: fieldIn
+ type (field2dInteger), pointer :: fieldOut
+ integer, dimension(:), pointer, optional :: haloLayersIn
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer :: i, d2
+ type (field2dInteger), pointer :: fieldInPtr, fieldOutPtr
+ type (mpas_exchange_list), pointer :: exchListPtr, exchListPtr2
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
+ type (dm_info), pointer :: dminfo
+ logical :: comm_list_found
+
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: nAdded, bufferOffset
+ integer :: mpi_ierr
+ integer :: iHalo, iBuffer, i, j
+ integer :: nHaloLayers
+ integer, dimension(:), pointer :: haloLayers
+
+ dminfo => fieldIn % block % domain % dminfo
+
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
+ else
+ nHaloLayers = size(fieldIn % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
+ end if
+
#ifdef _MPI
+ nullify(sendList)
+ nullify(recvList)
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID == dminfo % my_proc_id) exit
- sendListPtr => sendListPtr % next
- end do
+ ! Set up receive lists
+ do iHalo = 1, nHaloLayers
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(recvList)) then
+ allocate(recvList)
+ nullify(recvList % next)
+ commListPtr => recvList
+ else
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID == dminfo % my_proc_id) exit
- recvListPtr => recvListPtr % next
- end do
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+
+ commListPtr % procID = exchListPtr % endPointID
+ end if
- if (associated(recvListPtr) .and. associated(sendListPtr)) then
- do i=1,recvListPtr % nlist
- arrayOut(:,recvListPtr % list(i)) = arrayIn(:,sendListPtr % list(i))
+ exchListPtr => exchListPtr % next
end do
- end if
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * recvListPtr % nlist
- allocate(recvListPtr % ibuffer(d2))
- call MPI_Irecv(recvListPtr % ibuffer, d2, MPI_INTEGERKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
- end do
+ ! Determine size of receive list buffers.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = bufferOffset
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * sendListPtr % nlist
- allocate(sendListPtr % ibuffer(d2))
- call mpas_pack_send_buf2d_integer(1, dim1, nOwnedList, arrayIn, sendListPtr, 1, d2, &
- sendListPtr % ibuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % ibuffer, d2, MPI_INTEGERKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
- end do
+ commListPtr => commListPtr % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d2 = dim1 * recvListPtr % nlist
- call mpas_unpack_recv_buf2d_integer(1, dim1, nNeededList, arrayOut, recvListPtr, 1, d2, &
- recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % ibuffer)
- end if
- recvListPtr => recvListPtr % next
- end do
+ ! Allocate buffers for receives, and initiate mpi_irecv calls.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % ibuffer(commListPtr % nList))
+ nullify(commListPtr % rbuffer)
+ call MPI_Irecv(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % ibuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
+ ! Set up send lists, and determine the size of their buffers.
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldInPtr % dimSizes(1)
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(sendList)) then
+ allocate(sendList)
+ nullify(sendList % next)
+ commListPtr => sendList
+ else
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldInPtr % dimSizes(1)
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
-#else
- if (nOwnedList /= nNeededList) then
- write(0,*) 'Error in dmpar_alltoall_field: For non-dmpar, ', &
- 'arrayIn and arrayOut dims must match.'
- call mpas_dmpar_abort(dminfo)
- else
- arrayOut(:,1:nNeededList) = arrayIn(:,1:nOwnedList)
- end if
+ ! Allocate send buffers, copy data into them, and initiate mpi_isend calls
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % ibuffer(commListPtr % nList))
+ nullify(commListPtr % rbuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldInPtr % dimSizes(1)
+ iBuffer = (exchListPtr % destList(i)-1) * fieldInPtr % dimSizes(1) + j + bufferOffset
+ commListPtr % ibuffer(iBuffer) = fieldInPtr % array(j, exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
+ end do
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+
+ call MPI_Isend(commListPtr % ibuffer, commListPtr % nlist, MPI_INTEGERKIND, &
+ commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
+
+ commListPtr => commListPtr % next
+ end do
+#endif
+
+ ! Handle local copies; without MPI, these are the only exchanges performed.
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldOutPtr % array(:, exchListPtr % destList(i)) = fieldInPtr % array(:, exchListPtr % srcList(i))
+ end do
+ end if
+ fieldOutPtr => fieldOutPtr % next
+ end do
+
+ exchListPtr => exchListPtr % next
+ end do
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
+
+#ifdef _MPI
+ ! Wait for MPI_Irecv's to finish, and unpack data.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldOutPtr % dimSizes(1)
+ iBuffer = (exchListPtr % srcList(i)-1) * fieldOutPtr % dimSizes(1) + j + bufferOffset
+ fieldOutPtr % array(j, exchListPtr % destList(i)) = commListPtr % ibuffer(iBuffer)
+ end do
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! Wait for MPI_Isend's to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
+
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
#endif
- end subroutine mpas_dmpar_alltoall_field2d_integer
+ deallocate(haloLayers)
+ end subroutine mpas_dmpar_alltoall_field2d_integer!}}}
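
The 2-D variant packs whole columns: for each exchanged element, the dimSizes(1) leading values are linearized into the shared buffer. A minimal standalone sketch of that index arithmetic, with illustrative values:

program pack_index_2d_sketch
   implicit none
   integer :: dim1, dest, j, bufferOffset, iBuffer
   dim1 = 5; dest = 3; bufferOffset = 0
   do j = 1, dim1
      ! same linearization as the 2-D send-buffer pack loop above
      iBuffer = (dest - 1) * dim1 + j + bufferOffset
      print *, 'row', j, 'of column', dest, 'maps to buffer slot', iBuffer
   end do
end program pack_index_2d_sketch
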
- subroutine mpas_dmpar_alltoall_field1d_real(dminfo, arrayIn, arrayOut, nOwnedList, nNeededList, sendList, recvList)
+ subroutine mpas_dmpar_alltoall_field3d_integer(fieldIn, fieldout, haloLayersIn)!{{{
- implicit none
+ implicit none
- type (dm_info), intent(in) :: dminfo
- real (kind=RKIND), dimension(*), intent(in) :: arrayIn
- real (kind=RKIND), dimension(*), intent(inout) :: arrayOut
- integer, intent(in) :: nOwnedList, nNeededList
- type (exchange_list), pointer :: sendList, recvList
+ type (field3dInteger), pointer :: fieldIn
+ type (field3dInteger), pointer :: fieldOut
+ integer, dimension(:), pointer, optional :: haloLayersIn
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer :: i
+ type (field3dInteger), pointer :: fieldInPtr, fieldOutPtr
+ type (mpas_exchange_list), pointer :: exchListPtr, exchListPtr2
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
+ type (dm_info), pointer :: dminfo
+ logical :: comm_list_found
+
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: nAdded, bufferOffset
+ integer :: mpi_ierr
+ integer :: iHalo, iBuffer, i, j, k
+ integer :: nHaloLayers
+ integer, dimension(:), pointer :: haloLayers
+
+ dminfo => fieldIn % block % domain % dminfo
+
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
+ else
+ nHaloLayers = size(fieldIn % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
+ end if
+
#ifdef _MPI
+ nullify(sendList)
+ nullify(recvList)
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID == dminfo % my_proc_id) exit
- sendListPtr => sendListPtr % next
- end do
+ ! Set up receive lists.
+ do iHalo = 1, nHaloLayers
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2)
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(recvList)) then
+ allocate(recvList)
+ nullify(recvList % next)
+ commListPtr => recvList
+ else
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID == dminfo % my_proc_id) exit
- recvListPtr => recvListPtr % next
- end do
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2)
+ end if
- if (associated(recvListPtr) .and. associated(sendListPtr)) then
- do i=1,recvListPtr % nlist
- arrayOut(recvListPtr % list(i)) = arrayIn(sendListPtr % list(i))
+ exchListPtr => exchListPtr % next
end do
- end if
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- allocate(recvListPtr % rbuffer(recvListPtr % nlist))
- call MPI_Irecv(recvListPtr % rbuffer, recvListPtr % nlist, MPI_REALKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
- end do
+ ! Determine size of receive list buffers
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = bufferOffset ! total buffer size accumulated across all halo layers
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- allocate(sendListPtr % rbuffer(sendListPtr % nlist))
- call mpas_pack_send_buf1d_real(nOwnedList, arrayIn, sendListPtr, 1, sendListPtr % nlist, &
- sendListPtr % rbuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % rbuffer, sendListPtr % nlist, MPI_REALKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
- end do
+ commListPtr => commListPtr % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- call mpas_unpack_recv_buf1d_real(nNeededList, arrayOut, recvListPtr, 1, recvListPtr % nlist, &
- recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % rbuffer)
- end if
- recvListPtr => recvListPtr % next
- end do
+ ! Allocate buffers for receives, and initiate mpi_irecv calls.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % ibuffer(commListPtr % nList))
+ nullify(commListPtr % rbuffer)
+ call MPI_Irecv(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % rbuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
+ ! Set up send lists, and determine the size of their buffers.
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2)
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(sendList)) then
+ allocate(sendList)
+ nullify(sendList % next)
+ commListPtr => sendList
+ else
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2)
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
-#else
- if (nOwnedList /= nNeededList) then
- write(0,*) 'Error in dmpar_alltoall_field: For non-dmpar, ', &
- 'arrayIn and arrayOut dims must match.'
- call mpas_dmpar_abort(dminfo)
- else
- arrayOut(1:nNeededList) = arrayIn(1:nOwnedList)
- end if
+ ! Allocate send buffers, copy data into them, and initiate mpi_isend calls
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % ibuffer(commListPtr % nList))
+ nullify(commListPtr % rbuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldInPtr % dimSizes(2)
+ do k = 1, fieldInPtr % dimSizes(1)
+ iBuffer = (exchListPtr % destList(i)-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) + (j-1) * fieldInPtr % dimSizes(1) + k + bufferOffset
+ commListPtr % ibuffer(iBuffer) = fieldInPtr % array(k, j, exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
+ end do
+ end do
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+
+ call MPI_Isend(commListPtr % ibuffer, commListPtr % nlist, MPI_INTEGERKIND, &
+ commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
+
+ commListPtr => commListPtr % next
+ end do
+
+#endif
+
+ ! Handle local copies; without MPI, these are the only exchanges performed.
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldOutPtr % array(:, :, exchListPtr % destList(i)) = fieldInPtr % array(:, :, exchListPtr % srcList(i))
+ end do
+ end if
+ fieldOutPtr => fieldOutPtr % next
+ end do
+
+ exchListPtr => exchListPtr % next
+ end do
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
+
+#ifdef _MPI
+ ! Wait for MPI_Irecv's to finish, and unpack data.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldOutPtr % dimSizes(2)
+ do k = 1, fieldOutPtr % dimSizes(1)
+ iBuffer = (exchListPtr % srcList(i)-1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(1) + (j-1) * fieldOutPtr % dimSizes(1) + k + bufferOffset
+ fieldOutPtr % array(k, j, exchListPtr % destList(i)) = commListPtr % ibuffer(iBuffer)
+ end do
+ end do
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! Wait for MPI_Isend's to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
+
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
#endif
- end subroutine mpas_dmpar_alltoall_field1d_real
+ deallocate(haloLayers)
+ end subroutine mpas_dmpar_alltoall_field3d_integer!}}}
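
Across all of these routines, each remote task gets one contiguous buffer with the slices for successive halo layers packed back to back; bufferOffset marks where each slice begins. A minimal sketch of that layout, with illustrative slice sizes:

program halo_buffer_layout_sketch
   implicit none
   integer, dimension(2) :: perHaloCount = (/ 4, 7 /)   ! illustrative slice sizes
   integer :: iHalo, bufferOffset
   bufferOffset = 0
   do iHalo = 1, 2
      print *, 'halo', iHalo, 'occupies buffer slots', bufferOffset + 1, &
               'through', bufferOffset + perHaloCount(iHalo)
      bufferOffset = bufferOffset + perHaloCount(iHalo)
   end do
end program halo_buffer_layout_sketch
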
- subroutine mpas_dmpar_alltoall_field2d_real(dminfo, arrayIn, arrayOut, dim1, nOwnedList, nNeededList, sendList, recvList)
+ subroutine mpas_dmpar_alltoall_field1d_real(fieldIn, fieldout, haloLayersIn)!{{{
- implicit none
+ implicit none
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1, nOwnedList, nNeededList
- real (kind=RKIND), dimension(dim1,*), intent(in) :: arrayIn
- real (kind=RKIND), dimension(dim1,*), intent(inout) :: arrayOut
- type (exchange_list), pointer :: sendList, recvList
+ type (field1dReal), pointer :: fieldIn
+ type (field1dReal), pointer :: fieldOut
+ integer, dimension(:), pointer, optional :: haloLayersIn
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer :: i, d2
+ type (field1dReal), pointer :: fieldInPtr, fieldOutPtr
+ type (mpas_exchange_list), pointer :: exchListPtr, exchListPtr2
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
+ type (dm_info), pointer :: dminfo
+ logical :: comm_list_found
+
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: nAdded, bufferOffset
+ integer :: mpi_ierr
+ integer :: iHalo, iBuffer, i
+ integer :: nHaloLayers
+ integer, dimension(:), pointer :: haloLayers
+
+ dminfo => fieldIn % block % domain % dminfo
+
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
+ else
+ nHaloLayers = size(fieldIn % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
+ end if
+
#ifdef _MPI
+ nullify(sendList)
+ nullify(recvList)
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID == dminfo % my_proc_id) exit
- sendListPtr => sendListPtr % next
- end do
+ ! Set up receive lists.
+ do iHalo = 1, nHaloLayers
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(recvList)) then
+ allocate(recvList)
+ nullify(recvList % next)
+ commListPtr => recvList
+ else
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID == dminfo % my_proc_id) exit
- recvListPtr => recvListPtr % next
- end do
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+
+ commListPtr % procID = exchListPtr % endPointID
+ end if
- if (associated(recvListPtr) .and. associated(sendListPtr)) then
- do i=1,recvListPtr % nlist
- arrayOut(:,recvListPtr % list(i)) = arrayIn(:,sendListPtr % list(i))
+ exchListPtr => exchListPtr % next
end do
- end if
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * recvListPtr % nlist
- allocate(recvListPtr % rbuffer(d2))
- call MPI_Irecv(recvListPtr % rbuffer, d2, MPI_REALKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
- end do
+ ! Determine size of receive list buffers
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = bufferOffset
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * sendListPtr % nlist
- allocate(sendListPtr % rbuffer(d2))
- call mpas_pack_send_buf2d_real(1, dim1, nOwnedList, arrayIn, sendListPtr, 1, d2, &
- sendListPtr % rbuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % rbuffer, d2, MPI_REALKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
- end do
+ commListPtr => commListPtr % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d2 = dim1 * recvListPtr % nlist
- call mpas_unpack_recv_buf2d_real(1, dim1, nNeededList, arrayOut, recvListPtr, 1, d2, &
- recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % rbuffer)
- end if
- recvListPtr => recvListPtr % next
- end do
+ ! Allocate buffers for receives, and initiate mpi_irecv calls.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % rbuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
+ ! Setup send lists, and determine the size of their buffers.
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(sendList)) then
+ allocate(sendList)
+ nullify(sendList % next)
+ commListPtr => sendList
+ else
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
-#else
- if (nOwnedList /= nNeededList) then
- write(0,*) 'Error in dmpar_alltoall_field: For non-dmpar, ', &
- 'arrayIn and arrayOut dims must match.'
- call mpas_dmpar_abort(dminfo)
- else
- arrayOut(:,1:nNeededList) = arrayIn(:,1:nOwnedList)
- end if
-#endif
+ ! Allocate send buffers, copy data into them, and initiate mpi_isend calls.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ iBuffer = exchListPtr % destList(i) + bufferOffset
+ commListPtr % rbuffer(iBuffer) = fieldInPtr % array(exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
- end subroutine mpas_dmpar_alltoall_field2d_real
+ call MPI_Isend(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, &
+ commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
+
+#endif
+
+ ! Handle local copies. If MPI is off, then only local copies are performed.
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldOutPtr % array(exchListPtr % destList(i)) = fieldInPtr % array(exchListPtr % srcList(i))
+ end do
+ end if
+ fieldOutPtr => fieldOutPtr % next
+ end do
- subroutine mpas_dmpar_alltoall_field3d_real(dminfo, arrayIn, arrayOut, dim1, dim2, nOwnedList, nNeededList, sendList, recvList)
+ exchListPtr => exchListPtr % next
+ end do
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
- implicit none
+#ifdef _MPI
+ ! Wait for MPI_Irecv's to finish, and unpack data.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1, dim2, nOwnedList, nNeededList
- real (kind=RKIND), dimension(dim1,dim2,*), intent(in) :: arrayIn
- real (kind=RKIND), dimension(dim1,dim2,*), intent(inout) :: arrayOut
- type (exchange_list), pointer :: sendList, recvList
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ iBuffer = exchListPtr % srcList(i) + bufferOffset
+ fieldOutPtr % array(exchListPtr % destList(i)) = commListPtr % rbuffer(iBuffer)
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer :: i, d3
+ commListPtr => commListPtr % next
+ end do
+ ! Wait for MPI_Isend's to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
+
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
+#endif
+
+ deallocate(haloLayers)
+
+ end subroutine mpas_dmpar_alltoall_field1d_real!}}}
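! A minimal usage sketch for the field-based alltoall above; srcField and
! dstField are hypothetical field1dReal lists whose block, sendList,
! recvList, and copyList members have already been built:
!
!    integer, dimension(:), pointer :: halos
!
!    allocate(halos(1))
!    halos(1) = 1                                        ! first halo layer only
!    call mpas_dmpar_alltoall_field1d_real(srcField, dstField, halos)
!    deallocate(halos)
!
!    call mpas_dmpar_alltoall_field1d_real(srcField, dstField)  ! every layer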
+
+ subroutine mpas_dmpar_alltoall_field2d_real(fieldIn, fieldOut, haloLayersIn)!{{{
+
+ implicit none
+
+ type (field2dReal), pointer :: fieldIn
+ type (field2dReal), pointer :: fieldOut
+ integer, dimension(:), pointer, optional :: haloLayersIn
+
+ type (field2dReal), pointer :: fieldInPtr, fieldOutPtr
+ type (mpas_exchange_list), pointer :: exchListPtr, exchListPtr2
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
+ type (dm_info), pointer :: dminfo
+
+ logical :: comm_list_found
+
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: nAdded, bufferOffset
+ integer :: mpi_ierr
+ integer :: iHalo, iBuffer, i, j
+ integer :: nHaloLayers
+ integer, dimension(:), pointer :: haloLayers
+
+ dminfo => fieldIn % block % domain % dminfo
+
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
+ else
+ nHaloLayers = size(fieldIn % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
+ end if
+
#ifdef _MPI
+ nullify(sendList)
+ nullify(recvList)
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID == dminfo % my_proc_id) exit
- sendListPtr => sendListPtr % next
- end do
+ ! Setup receive lists.
+ do iHalo = 1, nHaloLayers
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(recvList)) then
+ allocate(recvList)
+ nullify(recvList % next)
+ commListPtr => recvList
+ else
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID == dminfo % my_proc_id) exit
- recvListPtr => recvListPtr % next
- end do
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+
+ commListPtr % procID = exchListPtr % endPointID
+ end if
- if (associated(recvListPtr) .and. associated(sendListPtr)) then
- do i=1,recvListPtr % nlist
- arrayOut(:,:,recvListPtr % list(i)) = arrayIn(:,:,sendListPtr % list(i))
+ exchListPtr => exchListPtr % next
end do
- end if
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- d3 = dim1 * dim2 * recvListPtr % nlist
- allocate(recvListPtr % rbuffer(d3))
- call MPI_Irecv(recvListPtr % rbuffer, d3, MPI_REALKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
- end do
+ ! Determine size of receive list buffers.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = bufferOffset
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- d3 = dim1 * dim2 * sendListPtr % nlist
- allocate(sendListPtr % rbuffer(d3))
- call mpas_pack_send_buf3d_real(1, dim1, 1, dim2, nOwnedList, arrayIn, sendListPtr, 1, d3, &
- sendListPtr % rbuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % rbuffer, d3, MPI_REALKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
- end do
+ commListPtr => commListPtr % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d3 = dim1 * dim2 * recvListPtr % nlist
- call mpas_unpack_recv_buf3d_real(1, dim1, 1, dim2, nNeededList, arrayOut, recvListPtr, 1, d3, &
- recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % rbuffer)
- end if
- recvListPtr => recvListPtr % next
- end do
+ ! Allocate buffers for receives, and initiate mpi_irecv calls.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % rbuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
+ ! Setup send lists, and determine the size of their buffers.
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldInPtr % dimSizes(1)
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(sendList)) then
+ allocate(sendList)
+ nullify(sendList % next)
+ commListPtr => sendList
+ else
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldInPtr % dimSizes(1)
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
-#else
- if (nOwnedList /= nNeededList) then
- write(0,*) 'Error in dmpar_alltoall_field: For non-dmpar, ', &
- 'arrayIn and arrayOut dims must match.'
- call mpas_dmpar_abort(dminfo)
- else
- arrayOut(:,:,1:nNeededList) = arrayIn(:,:,1:nOwnedList)
- end if
-#endif
+ ! Allocate send buffers, copy data into them, and initiate mpi_isend calls.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldInPtr % dimSizes(1)
+ iBuffer = (exchListPtr % destList(i)-1) * fieldInPtr % dimSizes(1) + j + bufferOffset
+ commListPtr % rbuffer(iBuffer) = fieldInPtr % array(j, exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
+ end do
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
- end subroutine mpas_dmpar_alltoall_field3d_real
+ call MPI_Isend(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, &
+ commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
-
- subroutine mpas_pack_send_buf1d_integer(nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
+ commListPtr => commListPtr % next
+ end do
- implicit none
+#endif
- integer, intent(in) :: nField, nBuffer, startPackIdx
- integer, dimension(*), intent(in) :: field
- type (exchange_list), intent(in) :: sendList
- integer, dimension(nBuffer), intent(out) :: buffer
- integer, intent(inout) :: nPacked, lastPackedIdx
+ ! Handle local copies. If MPI is off, then only local copies are performed.
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldOutPtr % array(:, exchListPtr % destList(i)) = fieldInPtr % array(:, exchListPtr % srcList(i))
+ end do
+ end if
+ fieldOutPtr => fieldOutPtr % next
+ end do
+
+ exchListPtr => exchListPtr % next
+ end do
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
- integer :: i
+#ifdef _MPI
+ ! Wait for MPI_Irecv's to finish, and unpack data.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- nPacked = 0
- do i=startPackIdx, sendList % nlist
- nPacked = nPacked + 1
- if (nPacked > nBuffer) then
- nPacked = nPacked - 1
- lastPackedIdx = i - 1
- return
- end if
- buffer(nPacked) = field(sendList % list(i))
- end do
- lastPackedIdx = sendList % nlist
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldOutPtr % dimSizes(1)
+ iBuffer = (exchListPtr % srcList(i)-1) * fieldOutPtr % dimSizes(1) + j + bufferOffset
+ fieldOutPtr % array(j, exchListPtr % destList(i)) = commListPtr % rbuffer(iBuffer)
+ end do
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
- end subroutine mpas_pack_send_buf1d_integer
+ commListPtr => commListPtr % next
+ end do
+ ! Wait for MPI_Isend's to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
- subroutine mpas_pack_send_buf2d_integer(ds, de, nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
+#endif
- implicit none
+ deallocate(haloLayers)
- integer, intent(in) :: ds, de, nField, nBuffer, startPackIdx
- integer, dimension(ds:de,*), intent(in) :: field
- type (exchange_list), intent(in) :: sendList
- integer, dimension(nBuffer), intent(out) :: buffer
- integer, intent(inout) :: nPacked, lastPackedIdx
+ end subroutine mpas_dmpar_alltoall_field2d_real!}}}
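! The 2d variant flattens each (dimSizes(1), nList) slab column-major and
! shifts it past the space used by earlier halo layers; the pack and
! unpack loops above both follow this index map (a sketch, same names):
!
!    ! element (j, destList(i)) of the current halo layer lands at
!    iBuffer = (exchListPtr % destList(i) - 1) * fieldInPtr % dimSizes(1) + j + bufferOffset
!    ! with bufferOffset = total slab size of the halo layers packed so far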
- integer :: i, n
+ subroutine mpas_dmpar_alltoall_field3d_real(fieldIn, fieldOut, haloLayersIn)!{{{
- n = de-ds+1
+ implicit none
- if (n > nBuffer) then
- write(0,*) 'packSendBuf2dInteger: Not enough space in buffer', &
- ' to fit a single slice.'
- return
- end if
+ type (field3dReal), pointer :: fieldIn
+ type (field3dReal), pointer :: fieldOut
+ integer, dimension(:), pointer, optional :: haloLayersIn
- nPacked = 0
- do i=startPackIdx, sendList % nlist
- nPacked = nPacked + n
- if (nPacked > nBuffer) then
- nPacked = nPacked - n
- lastPackedIdx = i - 1
- return
- end if
- buffer(nPacked-n+1:nPacked) = field(ds:de,sendList % list(i))
- end do
- lastPackedIdx = sendList % nlist
+ type (field3dReal), pointer :: fieldInPtr, fieldOutPtr
+ type (mpas_exchange_list), pointer :: exchListPtr, exchListPtr2
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
+ type (dm_info), pointer :: dminfo
- end subroutine mpas_pack_send_buf2d_integer
+ logical :: comm_list_found
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: nAdded, bufferOffset
+ integer :: mpi_ierr
+ integer :: iHalo, iBuffer, i, j, k
+ integer :: nHaloLayers
+ integer, dimension(:), pointer :: haloLayers
- subroutine mpas_pack_send_buf3d_integer(d1s, d1e, d2s, d2e, nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
+ dminfo => fieldIn % block % domain % dminfo
- implicit none
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
+ else
+ nHaloLayers = size(fieldIn % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
+ end if
- integer, intent(in) :: d1s, d1e, d2s, d2e, nField, nBuffer, startPackIdx
- integer, dimension(d1s:d1e,d2s:d2e,*), intent(in) :: field
- type (exchange_list), intent(in) :: sendList
- integer, dimension(nBuffer), intent(out) :: buffer
- integer, intent(inout) :: nPacked, lastPackedIdx
+#ifdef _MPI
+ nullify(sendList)
+ nullify(recvList)
- integer :: i, j, k, n
+ ! Setup receive lists.
+ do iHalo = 1, nHaloLayers
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2)
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(recvList)) then
+ allocate(recvList)
+ nullify(recvList % next)
+ commListPtr => recvList
+ else
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
- n = (d1e-d1s+1) * (d2e-d2s+1)
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2)
+ end if
- if (n > nBuffer) then
- write(0,*) 'packSendBuf3dInteger: Not enough space in buffer', &
- ' to fit a single slice.'
- return
- end if
-
- nPacked = 0
- do i=startPackIdx, sendList % nlist
- nPacked = nPacked + n
- if (nPacked > nBuffer) then
- nPacked = nPacked - n
- lastPackedIdx = i - 1
- return
- end if
- k = nPacked-n+1
- do j=d2s,d2e
- buffer(k:k+d1e-d1s) = field(d1s:d1e,j,sendList % list(i))
- k = k + d1e-d1s+1
+ exchListPtr => exchListPtr % next
end do
- end do
- lastPackedIdx = sendList % nlist
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ end do
- end subroutine mpas_pack_send_buf3d_integer
+ ! Determine size of receive list buffers.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = bufferOffset
- subroutine mpas_pack_send_buf1d_real(nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
+ commListPtr => commListPtr % next
+ end do
- implicit none
+ ! Allocate buffers for receives, and initiate mpi_irecv calls.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
- integer, intent(in) :: nField, nBuffer, startPackIdx
- real (kind=RKIND), dimension(*), intent(in) :: field
- type (exchange_list), intent(in) :: sendList
- real (kind=RKIND), dimension(nBuffer), intent(out) :: buffer
- integer, intent(inout) :: nPacked, lastPackedIdx
+ ! Setup send lists, and determine the size of their buffers.
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2)
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(sendList)) then
+ allocate(sendList)
+ nullify(sendList % next)
+ commListPtr => sendList
+ else
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2)
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
- integer :: i
+ ! Allocate send buffers, copy data into them, and initiate mpi_isend calls.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldInPtr % dimSizes(2)
+ do k = 1, fieldInPtr % dimSizes(1)
+ iBuffer = (exchListPtr % destList(i)-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) + (j-1) * fieldInPtr % dimSizes(1) + k + bufferOffset
+ commListPtr % rbuffer(iBuffer) = fieldInPtr % array(k, j, exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
+ end do
+ end do
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
- nPacked = 0
- do i=startPackIdx, sendList % nlist
- nPacked = nPacked + 1
- if (nPacked > nBuffer) then
- nPacked = nPacked - 1
- lastPackedIdx = i - 1
- return
- end if
- buffer(nPacked) = field(sendList % list(i))
- end do
- lastPackedIdx = sendList % nlist
+ call MPI_Isend(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, &
+ commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
- end subroutine mpas_pack_send_buf1d_real
+ commListPtr => commListPtr % next
+ end do
+#endif
- subroutine mpas_pack_send_buf2d_real(ds, de, nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
+ ! Handle local copies. If MPI is off, then only local copies are performed.
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldOutPtr % array(:, :, exchListPtr % destList(i)) = fieldInPtr % array(:, :, exchListPtr % srcList(i))
+ end do
+ end if
+ fieldOutPtr => fieldOutPtr % next
+ end do
+
+ exchListPtr => exchListPtr % next
+ end do
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
- implicit none
+#ifdef _MPI
+ ! Wait for MPI_Irecv's to finish, and unpack data.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- integer, intent(in) :: ds, de, nField, nBuffer, startPackIdx
- real (kind=RKIND), dimension(ds:de,*), intent(in) :: field
- type (exchange_list), intent(in) :: sendList
- real (kind=RKIND), dimension(nBuffer), intent(out) :: buffer
- integer, intent(inout) :: nPacked, lastPackedIdx
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldOutPtr % dimSizes(2)
+ do k = 1, fieldOutPtr % dimSizes(1)
+ iBuffer = (exchListPtr % srcList(i)-1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(1) + (j-1) * fieldOutPtr % dimSizes(1) + k + bufferOffset
+ fieldOutPtr % array(k, j, exchListPtr % destList(i)) = commListPtr % rbuffer(iBuffer)
+ end do
+ end do
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
- integer :: i, n
+ commListPtr => commListPtr % next
+ end do
- n = de-ds+1
+ ! Wait for MPI_Isend's to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
- if (n > nBuffer) then
- write(0,*) 'packSendBuf2dReal: Not enough space in buffer', &
- ' to fit a single slice.'
- return
- end if
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
+#endif
- nPacked = 0
- do i=startPackIdx, sendList % nlist
- nPacked = nPacked + n
- if (nPacked > nBuffer) then
- nPacked = nPacked - n
- lastPackedIdx = i - 1
- return
- end if
- buffer(nPacked-n+1:nPacked) = field(ds:de,sendList % list(i))
- end do
- lastPackedIdx = sendList % nlist
+ deallocate(haloLayers)
- end subroutine mpas_pack_send_buf2d_real
+ end subroutine mpas_dmpar_alltoall_field3d_real!}}}
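! All three alltoall variants share one message-matching convention: the
! receiver posts MPI_Irecv with tag = sending rank (commListPtr % procID),
! and the sender posts MPI_Isend with tag = its own rank
! (dminfo % my_proc_id), so each processor pair exchanges exactly one
! aggregated message per call. The per-list lifecycle, sketched:
!
!    allocate(commListPtr % rbuffer(commListPtr % nList))  ! sized from exchange lists
!    call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, &
!                   commListPtr % procID, commListPtr % procID, &
!                   dminfo % comm, commListPtr % reqID, mpi_ierr)
!    ...                                                   ! pack + MPI_Isend, local copies
!    call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
!    call mpas_dmpar_destroy_communication_list(recvList)  ! frees buffers and nodes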
- subroutine mpas_pack_send_buf3d_real(d1s, d1e, d2s, d2e, nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
+ subroutine mpas_dmpar_exch_halo_field1d_integer(field, haloLayersIn)!{{{
implicit none
- integer, intent(in) :: d1s, d1e, d2s, d2e, nField, nBuffer, startPackIdx
- real (kind=RKIND), dimension(d1s:d1e,d2s:d2e,*), intent(in) :: field
- type (exchange_list), intent(in) :: sendList
- real (kind=RKIND), dimension(nBuffer), intent(out) :: buffer
- integer, intent(inout) :: nPacked, lastPackedIdx
+ type (field1DInteger), pointer :: field
+ integer, dimension(:), intent(in), optional :: haloLayersIn
- integer :: i, j, k, n
+ type (dm_info), pointer :: dminfo
+ type (field1DInteger), pointer :: fieldCursor, fieldCursor2
+ type (mpas_exchange_list), pointer :: exchListPtr
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
+ integer :: mpi_ierr
+ integer :: nHaloLayers, iHalo, i
+ integer :: bufferOffset, nAdded
+ integer, dimension(:), pointer :: haloLayers
- n = (d1e-d1s+1) * (d2e-d2s+1)
+ logical :: comm_list_found
- if (n > nBuffer) then
- write(0,*) 'packSendBuf3dReal: Not enough space in buffer', &
- ' to fit a single slice.'
- return
+ do i = 1, 1
+ if(field % dimSizes(i) <= 0) then
+ return
+ end if
+ end do
+
+ dminfo => field % block % domain % dminfo
+
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
+ else
+ nHaloLayers = size(field % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
end if
- nPacked = 0
- do i=startPackIdx, sendList % nlist
- nPacked = nPacked + n
- if (nPacked > nBuffer) then
- nPacked = nPacked - n
- lastPackedIdx = i - 1
- return
- end if
- k = nPacked-n+1
- do j=d2s,d2e
- buffer(k:k+d1e-d1s) = field(d1s:d1e,j,sendList % list(i))
- k = k + d1e-d1s+1
- end do
- end do
- lastPackedIdx = sendList % nlist
+#ifdef _MPI
+ ! Allocate communication lists, and setup dead header node.
+ allocate(sendList)
+ nullify(sendList % next)
+ sendList % procID = -1
+ sendList % nList = 0
- end subroutine mpas_pack_send_buf3d_real
+ allocate(recvList)
+ nullify(recvList % next)
+ recvList % procID = -1
+ recvList % nList = 0
- subroutine mpas_unpack_recv_buf1d_integer(nField, field, recvList, startUnpackIdx, nBuffer, buffer, nUnpacked, lastUnpackedIdx)
+ ! Determine size of buffers for communication lists
+ fieldCursor => field
+ do while(associated(fieldCursor))
- implicit none
+ ! Need to aggregate across halo layers
+ do iHalo = 1, nHaloLayers
+
+ ! Determine size from send lists
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
- integer, intent(in) :: nField, nBuffer, startUnpackIdx
- integer, dimension(*), intent(inout) :: field
- type (exchange_list), intent(in) :: recvList
- integer, dimension(nBuffer), intent(in) :: buffer
- integer, intent(inout) :: nUnpacked, lastUnpackedIdx
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList
+ exit
+ end if
- integer :: i
+ commListPtr => commListPtr % next
+ end do
- nUnpacked = 0
- do i=startUnpackIdx, recvList % nlist
- nUnpacked = nUnpacked + 1
- if (nUnpacked > nBuffer) then
- nUnpacked = nUnpacked - 1
- lastUnpackedIdx = i - 1
- return
- end if
- field(recvList % list(i)) = buffer(nUnpacked)
- end do
- lastUnpackedIdx = recvList % nlist
+ if(.not. comm_list_found) then
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
- end subroutine mpas_unpack_recv_buf1d_integer
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList
+ end if
+ exchListPtr => exchListPtr % next
+ end do
- subroutine mpas_unpack_recv_buf2d_integer(ds, de, nField, field, recvList, startUnpackIdx, nBuffer, buffer, nUnpacked, lastUnpackedIdx)
+ ! Setup recv lists
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
- implicit none
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ exit
+ end if
- integer, intent(in) :: ds, de, nField, nBuffer, startUnpackIdx
- integer, dimension(ds:de,*), intent(inout) :: field
- type (exchange_list), intent(in) :: recvList
- integer, dimension(nBuffer), intent(in) :: buffer
- integer, intent(inout) :: nUnpacked, lastUnpackedIdx
+ commListPtr => commListPtr % next
+ end do
- integer :: i, n
+ if(.not. comm_list_found) then
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
- n = de-ds+1
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
+ end if
- nUnpacked = 0
- do i=startUnpackIdx, recvList % nlist
- nUnpacked = nUnpacked + n
- if (nUnpacked > nBuffer) then
- nUnpacked = nUnpacked - n
- lastUnpackedIdx = i - 1
- return
- end if
- field(ds:de,recvList % list(i)) = buffer(nUnpacked-n+1:nUnpacked)
+ exchListPtr => exchListPtr % next
+ end do
+ end do
+
+ fieldCursor => fieldCursor % next
end do
- lastUnpackedIdx = recvList % nlist
- end subroutine mpas_unpack_recv_buf2d_integer
+ ! Remove the dead header node from the send and recv lists
+ commListPtr => sendList
+ sendList => sendList % next
+ deallocate(commListPtr)
+ commListPtr => recvList
+ recvList => recvList % next
+ deallocate(commListPtr)
- subroutine mpas_unpack_recv_buf3d_integer(d1s, d1e, d2s, d2e, nField, field, recvList, startUnpackIdx, nBuffer, buffer, &
- nUnpacked, lastUnpackedIdx)
+ ! Determine size of receive list buffers
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = bufferOffset
- implicit none
+ commListPtr => commListPtr % next
+ end do
- integer, intent(in) :: d1s, d1e, d2s, d2e, nField, nBuffer, startUnpackIdx
- integer, dimension(d1s:d1e,d2s:d2e,*), intent(inout) :: field
- type (exchange_list), intent(in) :: recvList
- integer, dimension(nBuffer), intent(in) :: buffer
- integer, intent(inout) :: nUnpacked, lastUnpackedIdx
- integer :: i, j, k, n
+ ! Allocate space in recv lists, and initiate mpi_irecv calls
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % ibuffer(commListPtr % nList))
+ nullify(commListPtr % rbuffer)
+ call MPI_Irecv(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
- n = (d1e-d1s+1) * (d2e-d2s+1)
-
- nUnpacked = 0
- do i=startUnpackIdx, recvList % nlist
- nUnpacked = nUnpacked + n
- if (nUnpacked > nBuffer) then
- nUnpacked = nUnpacked - n
- lastUnpackedIdx = i - 1
- return
- end if
- k = nUnpacked-n+1
- do j=d2s,d2e
- field(d1s:d1e,j,recvList % list(i)) = buffer(k:k+d1e-d1s)
- k = k + d1e-d1s+1
- end do
+ commListPtr => commListPtr % next
end do
- lastUnpackedIdx = recvList % nlist
- end subroutine mpas_unpack_recv_buf3d_integer
+ ! Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % ibuffer(commListPtr % nList))
+ nullify(commListPtr % rbuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ commListPtr % ibuffer(exchListPtr % destList(i) + bufferOffset) = fieldCursor % array(exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
+ end if
- subroutine mpas_dmpar_exch_halo_field1d_integer(field, haloLayers)
+ exchListPtr => exchListPtr % next
+ end do
- implicit none
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
- type (field1DInteger), intent(inout) :: field
- integer, dimension(:), intent(in), optional :: haloLayers
+ call MPI_Isend(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
- type (dm_info) :: dminfo
- type (exchange_list), pointer :: sendList, recvList
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer, dimension(size(field % dimSizes)) :: dims
+ commListPtr => commListPtr % next
+ end do
+#endif
-#ifdef _MPI
+ ! Handle local copy. If MPI is off, then only local copies are performed.
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ do iHalo = 1, nHaloLayers
+ exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList
- dminfo = field % block % domain % dminfo
- dims = field % dimSizes
+ do while(associated(exchListPtr))
+ fieldCursor2 => field
+ do while(associated(fieldCursor2))
+ if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldCursor2 % array(exchListPtr % destList(i)) = fieldCursor % array(exchListPtr % srcList(i))
+ end do
+ end if
+
+ fieldCursor2 => fieldCursor2 % next
+ end do
- call mpas_aggregate_exchange_lists(dminfo % my_proc_id, haloLayers, field % sendList, field % recvList, sendList, recvList)
+ exchListPtr => exchListPtr % next
+ end do
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- allocate(recvListPtr % ibuffer(recvListPtr % nlist))
- call MPI_Irecv(recvListPtr % ibuffer, recvListPtr % nlist, MPI_INTEGERKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
+ fieldCursor => fieldCursor % next
end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- allocate(sendListPtr % ibuffer(sendListPtr % nlist))
- call mpas_pack_send_buf1d_integer(dims(1), field % array, sendListPtr, 1, sendListPtr % nlist, sendListPtr % ibuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % ibuffer, sendListPtr % nlist, MPI_INTEGERKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
+
+#ifdef _MPI
+
+ ! Wait for mpi_irecv to finish, and unpack data from buffer
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ fieldCursor % array(exchListPtr % destList(i)) = commListPtr % ibuffer(exchListPtr % srcList(i) + bufferOffset)
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr => commListPtr % next
end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- call mpas_unpack_recv_buf1d_integer(dims(1), field % array, recvListPtr, 1, recvListPtr % nlist, recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % ibuffer)
- end if
- recvListPtr => recvListPtr % next
+ ! Wait for MPI_Isend's to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % ibuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
- call mpas_destroy_exchange_list(sendList)
- call mpas_destroy_exchange_list(recvList)
-
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
#endif
- end subroutine mpas_dmpar_exch_halo_field1d_integer
+ deallocate(haloLayers)
+ end subroutine mpas_dmpar_exch_halo_field1d_integer!}}}
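! A minimal halo-update sketch for the new interface; cellField is a
! hypothetical field1DInteger whose exchange lists were built at setup:
!
!    call mpas_dmpar_exch_halo_field1d_integer(cellField)   ! update all halo layers
!
!    ! or restrict the update to the first halo layer:
!    call mpas_dmpar_exch_halo_field1d_integer(cellField, (/ 1 /))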
- subroutine mpas_dmpar_exch_halo_field2d_integer(field, haloLayers)
+ subroutine mpas_dmpar_exch_halo_field2d_integer(field, haloLayersIn)!{{{
implicit none
- type (field2DInteger), intent(inout) :: field
- integer, dimension(:), intent(in), optional :: haloLayers
+ type (field2DInteger), pointer :: field
+ integer, dimension(:), intent(in), optional :: haloLayersIn
- type (dm_info) :: dminfo
- type (exchange_list), pointer :: sendList, recvList
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ type (dm_info), pointer :: dminfo
+ type (field2DInteger), pointer :: fieldCursor, fieldCursor2
+ type (mpas_exchange_list), pointer :: exchListPtr
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
integer :: mpi_ierr
- integer :: d2
- integer, dimension(size(field % dimSizes)) :: dims
+ integer :: nHaloLayers, iHalo, i, j
+ integer :: bufferOffset, nAdded
+ integer, dimension(:), pointer :: haloLayers
+ logical :: comm_list_found
+
+ do i = 1, 2
+ if(field % dimSizes(i) <= 0) then
+ return
+ end if
+ end do
+
+ dminfo => field % block % domain % dminfo
+
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
+ else
+ nHaloLayers = size(field % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
+ end if
+
#ifdef _MPI
+ ! Allocate communication lists, and setup dead header nodes
+ allocate(sendList)
+ nullify(sendList % next)
+ sendList % procID = -1
+ sendList % nList = 0
+ allocate(recvList)
+ nullify(recvList % next)
+ recvList % procID = -1
+ recvList % nList = 0
+
- dminfo = field % block % domain % dminfo
- dims = field % dimSizes
- call mpas_aggregate_exchange_lists(dminfo % my_proc_id, haloLayers, field % sendList, field % recvList, sendList, recvList)
+ ! Determine size of buffers for communication lists
+ fieldCursor => field
+ do while(associated(fieldCursor))
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- d2 = dims(1) * recvListPtr % nlist
- allocate(recvListPtr % ibuffer(d2))
- call MPI_Irecv(recvListPtr % ibuffer, d2, MPI_INTEGERKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
- end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- d2 = dims(1) * sendListPtr % nlist
- allocate(sendListPtr % ibuffer(d2))
- call mpas_pack_send_buf2d_integer(1, dims(1), dims(2), field % array, sendListPtr, 1, d2, sendListPtr % ibuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % ibuffer, d2, MPI_INTEGERKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
- end do
+ ! Need to aggregate across halo layers
+ do iHalo = 1, nHaloLayers
+
+ ! Determine size from send lists
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d2 = dims(1) * recvListPtr % nlist
- call mpas_unpack_recv_buf2d_integer(1, dims(1), dims(2), field % array, recvListPtr, 1, d2, recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % ibuffer)
- end if
- recvListPtr => recvListPtr % next
- end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % ibuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldCursor % dimSizes(1)
+ exit
+ end if
- call mpas_destroy_exchange_list(sendList)
- call mpas_destroy_exchange_list(recvList)
+ commListPtr => commListPtr % next
+ end do
-#endif
+ if(.not. comm_list_found) then
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
- end subroutine mpas_dmpar_exch_halo_field2d_integer
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldCursor % dimSizes(1)
+ end if
+ exchListPtr => exchListPtr % next
+ end do
- subroutine mpas_dmpar_exch_halo_field3d_integer(field, haloLayers)
+ ! Setup recv lists
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
- implicit none
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ exit
+ end if
- type (field3DInteger), intent(inout) :: field
- integer, dimension(:), intent(in), optional :: haloLayers
+ commListPtr => commListPtr % next
+ end do
- type (dm_info) :: dminfo
- type (exchange_list), pointer :: sendList, recvList
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer :: d3
- integer, dimension(size(field % dimSizes)) :: dims
+ if(.not. comm_list_found) then
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
-#ifdef _MPI
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
+ end if
- dminfo = field % block % domain % dminfo
- dims = field % dimSizes
+ exchListPtr => exchListPtr % next
+ end do
+ end do
- call mpas_aggregate_exchange_lists(dminfo % my_proc_id, haloLayers, field % sendList, field % recvList, sendList, recvList)
+ fieldCursor => fieldCursor % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- d3 = dims(1) * dims(2) * recvListPtr % nlist
- allocate(recvListPtr % ibuffer(d3))
- call MPI_Irecv(recvListPtr % ibuffer, d3, MPI_INTEGERKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
+ ! Remove the dead header node from the send and recv lists
+ commListPtr => sendList
+ sendList => sendList % next
+ deallocate(commListPtr)
+
+ commListPtr => recvList
+ recvList => recvList % next
+ deallocate(commListPtr)
+
+ ! Determine size of receive list buffers
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = bufferOffset
+
+ commListPtr => commListPtr % next
end do
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- d3 = dims(1) * dims(2) * sendListPtr % nlist
- allocate(sendListPtr % ibuffer(d3))
- call mpas_pack_send_buf3d_integer(1, dims(1), 1, dims(2), dims(3), field % array, sendListPtr, 1, d3, &
- sendListPtr % ibuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % ibuffer, d3, MPI_INTEGERKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
+ ! Allocate space in recv lists, and initiate mpi_irecv calls
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % ibuffer(commListPtr % nList))
+ nullify(commListPtr % rbuffer)
+ call MPI_Irecv(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+
+ commListPtr => commListPtr % next
end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d3 = dims(1) * dims(2) * recvListPtr % nlist
- call mpas_unpack_recv_buf3d_integer(1, dims(1), 1, dims(2), dims(3), field % array, recvListPtr, 1, d3, &
- recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % ibuffer)
- end if
- recvListPtr => recvListPtr % next
+ ! Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % ibuffer(commListPtr % nList))
+ nullify(commListPtr % rbuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldCursor % dimSizes(1)
+ commListPtr % ibuffer((exchListPtr % destList(i)-1) * fieldCursor % dimSizes(1) + j + bufferOffset) = fieldCursor % array(j, exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
+ end do
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+
+ call MPI_Isend(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
end do
+#endif
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % ibuffer)
- end if
- sendListPtr => sendListPtr % next
+ ! Handle local copy. If MPI is off, then only local copies are performed.
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ do iHalo = 1, nHaloLayers
+ exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList
+
+ do while(associated(exchListPtr))
+ fieldCursor2 => field
+ do while(associated(fieldCursor2))
+ if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldCursor2 % array(:, exchListPtr % destList(i)) = fieldCursor % array(:, exchListPtr % srcList(i))
+ end do
+ end if
+
+ fieldCursor2 => fieldCursor2 % next
+ end do
+
+ exchListPtr => exchListPtr % next
+ end do
+ end do
+
+ fieldCursor => fieldCursor % next
end do
- call mpas_destroy_exchange_list(sendList)
- call mpas_destroy_exchange_list(recvList)
+#ifdef _MPI
+ ! Wait for mpi_irecv to finish, and unpack data from buffer
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldCursor % dimSizes(1)
+ fieldCursor % array(j, exchListPtr % destList(i)) = commListPtr % ibuffer((exchListPtr % srcList(i)-1)*fieldCursor % dimSizes(1) + j + bufferOffset)
+ end do
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr => commListPtr % next
+ end do
+
+ ! Wait for mpi_isend to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
+
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
#endif
- end subroutine mpas_dmpar_exch_halo_field3d_integer
+ deallocate(haloLayers)
-
- subroutine mpas_unpack_recv_buf1d_real(nField, field, recvList, startUnpackIdx, nBuffer, buffer, nUnpacked, lastUnpackedIdx)
+ end subroutine mpas_dmpar_exch_halo_field2d_integer!}}}
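
Throughout these rewritten exchange routines, a field's halo data is packed into one flat buffer per neighboring process, one halo layer at a time. A minimal sketch of the addressing used by the 2-d pack and unpack loops above, in the same variable names (editorial illustration, not part of the patch; idx is a scratch integer):

! Send side: destList(i) numbers the element's slot in this layer's
! buffer segment; srcList(i) indexes the field being sent.
idx = (exchListPtr % destList(i) - 1) * fieldCursor % dimSizes(1) + j + bufferOffset
commListPtr % ibuffer(idx) = fieldCursor % array(j, exchListPtr % srcList(i))
! Recv side: the roles flip; srcList(i) numbers the buffer slot and
! destList(i) indexes the field being filled.
fieldCursor % array(j, exchListPtr % destList(i)) = &
commListPtr % ibuffer((exchListPtr % srcList(i) - 1) * fieldCursor % dimSizes(1) + j + bufferOffset)

bufferOffset grows by the space consumed by each halo layer, so successive layers occupy disjoint segments of the same buffer.
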
+ subroutine mpas_dmpar_exch_halo_field3d_integer(field, haloLayersIn)!{{{
+
implicit none
- integer, intent(in) :: nField, nBuffer, startUnpackIdx
- real (kind=RKIND), dimension(*), intent(inout) :: field
- type (exchange_list), intent(in) :: recvList
- real (kind=RKIND), dimension(nBuffer), intent(in) :: buffer
- integer, intent(inout) :: nUnpacked, lastUnpackedIdx
+ type (field3DInteger), pointer :: field
+ integer, dimension(:), intent(in), optional :: haloLayersIn
- integer :: i
+ type (dm_info), pointer :: dminfo
+ type (field3DInteger), pointer :: fieldCursor, fieldCursor2
+ type (mpas_exchange_list), pointer :: exchListPtr
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
+ integer :: mpi_ierr
+ integer :: nHaloLayers, iHalo, i, j, k
+ integer :: bufferOffset, nAdded
+ integer, dimension(:), pointer :: haloLayers
- nUnpacked = 0
- do i=startUnpackIdx, recvList % nlist
- nUnpacked = nUnpacked + 1
- if (nUnpacked > nBuffer) then
- nUnpacked = nUnpacked - 1
- lastUnpackedIdx = i - 1
- return
- end if
- field(recvList % list(i)) = buffer(nUnpacked)
+ logical :: comm_list_found
+
+ do i = 1, 3
+ if(field % dimSizes(i) <= 0) then
+ return
+ end if
end do
- lastUnpackedIdx = recvList % nlist
- end subroutine mpas_unpack_recv_buf1d_real
+ dminfo => field % block % domain % dminfo
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
+ else
+ nHaloLayers = size(field % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
+ end if
- subroutine mpas_unpack_recv_buf2d_real(ds, de, nField, field, recvList, startUnpackIdx, nBuffer, buffer, nUnpacked, lastUnpackedIdx)
+#ifdef _MPI
+ ! Allocate communication lists, and setup dead header nodes
+ allocate(sendList)
+ nullify(sendList % next)
+ sendList % procID = -1
+ sendList % nList = 0
- implicit none
+ allocate(recvList)
+ nullify(recvList % next)
+ recvList % procID = -1
+ recvList % nList = 0
- integer, intent(in) :: ds, de, nField, nBuffer, startUnpackIdx
- real (kind=RKIND), dimension(ds:de,*), intent(inout) :: field
- type (exchange_list), intent(in) :: recvList
- real (kind=RKIND), dimension(nBuffer), intent(in) :: buffer
- integer, intent(inout) :: nUnpacked, lastUnpackedIdx
+ dminfo => field % block % domain % dminfo
- integer :: i, n
+ ! Determine size of buffers for communication lists
+ fieldCursor => field
+ do while(associated(fieldCursor))
- n = de-ds+1
+ ! Need to aggregate across halo layers
+ do iHalo = 1, nHaloLayers
+
+ ! Determine size from send lists
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
- nUnpacked = 0
- do i=startUnpackIdx, recvList % nlist
- nUnpacked = nUnpacked + n
- if (nUnpacked > nBuffer) then
- nUnpacked = nUnpacked - n
- lastUnpackedIdx = i - 1
- return
- end if
- field(ds:de,recvList % list(i)) = buffer(nUnpacked-n+1:nUnpacked)
- end do
- lastUnpackedIdx = recvList % nlist
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2)
+ exit
+ end if
- end subroutine mpas_unpack_recv_buf2d_real
+ commListPtr => commListPtr % next
+ end do
+ if(.not. comm_list_found) then
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
- subroutine mpas_unpack_recv_buf3d_real(d1s, d1e, d2s, d2e, nField, field, recvList, startUnpackIdx, nBuffer, buffer, &
- nUnpacked, lastUnpackedIdx)
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2)
+ end if
- implicit none
+ exchListPtr => exchListPtr % next
+ end do
- integer, intent(in) :: d1s, d1e, d2s, d2e, nField, nBuffer, startUnpackIdx
- real (kind=RKIND), dimension(d1s:d1e,d2s:d2e,*), intent(inout) :: field
- type (exchange_list), intent(in) :: recvList
- real (kind=RKIND), dimension(nBuffer), intent(in) :: buffer
- integer, intent(inout) :: nUnpacked, lastUnpackedIdx
+ ! Setup recv lists
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
- integer :: i, j, k, n
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ exit
+ end if
- n = (d1e-d1s+1) * (d2e-d2s+1)
+ commListPtr => commListPtr % next
+ end do
- nUnpacked = 0
- do i=startUnpackIdx, recvList % nlist
- nUnpacked = nUnpacked + n
- if (nUnpacked > nBuffer) then
- nUnpacked = nUnpacked - n
- lastUnpackedIdx = i - 1
- return
- end if
- k = nUnpacked-n+1
- do j=d2s,d2e
- field(d1s:d1e,j,recvList % list(i)) = buffer(k:k+d1e-d1s)
- k = k + d1e-d1s+1
- end do
+ if(.not. comm_list_found) then
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+ end do
+
+ fieldCursor => fieldCursor % next
end do
- lastUnpackedIdx = recvList % nlist
- end subroutine mpas_unpack_recv_buf3d_real
+ ! Remove the dead head pointer on send and recv list
+ commListPtr => sendList
+ sendList => sendList % next
+ deallocate(commListPtr)
+ commListPtr => recvList
+ recvList => recvList % next
+ deallocate(commListPtr)
- subroutine mpas_dmpar_exch_halo_field1d_real(field, haloLayers)
+ ! Determine size of recv lists
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = bufferOffset
- implicit none
+ commListPtr => commListPtr % next
+ end do
- type (field1DReal), intent(inout) :: field
- integer, dimension(:), intent(in), optional :: haloLayers
+ ! Allocate space in recv lists, and initiate mpi_irecv calls
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % ibuffer(commListPtr % nList))
+ nullify(commListPtr % rbuffer)
+ call MPI_Irecv(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
- type (dm_info) :: dminfo
- type (exchange_list), pointer :: sendList, recvList
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer, dimension(size(field % dimSizes)) :: dims
+ commListPtr => commListPtr % next
+ end do
-#ifdef _MPI
+ ! Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % ibuffer(commListPtr % nList))
+ nullify(commListPtr % rbuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldCursor % dimSizes(2)
+ do k = 1, fieldCursor % dimSizes(1)
+ commListPtr % ibuffer((exchListPtr % destList(i)-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) &
+ + (j-1) * fieldCursor % dimSizes(1) + k + bufferOffset) = fieldCursor % array(k, j, exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
+ end do
+ end do
+ end if
- dminfo = field % block % domain % dminfo
- dims = field % dimSizes
+ exchListPtr => exchListPtr % next
+ end do
- call mpas_aggregate_exchange_lists(dminfo % my_proc_id, haloLayers, field % sendList, field % recvList, sendList, recvList)
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- allocate(recvListPtr % rbuffer(recvListPtr % nlist))
- call MPI_Irecv(recvListPtr % rbuffer, recvListPtr % nlist, MPI_REALKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
+ call MPI_Isend(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
end do
+#endif
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- allocate(sendListPtr % rbuffer(sendListPtr % nlist))
- call mpas_pack_send_buf1d_real(dims(1), field % array, sendListPtr, 1, sendListPtr % nlist, sendListPtr % rbuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % rbuffer, sendListPtr % nlist, MPI_REALKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
+ ! Handle local copy. If MPI is off, then only local copies are performed.
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ do iHalo = 1, nHaloLayers
+ exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList
+
+ do while(associated(exchListPtr))
+ fieldCursor2 => field
+ do while(associated(fieldCursor2))
+ if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldCursor2 % array(:, :, exchListPtr % destList(i)) = fieldCursor % array(:, :, exchListPtr % srcList(i))
+ end do
+ end if
+
+ fieldCursor2 => fieldCursor2 % next
+ end do
+
+ exchListPtr => exchListPtr % next
+ end do
+ end do
+
+ fieldCursor => fieldCursor % next
end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- call mpas_unpack_recv_buf1d_real(dims(1), field % array, recvListPtr, 1, recvListPtr % nlist, recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % rbuffer)
- end if
- recvListPtr => recvListPtr % next
+#ifdef _MPI
+
+ ! Wait for mpi_irecv to finish, and unpack data from buffer
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldCursor % dimSizes(2)
+ do k = 1, fieldCursor % dimSizes(1)
+ fieldCursor % array(k, j, exchListPtr % destList(i)) = commListPtr % ibuffer((exchListPtr % srcList(i)-1)*fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) &
+ + (j-1)*fieldCursor % dimSizes(1) + k + bufferOffset)
+ end do
+ end do
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr => commListPtr % next
end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % rbuffer)
- end if
- sendListPtr => sendListPtr % next
+
+ ! Wait for mpi_isend to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
end do
- call mpas_destroy_exchange_list(sendList)
- call mpas_destroy_exchange_list(recvList)
-
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
#endif
- end subroutine mpas_dmpar_exch_halo_field1d_real
+ deallocate(haloLayers)
+ end subroutine mpas_dmpar_exch_halo_field3d_integer!}}}
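
Note how receive buffers are sized: per halo layer the code takes maxval(srcList) times the product of the inner dimensions rather than a running element count, since on the receive side srcList holds slot positions inside the incoming buffer segment, and several blocks' exchange lists may read overlapping parts of it. Schematically, for the 3-d case above:

! the layer's segment must span the highest slot any list reads from it
nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2))
! later layers start where this one ends
bufferOffset = bufferOffset + nAdded
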
- subroutine mpas_dmpar_exch_halo_field2d_real(field, haloLayers)
+ subroutine mpas_dmpar_exch_halo_field1d_real(field, haloLayersIn)!{{{
implicit none
- type (field2DReal), intent(inout) :: field
- integer, dimension(:), intent(in), optional :: haloLayers
+ type (field1dReal), pointer :: field
+ integer, dimension(:), intent(in), optional :: haloLayersIn
- type (dm_info) :: dminfo
- type (exchange_list), pointer :: sendList, recvList
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ type (dm_info), pointer :: dminfo
+ type (field1dReal), pointer :: fieldCursor, fieldCursor2
+ type (mpas_exchange_list), pointer :: exchListPtr
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
integer :: mpi_ierr
- integer :: d2
- integer, dimension(size(field % dimSizes)) :: dims
+ integer :: nHaloLayers, iHalo, i
+ integer :: bufferOffset, nAdded
+ integer, dimension(:), pointer :: haloLayers
+ logical :: comm_list_found
+ if(field % dimSizes(1) <= 0) then
+ return
+ end if
+
+ dminfo => field % block % domain % dminfo
+
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
+ else
+ nHaloLayers = size(field % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
+ end if
+
#ifdef _MPI
+ ! Allocate communication lists, and setup dead header nodes
+ allocate(sendList)
+ nullify(sendList % next)
+ sendList % procID = -1
+ sendList % nList = 0
+ allocate(recvList)
+ nullify(recvList % next)
+ recvList % procID = -1
+ recvList % nList = 0
+
dminfo = field % block % domain % dminfo
- dims = field % dimSizes
- call mpas_aggregate_exchange_lists(dminfo % my_proc_id, haloLayers, field % sendList, field % recvList, sendList, recvList)
+ ! Determine size of buffers for communication lists
+ fieldCursor => field
+ do while(associated(fieldCursor))
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- d2 = dims(1) * recvListPtr % nlist
- allocate(recvListPtr % rbuffer(d2))
- call MPI_Irecv(recvListPtr % rbuffer, d2, MPI_REALKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
+ ! Need to aggregate across halo layers
+ do iHalo = 1, nHaloLayers
+
+ ! Determine size from send lists
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ if(.not. comm_list_found) then
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ ! Setup recv lists
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ if(.not. comm_list_found) then
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+ end do
+
+ fieldCursor => fieldCursor % next
end do
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- d2 = dims(1) * sendListPtr % nlist
- allocate(sendListPtr % rbuffer(d2))
- call mpas_pack_send_buf2d_real(1, dims(1), dims(2), field % array, sendListPtr, 1, d2, sendListPtr % rbuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % rbuffer, d2, MPI_REALKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
+ ! Remove the dead head pointer on send and recv list
+ commListPtr => sendList
+ sendList => sendList % next
+ deallocate(commListPtr)
+
+ commListPtr => recvList
+ recvList => recvList % next
+ deallocate(commListPtr)
+
+ ! Determine size of recv lists
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = bufferOffset
+
+ commListPtr => commListPtr % next
end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d2 = dims(1) * recvListPtr % nlist
- call mpas_unpack_recv_buf2d_real(1, dims(1), dims(2), field % array, recvListPtr, 1, d2, recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % rbuffer)
- end if
- recvListPtr => recvListPtr % next
+
+ ! Allocate space in recv lists, and initiate mpi_irecv calls
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+
+ commListPtr => commListPtr % next
end do
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % rbuffer)
- end if
- sendListPtr => sendListPtr % next
+ ! Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ commListPtr % rbuffer(exchListPtr % destList(i) + bufferOffset) = fieldCursor % array(exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+
+ call MPI_Isend(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
end do
+#endif
- call mpas_destroy_exchange_list(sendList)
- call mpas_destroy_exchange_list(recvList)
+ ! Handle local copy. If MPI is off, then only local copies are performed.
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ do iHalo = 1, nHaloLayers
+ exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ fieldCursor2 => field
+ do while(associated(fieldCursor2))
+ if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldCursor2 % array(exchListPtr % destList(i)) = fieldCursor % array(exchListPtr % srcList(i))
+ end do
+ end if
+
+ fieldCursor2 => fieldCursor2 % next
+ end do
+
+ exchListPtr => exchListPtr % next
+ end do
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+
+#ifdef _MPI
+
+ ! Wait for mpi_irecv to finish, and unpack data from buffer
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ fieldCursor % array(exchListPtr % destList(i)) = commListPtr % rbuffer(exchListPtr % srcList(i) + bufferOffset)
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr => commListPtr % next
+ end do
+
+ ! Wait for mpi_isend to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
+
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
#endif
- end subroutine mpas_dmpar_exch_halo_field2d_real
+ deallocate(haloLayers)
+ end subroutine mpas_dmpar_exch_halo_field1d_real!}}}
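
A minimal usage sketch for the new interface; f is an illustrative field1dReal pointer whose multihalo send/recv/copy lists have already been built:

call mpas_dmpar_exch_halo_field1d_real(f) ! exchange all halo layers
call mpas_dmpar_exch_halo_field1d_real(f, (/ 1 /)) ! exchange only the first layer
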
- subroutine mpas_dmpar_exch_halo_field3d_real(field, haloLayers)
+ subroutine mpas_dmpar_exch_halo_field2d_real(field, haloLayersIn)!{{{
implicit none
- type (field3DReal), intent(inout) :: field
- integer, dimension(:), intent(in), optional :: haloLayers
+ type (field2dReal), pointer :: field
+ integer, dimension(:), intent(in), optional :: haloLayersIn
- type (dm_info) :: dminfo
- type (exchange_list), pointer :: sendList, recvList
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ type (dm_info), pointer :: dminfo
+ type (field2dReal), pointer :: fieldCursor, fieldCursor2
+ type (mpas_exchange_list), pointer :: exchListPtr
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
integer :: mpi_ierr
- integer :: d3
- integer, dimension(size(field % dimSizes)) :: dims
+ integer :: nHaloLayers, iHalo, i, j
+ integer :: bufferOffset, nAdded
+ integer, dimension(:), pointer :: haloLayers
+ logical :: comm_list_found
+
+ do i = 1, 2
+ if(field % dimSizes(i) <= 0) then
+ return
+ end if
+ end do
+
+ dminfo => field % block % domain % dminfo
+
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
+ else
+ nHaloLayers = size(field % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
+ end if
+
#ifdef _MPI
+ ! Allocate communication lists, and setup dead header nodes
+ allocate(sendList)
+ nullify(sendList % next)
+ sendList % procID = -1
+ sendList % nList = 0
+ allocate(recvList)
+ nullify(recvList % next)
+ recvList % procID = -1
+ recvList % nList = 0
+
dminfo = field % block % domain % dminfo
- dims = field % dimSizes
- call mpas_aggregate_exchange_lists(dminfo % my_proc_id, haloLayers, field % sendList, field % recvList, sendList, recvList)
+ ! Determine size of buffers for communication lists
+ fieldCursor => field
+ do while(associated(fieldCursor))
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- d3 = dims(1) * dims(2) * recvListPtr % nlist
- allocate(recvListPtr % rbuffer(d3))
- call MPI_Irecv(recvListPtr % rbuffer, d3, MPI_REALKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
+ ! Need to aggregate across halo layers
+ do iHalo = 1, nHaloLayers
+
+ ! Determine size from send lists
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldCursor % dimSizes(1)
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ if(.not. comm_list_found) then
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldCursor % dimSizes(1)
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ ! Setup recv lists
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldCursor % dimSizes(1)
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ if(.not. comm_list_found) then
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldCursor % dimSizes(1)
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+ end do
+
+ fieldCursor => fieldCursor % next
end do
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- d3 = dims(1) * dims(2) * sendListPtr % nlist
- allocate(sendListPtr % rbuffer(d3))
- call mpas_pack_send_buf3d_real(1, dims(1), 1, dims(2), dims(3), field % array, sendListPtr, 1, d3, &
- sendListPtr % rbuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % rbuffer, d3, MPI_REALKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
+ ! Remove the dead head pointer on send and recv list
+ commListPtr => sendList
+ sendList => sendList % next
+ deallocate(commListPtr)
+
+ commListPtr => recvList
+ recvList => recvList % next
+ deallocate(commListPtr)
+
+ ! Determine size of recv lists
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = bufferOffset
+
+ commListPtr => commListPtr % next
end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d3 = dims(1) * dims(2) * recvListPtr % nlist
- call mpas_unpack_recv_buf3d_real(1, dims(1), 1, dims(2), dims(3), field % array, recvListPtr, 1, d3, &
- recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % rbuffer)
- end if
- recvListPtr => recvListPtr % next
+ ! Allocate space in recv lists, and initiate mpi_irecv calls
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+
+ commListPtr => commListPtr % next
end do
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % rbuffer)
- end if
- sendListPtr => sendListPtr % next
+ ! Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldCursor % dimSizes(1)
+ commListPtr % rbuffer((exchListPtr % destList(i)-1) * fieldCursor % dimSizes(1) + j + bufferOffset) = fieldCursor % array(j, exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
+ end do
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+
+ call MPI_Isend(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
end do
+#endif
- call mpas_destroy_exchange_list(sendList)
- call mpas_destroy_exchange_list(recvList)
+ ! Handle local copy. If MPI is off, then only local copies are performed.
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ do iHalo = 1, nHaloLayers
+ exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ fieldCursor2 => field
+ do while(associated(fieldCursor2))
+ if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldCursor2 % array(:, exchListPtr % destList(i)) = fieldCursor % array(:, exchListPtr % srcList(i))
+ end do
+ end if
+
+ fieldCursor2 => fieldCursor2 % next
+ end do
+
+ exchListPtr => exchListPtr % next
+ end do
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+
+#ifdef _MPI
+
+ ! Wait for mpi_irecv to finish, and unpack data from buffer
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldCursor % dimSizes(1)
+ fieldCursor % array(j, exchListPtr % destList(i)) = commListPtr % rbuffer((exchListPtr % srcList(i)-1)*fieldCursor % dimSizes(1) + j + bufferOffset)
+ end do
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr => commListPtr % next
+ end do
+
+ ! Wait for mpi_isend to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
+
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
#endif
- end subroutine mpas_dmpar_exch_halo_field3d_real
+ deallocate(haloLayers)
+ end subroutine mpas_dmpar_exch_halo_field2d_real!}}}
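
Each routine assembles its communication lists behind a throwaway head node, which keeps the append loop free of empty-list special cases. The idiom, reduced to its essentials:

allocate(sendList) ! dead head: procID = -1, nList = 0
nullify(sendList % next)
! ... append one node per neighboring process ...
commListPtr => sendList ! unlink and discard the head
sendList => sendList % next
deallocate(commListPtr)
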
- subroutine mpas_aggregate_exchange_lists(myProcID, haloLayersIn, sendListArray, recvListArray, aggregateSendList, aggregateRecvList)
+ subroutine mpas_dmpar_exch_halo_field3d_real(field, haloLayersIn)!{{{
implicit none
- !--- in variables ---!
- integer, intent(in) :: myProcID
- integer, dimension(:), intent(in), target, optional :: haloLayersIn
- type (exchange_list), dimension(:), pointer :: sendListArray, recvListArray
-
- !--- out variabls ---!
- type (exchange_list), pointer :: aggregateSendList, aggregateRecvList
+ type (field3dReal), pointer :: field
+ integer, dimension(:), intent(in), optional :: haloLayersIn
- !--- local variables ---!
- integer :: i, j
+ type (dm_info), pointer :: dminfo
+ type (field3dReal), pointer :: fieldCursor, fieldCursor2
+ type (mpas_exchange_list), pointer :: exchListPtr
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
+ integer :: mpi_ierr
+ integer :: nHaloLayers, iHalo, i, j, k
+ integer :: bufferOffset, nAdded
integer, dimension(:), pointer :: haloLayers
- type (exchange_list), pointer :: inListPtr, aggListPtr
- logical :: blockAdded
- logical :: listInitilized
- if (present(haloLayersIn)) then
- haloLayers => haloLayersIn
+ logical :: comm_list_found
+
+ do i = 1, 3
+ if(field % dimSizes(i) <= 0) then
+ return
+ end if
+ end do
+
+ dminfo => field % block % domain % dminfo
+
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
else
- allocate(haloLayers(size(sendListArray)))
- do i=1, size(haloLayers)
- haloLayers(i) = i
- end do
+ nHaloLayers = size(field % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
end if
- nullify(aggregateSendList)
- nullify(aggregateRecvList)
+#ifdef _MPI
+ ! Allocate communication lists, and setup dead header nodes
+ allocate(sendList)
+ nullify(sendList % next)
+ sendList % procID = -1
+ sendList % nList = 0
- do i=1, size(haloLayers)
+ allocate(recvList)
+ nullify(recvList % next)
+ recvList % procID = -1
+ recvList % nList = 0
- inListPtr => sendListArray(haloLayers(i)) % next
- do while(associated(inListPtr))
+ dminfo => field % block % domain % dminfo
- blockAdded = .false.
- aggListPtr => aggregateSendList
-
- do while(associated(aggListPtr))
- if(inListPtr % blockID == aggListPtr % blockID) then
- if(inListPtr % procID .ne. myProcID) then
- call mpas_merge_integer_arrays(aggListPtr % list, aggListPtr % nlist, inListPtr % list)
- end if
- blockAdded = .true.
- exit
- end if
- aggListPtr => aggListPtr % next
+ ! Determine size of buffers for communication lists
+ fieldCursor => field
+ do while(associated(fieldCursor))
+
+ ! Need to aggregate across halo layers
+ do iHalo = 1, nHaloLayers
+
+ ! Determine size from send lists
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2)
+ exit
+ end if
+
+ commListPtr => commListPtr % next
end do
- if(.not. blockAdded) then
-
- if (.not. associated(aggregateSendList)) then
- allocate(aggregateSendList)
- nullify(aggregateSendList % next)
- aggListPtr => aggregateSendList
- else
- aggListPtr => aggregateSendList
- do while(associated(aggListPtr % next))
- aggListPtr => aggListPtr % next
- end do
- allocate(aggListPtr % next)
- aggListPtr => aggListPtr % next
- end if
+ if(.not. comm_list_found) then
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
- nullify(aggListPtr % next)
- aggListPtr % procID = inListPtr % procID
- aggListPtr % blockID = inListPtr % blockID
- aggListPtr % nlist = inListPtr % nlist
- allocate(aggListPtr % list(inListPtr % nlist))
- aggListPtr % list = inListPtr % list
- aggListPtr % reqID = inListPtr % reqID
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2)
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ ! Setup recv lists
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ if(.not. comm_list_found) then
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
end if
- inListPtr => inListPtr % next
- end do
+ exchListPtr => exchListPtr % next
+ end do
+ end do
+ fieldCursor => fieldCursor % next
+ end do
- inListPtr => recvListArray(haloLayers(i)) % next
- do while(associated(inListPtr))
+ ! Remove the dead head pointer on send and recv list
+ commListPtr => sendList
+ sendList => sendList % next
+ deallocate(commListPtr)
- blockAdded = .false.
- aggListPtr => aggregateRecvList
- do while(associated(aggListPtr))
- if(inListPtr % blockID == aggListPtr % blockID) then
- if(inListPtr % procID .ne. myProcID) then
- call mpas_merge_integer_arrays(aggListPtr % list, aggListPtr % nlist, inListPtr % list)
- end if
- blockAdded = .true.
- exit
- end if
- aggListPtr => aggListPtr % next
+ commListPtr => recvList
+ recvList => recvList % next
+ deallocate(commListPtr)
+
+ ! Determine size of recv lists
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2))
+ end if
+ exchListPtr => exchListPtr % next
end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = bufferOffset
- if(.not. blockAdded) then
+ commListPtr => commListPtr % next
+ end do
- if (.not. associated(aggregateRecvList)) then
- allocate(aggregateRecvList)
- nullify(aggregateRecvList % next)
- aggListPtr => aggregateRecvList
- else
- aggListPtr => aggregateRecvList
- do while(associated(aggListPtr % next))
- aggListPtr => aggListPtr % next
+ ! Allocate space in recv lists, and initiate mpi_irecv calls
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldCursor % dimSizes(2)
+ do k = 1, fieldCursor % dimSizes(1)
+ commListPtr % rbuffer((exchListPtr % destList(i)-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) &
+ + (j-1) * fieldCursor % dimSizes(1) + k + bufferOffset) = fieldCursor % array(k, j, exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
end do
+ end do
+ end if
- allocate(aggListPtr % next)
- aggListPtr => aggListPtr % next
- nullify(aggListPtr % next)
- end if
-
- aggListPtr % procID = inListPtr % procID
- aggListPtr % blockID = inListPtr % blockID
- aggListPtr % nlist = inListPtr % nlist
- allocate(aggListPtr % list(inListPtr % nlist))
- aggListPtr % list = inListPtr % list
- aggListPtr % reqID = inListPtr % reqID
+ exchListPtr => exchListPtr % next
+ end do
- end if
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
- inListPtr => inListPtr % next
- end do
+ call MPI_Isend(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
+#endif
+ ! Handle local copy. If MPI is off, then only local copies are performed.
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ do iHalo = 1, nHaloLayers
+ exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList
+
+ do while(associated(exchListPtr))
+ fieldCursor2 => field
+ do while(associated(fieldCursor2))
+ if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldCursor2 % array(:, :, exchListPtr % destList(i)) = fieldCursor % array(:, :, exchListPtr % srcList(i))
+ end do
+ end if
+
+ fieldCursor2 => fieldCursor2 % next
+ end do
+
+ exchListPtr => exchListPtr % next
+ end do
+ end do
+
+ fieldCursor => fieldCursor % next
end do
- if (.not. present(haloLayersIn)) then
- deallocate(haloLayers)
- end if
+#ifdef _MPI
- end subroutine mpas_aggregate_exchange_lists
+ ! Wait for mpi_irecv to finish, and unpack data from buffer
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldCursor % dimSizes(2)
+ do k = 1, fieldCursor % dimSizes(1)
+ fieldCursor % array(k, j, exchListPtr % destList(i)) = commListPtr % rbuffer((exchListPtr % srcList(i)-1)*fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) &
+ + (j-1)*fieldCursor % dimSizes(1) + k + bufferOffset)
+ end do
+ end do
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr => commListPtr % next
+ end do
+ ! Wait for mpi_isend to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
- subroutine mpas_destroy_exchange_list(exchangeList)
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
+#endif
- implicit none
+ deallocate(haloLayers)
- !--- in variables ---!
- type (exchange_list), pointer :: exchangeList
+ end subroutine mpas_dmpar_exch_halo_field3d_real!}}}
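
All of these exchanges share one message-matching convention: the MPI tag is always the sender's rank. A receiver therefore posts with source and tag both set to the neighbor's procID, while a sender tags with its own rank. Schematically (buf, n, req, and ierr are placeholders):

! receiver on rank r, expecting data from rank s
call MPI_Irecv(buf, n, MPI_REALKIND, s, s, dminfo % comm, req, ierr)
! sender on rank s, sending to rank r
call MPI_Isend(buf, n, MPI_REALKIND, r, s, dminfo % comm, req, ierr)
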
- !--- local variables ---!
- type (exchange_list), pointer :: exchangeListPtr
+ subroutine mpas_dmpar_init_mulithalo_exchange_list(exchList, nHalos)!{{{
+ type (mpas_multihalo_exchange_list), pointer :: exchList
+ integer, intent(in) :: nHalos
- do while (associated(exchangeList))
- exchangeListPtr => exchangeList % next
+ integer :: i
- deallocate(exchangeList % list)
- deallocate(exchangeList)
- exchangeList => exchangeListPtr
- end do
+ allocate(exchList)
+ allocate(exchList % halos(nHalos))
+ do i = 1, nHalos
+ nullify(exchList % halos(i) % exchList)
+ end do
+ end subroutine mpas_dmpar_init_mulithalo_exchange_list!}}}
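
A short usage sketch (names illustrative): after this call, every halo layer's exchange-list head is disassociated, ready for nodes to be appended when the block decomposition is constructed.

type (mpas_multihalo_exchange_list), pointer :: sendList
call mpas_dmpar_init_mulithalo_exchange_list(sendList, 3) ! three halo layers
! sendList % halos(1) % exchList through halos(3) % exchList are now all nullified
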
- end subroutine mpas_destroy_exchange_list
+ subroutine mpas_dmpar_destroy_mulithalo_exchange_list(exchList)!{{{
+ type (mpas_multihalo_exchange_list), pointer :: exchList
+ integer :: nHalos
+ integer :: i
- subroutine mpas_merge_integer_arrays(mergeArray, nMergeArray, dataToAppend)
+ nHalos = size(exchList % halos)
- implicit none
+ do i = 1, nHalos
+ call mpas_dmpar_destroy_exchange_list(exchList % halos(i) % exchList)
+ end do
- !--- inout variables ---!
- integer, dimension(:), pointer :: mergeArray
- integer, intent(inout) :: nMergeArray
+ deallocate(exchList % halos)
+ deallocate(exchList)
+ nullify(exchList)
+ end subroutine mpas_dmpar_destroy_mulithalo_exchange_list!}}}
- !--- in variables ---!
- integer, dimension(:), pointer :: dataToAppend
+ subroutine mpas_dmpar_destroy_communication_list(commList)!{{{
+ type (mpas_communication_list), pointer :: commList
+ type (mpas_communication_list), pointer :: commListPtr
- !--- local variables ---!
- integer :: nDataToAppend, newSize
- integer, dimension(nMergeArray) :: mergeArrayCopy
-
-
- nDataToAppend = size(dataToAppend)
- newSize = nMergeArray + nDataToAppend
- mergeArrayCopy = mergeArray
- deallocate(mergeArray)
- allocate(mergeArray(newSize))
- mergeArray(1:nMergeArray) = mergeArrayCopy
- mergeArray(nMergeArray+1:newSize) = dataToAppend
- nMergeArray = newSize
+ commListPtr => commList
+ do while(associated(commListPtr))
+ if(associated(commList % next)) then
+ commList => commList % next
+ else
+ nullify(commList)
+ end if
- end subroutine mpas_merge_integer_arrays
+ if(associated(commListPtr % ibuffer)) then
+ deallocate(commListPtr % ibuffer)
+ end if
+ if(associated(commListPtr % rbuffer)) then
+ deallocate(commListPtr % rbuffer)
+ end if
+ deallocate(commListPtr)
+ commListPtr => commList
+ end do
+
+ end subroutine mpas_dmpar_destroy_communication_list!}}}
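
The destroy routines walk with two cursors so a freed node is never dereferenced: the public head pointer is advanced first, then the trailing cursor's buffers and node are released. The core of the pattern:

commListPtr => commList
do while(associated(commListPtr))
commList => commList % next ! step the head past the node to free
! ... deallocate commListPtr % ibuffer / % rbuffer if associated ...
deallocate(commListPtr)
commListPtr => commList
end do
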
+
+ subroutine mpas_dmpar_destroy_exchange_list(exchList)!{{{
+ type (mpas_exchange_list), pointer :: exchList
+ type (mpas_exchange_list), pointer :: exchListPtr
+
+ exchListPtr => exchList
+ do while(associated(exchList))
+ if(associated(exchList % next)) then
+ exchList => exchList % next
+ else
+ nullify(exchList)
+ end if
+
+ if(associated(exchListPtr % srcList)) then
+ deallocate(exchListPtr % srcList)
+ end if
+
+ if(associated(exchListPtr % destList)) then
+ deallocate(exchListPtr % destList)
+ end if
+
+ deallocate(exchListPtr)
+ exchListPtr => exchList
+ end do
+
+ end subroutine mpas_dmpar_destroy_exchange_list!}}}
+
+ subroutine mpas_dmpar_copy_field1d_integer(field)!{{{
+ type (field1dInteger), pointer :: field
+ type (field1dInteger), pointer :: fieldCursor
+
+ if(associated(field % next)) then
+ fieldCursor => field % next
+ do while(associated(fieldCursor))
+ fieldCursor % array = field % array
+ fieldCursor => fieldCursor % next
+ end do
+ end if
+ end subroutine mpas_dmpar_copy_field1d_integer!}}}
+
+ subroutine mpas_dmpar_copy_field2d_integer(field)!{{{
+ type (field2dInteger), pointer :: field
+ type (field2dInteger), pointer :: fieldCursor
+
+ if(associated(field % next)) then
+ fieldCursor => field % next
+ do while(associated(fieldCursor))
+ fieldCursor % array = field % array
+ fieldCursor => fieldCursor % next
+ end do
+ end if
+ end subroutine mpas_dmpar_copy_field2d_integer!}}}
+
+ subroutine mpas_dmpar_copy_field3d_integer(field)!{{{
+ type (field3dInteger), pointer :: field
+ type (field3dInteger), pointer :: fieldCursor
+
+ if(associated(field % next)) then
+ fieldCursor => field % next
+ do while(associated(fieldCursor))
+ fieldCursor % array = field % array
+ fieldCursor => fieldCursor % next
+ end do
+ end if
+ end subroutine mpas_dmpar_copy_field3d_integer!}}}
+
+ subroutine mpas_dmpar_copy_field1d_real(field)!{{{
+ type (field1dReal), pointer :: field
+ type (field1dReal), pointer :: fieldCursor
+
+ if(associated(field % next)) then
+ fieldCursor => field % next
+ do while(associated(fieldCursor))
+ fieldCursor % array(:) = field % array(:)
+ fieldCursor => fieldCursor % next
+ end do
+ end if
+ end subroutine mpas_dmpar_copy_field1d_real!}}}
+
+ subroutine mpas_dmpar_copy_field2d_real(field)!{{{
+ type (field2dReal), pointer :: field
+ type (field2dReal), pointer :: fieldCursor
+
+ if(associated(field % next)) then
+ fieldCursor => field % next
+ do while(associated(fieldCursor))
+ fieldCursor % array = field % array
+ fieldCursor => fieldCursor % next
+ end do
+ end if
+ end subroutine mpas_dmpar_copy_field2d_real!}}}
+
+ subroutine mpas_dmpar_copy_field3d_real(field)!{{{
+ type (field3dReal), pointer :: field
+ type (field3dReal), pointer :: fieldCursor
+
+ if(associated(field % next)) then
+ fieldCursor => field % next
+ do while(associated(fieldCursor))
+ fieldCursor % array = field % array
+ fieldCursor => fieldCursor % next
+ end do
+ end if
+ end subroutine mpas_dmpar_copy_field3d_real!}}}
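
These copy helpers broadcast the head block's array into every other block in the field's linked list, e.g. after a value has been computed on the first block only. Usage is one call (f illustrative):

call mpas_dmpar_copy_field1d_real(f) ! f % array is now identical across all local blocks
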
+
end module mpas_dmpar
Modified: branches/atmos_physics/src/framework/mpas_dmpar_types.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_dmpar_types.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/framework/mpas_dmpar_types.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -3,21 +3,50 @@
use mpas_kind_types
type dm_info
- integer :: nprocs, my_proc_id, comm, info
- logical :: using_external_comm
+ integer :: nprocs, my_proc_id, comm, info
+ logical :: using_external_comm
end type dm_info
type exchange_list
- integer :: procID
- integer :: blockID
- integer :: nlist
- integer, dimension(:), pointer :: list
- type (exchange_list), pointer :: next
- real (kind=RKIND), dimension(:), pointer :: rbuffer
- integer, dimension(:), pointer :: ibuffer
- integer :: reqID
+ integer :: procID
+ integer :: blockID
+ integer :: nlist
+ integer, dimension(:), pointer :: list
+ type (exchange_list), pointer :: next
+ real (kind=RKIND), dimension(:), pointer :: rbuffer
+ integer, dimension(:), pointer :: ibuffer
+ integer :: reqID
end type exchange_list
+ type mpas_exchange_list
+ integer :: endPointID
+ integer :: nlist
+ integer, dimension(:), pointer :: srcList
+ integer, dimension(:), pointer :: destList
+ type (mpas_exchange_list), pointer :: next
+
+ end type mpas_exchange_list
+
+ type mpas_exchange_list_pointer
+ type (mpas_exchange_list), pointer :: exchList
+ end type mpas_exchange_list_pointer
+
+ type mpas_multihalo_exchange_list
+ type (mpas_exchange_list_pointer), dimension(:), pointer :: halos
+ end type mpas_multihalo_exchange_list
+
+
+ type mpas_communication_list
+ integer :: procID
+ integer :: nlist
+ real (kind=RKIND), dimension(:), pointer :: rbuffer
+ integer, dimension(:), pointer :: ibuffer
+ integer :: reqID
+ type (mpas_communication_list), pointer :: next
+
+ end type mpas_communication_list
+
+
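
How the new types relate, briefly: an mpas_multihalo_exchange_list carries one exchange-list head per halo layer; each mpas_exchange_list node describes the transfers to a single endpoint (a process or a local block) via paired srcList/destList indices; mpas_communication_list nodes are built per neighboring process at exchange time and own the MPI buffers and request IDs. A sketch of walking one layer's list (h illustrative):

exchListPtr => field % sendList % halos(h) % exchList
do while(associated(exchListPtr))
! exchListPtr % endPointID, % srcList, % destList describe one transfer
exchListPtr => exchListPtr % next
end do
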
end module mpas_dmpar_types
Modified: branches/atmos_physics/src/framework/mpas_framework.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_framework.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/framework/mpas_framework.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -13,18 +13,19 @@
contains
- subroutine mpas_framework_init(dminfo, domain)
+ subroutine mpas_framework_init(dminfo, domain, mpi_comm)
implicit none
type (dm_info), pointer :: dminfo
type (domain_type), pointer :: domain
+ integer, intent(in), optional :: mpi_comm
integer :: pio_num_iotasks
integer :: pio_stride
allocate(dminfo)
- call mpas_dmpar_init(dminfo)
+ call mpas_dmpar_init(dminfo, mpi_comm)
call mpas_read_namelist(dminfo)
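
With the new optional argument, a driver that already owns an MPI communicator (a coupler, for instance) can hand it to MPAS rather than letting mpas_dmpar_init initialize MPI itself; a hypothetical call:

call mpas_framework_init(dminfo, domain, mpi_comm=coupler_comm) ! coupler_comm is illustrative
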
Modified: branches/atmos_physics/src/framework/mpas_grid_types.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_grid_types.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/framework/mpas_grid_types.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -44,9 +44,9 @@
type (field3DReal), pointer :: prev, next
! Halo communication lists
- type (exchange_list), dimension(:), pointer :: sendList
- type (exchange_list), dimension(:), pointer :: recvList
- type (exchange_list), dimension(:), pointer :: copyList
+ type (mpas_multihalo_exchange_list), pointer :: sendList
+ type (mpas_multihalo_exchange_list), pointer :: recvList
+ type (mpas_multihalo_exchange_list), pointer :: copyList
end type field3DReal
@@ -73,9 +73,9 @@
type (field2DReal), pointer :: prev, next
! Halo communication lists
- type (exchange_list), dimension(:), pointer :: sendList
- type (exchange_list), dimension(:), pointer :: recvList
- type (exchange_list), dimension(:), pointer :: copyList
+ type (mpas_multihalo_exchange_list), pointer :: sendList
+ type (mpas_multihalo_exchange_list), pointer :: recvList
+ type (mpas_multihalo_exchange_list), pointer :: copyList
end type field2DReal
@@ -102,9 +102,9 @@
type (field1DReal), pointer :: prev, next
! Halo communication lists
- type (exchange_list), dimension(:), pointer :: sendList
- type (exchange_list), dimension(:), pointer :: recvList
- type (exchange_list), dimension(:), pointer :: copyList
+ type (mpas_multihalo_exchange_list), pointer :: sendList
+ type (mpas_multihalo_exchange_list), pointer :: recvList
+ type (mpas_multihalo_exchange_list), pointer :: copyList
end type field1DReal
@@ -129,9 +129,9 @@
type (field0DReal), pointer :: prev, next
! Halo communication lists
- type (exchange_list), dimension(:), pointer :: sendList
- type (exchange_list), dimension(:), pointer :: recvList
- type (exchange_list), dimension(:), pointer :: copyList
+ type (mpas_multihalo_exchange_list), pointer :: sendList
+ type (mpas_multihalo_exchange_list), pointer :: recvList
+ type (mpas_multihalo_exchange_list), pointer :: copyList
end type field0DReal
@@ -158,9 +158,9 @@
type (field3DInteger), pointer :: prev, next
! Halo communication lists
- type (exchange_list), dimension(:), pointer :: sendList
- type (exchange_list), dimension(:), pointer :: recvList
- type (exchange_list), dimension(:), pointer :: copyList
+ type (mpas_multihalo_exchange_list), pointer :: sendList
+ type (mpas_multihalo_exchange_list), pointer :: recvList
+ type (mpas_multihalo_exchange_list), pointer :: copyList
end type field3DInteger
@@ -187,9 +187,9 @@
type (field2DInteger), pointer :: prev, next
! Halo communication lists
- type (exchange_list), dimension(:), pointer :: sendList
- type (exchange_list), dimension(:), pointer :: recvList
- type (exchange_list), dimension(:), pointer :: copyList
+ type (mpas_multihalo_exchange_list), pointer :: sendList
+ type (mpas_multihalo_exchange_list), pointer :: recvList
+ type (mpas_multihalo_exchange_list), pointer :: copyList
end type field2DInteger
@@ -216,9 +216,9 @@
type (field1DInteger), pointer :: prev, next
! Halo communication lists
- type (exchange_list), dimension(:), pointer :: sendList
- type (exchange_list), dimension(:), pointer :: recvList
- type (exchange_list), dimension(:), pointer :: copyList
+ type (mpas_multihalo_exchange_list), pointer :: sendList
+ type (mpas_multihalo_exchange_list), pointer :: recvList
+ type (mpas_multihalo_exchange_list), pointer :: copyList
end type field1DInteger
@@ -243,9 +243,9 @@
type (field0DInteger), pointer :: prev, next
! Halo communication lists
- type (exchange_list), dimension(:), pointer :: sendList
- type (exchange_list), dimension(:), pointer :: recvList
- type (exchange_list), dimension(:), pointer :: copyList
+ type (mpas_multihalo_exchange_list), pointer :: sendList
+ type (mpas_multihalo_exchange_list), pointer :: recvList
+ type (mpas_multihalo_exchange_list), pointer :: copyList
end type field0DInteger
@@ -272,9 +272,9 @@
type (field1DChar), pointer :: prev, next
! Halo communication lists
- type (exchange_list), dimension(:), pointer :: sendList
- type (exchange_list), dimension(:), pointer :: recvList
- type (exchange_list), dimension(:), pointer :: copyList
+ type (mpas_multihalo_exchange_list), pointer :: sendList
+ type (mpas_multihalo_exchange_list), pointer :: recvList
+ type (mpas_multihalo_exchange_list), pointer :: copyList
end type field1DChar
@@ -299,9 +299,9 @@
type (field0DChar), pointer :: prev, next
! Halo communication lists
- type (exchange_list), dimension(:), pointer :: sendList
- type (exchange_list), dimension(:), pointer :: recvList
- type (exchange_list), dimension(:), pointer :: copyList
+ type (mpas_multihalo_exchange_list), pointer :: sendList
+ type (mpas_multihalo_exchange_list), pointer :: recvList
+ type (mpas_multihalo_exchange_list), pointer :: copyList
end type field0DChar
@@ -325,17 +325,17 @@
! Type for storing (possibly architecture-specific) information concerning parallelism
type parallel_info
- type (exchange_list), dimension(:), pointer :: cellsToSend ! List of types describing which cells to send to other blocks
- type (exchange_list), dimension(:), pointer :: cellsToRecv ! List of types describing which cells to receive from other blocks
- type (exchange_list), dimension(:), pointer :: cellsToCopy ! List of types describing which cells to copy from other blocks
+ type (mpas_multihalo_exchange_list), pointer :: cellsToSend ! List of types describing which cells to send to other blocks
+ type (mpas_multihalo_exchange_list), pointer :: cellsToRecv ! List of types describing which cells to receive from other blocks
+ type (mpas_multihalo_exchange_list), pointer :: cellsToCopy ! List of types describing which cells to copy from other blocks
- type (exchange_list), dimension(:), pointer :: edgesToSend ! List of types describing which edges to send to other blocks
- type (exchange_list), dimension(:), pointer :: edgesToRecv ! List of types describing which edges to receive from other blocks
- type (exchange_list), dimension(:), pointer :: edgesToCopy ! List of types describing which edges to copy from other blocks
+ type (mpas_multihalo_exchange_list), pointer :: edgesToSend ! List of types describing which edges to send to other blocks
+ type (mpas_multihalo_exchange_list), pointer :: edgesToRecv ! List of types describing which edges to receive from other blocks
+ type (mpas_multihalo_exchange_list), pointer :: edgesToCopy ! List of types describing which edges to copy from other blocks
- type (exchange_list), dimension(:), pointer :: verticesToSend ! List of types describing which vertices to send to other blocks
- type (exchange_list), dimension(:), pointer :: verticesToRecv ! List of types describing which vertices to receive from other blocks
- type (exchange_list), dimension(:), pointer :: verticesToCopy ! List of types describing which vertices to copy from other blocks
+ type (mpas_multihalo_exchange_list), pointer :: verticesToSend ! List of types describing which vertices to send to other blocks
+ type (mpas_multihalo_exchange_list), pointer :: verticesToRecv ! List of types describing which vertices to receive from other blocks
+ type (mpas_multihalo_exchange_list), pointer :: verticesToCopy ! List of types describing which vertices to copy from other blocks
end type parallel_info
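Since each member of parallel_info is now a single mpas_multihalo_exchange_list rather than an array sized by halo layer, code that previously indexed cellsToSend(iHalo) selects the layer through the halos array instead; a sketch with illustrative names block and iHalo:

   type (mpas_exchange_list), pointer :: sendListPtr

   sendListPtr => block % parinfo % cellsToSend % halos(iHalo) % exchList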
@@ -363,7 +363,40 @@
type (dm_info), pointer :: dminfo
end type domain_type
+ interface mpas_allocate_scratch_field
+ module procedure mpas_allocate_scratch_field1d_integer
+ module procedure mpas_allocate_scratch_field2d_integer
+ module procedure mpas_allocate_scratch_field3d_integer
+ module procedure mpas_allocate_scratch_field1d_real
+ module procedure mpas_allocate_scratch_field2d_real
+ module procedure mpas_allocate_scratch_field3d_real
+ module procedure mpas_allocate_scratch_field1d_char
+ end interface
+ interface mpas_deallocate_scratch_field
+ module procedure mpas_deallocate_scratch_field1d_integer
+ module procedure mpas_deallocate_scratch_field2d_integer
+ module procedure mpas_deallocate_scratch_field3d_integer
+ module procedure mpas_deallocate_scratch_field1d_real
+ module procedure mpas_deallocate_scratch_field2d_real
+ module procedure mpas_deallocate_scratch_field3d_real
+ module procedure mpas_deallocate_scratch_field1d_char
+ end interface
+
+ interface mpas_deallocate_field
+ module procedure mpas_deallocate_field0d_integer
+ module procedure mpas_deallocate_field1d_integer
+ module procedure mpas_deallocate_field2d_integer
+ module procedure mpas_deallocate_field3d_integer
+ module procedure mpas_deallocate_field0d_real
+ module procedure mpas_deallocate_field1d_real
+ module procedure mpas_deallocate_field2d_real
+ module procedure mpas_deallocate_field3d_real
+ module procedure mpas_deallocate_field0d_char
+ module procedure mpas_deallocate_field1d_char
+ end interface
+
+
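The generic interfaces let callers allocate and free scratch fields without naming the rank- and type-specific routine; the compiler resolves the call from the field's declared type. A sketch, assuming a 2-d real scratch field named scratch2d:

   type (field2dReal), pointer :: scratch2d

   call mpas_allocate_scratch_field(scratch2d)            ! walks every block's field in the list
   ! ... use scratch2d % array ...
   call mpas_deallocate_scratch_field(scratch2d, .true.)  ! this block's field only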
contains
@@ -381,41 +414,26 @@
end subroutine mpas_allocate_domain
- subroutine mpas_allocate_block(b, dom, blockID, &
+ subroutine mpas_allocate_block(nHaloLayers, b, dom, blockID, &
#include "dim_dummy_args.inc"
)
implicit none
+ integer, intent(in) :: nHaloLayers
type (block_type), pointer :: b
type (domain_type), pointer :: dom
integer, intent(in) :: blockID
#include "dim_dummy_decls.inc"
- integer, parameter :: nHaloLayers = 2
integer :: i
b % blockID = blockID
- nullify(b % prev)
- nullify(b % next)
-
allocate(b % parinfo)
- allocate(b % parinfo % cellsToSend(nHaloLayers))
- allocate(b % parinfo % cellsToRecv(nHaloLayers))
- allocate(b % parinfo % cellsToCopy(nHaloLayers))
-
- allocate(b % parinfo % edgesToSend(nHaloLayers + 1)) ! first index is owned-cell edges
- allocate(b % parinfo % edgesToRecv(nHaloLayers + 1)) ! first index is owned-cell edges
- allocate(b % parinfo % edgesToCopy(nHaloLayers + 1)) ! first index is owned-cell edges
-
- allocate(b % parinfo % verticesToSend(nHaloLayers + 1)) ! first index is owned-cell vertices
- allocate(b % parinfo % verticesToRecv(nHaloLayers + 1)) ! first index is owned-cell vertices
- allocate(b % parinfo % verticesToCopy(nHaloLayers + 1)) ! first index is owned-cell vertices
-
b % domain => dom
#include "block_allocs.inc"
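A sketch of the matching call site: the halo depth is now supplied by the caller instead of the old hard-wired nHaloLayers = 2 (config_num_halos, which this merge reads elsewhere, is the natural source; block_ptr is illustrative):

   call mpas_allocate_block(config_num_halos, block_ptr, domain, blockID, &
#include "dim_dummy_args.inc"
                           )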
@@ -425,9 +443,11 @@
#include "group_alloc_routines.inc"
+#include "provis_alloc_routines.inc"
- subroutine mpas_deallocate_domain(dom)
+ subroutine mpas_deallocate_domain(dom)!{{{
+
implicit none
type (domain_type), pointer :: dom
@@ -442,10 +462,668 @@
deallocate(dom)
- end subroutine mpas_deallocate_domain
+ end subroutine mpas_deallocate_domain!}}}
+ subroutine mpas_allocate_scratch_field1d_integer(f, single_block_in)!{{{
+ type (field1dInteger), pointer :: f
+ logical, intent(in), optional :: single_block_in
+ logical :: single_block
+ type (field1dInteger), pointer :: f_cursor
- subroutine mpas_deallocate_block(b)
+ if(present(single_block_in)) then
+ single_block = single_block_in
+ else
+ single_block = .false.
+ end if
+
+ if(.not. single_block) then
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(.not.associated(f_cursor % array)) then
+ allocate(f_cursor % array(f_cursor % dimSizes(1)))
+ end if
+ f_cursor => f_cursor % next
+ end do
+ else
+ if(.not.associated(f % array)) then
+ allocate(f % array(f % dimSizes(1)))
+ end if
+ end if
+
+ end subroutine mpas_allocate_scratch_field1d_integer!}}}
+
+ subroutine mpas_allocate_scratch_field2d_integer(f, single_block_in)!{{{
+ type (field2dInteger), pointer :: f
+ logical, intent(in), optional :: single_block_in
+ logical :: single_block
+ type (field2dInteger), pointer :: f_cursor
+
+ if(present(single_block_in)) then
+ single_block = single_block_in
+ else
+ single_block = .false.
+ end if
+
+ if(.not. single_block) then
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(.not.associated(f_cursor % array)) then
+ allocate(f_cursor % array(f_cursor % dimSizes(1), f_cursor % dimSizes(2)))
+ end if
+ f_cursor => f_cursor % next
+ end do
+ else
+ if(.not.associated(f % array)) then
+ allocate(f % array(f % dimSizes(1), f % dimSizes(2)))
+ end if
+ end if
+
+ end subroutine mpas_allocate_scratch_field2d_integer!}}}
+
+ subroutine mpas_allocate_scratch_field3d_integer(f, single_block_in)!{{{
+ type (field3dInteger), pointer :: f
+ logical, intent(in), optional :: single_block_in
+ logical :: single_block
+ type (field3dInteger), pointer :: f_cursor
+
+ if(present(single_block_in)) then
+ single_block = single_block_in
+ else
+ single_block = .false.
+ end if
+
+ if(.not. single_block) then
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(.not.associated(f_cursor % array)) then
+ allocate(f_cursor % array(f_cursor % dimSizes(1), f_cursor % dimSizes(2), f_cursor % dimSizes(3)))
+ end if
+ f_cursor => f_cursor % next
+ end do
+ else
+ if(.not.associated(f % array)) then
+ allocate(f % array(f % dimSizes(1), f % dimSizes(2), f % dimSizes(3)))
+ end if
+ end if
+
+ end subroutine mpas_allocate_scratch_field3d_integer!}}}
+
+ subroutine mpas_allocate_scratch_field1d_real(f, single_block_in)!{{{
+ type (field1dReal), pointer :: f
+ logical, intent(in), optional :: single_block_in
+ logical :: single_block
+ type (field1dReal), pointer :: f_cursor
+
+ if(present(single_block_in)) then
+ single_block = single_block_in
+ else
+ single_block = .false.
+ end if
+
+ if(.not. single_block) then
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(.not.associated(f_cursor % array)) then
+ allocate(f_cursor % array(f_cursor % dimSizes(1)))
+ end if
+ f_cursor => f_cursor % next
+ end do
+ else
+ if(.not.associated(f % array)) then
+ allocate(f % array(f % dimSizes(1)))
+ end if
+ end if
+
+ end subroutine mpas_allocate_scratch_field1d_real!}}}
+
+ subroutine mpas_allocate_scratch_field2d_real(f, single_block_in)!{{{
+ type (field2dReal), pointer :: f
+ logical, intent(in), optional :: single_block_in
+ logical :: single_block
+ type (field2dReal), pointer :: f_cursor
+
+ if(present(single_block_in)) then
+ single_block = single_block_in
+ else
+ single_block = .false.
+ end if
+
+ if(.not. single_block) then
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(.not.associated(f_cursor % array)) then
+ allocate(f_cursor % array(f_cursor % dimSizes(1), f_cursor % dimSizes(2)))
+ end if
+ f_cursor => f_cursor % next
+ end do
+ else
+ if(.not.associated(f % array)) then
+ allocate(f % array(f % dimSizes(1), f % dimSizes(2)))
+ end if
+ end if
+
+ end subroutine mpas_allocate_scratch_field2d_real!}}}
+
+ subroutine mpas_allocate_scratch_field3d_real(f, single_block_in)!{{{
+ type (field3dReal), pointer :: f
+ logical, intent(in), optional :: single_block_in
+ logical :: single_block
+ type (field3dReal), pointer :: f_cursor
+
+ if(present(single_block_in)) then
+ single_block = single_block_in
+ else
+ single_block = .false.
+ end if
+
+ if(.not. single_block) then
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(.not.associated(f_cursor % array)) then
+ allocate(f_cursor % array(f_cursor % dimSizes(1), f_cursor % dimSizes(2), f_cursor % dimSizes(3)))
+ end if
+ f_cursor => f_cursor % next
+ end do
+ else
+ if(.not.associated(f % array)) then
+ allocate(f % array(f % dimSizes(1), f % dimSizes(2), f % dimSizes(3)))
+ end if
+ end if
+
+ end subroutine mpas_allocate_scratch_field3d_real!}}}
+
+ subroutine mpas_allocate_scratch_field1d_char(f, single_block_in)!{{{
+ type (field1dChar), pointer :: f
+ logical, intent(in), optional :: single_block_in
+ logical :: single_block
+ type (field1dChar), pointer :: f_cursor
+
+ if(present(single_block_in)) then
+ single_block = single_block_in
+ else
+ single_block = .false.
+ end if
+
+ if(.not. single_block) then
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(.not.associated(f_cursor % array)) then
+ allocate(f_cursor % array(f_cursor % dimSizes(1)))
+ end if
+ f_cursor => f_cursor % next
+ end do
+ else
+ if(.not.associated(f % array)) then
+ allocate(f % array(f % dimSizes(1)))
+ end if
+ end if
+
+ end subroutine mpas_allocate_scratch_field1d_char!}}}
+
+ subroutine mpas_deallocate_scratch_field1d_integer(f, single_block_in)!{{{
+ type (field1dInteger), pointer :: f
+ logical, intent(in), optional :: single_block_in
+ logical :: single_block
+ type (field1dInteger), pointer :: f_cursor
+
+ if(present(single_block_in)) then
+ single_block = single_block_in
+ else
+ single_block = .false.
+ end if
+
+ if(.not.single_block) then
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(associated(f_cursor % array)) then
+ deallocate(f_cursor % array)
+ end if
+
+ f_cursor => f_cursor % next
+ end do
+ else
+ if(associated(f % array)) then
+ deallocate(f % array)
+ end if
+ end if
+
+ end subroutine mpas_deallocate_scratch_field1d_integer!}}}
+
+ subroutine mpas_deallocate_scratch_field2d_integer(f, single_block_in)!{{{
+ type (field2dInteger), pointer :: f
+ logical, intent(in), optional :: single_block_in
+ logical :: single_block
+ type (field2dInteger), pointer :: f_cursor
+
+ if(present(single_block_in)) then
+ single_block = single_block_in
+ else
+ single_block = .false.
+ end if
+
+ if(.not.single_block) then
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(associated(f_cursor % array)) then
+ deallocate(f_cursor % array)
+ end if
+
+ f_cursor => f_cursor % next
+ end do
+ else
+ if(associated(f % array)) then
+ deallocate(f % array)
+ end if
+ end if
+
+ end subroutine mpas_deallocate_scratch_field2d_integer!}}}
+
+ subroutine mpas_deallocate_scratch_field3d_integer(f, single_block_in)!{{{
+ type (field3dInteger), pointer :: f
+ logical, intent(in), optional :: single_block_in
+ logical :: single_block
+ type (field3dInteger), pointer :: f_cursor
+
+ if(present(single_block_in)) then
+ single_block = single_block_in
+ else
+ single_block = .false.
+ end if
+
+ if(.not.single_block) then
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(associated(f_cursor % array)) then
+ deallocate(f_cursor % array)
+ end if
+
+ f_cursor => f_cursor % next
+ end do
+ else
+ if(associated(f % array)) then
+ deallocate(f % array)
+ end if
+ end if
+
+ end subroutine mpas_deallocate_scratch_field3d_integer!}}}
+
+ subroutine mpas_deallocate_scratch_field1d_real(f, single_block_in)!{{{
+ type (field1dReal), pointer :: f
+ logical, intent(in), optional :: single_block_in
+ logical :: single_block
+ type (field1dReal), pointer :: f_cursor
+
+ if(present(single_block_in)) then
+ single_block = single_block_in
+ else
+ single_block = .false.
+ end if
+
+ if(.not.single_block) then
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(associated(f_cursor % array)) then
+ deallocate(f_cursor % array)
+ end if
+
+ f_cursor => f_cursor % next
+ end do
+ else
+ if(associated(f % array)) then
+ deallocate(f % array)
+ end if
+ end if
+
+ end subroutine mpas_deallocate_scratch_field1d_real!}}}
+
+ subroutine mpas_deallocate_scratch_field2d_real(f, single_block_in)!{{{
+ type (field2dReal), pointer :: f
+ logical, intent(in), optional :: single_block_in
+ logical :: single_block
+ type (field2dReal), pointer :: f_cursor
+
+ if(present(single_block_in)) then
+ single_block = single_block_in
+ else
+ single_block = .false.
+ end if
+
+ if(.not.single_block) then
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(associated(f_cursor % array)) then
+ deallocate(f_cursor % array)
+ end if
+
+ f_cursor => f_cursor % next
+ end do
+ else
+ if(associated(f % array)) then
+ deallocate(f % array)
+ end if
+ end if
+
+ end subroutine mpas_deallocate_scratch_field2d_real!}}}
+
+ subroutine mpas_deallocate_scratch_field3d_real(f, single_block_in)!{{{
+ type (field3dReal), pointer :: f
+ logical, intent(in), optional :: single_block_in
+ logical :: single_block
+ type (field3dReal), pointer :: f_cursor
+
+ if(present(single_block_in)) then
+ single_block = single_block_in
+ else
+ single_block = .false.
+ end if
+
+ if(.not.single_block) then
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(associated(f_cursor % array)) then
+ deallocate(f_cursor % array)
+ end if
+
+ f_cursor => f_cursor % next
+ end do
+ else
+ if(associated(f % array)) then
+ deallocate(f % array)
+ end if
+ end if
+
+ end subroutine mpas_deallocate_scratch_field3d_real!}}}
+
+ subroutine mpas_deallocate_scratch_field1d_char(f, single_block_in)!{{{
+ type (field1dChar), pointer :: f
+ logical, intent(in), optional :: single_block_in
+ logical :: single_block
+ type (field1dChar), pointer :: f_cursor
+
+ if(present(single_block_in)) then
+ single_block = single_block_in
+ else
+ single_block = .false.
+ end if
+
+ if(.not.single_block) then
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(associated(f_cursor % array)) then
+ deallocate(f_cursor % array)
+ end if
+
+ f_cursor => f_cursor % next
+ end do
+ else
+ if(associated(f % array)) then
+ deallocate(f % array)
+ end if
+ end if
+
+ end subroutine mpas_deallocate_scratch_field1d_char!}}}
+
+
+ subroutine mpas_deallocate_field0d_integer(f)!{{{
+ type (field0dInteger), pointer :: f
+ type (field0dInteger), pointer :: f_cursor
+
+ f_cursor => f
+
+ do while(associated(f_cursor))
+ if(associated(f % next)) then
+ f => f % next
+ else
+ nullify(f)
+ end if
+
+ if(associated(f_cursor % ioinfo)) then
+ deallocate(f_cursor % ioinfo)
+ end if
+
+ deallocate(f_cursor)
+ f_cursor => f
+ end do
+
+ end subroutine mpas_deallocate_field0d_integer!}}}
+
+ subroutine mpas_deallocate_field1d_integer(f)!{{{
+ type (field1dInteger), pointer :: f
+ type (field1dInteger), pointer :: f_cursor
+
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(associated(f % next)) then
+ f => f % next
+ else
+ nullify(f)
+ end if
+
+ if(associated(f_cursor % ioinfo)) then
+ deallocate(f_cursor % ioinfo)
+ end if
+
+ if(associated(f_cursor % array)) then
+ deallocate(f_cursor % array)
+ end if
+
+ deallocate(f_cursor)
+
+ f_cursor => f
+ end do
+
+ end subroutine mpas_deallocate_field1d_integer!}}}
+
+ subroutine mpas_deallocate_field2d_integer(f)!{{{
+ type (field2dInteger), pointer :: f
+ type (field2dInteger), pointer :: f_cursor
+
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(associated(f % next)) then
+ f => f % next
+ else
+ nullify(f)
+ end if
+
+ if(associated(f_cursor % ioinfo)) then
+ deallocate(f_cursor % ioinfo)
+ end if
+
+ if(associated(f_cursor % array)) then
+ deallocate(f_cursor % array)
+ end if
+
+ deallocate(f_cursor)
+
+ f_cursor => f
+ end do
+
+ end subroutine mpas_deallocate_field2d_integer!}}}
+
+ subroutine mpas_deallocate_field3d_integer(f)!{{{
+ type (field3dInteger), pointer :: f
+ type (field3dInteger), pointer :: f_cursor
+
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(associated(f % next)) then
+ f => f % next
+ else
+ nullify(f)
+ end if
+
+ if(associated(f_cursor % ioinfo)) then
+ deallocate(f_cursor % ioinfo)
+ end if
+
+ if(associated(f_cursor % array)) then
+ deallocate(f_cursor % array)
+ end if
+
+ deallocate(f_cursor)
+
+ f_cursor => f
+ end do
+
+ end subroutine mpas_deallocate_field3d_integer!}}}
+
+ subroutine mpas_deallocate_field0d_real(f)!{{{
+ type (field0dReal), pointer :: f
+ type (field0dReal), pointer :: f_cursor
+
+ f_cursor => f
+
+ do while(associated(f_cursor))
+ if(associated(f % next)) then
+ f => f % next
+ else
+ nullify(f)
+ end if
+
+ if(associated(f_cursor % ioinfo)) then
+ deallocate(f_cursor % ioinfo)
+ end if
+
+ deallocate(f_cursor)
+
+ f_cursor => f
+ end do
+
+ end subroutine mpas_deallocate_field0d_real!}}}
+
+ subroutine mpas_deallocate_field1d_real(f)!{{{
+ type (field1dReal), pointer :: f
+ type (field1dReal), pointer :: f_cursor
+
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(associated(f % next)) then
+ f => f % next
+ else
+ nullify(f)
+ end if
+
+ if(associated(f_cursor % ioinfo)) then
+ deallocate(f_cursor % ioinfo)
+ end if
+
+ if(associated(f_cursor % array)) then
+ deallocate(f_cursor % array)
+ end if
+
+ deallocate(f_cursor)
+
+ f_cursor => f
+ end do
+
+ end subroutine mpas_deallocate_field1d_real!}}}
+
+ subroutine mpas_deallocate_field2d_real(f)!{{{
+ type (field2dReal), pointer :: f
+ type (field2dReal), pointer :: f_cursor
+
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(associated(f % next)) then
+ f => f % next
+ else
+ nullify(f)
+ end if
+
+ if(associated(f_cursor % ioinfo)) then
+ deallocate(f_cursor % ioinfo)
+ end if
+
+ if(associated(f_cursor % array)) then
+ deallocate(f_cursor % array)
+ end if
+
+ deallocate(f_cursor)
+
+ f_cursor => f
+ end do
+
+ end subroutine mpas_deallocate_field2d_real!}}}
+
+ subroutine mpas_deallocate_field3d_real(f)!{{{
+ type (field3dReal), pointer :: f
+ type (field3dReal), pointer :: f_cursor
+
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(associated(f % next)) then
+ f => f % next
+ else
+ nullify(f)
+ end if
+
+ if(associated(f_cursor % ioinfo)) then
+ deallocate(f_cursor % ioinfo)
+ end if
+
+ if(associated(f_cursor % array)) then
+ deallocate(f_cursor % array)
+ end if
+
+ deallocate(f_cursor)
+
+ f_cursor => f
+ end do
+
+ end subroutine mpas_deallocate_field3d_real!}}}
+
+ subroutine mpas_deallocate_field0d_char(f)!{{{
+ type (field0dChar), pointer :: f
+ type (field0dChar), pointer :: f_cursor
+
+ f_cursor => f
+
+ do while(associated(f_cursor))
+ if(associated(f % next)) then
+ f => f % next
+ else
+ nullify(f)
+ end if
+
+ if(associated(f_cursor % ioinfo)) then
+ deallocate(f_cursor % ioinfo)
+ end if
+
+ deallocate(f_cursor)
+ f_cursor => f
+ end do
+
+ end subroutine mpas_deallocate_field0d_char!}}}
+
+ subroutine mpas_deallocate_field1d_char(f)!{{{
+ type (field1dChar), pointer :: f
+ type (field1dChar), pointer :: f_cursor
+
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(associated(f % next)) then
+ f => f % next
+ else
+ nullify(f)
+ end if
+
+ if(associated(f_cursor % ioinfo)) then
+ deallocate(f_cursor % ioinfo)
+ end if
+
+ if(associated(f_cursor % array)) then
+ deallocate(f_cursor % array)
+ end if
+
+ deallocate(f_cursor)
+
+ f_cursor => f
+ end do
+
+ end subroutine mpas_deallocate_field1d_char!}}}
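Each mpas_deallocate_field variant advances the head pointer before freeing the current node, so the whole per-block chain is released and the argument is left disassociated. Through the generic interface the call is simply (tend2d is an illustrative field):

   type (field2dReal), pointer :: tend2d

   call mpas_deallocate_field(tend2d)   ! frees ioinfo and array for every block; nullifies tend2d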
+
+ subroutine mpas_deallocate_block(b)!{{{
implicit none
@@ -472,7 +1150,7 @@
#include "block_deallocs.inc"
- end subroutine mpas_deallocate_block
+ end subroutine mpas_deallocate_block!}}}
#include "group_dealloc_routines.inc"
Modified: branches/atmos_physics/src/framework/mpas_hash.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_hash.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/framework/mpas_hash.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -106,7 +106,7 @@
mpas_hash_search = .false.
hashval = mod(key, TABLESIZE) + 1
-
+
cursor => h%table(hashval)%p
do while(associated(cursor))
if (cursor%key == key) then
Modified: branches/atmos_physics/src/framework/mpas_io.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_io.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/framework/mpas_io.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -366,6 +366,7 @@
if (present(ierr)) ierr = MPAS_IO_ERR_PIO
deallocate(new_dimlist_node % dimhandle)
deallocate(new_dimlist_node)
+ write(0,*) 'WARNING: Dimension ', trim(dimname), ' not in input file.'
dimsize = -1
return
end if
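Callers of the enclosing inquiry routine (MPAS_io_inq_dim, judging from the surrounding context) can now recognize a missing dimension by the -1 sentinel together with the warning; a sketch with an illustrative dimension name:

   integer :: nSoilLevels, ierr

   call MPAS_io_inq_dim(inputHandle, 'nSoilLevels', nSoilLevels, ierr)
   if (ierr /= MPAS_IO_NOERR .or. nSoilLevels == -1) then
      ! dimension absent from the input file; fall back to a default
   end if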
@@ -551,6 +552,7 @@
if (present(ierr)) ierr = MPAS_IO_ERR_PIO
deallocate(new_fieldlist_node % fieldhandle)
deallocate(new_fieldlist_node)
+! write(0,*) 'WARNING: Variable ', trim(fieldname), ' not in input file.'
return
end if
!write(0,*) 'Inquired about variable ID', new_fieldlist_node % fieldhandle % fieldid
Modified: branches/atmos_physics/src/framework/mpas_io_input.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_io_input.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/framework/mpas_io_input.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -3,6 +3,7 @@
use mpas_grid_types
use mpas_dmpar
use mpas_block_decomp
+ use mpas_block_creator
use mpas_sort
use mpas_configure
use mpas_timekeeping
@@ -26,26 +27,20 @@
end type io_input_object
-
- type (exchange_list), pointer :: sendCellList, recvCellList
- type (exchange_list), pointer :: sendEdgeList, recvEdgeList
- type (exchange_list), pointer :: sendVertexList, recvVertexList
- type (exchange_list), pointer :: sendVertLevelList, recvVertLevelList
-
integer :: readCellStart, readCellEnd, nReadCells
integer :: readEdgeStart, readEdgeEnd, nReadEdges
integer :: readVertexStart, readVertexEnd, nReadVertices
- integer :: readVertLevelStart, readVertLevelEnd, nReadVertLevels
-
contains
+ subroutine mpas_input_state_for_domain(domain)!{{{
- subroutine mpas_input_state_for_domain(domain)
-
implicit none
type (domain_type), pointer :: domain
+
+ type (block_type), pointer :: block_ptr
+ type (block_type), pointer :: readingBlock
integer :: i, j, k
type (io_input_object) :: input_obj
@@ -58,111 +53,80 @@
integer, dimension(:), pointer :: readIndices
type (MPAS_IO_Handle_type) :: inputHandle
- type (field1dInteger) :: indexToCellIDField
- type (field1dInteger) :: indexToEdgeIDField
- type (field1dInteger) :: indexToVertexIDField
- type (field1dInteger) :: nEdgesOnCellField
- type (field2dInteger) :: cellsOnCellField
- type (field2dInteger) :: edgesOnCellField
- type (field2dInteger) :: verticesOnCellField
- type (field2dInteger) :: cellsOnEdgeField
- type (field2dInteger) :: cellsOnVertexField
+ type (field1dInteger), pointer :: indexToCellIDField
+ type (field1dInteger), pointer :: indexToEdgeIDField
+ type (field1dInteger), pointer :: indexToVertexIDField
+ type (field1dInteger), pointer :: nEdgesOnCellField
+ type (field2dInteger), pointer :: cellsOnCellField
+ type (field2dInteger), pointer :: edgesOnCellField
+ type (field2dInteger), pointer :: verticesOnCellField
+ type (field2dInteger), pointer :: cellsOnEdgeField
+ type (field2dInteger), pointer :: cellsOnVertexField
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- type (field1dReal) :: xCellField, yCellField, zCellField
- type (field1dReal) :: xEdgeField, yEdgeField, zEdgeField
- type (field1dReal) :: xVertexField, yVertexField, zVertexField
-#endif
-#endif
+ type (field1dReal), pointer :: xCellField, yCellField, zCellField
+ type (field1dReal), pointer :: xEdgeField, yEdgeField, zEdgeField
+ type (field1dReal), pointer :: xVertexField, yVertexField, zVertexField
type (field1DChar) :: xtime
-
- integer, dimension(:), pointer :: indexToCellID_0Halo
- integer, dimension(:), pointer :: nEdgesOnCell_0Halo
- integer, dimension(:,:), pointer :: cellsOnCell_0Halo
- integer, dimension(:), pointer :: nEdgesOnCell_2Halo
+ type (field1dInteger), pointer :: nCellsSolveField
+ type (field1dInteger), pointer :: nVerticesSolveField
+ type (field1dInteger), pointer :: nEdgesSolveField
- integer, dimension(:,:), pointer :: edgesOnCell_2Halo
- integer, dimension(:,:), pointer :: verticesOnCell_2Halo
+ type (field1DInteger), pointer :: indexToCellID_Block
+ type (field1DInteger), pointer :: nEdgesOnCell_Block
+ type (field2DInteger), pointer :: cellsOnCell_Block
+ type (field2DInteger), pointer :: verticesOnCell_Block
+ type (field2DInteger), pointer :: edgesOnCell_Block
- integer, dimension(:,:), pointer :: cellsOnEdge_2Halo
- integer, dimension(:,:), pointer :: cellsOnVertex_2Halo
+ type (field1DInteger), pointer :: indexToVertexID_Block
+ type (field2DInteger), pointer :: cellsOnVertex_Block
- integer, dimension(:,:), pointer :: cellIDSorted
- integer, dimension(:,:), pointer :: edgeIDSorted
- integer, dimension(:,:), pointer :: vertexIDSorted
+ type (field1DInteger), pointer :: indexToEdgeID_Block
+ type (field2DInteger), pointer :: cellsOnEdge_Block
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- real (kind=RKIND), dimension(:), pointer :: xCell, yCell, zCell
- real (kind=RKIND), dimension(:), pointer :: xEdge, yEdge, zEdge
- real (kind=RKIND), dimension(:), pointer :: xVertex, yVertex, zVertex
-#endif
-#endif
+ type (field1DReal), pointer :: xCell, yCell, zCell
+ type (field1DReal), pointer :: xEdge, yEdge, zEdge
+ type (field1DReal), pointer :: xVertex, yVertex, zVertex
- integer, dimension(:), pointer :: local_cell_list, local_edge_list, local_vertex_list
+ integer, dimension(:), pointer :: local_cell_list
integer, dimension(:), pointer :: block_id, block_start, block_count
- integer, dimension(:), pointer :: local_vertlevel_list, needed_vertlevel_list
- integer :: nlocal_edges, nlocal_vertices
- type (exchange_list), pointer :: send1Halo, recv1Halo
- type (exchange_list), pointer :: send2Halo, recv2Halo
type (graph) :: partial_global_graph_info
- type (graph) :: block_graph_0Halo, block_graph_1Halo, block_graph_2Halo
- integer :: ghostEdgeStart, ghostVertexStart
type (MPAS_Time_type) :: startTime
- type (MPAS_Time_type) :: sliceTime
- type (MPAS_TimeInterval_type) :: timeDiff
- type (MPAS_TimeInterval_type) :: minTimeDiff
character(len=StrKIND) :: timeStamp
character(len=StrKIND) :: filename
- integer, parameter :: nHalos = 2
- integer, dimension(nHalos+1) :: nCellsCumulative ! own cells, halo 1 cells, halo 2 cells
- integer, dimension(nHalos+2) :: nEdgesCumulative ! own edges, own cell's edges, halo 1 edges, halo 2 edges
- integer, dimension(nHalos+2) :: nVerticesCumulative ! own vertices, own cell's vertices, halo 1 vertices, halo 2 vertices
+ integer :: nHalos
- integer, dimension(nHalos) :: nCellsHalo ! halo 1 cells, halo 2 cells
- integer, dimension(nHalos+1) :: nEdgesHalo ! own cell's edges, halo 1 edges, halo 2 edges
- integer, dimension(nHalos+1) :: nVerticesHalo ! own cell's vertices, halo 1 vertices, halo 2 vertices
+ nHalos = config_num_halos
- integer, dimension(:), pointer :: tempIDs
- integer :: ntempIDs, offset
-
- integer :: nHalo, nOwnCells, nOwnEdges, nOwnVertices, cellCount, edgeCount, vertexCount, iEdge, iVertex
- type (hashtable) :: edgeHash, vertexHash
-
-
if (config_do_restart) then
+ ! this set followed by get is to ensure that the time is in standard format
+ call mpas_set_time(curr_time=startTime, dateTimeString=config_start_time)
+ call mpas_get_time(curr_time=startTime, dateTimeString=timeStamp)
- ! this set followed by get is to ensure that the time is in standard format
- call mpas_set_time(curr_time=startTime, dateTimeString=config_start_time)
- call mpas_get_time(curr_time=startTime, dateTimeString=timeStamp)
+ call mpas_insert_string_suffix(trim(config_restart_name), timeStamp, filename)
- call mpas_insert_string_suffix(trim(config_restart_name), timeStamp, filename)
-
- input_obj % filename = trim(filename)
- input_obj % stream = STREAM_RESTART
+ input_obj % filename = trim(filename)
+ input_obj % stream = STREAM_RESTART
else
- input_obj % filename = trim(config_input_name)
- input_obj % stream = STREAM_INPUT
+ input_obj % filename = trim(config_input_name)
+ input_obj % stream = STREAM_INPUT
end if
inputHandle = MPAS_io_open(trim(input_obj % filename), MPAS_IO_READ, MPAS_IO_PNETCDF, ierr)
if (ierr /= MPAS_IO_NOERR) then
- write(0,*) ' '
- if (input_obj % stream == STREAM_RESTART) then
- write(0,*) 'Error opening restart file ''', trim(input_obj % filename), ''''
- else if (input_obj % stream == STREAM_INPUT) then
- write(0,*) 'Error opening input file ''', trim(input_obj % filename), ''''
- else if (input_obj % stream == STREAM_SFC) then
- write(0,*) 'Error opening sfc file ''', trim(input_obj % filename), ''''
- end if
- write(0,*) ' '
- call mpas_dmpar_abort(domain % dminfo)
+ write(0,*) ' '
+ if (input_obj % stream == STREAM_RESTART) then
+ write(0,*) 'Error opening restart file ''', trim(input_obj % filename), ''''
+ else if (input_obj % stream == STREAM_INPUT) then
+ write(0,*) 'Error opening input file ''', trim(input_obj % filename), ''''
+ else if (input_obj % stream == STREAM_SFC) then
+ write(0,*) 'Error opening sfc file ''', trim(input_obj % filename), ''''
+ end if
+ write(0,*) ' '
+ call mpas_dmpar_abort(domain % dminfo)
end if
-
!
! Read global number of cells/edges/vertices
@@ -182,256 +146,23 @@
call mpas_dmpar_get_index_range(domain % dminfo, 1, nVertices, readVertexStart, readVertexEnd)
nReadVertices = readVertexEnd - readVertexStart + 1
- readVertLevelStart = 1
- readVertLevelEnd = nVertLevels
- nReadVertLevels = nVertLevels
-
-
+ allocate(readingBlock)
+ readingBlock % domain => domain
+ readingBlock % blockID = domain % dminfo % my_proc_id
+ readingBlock % localBlockID = 0
+
!
! Allocate and read fields that we will need in order to ultimately work out
! which cells/edges/vertices are owned by each block, and which are ghost
!
- ! Global cell indices
- allocate(indexToCellIDField % ioinfo)
- indexToCellIDField % ioinfo % fieldName = 'indexToCellID'
- indexToCellIDField % ioinfo % start(1) = readCellStart
- indexToCellIDField % ioinfo % count(1) = nReadCells
- allocate(indexToCellIDField % array(nReadCells))
- allocate(readIndices(nReadCells))
- do i=1,nReadCells
- readIndices(i) = i + readCellStart - 1
- end do
- call MPAS_io_inq_var(inputHandle, 'indexToCellID', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'indexToCellID', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'indexToCellID', indexToCellIDField % array, ierr)
-
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- ! Cell x-coordinates (in 3d Cartesian space)
- allocate(xCellField % ioinfo)
- xCellField % ioinfo % fieldName = 'xCell'
- xCellField % ioinfo % start(1) = readCellStart
- xCellField % ioinfo % count(1) = nReadCells
- allocate(xCellField % array(nReadCells))
- call MPAS_io_inq_var(inputHandle, 'xCell', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'xCell', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'xCell', xCellField % array, ierr)
+ call mpas_io_setup_cell_block_fields(inputHandle, nreadCells, readCellStart, readingBlock, maxEdges, indexTocellIDField, xCellField, &
+ yCellField, zCellField, nEdgesOnCellField, cellsOnCellField, edgesOnCellField, verticesOnCellField)
- ! Cell y-coordinates (in 3d Cartesian space)
- allocate(yCellField % ioinfo)
- yCellField % ioinfo % fieldName = 'yCell'
- yCellField % ioinfo % start(1) = readCellStart
- yCellField % ioinfo % count(1) = nReadCells
- allocate(yCellField % array(nReadCells))
- call MPAS_io_inq_var(inputHandle, 'yCell', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'yCell', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'yCell', yCellField % array, ierr)
+ call mpas_io_setup_edge_block_fields(inputHandle, nReadEdges, readEdgeStart, readingBlock, indexToEdgeIDField, xEdgeField, yEdgeField, zEdgeField, cellsOnEdgeField)
- ! Cell z-coordinates (in 3d Cartesian space)
- allocate(zCellField % ioinfo)
- zCellField % ioinfo % fieldName = 'zCell'
- zCellField % ioinfo % start(1) = readCellStart
- zCellField % ioinfo % count(1) = nReadCells
- allocate(zCellField % array(nReadCells))
- call MPAS_io_inq_var(inputHandle, 'zCell', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'zCell', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'zCell', zCellField % array, ierr)
-#endif
-#endif
- deallocate(readIndices)
-
-
- ! Global edge indices
- allocate(indexToEdgeIDField % ioinfo)
- indexToEdgeIDField % ioinfo % fieldName = 'indexToEdgeID'
- indexToEdgeIDField % ioinfo % start(1) = readEdgeStart
- indexToEdgeIDField % ioinfo % count(1) = nReadEdges
- allocate(indexToEdgeIDField % array(nReadEdges))
- allocate(readIndices(nReadEdges))
- do i=1,nReadEdges
- readIndices(i) = i + readEdgeStart - 1
- end do
- call MPAS_io_inq_var(inputHandle, 'indexToEdgeID', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'indexToEdgeID', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'indexToEdgeID', indexToEdgeIDField % array, ierr)
-
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- ! Edge x-coordinates (in 3d Cartesian space)
- allocate(xEdgeField % ioinfo)
- xEdgeField % ioinfo % fieldName = 'xEdge'
- xEdgeField % ioinfo % start(1) = readEdgeStart
- xEdgeField % ioinfo % count(1) = nReadEdges
- allocate(xEdgeField % array(nReadEdges))
- call MPAS_io_inq_var(inputHandle, 'xEdge', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'xEdge', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'xEdge', xEdgeField % array, ierr)
-
- ! Edge y-coordinates (in 3d Cartesian space)
- allocate(yEdgeField % ioinfo)
- yEdgeField % ioinfo % fieldName = 'yEdge'
- yEdgeField % ioinfo % start(1) = readEdgeStart
- yEdgeField % ioinfo % count(1) = nReadEdges
- allocate(yEdgeField % array(nReadEdges))
- call MPAS_io_inq_var(inputHandle, 'yEdge', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'yEdge', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'yEdge', yEdgeField % array, ierr)
-
- ! Edge z-coordinates (in 3d Cartesian space)
- allocate(zEdgeField % ioinfo)
- zEdgeField % ioinfo % fieldName = 'zEdge'
- zEdgeField % ioinfo % start(1) = readEdgeStart
- zEdgeField % ioinfo % count(1) = nReadEdges
- allocate(zEdgeField % array(nReadEdges))
- call MPAS_io_inq_var(inputHandle, 'zEdge', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'zEdge', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'zEdge', zEdgeField % array, ierr)
-#endif
-#endif
- deallocate(readIndices)
-
-
- ! Global vertex indices
- allocate(indexToVertexIDField % ioinfo)
- indexToVertexIDField % ioinfo % fieldName = 'indexToVertexID'
- indexToVertexIDField % ioinfo % start(1) = readVertexStart
- indexToVertexIDField % ioinfo % count(1) = nReadVertices
- allocate(indexToVertexIDField % array(nReadVertices))
- allocate(readIndices(nReadVertices))
- do i=1,nReadVertices
- readIndices(i) = i + readVertexStart - 1
- end do
- call MPAS_io_inq_var(inputHandle, 'indexToVertexID', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'indexToVertexID', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'indexToVertexID', indexToVertexIDField % array, ierr)
-
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- ! Vertex x-coordinates (in 3d Cartesian space)
- allocate(xVertexField % ioinfo)
- xVertexField % ioinfo % fieldName = 'xVertex'
- xVertexField % ioinfo % start(1) = readVertexStart
- xVertexField % ioinfo % count(1) = nReadVertices
- allocate(xVertexField % array(nReadVertices))
- call MPAS_io_inq_var(inputHandle, 'xVertex', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'xVertex', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'xVertex', xVertexField % array, ierr)
-
- ! Vertex y-coordinates (in 3d Cartesian space)
- allocate(yVertexField % ioinfo)
- yVertexField % ioinfo % fieldName = 'yVertex'
- yVertexField % ioinfo % start(1) = readVertexStart
- yVertexField % ioinfo % count(1) = nReadVertices
- allocate(yVertexField % array(nReadVertices))
- call MPAS_io_inq_var(inputHandle, 'yVertex', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'yVertex', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'yVertex', yVertexField % array, ierr)
-
- ! Vertex z-coordinates (in 3d Cartesian space)
- allocate(zVertexField % ioinfo)
- zVertexField % ioinfo % fieldName = 'zVertex'
- zVertexField % ioinfo % start(1) = readVertexStart
- zVertexField % ioinfo % count(1) = nReadVertices
- allocate(zVertexField % array(nReadVertices))
- call MPAS_io_inq_var(inputHandle, 'zVertex', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'zVertex', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'zVertex', zVertexField % array, ierr)
-#endif
-#endif
- deallocate(readIndices)
-
- ! Number of cell/edges/vertices adjacent to each cell
- allocate(nEdgesOnCellField % ioinfo)
- nEdgesOnCellField % ioinfo % fieldName = 'nEdgesOnCell'
- nEdgesOnCellField % ioinfo % start(1) = readCellStart
- nEdgesOnCellField % ioinfo % count(1) = nReadCells
- allocate(nEdgesOnCellField % array(nReadCells))
- allocate(readIndices(nReadCells))
- do i=1,nReadCells
- readIndices(i) = i + readCellStart - 1
- end do
- call MPAS_io_inq_var(inputHandle, 'nEdgesOnCell', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'nEdgesOnCell', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'nEdgesOnCell', nEdgesOnCellField % array, ierr)
-
- ! Global indices of cells adjacent to each cell
- allocate(cellsOnCellField % ioinfo)
- cellsOnCellField % ioinfo % fieldName = 'cellsOnCell'
- cellsOnCellField % ioinfo % start(1) = 1
- cellsOnCellField % ioinfo % start(2) = readCellStart
- cellsOnCellField % ioinfo % count(1) = maxEdges
- cellsOnCellField % ioinfo % count(2) = nReadCells
- allocate(cellsOnCellField % array(maxEdges,nReadCells))
- call MPAS_io_inq_var(inputHandle, 'cellsOnCell', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'cellsOnCell', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'cellsOnCell', cellsOnCellField % array, ierr)
-
- ! Global indices of edges adjacent to each cell
- allocate(edgesOnCellField % ioinfo)
- edgesOnCellField % ioinfo % fieldName = 'edgesOnCell'
- edgesOnCellField % ioinfo % start(1) = 1
- edgesOnCellField % ioinfo % start(2) = readCellStart
- edgesOnCellField % ioinfo % count(1) = maxEdges
- edgesOnCellField % ioinfo % count(2) = nReadCells
- allocate(edgesOnCellField % array(maxEdges,nReadCells))
- call MPAS_io_inq_var(inputHandle, 'edgesOnCell', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'edgesOnCell', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'edgesOnCell', edgesOnCellField % array, ierr)
-
- ! Global indices of vertices adjacent to each cell
- allocate(verticesOnCellField % ioinfo)
- verticesOnCellField % ioinfo % fieldName = 'verticesOnCell'
- verticesOnCellField % ioinfo % start(1) = 1
- verticesOnCellField % ioinfo % start(2) = readCellStart
- verticesOnCellField % ioinfo % count(1) = maxEdges
- verticesOnCellField % ioinfo % count(2) = nReadCells
- allocate(verticesOnCellField % array(maxEdges,nReadCells))
- call MPAS_io_inq_var(inputHandle, 'verticesOnCell', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'verticesOnCell', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'verticesOnCell', verticesOnCellField % array, ierr)
- deallocate(readIndices)
-
- ! Global indices of cells adjacent to each edge
- ! used for determining which edges are owned by a block, where
- ! iEdge is owned iff cellsOnEdge(1,iEdge) is an owned cell
- allocate(cellsOnEdgeField % ioinfo)
- cellsOnEdgeField % ioinfo % fieldName = 'cellsOnEdge'
- cellsOnEdgeField % ioinfo % start(1) = 1
- cellsOnEdgeField % ioinfo % start(2) = readEdgeStart
- cellsOnEdgeField % ioinfo % count(1) = 2
- cellsOnEdgeField % ioinfo % count(2) = nReadEdges
- allocate(cellsOnEdgeField % array(2,nReadEdges))
- allocate(readIndices(nReadEdges))
- do i=1,nReadEdges
- readIndices(i) = i + readEdgeStart - 1
- end do
- call MPAS_io_inq_var(inputHandle, 'cellsOnEdge', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'cellsOnEdge', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'cellsOnEdge', cellsOnEdgeField % array, ierr)
- deallocate(readIndices)
-
- ! Global indices of cells adjacent to each vertex
- ! used for determining which vertices are owned by a block, where
- ! iVtx is owned iff cellsOnVertex(1,iVtx) is an owned cell
- allocate(cellsOnVertexField % ioinfo)
- cellsOnVertexField % ioinfo % fieldName = 'cellsOnVertex'
- cellsOnVertexField % ioinfo % start(1) = 1
- cellsOnVertexField % ioinfo % start(2) = readVertexStart
- cellsOnVertexField % ioinfo % count(1) = vertexDegree
- cellsOnVertexField % ioinfo % count(2) = nReadVertices
- allocate(cellsOnVertexField % array(vertexDegree,nReadVertices))
- allocate(readIndices(nReadVertices))
- do i=1,nReadVertices
- readIndices(i) = i + readVertexStart - 1
- end do
- call MPAS_io_inq_var(inputHandle, 'cellsOnVertex', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'cellsOnVertex', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'cellsOnVertex', cellsOnVertexField % array, ierr)
- deallocate(readIndices)
-
-
+ call mpas_io_setup_vertex_block_fields(inputHandle, nReadVertices, readVertexStart, readingBlock, vertexDegree, indexToVertexIDField, &
+ xVertexField, yVertexField, zVertexField, cellsOnVertexField)
!
! Set up a graph derived data type describing the connectivity for the cells
! that were read by this process
@@ -450,7 +181,6 @@
partial_global_graph_info % vertexID(:) = indexToCellIDField % array(:)
partial_global_graph_info % nAdjacent(:) = nEdgesOnCellField % array(:)
partial_global_graph_info % adjacencyList(:,:) = cellsOnCellField % array(:,:)
-
! TODO: Ensure (by renaming or exchanging) that initial cell range on each proc is contiguous
! This situation may occur when reading a restart file with cells/edges/vertices written
@@ -463,508 +193,78 @@
deallocate(partial_global_graph_info % vertexID)
deallocate(partial_global_graph_info % nAdjacent)
deallocate(partial_global_graph_info % adjacencyList)
-
-
- allocate(indexToCellID_0Halo(size(local_cell_list)))
- allocate(nEdgesOnCell_0Halo(size(local_cell_list)))
- allocate(cellsOnCell_0Halo(maxEdges, size(local_cell_list)))
-
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- allocate(xCell(size(local_cell_list)))
- allocate(yCell(size(local_cell_list)))
- allocate(zCell(size(local_cell_list)))
-#endif
-#endif
-
- !
- ! Now that each process has a list of cells that it owns, exchange cell connectivity
- ! information between the processes that read info for a cell and those that own that cell
- !
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(indexToCellIDField % array), size(local_cell_list), &
- indexToCellIDField % array, local_cell_list, &
- sendCellList, recvCellList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, indexToCellIDField % array, indexToCellID_0Halo, &
- size(indexToCellIDField % array), size(local_cell_list), &
- sendCellList, recvCellList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, nEdgesOnCellField % array, nEdgesOnCell_0Halo, &
- size(indexToCellIDField % array), size(local_cell_list), &
- sendCellList, recvCellList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, cellsOnCellField % array, cellsOnCell_0Halo, &
- size(cellsOnCellField % array, 1), size(indexToCellIDField % array), size(local_cell_list), &
- sendCellList, recvCellList)
-
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- call mpas_dmpar_alltoall_field(domain % dminfo, xCellField % array, xCell, &
- size(xCellField % array), size(local_cell_list), &
- sendCellList, recvCellList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, yCellField % array, yCell, &
- size(yCellField % array), size(local_cell_list), &
- sendCellList, recvCellList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, zCellField % array, zCell, &
- size(zCellField % array), size(local_cell_list), &
- sendCellList, recvCellList)
-#endif
-#endif
+ call mpas_block_creator_setup_blocks_and_0halo_cells(domain, indexToCellID_Block, local_cell_list, block_id, block_start, block_count)
+ call mpas_block_creator_build_0halo_cell_fields(indexToCellIDField, nEdgesOnCellField, cellsOnCellField, verticesOnCellField, edgesOnCellField, indexToCellID_Block, nEdgesOnCell_Block, cellsOnCell_Block, verticesOnCell_Block, edgesOnCell_Block)
- deallocate(sendCellList % list)
- deallocate(sendCellList)
- deallocate(recvCellList % list)
- deallocate(recvCellList)
+ call mpas_block_creator_build_0_and_1halo_edge_fields(indexToEdgeIDField, cellsOnEdgeField, indexToCellID_Block, nEdgesOnCell_Block, edgesOnCell_Block, indexToEdgeID_Block, cellsOnEdge_Block, nEdgesSolveField)
+ call mpas_block_creator_build_0_and_1halo_edge_fields(indexToVertexIDField, cellsOnVertexField, indexToCellID_Block, nEdgesOnCell_Block, verticesOnCell_Block, indexToVertexID_Block, cellsOnVertex_Block, nVerticesSolveField)
+ call mpas_block_creator_build_cell_halos(indexToCellID_Block, nEdgesOnCell_Block, cellsOnCell_Block, verticesOnCell_Block, edgesOnCell_Block, nCellsSolveField)
+ call mpas_block_creator_build_edge_halos(indexToCellID_Block, nEdgesOnCell_Block, nCellsSolveField, edgesOnCell_Block, indexToEdgeID_Block, cellsOnEdge_Block, nEdgesSolveField)
+ call mpas_block_creator_build_edge_halos(indexToCellID_Block, nEdgesOnCell_Block, nCellsSolveField, verticesOnCell_Block, indexToVertexID_Block, cellsOnVertex_Block, nVerticesSolveField)
- !
- ! Build a graph of cell connectivity based on cells owned by this process
- !
- block_graph_0Halo % nVerticesTotal = size(local_cell_list)
- block_graph_0Halo % nVertices = size(local_cell_list)
- block_graph_0Halo % maxDegree = maxEdges
- block_graph_0Halo % ghostStart = size(local_cell_list) + 1
- allocate(block_graph_0Halo % vertexID(size(local_cell_list)))
- allocate(block_graph_0Halo % nAdjacent(size(local_cell_list)))
- allocate(block_graph_0Halo % adjacencyList(maxEdges, size(local_cell_list)))
-
- block_graph_0Halo % vertexID(:) = indexToCellID_0Halo(:)
- block_graph_0Halo % nAdjacent(:) = nEdgesOnCell_0Halo(:)
- block_graph_0Halo % adjacencyList(:,:) = cellsOnCell_0Halo(:,:)
-
- ! Get back a graph describing the owned cells plus the cells in the 1-halo
- call mpas_block_decomp_add_halo(domain % dminfo, block_graph_0Halo, block_graph_1Halo)
-
-
- !
- ! Work out exchange lists for 1-halo and exchange cell information for 1-halo
- !
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- block_graph_0Halo % nVertices, block_graph_1Halo % nVerticesTotal, &
- block_graph_0Halo % vertexID, block_graph_1Halo % vertexID, &
- send1Halo, recv1Halo)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % vertexID, block_graph_1Halo % vertexID, &
- block_graph_0Halo % nVertices, block_graph_1Halo % nVerticesTotal, &
- send1Halo, recv1Halo)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % nAdjacent, block_graph_1Halo % nAdjacent, &
- block_graph_0Halo % nVertices, block_graph_1Halo % nVerticesTotal, &
- send1Halo, recv1Halo)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % adjacencyList, block_graph_1Halo % adjacencyList, &
- block_graph_0Halo % maxDegree, block_graph_0Halo % nVertices, block_graph_1Halo % nVerticesTotal, &
- send1Halo, recv1Halo)
-
-
- !
- ! Work out exchange lists for 2-halo and exchange cell information for 2-halo
- !
- block_graph_1Halo % nVertices = block_graph_1Halo % nVerticesTotal
- block_graph_1Halo % ghostStart = block_graph_1Halo % nVerticesTotal + 1
-
- ! Get back a graph describing the owned and 1-halo cells plus the cells in the 2-halo
- call mpas_block_decomp_add_halo(domain % dminfo, block_graph_1Halo, block_graph_2Halo)
-
- block_graph_2Halo % nVertices = block_graph_0Halo % nVertices
- block_graph_2Halo % ghostStart = block_graph_2Halo % nVertices + 1
- nOwnCells = block_graph_2Halo % nVertices
+ ! Allocate blocks, and copy indexTo arrays into blocks
+ call mpas_block_creator_finalize_block_init(domain % blocklist, &
+#include "dim_dummy_args.inc"
+ , nCellsSolveField, nEdgesSolveField, nVerticesSolveField, indexToCellID_Block, indexToEdgeID_Block, indexToVertexID_Block)
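With the block list assembled by mpas_block_creator_finalize_block_init, a process may now own more than one block, and downstream code visits them with the usual linked-list walk (block_ptr is already declared in this routine):

   block_ptr => domain % blocklist
   do while (associated(block_ptr))
      ! per-block initialization or computation
      block_ptr => block_ptr % next
   end do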
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- !! For now, only use Zoltan with MPI
- !! Zoltan initialization
- call mpas_zoltan_start()
- !! Zoltan hook for cells
- call mpas_zoltan_order_loc_hsfc_cells(block_graph_2Halo%nVertices,block_graph_2Halo%VertexID,3,xCell,yCell,zCell)
-#endif
-#endif
-
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- block_graph_0Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
- block_graph_0Halo % vertexID, block_graph_2Halo % vertexID, &
- send2Halo, recv2Halo)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % vertexID, block_graph_2Halo % vertexID, &
- block_graph_0Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
- send2Halo, recv2Halo)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % nAdjacent, block_graph_2Halo % nAdjacent, &
- block_graph_0Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
- send2Halo, recv2Halo)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % adjacencyList, block_graph_2Halo % adjacencyList, &
- block_graph_0Halo % maxDegree, block_graph_0Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
- send2Halo, recv2Halo)
-
-
-
- !
- ! Knowing which cells are in block and the 2-halo, we can exchange lists of which edges are
- ! on each cell and which vertices are on each cell from the processes that read these
- ! fields for each cell to the processes that own the cells
- !
- allocate(nEdgesOnCell_2Halo(block_graph_2Halo % nVerticesTotal))
- allocate(edgesOnCell_2Halo(maxEdges, block_graph_2Halo % nVerticesTotal))
- allocate(verticesOnCell_2Halo(maxEdges, block_graph_2Halo % nVerticesTotal))
-
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(indexToCellIDField % array), block_graph_2Halo % nVerticesTotal, &
- indexToCellIDField % array, block_graph_2Halo % vertexID, &
- sendCellList, recvCellList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, nEdgesOnCellField % array, nEdgesOnCell_2Halo, &
- size(indexToCellIDField % array), size(local_cell_list), &
- sendCellList, recvCellList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, edgesOnCellField % array, edgesOnCell_2Halo, &
- maxEdges, nReadCells, block_graph_2Halo % nVerticesTotal, &
- sendCellList, recvCellList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, verticesOnCellField % array, verticesOnCell_2Halo, &
- maxEdges, nReadCells, block_graph_2Halo % nVerticesTotal, &
- sendCellList, recvCellList)
-
-
- !
- ! Get a list of which edges and vertices are adjacent to cells (including halo cells) in block
- !
- call mpas_block_decomp_all_edges_in_block(maxEdges, block_graph_2Halo % nVerticesTotal, block_graph_2Halo % nAdjacent, &
- edgesOnCell_2Halo, nlocal_edges, local_edge_list)
- call mpas_block_decomp_all_edges_in_block(maxEdges, block_graph_2Halo % nVerticesTotal, block_graph_2Halo % nAdjacent, &
- verticesOnCell_2Halo, nlocal_vertices, local_vertex_list)
-
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(indexToEdgeIDField % array), nlocal_edges, &
- indexToEdgeIDField % array, local_edge_list, &
- sendEdgeList, recvEdgeList)
-
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(indexToVertexIDField % array), nlocal_vertices, &
- indexToVertexIDField % array, local_vertex_list, &
- sendVertexList, recvVertexList)
-
-
-
- !
- ! Work out which edges and vertices are owned by this process, and which are ghost
- !
- allocate(cellsOnEdge_2Halo(2,nlocal_edges))
- allocate(cellsOnVertex_2Halo(vertexDegree,nlocal_vertices))
-
- call mpas_dmpar_alltoall_field(domain % dminfo, cellsOnEdgeField % array, cellsOnEdge_2Halo, &
- 2, size(cellsOnEdgeField % array, 2), nlocal_edges, &
- sendEdgeList, recvEdgeList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, cellsOnVertexField % array, cellsOnVertex_2Halo, &
- vertexDegree, size(cellsOnVertexField % array, 2), nlocal_vertices, &
- sendVertexList, recvVertexList)
-
-
- call mpas_block_decomp_partitioned_edge_list(nOwnCells, &
- block_graph_2Halo % vertexID(1:nOwnCells), &
- 2, nlocal_edges, cellsOnEdge_2Halo, local_edge_list, ghostEdgeStart)
-
- call mpas_block_decomp_partitioned_edge_list(nOwnCells, &
- block_graph_2Halo % vertexID(1:nOwnCells), &
- vertexDegree, nlocal_vertices, cellsOnVertex_2Halo, local_vertex_list, ghostVertexStart)
-
- !------- set owned and halo cell indices -------!
-
- nCellsCumulative(1) = nOwnCells
- nCellsCumulative(2) = block_graph_1Halo % nVerticesTotal
- nCellsCumulative(3) = block_graph_2Halo % nVerticesTotal
-
- !------- determine the perimeter and owned edges of own cells and halos -------!
-
- nOwnEdges = ghostEdgeStart-1
- nOwnVertices = ghostVertexStart-1
-
- ! skip the own edges found at the beginning of local_edge_list
- call mpas_hash_init(edgeHash)
- do i=1,nOwnEdges
- call mpas_hash_insert(edgeHash, local_edge_list(i))
- end do
-
- ! skip the own vertices found at the beginning of local_vertex_list
- call mpas_hash_init(vertexHash)
- do i=1,nOwnVertices
- call mpas_hash_insert(vertexHash, local_vertex_list(i))
- end do
-
- cellCount = 1 !tracks the index of the local cell array
- edgeCount = nOwnEdges !tracks where to insert the next local edge
- vertexCount = nOwnVertices !tracks where to insert the next local vertex
-
- nEdgesCumulative(1) = nOwnEdges
- nVerticesCumulative(1) = nOwnVertices
-
- ! Order the local_edge_list and local_vertex_list accordingly, and set the bounds of each perimeter
- do i = 1, nHalos + 1 ! for the own cells and each halo...
- do j = cellCount, nCellsCumulative(i)
-
- ! the number of edges on a cell is the same as the number of vertices, and therefore
- ! nEdgesOnCell_2Halo(j) will be the correct upper bound for both edges and vertices on the cell
- do k = 1, nEdgesOnCell_2Halo(j)
- iEdge = edgesOnCell_2Halo(k,j)
- if (.not. mpas_hash_search(edgeHash, iEdge)) then
- edgeCount = edgeCount + 1
- local_edge_list(edgeCount) = iEdge
- call mpas_hash_insert(edgeHash, iEdge)
- end if
-
- iVertex = verticesOnCell_2Halo(k,j)
- if (.not. mpas_hash_search(vertexHash, iVertex)) then
- vertexCount = vertexCount + 1
- local_vertex_list(vertexCount) = iVertex
- call mpas_hash_insert(vertexHash, iVertex)
- end if
- end do
-
- end do
-
- cellCount = nCellsCumulative(i) + 1
- nEdgesCumulative(i+1) = edgeCount
- nVerticesCumulative(i+1) = vertexCount
- end do
-
- do i = 1, nHalos
- nCellsHalo(i) = nCellsCumulative(i+1) - nCellsCumulative(i)
- end do
-
- do i = 1, nHalos + 1
- nEdgesHalo(i) = nEdgesCumulative(i+1) - nEdgesCumulative(i)
- end do
-
- do i = 1, nHalos + 1
- nVerticesHalo(i) = nVerticesCumulative(i+1) - nVerticesCumulative(i)
- end do
-
- call mpas_hash_destroy(edgeHash)
- call mpas_hash_destroy(vertexHash)
-
-
- ! At this point, local_edge_list(1:nOwnEdges) contains all of the owned edges for this block
- ! and local_edge_list(ghostEdgeStart:nlocal_edges) contains all of the ghost edges
-
- ! At this point, local_vertex_list(1:ghostVertexStart-1) contains all of the owned vertices for this block
- ! and local_vertex_list(ghostVertexStart:nlocal_vertices) contains all of the ghost vertices
-
- ! Also, at this point, block_graph_2Halo % vertexID(1:block_graph_2Halo%nVertices) contains all of the owned
- ! cells for this block, and block_graph_2Halo % vertexID(block_graph_2Halo%nVertices+1:block_graph_2Halo%nVerticesTotal)
- ! contains all of the ghost cells
-
-
- deallocate(sendEdgeList % list)
- deallocate(sendEdgeList)
- deallocate(recvEdgeList % list)
- deallocate(recvEdgeList)
-
- deallocate(sendVertexList % list)
- deallocate(sendVertexList)
- deallocate(recvVertexList % list)
- deallocate(recvVertexList)
-
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- allocate(xEdge(nlocal_edges))
- allocate(yEdge(nlocal_edges))
- allocate(zEdge(nlocal_edges))
- allocate(xVertex(nlocal_vertices))
- allocate(yVertex(nlocal_vertices))
- allocate(zVertex(nlocal_vertices))
-#endif
-#endif
-
- !
- ! Knowing which edges/vertices are owned by this block and which are actually read
- ! from the input or restart file, we can build exchange lists to perform
- ! all-to-all field exchanges from the process that reads a field to the processes that
- ! need them
- !
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(indexToEdgeIDField % array), nlocal_edges, &
- indexToEdgeIDField % array, local_edge_list, &
- sendEdgeList, recvEdgeList)
-
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(indexToVertexIDField % array), nlocal_vertices, &
- indexToVertexIDField % array, local_vertex_list, &
- sendVertexList, recvVertexList)
-
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- call mpas_dmpar_alltoall_field(domain % dminfo, xEdgeField % array, xEdge, &
- size(xEdgeField % array), nlocal_edges, &
- sendEdgeList, recvEdgeList)
- call mpas_dmpar_alltoall_field(domain % dminfo, yEdgeField % array, yEdge, &
- size(yEdgeField % array), nlocal_edges, &
- sendEdgeList, recvEdgeList)
- call mpas_dmpar_alltoall_field(domain % dminfo, zEdgeField % array, zEdge, &
- size(zEdgeField % array), nlocal_edges, &
- sendEdgeList, recvEdgeList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, xVertexField % array, xVertex, &
- size(xVertexField % array), nlocal_vertices, &
- sendVertexList, recvVertexList)
- call mpas_dmpar_alltoall_field(domain % dminfo, yVertexField % array, yVertex, &
- size(yVertexField % array), nlocal_vertices, &
- sendVertexList, recvVertexList)
- call mpas_dmpar_alltoall_field(domain % dminfo, zVertexField % array, zVertex, &
- size(zVertexField % array), nlocal_vertices, &
- sendVertexList, recvVertexList)
- !!!!!!!!!!!!!!!!!!
- !! Reorder edges
- !!!!!!!!!!!!!!!!!!
- call mpas_zoltan_order_loc_hsfc_edges(nOwnEdges,local_edge_list,3,xEdge,yEdge,zEdge)
- !!!!!!!!!!!!!!!!!!
-
- !!!!!!!!!!!!!!!!!!
- !! Reorder vertices
- !!!!!!!!!!!!!!!!!!
- call mpas_zoltan_order_loc_hsfc_verts(nOwnVertices,local_vertex_list,3,xVertex,yVertex,zVertex)
- !!!!!!!!!!!!!!!!!!
-
- deallocate(sendEdgeList % list)
- deallocate(sendEdgeList)
- deallocate(recvEdgeList % list)
- deallocate(recvEdgeList)
-
- deallocate(sendVertexList % list)
- deallocate(sendVertexList)
- deallocate(recvVertexList % list)
- deallocate(recvVertexList)
-
- !
- ! Knowing which edges/vertices are owned by this block and which are actually read
- ! from the input or restart file, we can build exchange lists to perform
- ! all-to-all field exchanges from the process that reads a field to the processes that
- ! need them
- !
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(indexToEdgeIDField % array), nlocal_edges, &
- indexToEdgeIDField % array, local_edge_list, &
- sendEdgeList, recvEdgeList)
-
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(indexToVertexIDField % array), nlocal_vertices, &
- indexToVertexIDField % array, local_vertex_list, &
- sendVertexList, recvVertexList)
-
-#endif
-#endif
-
- !
- ! Build ownership and exchange lists for vertical levels
- ! Essentially, process 0 owns all vertical levels when reading and writing,
- ! and it distributes them or gathers them to/from all other processes
- !
- if (domain % dminfo % my_proc_id == 0) then
- allocate(local_vertlevel_list(nVertLevels))
- do i=1,nVertLevels
- local_vertlevel_list(i) = i
- end do
- else
- allocate(local_vertlevel_list(0))
- end if
- allocate(needed_vertlevel_list(nVertLevels))
- do i=1,nVertLevels
- needed_vertlevel_list(i) = i
- end do
-
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(local_vertlevel_list), size(needed_vertlevel_list), &
- local_vertlevel_list, needed_vertlevel_list, &
- sendVertLevelList, recvVertLevelList)
-
- deallocate(local_vertlevel_list)
- deallocate(needed_vertlevel_list)
-
-
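
Throughout this routine, mpas_dmpar_get_owner_list converts a list of global indices owned by the calling task and a list of global indices it needs into matching send/receive descriptors. A minimal sketch of the calling pattern, assuming illustrative arrays ownedIDs and neededIDs (these names do not appear in this file):

    ! ownedIDs:  global IDs this task holds; neededIDs: global IDs it must obtain.
    ! On return, sendList describes which owned entries other tasks requested,
    ! and recvList describes which task supplies each needed entry.
    call mpas_dmpar_get_owner_list(domain % dminfo, &
                                   size(ownedIDs), size(neededIDs), &
                                   ownedIDs, neededIDs, &
                                   sendList, recvList)
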
- !
- ! Read and distribute all fields given ownership lists and exchange lists (maybe already in block?)
- !
- allocate(domain % blocklist)
-
- nCells = block_graph_2Halo % nVerticesTotal
- nEdges = nlocal_edges
- nVertices = nlocal_vertices
-
- call mpas_allocate_block(domain % blocklist, domain, domain%dminfo%my_proc_id, &
-#include "dim_dummy_args.inc"
- )
-
-!!!!!!!!!!MGD HERE WE NEED TO READ IN indexTo*ID fields !!!!!!!!!!!!!!!!!
- call MPAS_io_inq_var(inputHandle, 'indexToCellID', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'indexToCellID', local_cell_list(1:nOwnCells), ierr=ierr)
- call mpas_io_get_var(inputHandle, 'indexToCellID', domain % blocklist % mesh % indexToCellID % array, ierr)
-
- call MPAS_io_inq_var(inputHandle, 'indexToEdgeID', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'indexToEdgeID', local_edge_list(1:nOwnEdges), ierr=ierr)
- call mpas_io_get_var(inputHandle, 'indexToEdgeID', domain % blocklist % mesh % indexToEdgeID % array, ierr)
-
- call MPAS_io_inq_var(inputHandle, 'indexToVertexID', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'indexToVertexID', local_vertex_list(1:nOwnVertices), ierr=ierr)
- call mpas_io_get_var(inputHandle, 'indexToVertexID', domain % blocklist % mesh % indexToVertexID % array, ierr)
-
- domain % blocklist % mesh % nCellsSolve = nOwnCells
- domain % blocklist % mesh % nEdgesSolve = nOwnEdges
- domain % blocklist % mesh % nVerticesSolve = nOwnVertices
- domain % blocklist % mesh % nVertLevelsSolve = domain % blocklist % mesh % nVertLevels ! No vertical decomp yet...
-
call mpas_io_input_init(input_obj, domain % blocklist, domain % dminfo)
-
- !
- ! Read attributes
- !
call MPAS_readStreamAtt(input_obj % io_stream, 'sphere_radius', r_sphere_radius, ierr)
if (ierr /= MPAS_STREAM_NOERR) then
- write(0,*) 'Warning: Attribute sphere_radius not found in '//trim(input_obj % filename)
- write(0,*) ' Setting sphere_radius to 1.0'
- domain % blocklist % mesh % sphere_radius = 1.0
+ write(0,*) 'Warning: Attribute sphere_radius not found in '//trim(input_obj % filename)
+ write(0,*) ' Setting sphere_radius to 1.0'
+ domain % blocklist % mesh % sphere_radius = 1.0
else
- domain % blocklist % mesh % sphere_radius = r_sphere_radius
+ domain % blocklist % mesh % sphere_radius = r_sphere_radius
end if
call MPAS_readStreamAtt(input_obj % io_stream, 'on_a_sphere', c_on_a_sphere, ierr)
if (ierr /= MPAS_STREAM_NOERR) then
- write(0,*) 'Warning: Attribute on_a_sphere not found in '//trim(input_obj % filename)
- write(0,*) ' Setting on_a_sphere to ''YES'''
- domain % blocklist % mesh % on_a_sphere = .true.
+ write(0,*) 'Warning: Attribute on_a_sphere not found in '//trim(input_obj % filename)
+ write(0,*) ' Setting on_a_sphere to ''YES'''
+ domain % blocklist % mesh % on_a_sphere = .true.
else
- if (index(c_on_a_sphere, 'YES') /= 0) then
- domain % blocklist % mesh % on_a_sphere = .true.
- else
- domain % blocklist % mesh % on_a_sphere = .false.
- end if
+ if (index(c_on_a_sphere, 'YES') /= 0) then
+ domain % blocklist % mesh % on_a_sphere = .true.
+ else
+ domain % blocklist % mesh % on_a_sphere = .false.
+ end if
end if
+ block_ptr => domain % blocklist % next
+ do while (associated(block_ptr))
+ block_ptr % mesh % sphere_radius = domain % blocklist % mesh % sphere_radius
+ block_ptr % mesh % on_a_sphere = domain % blocklist % mesh % on_a_sphere
+
+ ! Link the sendList and recvList pointers in each field type to the appropriate lists
+ ! in parinfo, e.g., cellsToSend and cellsToRecv; in the future, it can also be extended to
+ ! link blocks of fields to each other
+ call mpas_create_field_links(block_ptr)
+
+ block_ptr => block_ptr % next
+ end do
+
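
mpas_create_field_links itself is not shown in this diff; conceptually, for each field in a block it points the field's halo-communication lists at the matching lists in that block's parinfo, so later halo exchanges can find their send/receive descriptors. A hedged sketch of one such association, with someCellField standing in for any cell-dimensioned field and the member types simplified:

    ! Aim a cell-dimensioned field's exchange-list pointers at the block's
    ! cell lists; edge- and vertex-dimensioned fields are wired analogously.
    someCellField % sendList => block_ptr % parinfo % cellsToSend
    someCellField % recvList => block_ptr % parinfo % cellsToRecv
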
if (.not. config_do_restart) then
- input_obj % time = 1
+ input_obj % time = 1
else
- !
- ! If doing a restart, we need to decide which time slice to read from the
- ! restart file
- !
- input_obj % time = MPAS_seekStream(input_obj % io_stream, config_start_time, MPAS_STREAM_EXACT_TIME, timeStamp, ierr)
- if (ierr == MPAS_IO_ERR) then
- write(0,*) 'Error: restart file '//trim(filename)//' did not contain time '//trim(config_start_time)
- call mpas_dmpar_abort(domain % dminfo)
- end if
-write(0,*) 'MGD DEBUGGING time = ', input_obj % time
- write(0,*) 'Restarting model from time ', trim(timeStamp)
-
+ !
+ ! If doing a restart, we need to decide which time slice to read from the
+ ! restart file
+ !
+ input_obj % time = MPAS_seekStream(input_obj % io_stream, config_start_time, MPAS_STREAM_EXACT_TIME, timeStamp, ierr)
+ if (ierr == MPAS_IO_ERR) then
+ write(0,*) 'Error: restart file '//trim(filename)//' did not contain time '//trim(config_start_time)
+ call mpas_dmpar_abort(domain % dminfo)
+ end if
+!write(0,*) 'MGD DEBUGGING time = ', input_obj % time
+ write(0,*) 'Restarting model from time ', trim(timeStamp)
end if
-
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! Do the actual work of reading all fields in from the input or restart file
! For each field:
@@ -973,7 +273,7 @@
! process
! 2) All processes then send the global indices that were read to the
! processes that own those indices based on
- ! {send,recv}{Cell,Edge,Vertex,VertLevel}List
+ ! {send,recv}{Cell,Edge,Vertex}List
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
call mpas_read_and_distribute_fields(input_obj)
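
Steps 1) and 2) follow the same read-then-route pattern used for the connectivity fields earlier in this routine: each task reads a contiguous slab, then an all-to-all moves every element to its owning task. A condensed sketch, where readBuffer, ownedValues, nRead, and nOwnedLocal are illustrative names rather than variables from this file:

    ! Route the nRead contiguously-read values into the nOwnedLocal owned slots,
    ! using the exchange lists built by mpas_dmpar_get_owner_list.
    call mpas_dmpar_alltoall_field(domain % dminfo, readBuffer, ownedValues, &
                                   nRead, nOwnedLocal, &
                                   sendCellList, recvCellList)
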
@@ -981,292 +281,208 @@
call MPAS_io_close(inputHandle, ierr)
-
!
- ! Work out halo exchange lists for cells, edges, and vertices
- ! NB: The next pointer in each element of, e.g., cellsToSend, acts as the head pointer of
- ! the list, since Fortran does not allow arrays of pointers
- !
-
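
The note above records a standard Fortran idiom: an array of pointers is not legal, but a pointer to an array of a derived type with a pointer component is, so each array element's next component serves as a list head. A self-contained sketch (the real exchange_list has more components than shown here):

    type :: exchange_list
       integer :: procID                                 ! placeholder component
       type (exchange_list), pointer :: next => null()   ! used as the list head below
    end type exchange_list

    ! cellsToSend is one pointer to an array of list elements; element i's
    ! NEXT component acts as the head of halo layer i's send list,
    ! i.e., cellsToSend(i) % next as used in the calls below.
    type (exchange_list), dimension(:), pointer :: cellsToSend
    allocate(cellsToSend(nHalos))
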
- !--------- Create Cell Exchange Lists ---------!
-
- ! pass in neededList of ownedCells and halo layer 1 cells
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- nOwnCells, nCellsCumulative(2), &
- block_graph_2Halo % vertexID(1:nOwnCells), block_graph_2Halo % vertexID(1 : nCellsCumulative(2)), &
- domain % blocklist % parinfo % cellsToSend(1) % next, domain % blocklist % parinfo % cellsToRecv(1) % next)
-
- ! pass in neededList of ownedCells and halo layer 2 cells; offset of number of halo 1 cells is required
- offset = nCellsHalo(1)
- nTempIDs = nOwnCells + nCellsHalo(2)
- allocate(tempIDs(nTempIDs))
- tempIDs(1:nOwnCells) = block_graph_2Halo % vertexID(1:nOwnCells)
- tempIDs(nOwnCells+1:nTempIDs) = block_graph_2Halo % vertexID(nCellsCumulative(2)+1 : nCellsCumulative(3))
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- nOwnCells, nTempIDs, &
- block_graph_2Halo % vertexID(1:nOwnCells), tempIDs, &
- domain % blocklist % parinfo % cellsToSend(2) % next, domain % blocklist % parinfo % cellsToRecv(2) % next, &
- offset)
- deallocate(tempIDs)
-
-
- !--------- Create Edge Exchange Lists ---------!
-
- ! pass in neededList of ownedEdges and ownedCell perimeter edges
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- nOwnEdges, nEdgesCumulative(2), &
- local_edge_list(1:nOwnEdges), local_edge_list(1 : nEdgesCumulative(2)), &
- domain % blocklist % parinfo % edgesToSend(1) % next, domain % blocklist % parinfo % edgesToRecv(1) % next)
-
- ! pass in neededList of owned edges and yet-to-be-included edges from halo 1 cells; offset of number of ownedCell perimeter edges is required
- offset = nEdgesHalo(1)
- nTempIDs = nOwnEdges + nEdgesHalo(2)
- allocate(tempIDs(nTempIDs))
- tempIDs(1:nOwnEdges) = local_edge_list(1:nOwnEdges)
- tempIDs(nOwnEdges+1:nTempIDs) = local_edge_list(nEdgesCumulative(2)+1 : nEdgesCumulative(3))
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- nOwnEdges, nTempIDs, &
- local_edge_list(1:nOwnEdges), tempIDs, &
- domain % blocklist % parinfo % edgesToSend(2) % next, domain % blocklist % parinfo % edgesToRecv(2) % next, &
- offset)
- deallocate(tempIDs)
-
- ! pass in neededList of owned edges and yet-to-be-included edges from halo 2 cells; offset of number of ownedCell perimeter edges and halo 1 edges is required
- offset = nEdgesHalo(1) + nEdgesHalo(2)
- nTempIDs = nOwnEdges + nEdgesHalo(3)
- allocate(tempIDs(nTempIDs))
- tempIDs(1:nOwnEdges) = local_edge_list(1:nOwnEdges)
- tempIDs(nOwnEdges+1:nTempIDs) = local_edge_list(nEdgesCumulative(3)+1 : nEdgesCumulative(4))
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- nOwnEdges, nTempIDs, &
- local_edge_list(1:nOwnEdges), tempIDs, &
- domain % blocklist % parinfo % edgesToSend(3) % next, domain % blocklist % parinfo % edgesToRecv(3) % next, &
- offset)
- deallocate(tempIDs)
-
-
- !--------- Create Vertex Exchange Lists ---------!
-
-
- ! pass in neededList of ownedVertices and ownedCell perimeter vertices
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- nOwnVertices, nVerticesCumulative(2), &
- local_vertex_list(1:nOwnVertices), local_vertex_list(1 : nVerticesCumulative(2)), &
- domain % blocklist % parinfo % verticesToSend(1) % next, domain % blocklist % parinfo % verticesToRecv(1) % next)
-
- ! pass in neededList of owned vertices and yet-to-be-included vertices from halo 1 cells; offset of number of ownedCell perimeter vertices is required
- offset = nVerticesHalo(1)
- nTempIDs = nOwnVertices + nVerticesHalo(2)
- allocate(tempIDs(nTempIDs))
- tempIDs(1:nOwnVertices) = local_vertex_list(1:nOwnVertices)
- tempIDs(nOwnVertices+1:nTempIDs) = local_vertex_list(nVerticesCumulative(2)+1 : nVerticesCumulative(3))
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- nOwnVertices, nTempIDs, &
- local_vertex_list(1:nOwnVertices), tempIDs, &
- domain % blocklist % parinfo % verticesToSend(2) % next, domain % blocklist % parinfo % verticesToRecv(2) % next, &
- offset)
- deallocate(tempIDs)
-
- ! pass in neededList of owned vertices and yet-to-be-included vertices from halo 2 cells; offset of number of ownedCell perimeter vertices and halo 1 vertices is required
- offset = nVerticesHalo(1) + nVerticesHalo(2)
- nTempIDs = nOwnVertices + nVerticesHalo(3)
- allocate(tempIDs(nTempIDs))
- tempIDs(1:nOwnVertices) = local_vertex_list(1:nOwnVertices)
- tempIDs(nOwnVertices+1:nTempIDs) = local_vertex_list(nVerticesCumulative(3)+1 : nVerticesCumulative(4))
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- nOwnVertices, nTempIDs, &
- local_vertex_list(1:nOwnVertices), tempIDs, &
- domain % blocklist % parinfo % verticesToSend(3) % next, domain % blocklist % parinfo % verticesToRecv(3) % next, &
- offset)
- deallocate(tempIDs)
-
-
- domain % blocklist % mesh % nCellsSolve = nOwnCells
- domain % blocklist % mesh % nEdgesSolve = nOwnEdges
- domain % blocklist % mesh % nVerticesSolve = ghostVertexStart-1
- domain % blocklist % mesh % nVertLevelsSolve = domain % blocklist % mesh % nVertLevels ! No vertical decomp yet...
-
- ! Link the sendList and recvList pointers in each field type to the appropriate lists
- ! in parinfo, e.g., cellsToSend and cellsToRecv; in the future, it can also be extended to
- ! link blocks of fields to each other
- call mpas_create_field_links(domain % blocklist)
-
-
- !
! Exchange halos for all of the fields that were read from the input file
!
call mpas_exch_input_field_halos(domain, input_obj)
-
- !
- ! Rename vertices in cellsOnCell, edgesOnCell, etc. to local indices
- !
- allocate(cellIDSorted(2,domain % blocklist % mesh % nCells))
- allocate(edgeIDSorted(2,domain % blocklist % mesh % nEdges))
- allocate(vertexIDSorted(2,domain % blocklist % mesh % nVertices))
+ call mpas_block_creator_reindex_block_fields(domain % blocklist)
- do i=1,domain % blocklist % mesh % nCells
- cellIDSorted(1,i) = domain % blocklist % mesh % indexToCellID % array(i)
- cellIDSorted(2,i) = i
- end do
- call quicksort(block_graph_2Halo % nVerticesTotal, cellIDSorted)
+ call mpas_dmpar_destroy_mulithalo_exchange_list(indexToCellIDField % sendList)
+ call mpas_dmpar_destroy_mulithalo_exchange_list(indexToCellIDField % recvList)
+ call mpas_dmpar_destroy_mulithalo_exchange_list(indexToCellIDField % copyList)
- do i=1,domain % blocklist % mesh % nEdges
- edgeIDSorted(1,i) = domain % blocklist % mesh % indexToEdgeID % array(i)
- edgeIDSorted(2,i) = i
- end do
- call quicksort(nlocal_edges, edgeIDSorted)
+ call mpas_dmpar_destroy_mulithalo_exchange_list(indexToEdgeIDField % sendList)
+ call mpas_dmpar_destroy_mulithalo_exchange_list(indexToEdgeIDField % recvList)
+ call mpas_dmpar_destroy_mulithalo_exchange_list(indexToEdgeIDField % copyList)
- do i=1,domain % blocklist % mesh % nVertices
- vertexIDSorted(1,i) = domain % blocklist % mesh % indexToVertexID % array(i)
- vertexIDSorted(2,i) = i
- end do
- call quicksort(nlocal_vertices, vertexIDSorted)
+ call mpas_dmpar_destroy_mulithalo_exchange_list(indexToVertexIDField % sendList)
+ call mpas_dmpar_destroy_mulithalo_exchange_list(indexToVertexIDField % recvList)
+ call mpas_dmpar_destroy_mulithalo_exchange_list(indexToVertexIDField % copyList)
+ call mpas_deallocate_field(indexToCellIDField)
+ call mpas_deallocate_field(indexToEdgeIDField)
+ call mpas_deallocate_field(indexToVertexIDField)
+ call mpas_deallocate_field(cellsOnCellField)
- do i=1,domain % blocklist % mesh % nCells
- do j=1,domain % blocklist % mesh % nEdgesOnCell % array(i)
+ call mpas_deallocate_field(edgesOnCellField)
+ call mpas_deallocate_field(verticesOnCellField)
+ call mpas_deallocate_field(cellsOnEdgeField)
+ call mpas_deallocate_field(cellsOnVertexField)
- k = mpas_binary_search(cellIDSorted, 2, 1, domain % blocklist % mesh % nCells, &
- domain % blocklist % mesh % cellsOnCell % array(j,i))
- if (k <= domain % blocklist % mesh % nCells) then
- domain % blocklist % mesh % cellsOnCell % array(j,i) = cellIDSorted(2,k)
- else
- domain % blocklist % mesh % cellsOnCell % array(j,i) = domain % blocklist % mesh % nCells + 1
- end if
+ call mpas_deallocate_field(indexToCellID_Block)
+ call mpas_deallocate_field(nEdgesOnCell_Block)
+ call mpas_deallocate_field(cellsOnCell_Block)
+ call mpas_deallocate_field(verticesOnCell_Block)
+ call mpas_deallocate_field(edgesOnCell_Block)
+ call mpas_deallocate_field(indexToVertexID_Block)
+ call mpas_deallocate_field(cellsOnVertex_Block)
+ call mpas_deallocate_field(indexToEdgeID_Block)
+ call mpas_deallocate_field(cellsOnEdge_Block)
- k = mpas_binary_search(edgeIDSorted, 2, 1, domain % blocklist % mesh % nEdges, &
- domain % blocklist % mesh % edgesOnCell % array(j,i))
- if (k <= domain % blocklist % mesh % nEdges) then
- domain % blocklist % mesh % edgesOnCell % array(j,i) = edgeIDSorted(2,k)
- else
- domain % blocklist % mesh % edgesOnCell % array(j,i) = domain % blocklist % mesh % nEdges + 1
- end if
+ call mpas_deallocate_field(nCellsSolveField)
+ call mpas_deallocate_field(nVerticesSolveField)
+ call mpas_deallocate_field(nEdgesSolveField)
- k = mpas_binary_search(vertexIDSorted, 2, 1, domain % blocklist % mesh % nVertices, &
- domain % blocklist % mesh % verticesOnCell % array(j,i))
- if (k <= domain % blocklist % mesh % nVertices) then
- domain % blocklist % mesh % verticesOnCell % array(j,i) = vertexIDSorted(2,k)
- else
- domain % blocklist % mesh % verticesOnCell % array(j,i) = domain % blocklist % mesh % nVertices + 1
- end if
+#ifdef HAVE_ZOLTAN
+ call mpas_deallocate_field(xCellField)
+ call mpas_deallocate_field(yCellField)
+ call mpas_deallocate_field(zCellField)
+ call mpas_deallocate_field(xVertexField)
+ call mpas_deallocate_field(yVertexField)
+ call mpas_deallocate_field(zVertexField)
+ call mpas_deallocate_field(xEdgeField)
+ call mpas_deallocate_field(yEdgeField)
+ call mpas_deallocate_field(zEdgeField)
- end do
- end do
+ call mpas_deallocate_field(xCell)
+ call mpas_deallocate_field(yCell)
+ call mpas_deallocate_field(zCell)
+ call mpas_deallocate_field(xVertex)
+ call mpas_deallocate_field(yVertex)
+ call mpas_deallocate_field(zVertex)
+ call mpas_deallocate_field(xEdge)
+ call mpas_deallocate_field(yEdge)
+ call mpas_deallocate_field(zEdge)
+#endif
- do i=1,domain % blocklist % mesh % nEdges
- do j=1,2
+ deallocate(local_cell_list)
+ deallocate(block_id)
+ deallocate(block_start)
+ deallocate(block_count)
+ deallocate(readingBlock)
- k = mpas_binary_search(cellIDSorted, 2, 1, domain % blocklist % mesh % nCells, &
- domain % blocklist % mesh % cellsOnEdge % array(j,i))
- if (k <= domain % blocklist % mesh % nCells) then
- domain % blocklist % mesh % cellsOnEdge % array(j,i) = cellIDSorted(2,k)
- else
- domain % blocklist % mesh % cellsOnEdge % array(j,i) = domain % blocklist % mesh % nCells + 1
- end if
+!#ifdef HAVE_ZOLTAN
+!#ifdef _MPI
+! allocate(xCell(size(local_cell_list)))
+! allocate(yCell(size(local_cell_list)))
+! allocate(zCell(size(local_cell_list)))
+! call mpas_dmpar_alltoall_field(domain % dminfo, xCellField % array, xCell, &
+! size(xCellField % array), size(local_cell_list), &
+! sendCellList, recvCellList)
+!
+! call mpas_dmpar_alltoall_field(domain % dminfo, yCellField % array, yCell, &
+! size(yCellField % array), size(local_cell_list), &
+! sendCellList, recvCellList)
+!
+! call mpas_dmpar_alltoall_field(domain % dminfo, zCellField % array, zCell, &
+! size(zCellField % array), size(local_cell_list), &
+! sendCellList, recvCellList)
+!#endif
+!#endif
+
+!#ifdef HAVE_ZOLTAN
+!#ifdef _MPI
+! !! For now, only use Zoltan with MPI
+! !! Zoltan initialization
+! call mpas_zoltan_start()
+!
+! !! Zoltan hook for cells
+! call mpas_zoltan_order_loc_hsfc_cells(block_graph_2Halo%nVertices,block_graph_2Halo%VertexID,3,xCell,yCell,zCell)
+!#endif
+!#endif
+!
+!
+!#ifdef HAVE_ZOLTAN
+!#ifdef _MPI
+! allocate(xEdge(nlocal_edges))
+! allocate(yEdge(nlocal_edges))
+! allocate(zEdge(nlocal_edges))
+! allocate(xVertex(nlocal_vertices))
+! allocate(yVertex(nlocal_vertices))
+! allocate(zVertex(nlocal_vertices))
+!#endif
+!#endif
+!
+!#ifdef HAVE_ZOLTAN
+!#ifdef _MPI
+! call mpas_dmpar_alltoall_field(domain % dminfo, xEdgeField % array, xEdge, &
+! size(xEdgeField % array), nlocal_edges, &
+! sendEdgeList, recvEdgeList)
+! call mpas_dmpar_alltoall_field(domain % dminfo, yEdgeField % array, yEdge, &
+! size(yEdgeField % array), nlocal_edges, &
+! sendEdgeList, recvEdgeList)
+! call mpas_dmpar_alltoall_field(domain % dminfo, zEdgeField % array, zEdge, &
+! size(zEdgeField % array), nlocal_edges, &
+! sendEdgeList, recvEdgeList)
+!
+! call mpas_dmpar_alltoall_field(domain % dminfo, xVertexField % array, xVertex, &
+! size(xVertexField % array), nlocal_vertices, &
+! sendVertexList, recvVertexList)
+! call mpas_dmpar_alltoall_field(domain % dminfo, yVertexField % array, yVertex, &
+! size(yVertexField % array), nlocal_vertices, &
+! sendVertexList, recvVertexList)
+! call mpas_dmpar_alltoall_field(domain % dminfo, zVertexField % array, zVertex, &
+! size(zVertexField % array), nlocal_vertices, &
+! sendVertexList, recvVertexList)
+! !!!!!!!!!!!!!!!!!!
+! !! Reorder edges
+! !!!!!!!!!!!!!!!!!!
+! call mpas_zoltan_order_loc_hsfc_edges(nOwnEdges,local_edge_list,3,xEdge,yEdge,zEdge)
+! !!!!!!!!!!!!!!!!!!
+!
+! !!!!!!!!!!!!!!!!!!
+! !! Reorder vertices
+! !!!!!!!!!!!!!!!!!!
+! call mpas_zoltan_order_loc_hsfc_verts(nOwnVertices,local_vertex_list,3,xVertex,yVertex,zVertex)
+! !!!!!!!!!!!!!!!!!!
+!
+! deallocate(sendEdgeList % list)
+! deallocate(sendEdgeList)
+! deallocate(recvEdgeList % list)
+! deallocate(recvEdgeList)
+!
+! deallocate(sendVertexList % list)
+! deallocate(sendVertexList)
+! deallocate(recvVertexList % list)
+! deallocate(recvVertexList)
+!
+! !
+! ! Knowing which edges/vertices are owned by this block and which are actually read
+! ! from the input or restart file, we can build exchange lists to perform
+! ! all-to-all field exchanges from the process that reads a field to the processes that
+! ! need them
+! !
+! call mpas_dmpar_get_owner_list(domain % dminfo, &
+! size(indexToEdgeIDField % array), nlocal_edges, &
+! indexToEdgeIDField % array, local_edge_list, &
+! sendEdgeList, recvEdgeList)
+!
+! call mpas_dmpar_get_owner_list(domain % dminfo, &
+! size(indexToVertexIDField % array), nlocal_vertices, &
+! indexToVertexIDField % array, local_vertex_list, &
+! sendVertexList, recvVertexList)
+!
+!#endif
+!#endif
+!
- k = mpas_binary_search(vertexIDSorted, 2, 1, domain % blocklist % mesh % nVertices, &
- domain % blocklist % mesh % verticesOnEdge % array(j,i))
- if (k <= domain % blocklist % mesh % nVertices) then
- domain % blocklist % mesh % verticesOnEdge % array(j,i) = vertexIDSorted(2,k)
- else
- domain % blocklist % mesh % verticesOnEdge % array(j,i) = domain % blocklist % mesh % nVertices + 1
- end if
- end do
+! !
+! ! Deallocate fields, graphs, and other memory
+! !
+!#ifdef HAVE_ZOLTAN
+!#ifdef _MPI
+! deallocate(xCellField % ioinfo)
+! deallocate(xCellField % array)
+! deallocate(yCellField % ioinfo)
+! deallocate(yCellField % array)
+! deallocate(zCellField % ioinfo)
+! deallocate(zCellField % array)
+!#endif
+!#endif
- do j=1,domain % blocklist % mesh % nEdgesOnEdge % array(i)
+!#ifdef HAVE_ZOLTAN
+!#ifdef _MPI
+! deallocate(xCell)
+! deallocate(yCell)
+! deallocate(zCell)
+!#endif
+!#endif
+ end subroutine mpas_input_state_for_domain!}}}
- k = mpas_binary_search(edgeIDSorted, 2, 1, domain % blocklist % mesh % nEdges, &
- domain % blocklist % mesh % edgesOnEdge % array(j,i))
- if (k <= domain % blocklist % mesh % nEdges) then
- domain % blocklist % mesh % edgesOnEdge % array(j,i) = edgeIDSorted(2,k)
- else
- domain % blocklist % mesh % edgesOnEdge % array(j,i) = domain % blocklist % mesh % nEdges + 1
- end if
-
- end do
- end do
-
- do i=1,domain % blocklist % mesh % nVertices
- do j=1,vertexDegree
-
- k = mpas_binary_search(cellIDSorted, 2, 1, domain % blocklist % mesh % nCells, &
- domain % blocklist % mesh % cellsOnVertex % array(j,i))
- if (k <= domain % blocklist % mesh % nCells) then
- domain % blocklist % mesh % cellsOnVertex % array(j,i) = cellIDSorted(2,k)
- else
- domain % blocklist % mesh % cellsOnVertex % array(j,i) = domain % blocklist % mesh % nCells + 1
- end if
-
- k = mpas_binary_search(edgeIDSorted, 2, 1, domain % blocklist % mesh % nEdges, &
- domain % blocklist % mesh % edgesOnVertex % array(j,i))
- if (k <= domain % blocklist % mesh % nEdges) then
- domain % blocklist % mesh % edgesOnVertex % array(j,i) = edgeIDSorted(2,k)
- else
- domain % blocklist % mesh % edgesOnVertex % array(j,i) = domain % blocklist % mesh % nEdges + 1
- end if
-
- end do
- end do
-
- deallocate(cellIDSorted)
- deallocate(edgeIDSorted)
- deallocate(vertexIDSorted)
-
-
- !
- ! Deallocate fields, graphs, and other memory
- !
- deallocate(indexToCellIDField % ioinfo)
- deallocate(indexToCellIDField % array)
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- deallocate(xCellField % ioinfo)
- deallocate(xCellField % array)
- deallocate(yCellField % ioinfo)
- deallocate(yCellField % array)
- deallocate(zCellField % ioinfo)
- deallocate(zCellField % array)
-#endif
-#endif
- deallocate(indexToEdgeIDField % ioinfo)
- deallocate(indexToEdgeIDField % array)
- deallocate(indexToVertexIDField % ioinfo)
- deallocate(indexToVertexIDField % array)
- deallocate(cellsOnCellField % ioinfo)
- deallocate(cellsOnCellField % array)
- deallocate(edgesOnCellField % ioinfo)
- deallocate(edgesOnCellField % array)
- deallocate(verticesOnCellField % ioinfo)
- deallocate(verticesOnCellField % array)
- deallocate(cellsOnEdgeField % ioinfo)
- deallocate(cellsOnEdgeField % array)
- deallocate(cellsOnVertexField % ioinfo)
- deallocate(cellsOnVertexField % array)
- deallocate(cellsOnCell_0Halo)
- deallocate(nEdgesOnCell_0Halo)
- deallocate(indexToCellID_0Halo)
- deallocate(cellsOnEdge_2Halo)
- deallocate(cellsOnVertex_2Halo)
- deallocate(nEdgesOnCell_2Halo)
- deallocate(edgesOnCell_2Halo)
- deallocate(verticesOnCell_2Halo)
- deallocate(block_graph_0Halo % vertexID)
- deallocate(block_graph_0Halo % nAdjacent)
- deallocate(block_graph_0Halo % adjacencyList)
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- deallocate(xCell)
- deallocate(yCell)
- deallocate(zCell)
-#endif
-#endif
- end subroutine mpas_input_state_for_domain
-
-
!CR:TODO: an identical subroutine is found in module_io_output - merge
- subroutine mpas_insert_string_suffix(stream, suffix, filename)
+ subroutine mpas_insert_string_suffix(stream, suffix, filename)!{{{
implicit none
@@ -1289,10 +505,9 @@
if (filename(i:i) == ':') filename(i:i) = '.'
end do
- end subroutine mpas_insert_string_suffix
+ end subroutine mpas_insert_string_suffix!}}}
-
- subroutine mpas_read_and_distribute_fields(input_obj)
+ subroutine mpas_read_and_distribute_fields(input_obj)!{{{
implicit none
@@ -1304,11 +519,9 @@
call MPAS_readStream(input_obj % io_stream, 1, ierr)
- end subroutine mpas_read_and_distribute_fields
+ end subroutine mpas_read_and_distribute_fields!}}}
-
-
- subroutine mpas_io_input_init(input_obj, blocklist, dminfo)
+ subroutine mpas_io_input_init(input_obj, blocklist, dminfo)!{{{
implicit none
@@ -1334,10 +547,9 @@
#include "add_input_fields.inc"
- end subroutine mpas_io_input_init
+ end subroutine mpas_io_input_init!}}}
-
- subroutine mpas_io_input_get_dimension(input_obj, dimname, dimsize)
+ subroutine mpas_io_input_get_dimension(input_obj, dimname, dimsize)!{{{
implicit none
@@ -1347,10 +559,9 @@
!include "get_dimension_by_name.inc"
- end subroutine mpas_io_input_get_dimension
+ end subroutine mpas_io_input_get_dimension!}}}
-
- subroutine mpas_io_input_get_att_real(input_obj, attname, attvalue)
+ subroutine mpas_io_input_get_att_real(input_obj, attname, attvalue)!{{{
implicit none
@@ -1360,10 +571,9 @@
integer :: nferr
- end subroutine mpas_io_input_get_att_real
+ end subroutine mpas_io_input_get_att_real!}}}
-
- subroutine mpas_io_input_get_att_text(input_obj, attname, attvalue)
+ subroutine mpas_io_input_get_att_text(input_obj, attname, attvalue)!{{{
implicit none
@@ -1373,11 +583,10 @@
integer :: nferr
- end subroutine mpas_io_input_get_att_text
+ end subroutine mpas_io_input_get_att_text!}}}
+ subroutine mpas_exch_input_field_halos(domain, input_obj)!{{{
- subroutine mpas_exch_input_field_halos(domain, input_obj)
-
implicit none
type (domain_type), intent(inout) :: domain
@@ -1385,10 +594,11 @@
#include "exchange_input_field_halos.inc"
- end subroutine mpas_exch_input_field_halos
+#include "non_decomp_copy_input_fields.inc"
+ end subroutine mpas_exch_input_field_halos!}}}
- subroutine mpas_io_input_finalize(input_obj, dminfo)
+ subroutine mpas_io_input_finalize(input_obj, dminfo)!{{{
implicit none
@@ -1399,6 +609,435 @@
call MPAS_closeStream(input_obj % io_stream, nferr)
- end subroutine mpas_io_input_finalize
+ end subroutine mpas_io_input_finalize!}}}
+
+ subroutine mpas_io_setup_cell_block_fields(inputHandle, nReadCells, readCellStart, readingBlock, maxEdges, indexToCellID, xCell, yCell, zCell, nEdgesOnCell, cellsOnCell, edgesOnCell, verticesOnCell)!{{{
+ type (MPAS_IO_Handle_type) :: inputHandle
+ integer, intent(in) :: nReadCells
+ integer, intent(in) :: readCellStart
+ integer, intent(in) :: maxEdges
+ type (block_type), pointer :: readingBlock
+ type (field1dInteger), pointer :: indexToCellID
+ type (field1dReal), pointer :: xCell
+ type (field1dReal), pointer :: yCell
+ type (field1dReal), pointer :: zCell
+ type (field1dInteger), pointer :: nEdgesOnCell
+ type (field2dInteger), pointer :: cellsOnCell
+ type (field2dInteger), pointer :: edgesOnCell
+ type (field2dInteger), pointer :: verticesOnCell
+
+ integer :: i, nHalos
+ integer, dimension(:), pointer :: readIndices
+
+ nHalos = config_num_halos
+
+ !
+ ! Allocate and read fields that we will need in order to ultimately work out
+ ! which cells/edges/vertices are owned by each block, and which are ghost
+ !
+
+ ! Global cell indices
+ allocate(indexToCellID)
+ allocate(indexToCellID % ioinfo)
+ indexToCellID % ioinfo % fieldName = 'indexToCellID'
+ indexToCellID % ioinfo % start(1) = readCellStart
+ indexToCellID % ioinfo % count(1) = nReadCells
+ allocate(indexToCellID % array(nReadCells))
+ allocate(readIndices(nReadCells))
+ do i=1,nReadCells
+ readIndices(i) = i + readCellStart - 1
+ end do
+ call MPAS_io_inq_var(inputHandle, 'indexToCellID', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'indexToCellID', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'indexToCellID', indexToCellID % array, ierr)
+ indexToCellID % dimSizes(1) = nReadCells
+ indexToCellID % block => readingBlock
+ call mpas_dmpar_init_mulithalo_exchange_list(indexToCellID % sendList, nHalos)
+ call mpas_dmpar_init_mulithalo_exchange_list(indexToCellID % recvList, nHalos)
+ call mpas_dmpar_init_mulithalo_exchange_list(indexToCellID % copyList, nHalos)
+ nullify(indexToCellID % next)
+
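
Every field read below repeats the same steps: allocate the field and its ioinfo, record the read window (start/count), read the task's slab through MPAS_io_set_var_indices and mpas_io_get_var, record dimSizes and the owning block, and attach the shared exchange lists. Condensed to its skeleton for a hypothetical 1-d real field named 'foo' (not a field in this file):

    allocate(foo)
    allocate(foo % ioinfo)
    foo % ioinfo % fieldName = 'foo'
    foo % ioinfo % start(1) = readCellStart       ! first global element this task reads
    foo % ioinfo % count(1) = nReadCells          ! how many elements it reads
    allocate(foo % array(nReadCells))
    call MPAS_io_inq_var(inputHandle, 'foo', ierr=ierr)
    call MPAS_io_set_var_indices(inputHandle, 'foo', readIndices, ierr=ierr)
    call mpas_io_get_var(inputHandle, 'foo', foo % array, ierr)
    foo % dimSizes(1) = nReadCells
    foo % block => readingBlock
    foo % sendList => indexToCellID % sendList    ! share the cell exchange lists
    foo % recvList => indexToCellID % recvList
    foo % copyList => indexToCellID % copyList
    nullify(foo % next)
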
+#ifdef HAVE_ZOLTAN
+#ifdef _MPI
+ ! Cell x-coordinates (in 3d Cartesian space)
+ allocate(xCell)
+ allocate(xCell % ioinfo)
+ xCell % ioinfo % fieldName = 'xCell'
+ xCell % ioinfo % start(1) = readCellStart
+ xCell % ioinfo % count(1) = nReadCells
+ allocate(xCell % array(nReadCells))
+ call MPAS_io_inq_var(inputHandle, 'xCell', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'xCell', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'xCell', xCell % array, ierr)
+ xCell % dimSizes(1) = nReadCells
+ xCell % block => readingBlock
+ xCell % sendList => indexToCellID % sendList
+ xCell % recvList => indexToCellID % recvList
+ xCell % copyList => indexToCellID % copyList
+ nullify(xCell % next)
+
+ ! Cell y-coordinates (in 3d Cartesian space)
+ allocate(yCell)
+ allocate(yCell % ioinfo)
+ yCell % ioinfo % fieldName = 'yCell'
+ yCell % ioinfo % start(1) = readCellStart
+ yCell % ioinfo % count(1) = nReadCells
+ allocate(yCell % array(nReadCells))
+ call MPAS_io_inq_var(inputHandle, 'yCell', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'yCell', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'yCell', yCell % array, ierr)
+ yCell % sendList => indexToCellID % sendList
+ yCell % recvList => indexToCellID % recvList
+ yCell % copyList => indexToCellID % copyList
+ yCell % dimSizes(1) = nReadCells
+ yCell % block => readingBlock
+ nullify(yCell % next)
+
+ ! Cell z-coordinates (in 3d Cartesian space)
+ allocate(zCell)
+ allocate(zCell % ioinfo)
+ zCell % ioinfo % fieldName = 'zCell'
+ zCell % ioinfo % start(1) = readCellStart
+ zCell % ioinfo % count(1) = nReadCells
+ allocate(zCell % array(nReadCells))
+ call MPAS_io_inq_var(inputHandle, 'zCell', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'zCell', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'zCell', zCell % array, ierr)
+ zCell % dimSizes(1) = nReadCells
+ zCell % block => readingBlock
+ zCell % sendList => indexToCellID % sendList
+ zCell % recvList => indexToCellID % recvList
+ zCell % copyList => indexToCellID % copyList
+ nullify(zCell % next)
+#endif
+#endif
+
+ ! Number of cell/edges/vertices adjacent to each cell
+ allocate(nEdgesOnCell)
+ allocate(nEdgesOnCell % ioinfo)
+ nEdgesOnCell % ioinfo % fieldName = 'nEdgesOnCell'
+ nEdgesOnCell % ioinfo % start(1) = readCellStart
+ nEdgesOnCell % ioinfo % count(1) = nReadCells
+ allocate(nEdgesOnCell % array(nReadCells))
+ call MPAS_io_inq_var(inputHandle, 'nEdgesOnCell', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'nEdgesOnCell', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'nEdgesOnCell', nEdgesOnCell % array, ierr)
+ nEdgesOnCell % dimSizes(1) = nReadCells
+ nEdgesOnCell % block => readingBlock
+ nEdgesOnCell % sendList => indexToCellID % sendList
+ nEdgesOnCell % recvList => indexToCellID % recvList
+ nEdgesOnCell % copyList => indexToCellID % copyList
+ nullify(nEdgesOnCell % next)
+
+ ! Global indices of cells adjacent to each cell
+ allocate(cellsOnCell)
+ allocate(cellsOnCell % ioinfo)
+ cellsOnCell % ioinfo % fieldName = 'cellsOnCell'
+ cellsOnCell % ioinfo % start(1) = 1
+ cellsOnCell % ioinfo % start(2) = readCellStart
+ cellsOnCell % ioinfo % count(1) = maxEdges
+ cellsOnCell % ioinfo % count(2) = nReadCells
+ allocate(cellsOnCell % array(maxEdges,nReadCells))
+ call MPAS_io_inq_var(inputHandle, 'cellsOnCell', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'cellsOnCell', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'cellsOnCell', cellsOnCell % array, ierr)
+ cellsOnCell % dimSizes(1) = maxEdges
+ cellsOnCell % dimSizes(2) = nReadCells
+ cellsOnCell % block => readingBlock
+ cellsOnCell % sendList => indexToCellID % sendList
+ cellsOnCell % recvList => indexToCellID % recvList
+ cellsOnCell % copyList => indexToCellID % copyList
+ nullify(cellsOnCell % next)
+
+ ! Global indices of edges adjacent to each cell
+ allocate(edgesOnCell)
+ allocate(edgesOnCell % ioinfo)
+ edgesOnCell % ioinfo % fieldName = 'edgesOnCell'
+ edgesOnCell % ioinfo % start(1) = 1
+ edgesOnCell % ioinfo % start(2) = readCellStart
+ edgesOnCell % ioinfo % count(1) = maxEdges
+ edgesOnCell % ioinfo % count(2) = nReadCells
+ allocate(edgesOnCell % array(maxEdges,nReadCells))
+ call MPAS_io_inq_var(inputHandle, 'edgesOnCell', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'edgesOnCell', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'edgesOnCell', edgesOnCell % array, ierr)
+ edgesOnCell % dimSizes(1) = maxEdges
+ edgesOnCell % dimSizes(2) = nReadCells
+ edgesOnCell % block => readingBlock
+ edgesOnCell % sendList => indexToCellID % sendList
+ edgesOnCell % recvList => indexToCellID % recvList
+ edgesOnCell % copyList => indexToCellID % copyList
+ nullify(edgesOnCell % next)
+
+ ! Global indices of vertices adjacent to each cell
+ allocate(verticesOnCell)
+ allocate(verticesOnCell % ioinfo)
+ verticesOnCell % ioinfo % fieldName = 'verticesOnCell'
+ verticesOnCell % ioinfo % start(1) = 1
+ verticesOnCell % ioinfo % start(2) = readCellStart
+ verticesOnCell % ioinfo % count(1) = maxEdges
+ verticesOnCell % ioinfo % count(2) = nReadCells
+ allocate(verticesOnCell % array(maxEdges,nReadCells))
+ call MPAS_io_inq_var(inputHandle, 'verticesOnCell', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'verticesOnCell', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'verticesOnCell', verticesOnCell % array, ierr)
+ verticesOnCell % dimSizes(1) = maxEdges
+ verticesOnCell % dimSizes(2) = nReadCells
+ verticesOnCell % block => readingBlock
+ verticesOnCell % sendList => indexToCellID % sendList
+ verticesOnCell % recvList => indexToCellID % recvList
+ verticesOnCell % copyList => indexToCellID % copyList
+ nullify(verticesOnCell % next)
+
+ deallocate(readIndices)
+
+ end subroutine mpas_io_setup_cell_block_fields!}}}
+
+ subroutine mpas_io_setup_edge_block_fields(inputHandle, nReadEdges, readEdgeStart, readingBlock, indexToEdgeID, xEdge, yEdge, zEdge, cellsOnEdge)!{{{
+ type (MPAS_IO_Handle_type) :: inputHandle
+ integer, intent(in) :: nReadEdges
+ integer, intent(in) :: readEdgeStart
+ type (block_type), pointer :: readingBlock
+ type (field1dInteger), pointer :: indexToEdgeID
+ type (field1dReal), pointer :: xEdge
+ type (field1dReal), pointer :: yEdge
+ type (field1dReal), pointer :: zEdge
+ type (field2dInteger), pointer :: cellsOnEdge
+
+ integer :: i, nHalos
+ integer, dimension(:), pointer :: readIndices
+
+ nHalos = config_num_halos
+
+ !
+ ! Allocate and read fields that we will need in order to ultimately work out
+ ! which cells/edges/vertices are owned by each block, and which are ghost
+ !
+
+ allocate(readIndices(nReadEdges))
+
+ ! Global edge indices
+ allocate(indexToEdgeID)
+ allocate(indexToEdgeID % ioinfo)
+ indexToEdgeID % ioinfo % fieldName = 'indexToEdgeID'
+ indexToEdgeID % ioinfo % start(1) = readEdgeStart
+ indexToEdgeID % ioinfo % count(1) = nReadEdges
+ allocate(indexToEdgeID % array(nReadEdges))
+ do i=1,nReadEdges
+ readIndices(i) = i + readEdgeStart - 1
+ end do
+ call MPAS_io_inq_var(inputHandle, 'indexToEdgeID', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'indexToEdgeID', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'indexToEdgeID', indexToEdgeID % array, ierr)
+ indexToEdgeID % dimSizes(1) = nReadEdges
+ indexToEdgeID % block => readingBlock
+ call mpas_dmpar_init_mulithalo_exchange_list(indexToEdgeID % sendList, nHalos+1)
+ call mpas_dmpar_init_mulithalo_exchange_list(indexToEdgeID % recvList, nHalos+1)
+ call mpas_dmpar_init_mulithalo_exchange_list(indexToEdgeID % copyList, nHalos+1)
+ nullify(indexToEdgeID % next)
+
+#ifdef HAVE_ZOLTAN
+#ifdef _MPI
+ ! Edge x-coordinates (in 3d Cartesian space)
+ allocate(xEdge)
+ allocate(xEdge % ioinfo)
+ xEdge % ioinfo % fieldName = 'xEdge'
+ xEdge % ioinfo % start(1) = readEdgeStart
+ xEdge % ioinfo % count(1) = nReadEdges
+ allocate(xEdge % array(nReadEdges))
+ call MPAS_io_inq_var(inputHandle, 'xEdge', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'xEdge', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'xEdge', xEdge % array, ierr)
+ xEdge % dimSizes(1) = nReadEdges
+ xEdge % block => readingBlock
+ xEdge % sendList => indexToEdgeID % sendList
+ xEdge % recvList => indexToEdgeID % recvList
+ xEdge % copyList => indexToEdgeID % copyList
+ nullify(xEdge % next)
+
+ ! Edge y-coordinates (in 3d Cartesian space)
+ allocate(yEdge)
+ allocate(yEdge % ioinfo)
+ yEdge % ioinfo % fieldName = 'yEdge'
+ yEdge % ioinfo % start(1) = readEdgeStart
+ yEdge % ioinfo % count(1) = nReadEdges
+ allocate(yEdge % array(nReadEdges))
+ call MPAS_io_inq_var(inputHandle, 'yEdge', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'yEdge', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'yEdge', yEdge % array, ierr)
+ yEdge % dimSizes(1) = nReadEdges
+ yEdge % block => readingBlock
+ yEdge % sendList => indexToEdgeID % sendList
+ yEdge % recvList => indexToEdgeID % recvList
+ yEdge % copyList => indexToEdgeID % copyList
+ nullify(yEdge % next)
+
+ ! Edge z-coordinates (in 3d Cartesian space)
+ allocate(zEdge)
+ allocate(zEdge % ioinfo)
+ zEdge % ioinfo % fieldName = 'zEdge'
+ zEdge % ioinfo % start(1) = readEdgeStart
+ zEdge % ioinfo % count(1) = nReadEdges
+ allocate(zEdge % array(nReadEdges))
+ call MPAS_io_inq_var(inputHandle, 'zEdge', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'zEdge', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'zEdge', zEdge % array, ierr)
+ zEdge % dimSizes(1) = nReadEdges
+ zEdge % block => readingBlock
+ zEdge % sendList => indexToEdgeID % sendList
+ zEdge % recvList => indexToEdgeID % recvList
+ zEdge % copyList => indexToEdgeID % copyList
+ nullify(zEdge % next)
+#endif
+#endif
+
+
+ ! Global indices of cells adjacent to each edge
+ ! used for determining which edges are owned by a block, where
+ ! iEdge is owned iff cellsOnEdge(1,iEdge) is an owned cell
+ allocate(cellsOnEdge)
+ allocate(cellsOnEdge % ioinfo)
+ cellsOnEdge % ioinfo % fieldName = 'cellsOnEdge'
+ cellsOnEdge % ioinfo % start(1) = 1
+ cellsOnEdge % ioinfo % start(2) = readEdgeStart
+ cellsOnEdge % ioinfo % count(1) = 2
+ cellsOnEdge % ioinfo % count(2) = nReadEdges
+ allocate(cellsOnEdge % array(2,nReadEdges))
+ call MPAS_io_inq_var(inputHandle, 'cellsOnEdge', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'cellsOnEdge', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'cellsOnEdge', cellsOnEdge % array, ierr)
+ cellsOnEdge % dimSizes(1) = 2
+ cellsOnEdge % dimSizes(2) = nReadEdges
+ cellsOnEdge % block => readingBlock
+ cellsOnEdge % sendList => indexToEdgeID % sendList
+ cellsOnEdge % recvList => indexToEdgeID % recvList
+ cellsOnEdge % copyList => indexToEdgeID % copyList
+ nullify(cellsOnEdge % next)
+
+ deallocate(readIndices)
+
+ end subroutine mpas_io_setup_edge_block_fields!}}}
+
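
The ownership rule noted in the subroutine above, that an edge belongs to the block owning cellsOnEdge(1,iEdge) (and, in the subroutine below, a vertex to the block owning cellsOnVertex(1,iVtx)), reduces to a membership test. A hedged sketch, assuming iEdge is an integer index and ownedCellHash has been filled with the block's owned-cell global IDs via mpas_hash_insert:

    do iEdge = 1, nReadEdges
       ! an edge is owned iff its first adjacent cell is an owned cell
       if (mpas_hash_search(ownedCellHash, cellsOnEdge % array(1, iEdge))) then
          ! this block owns edge indexToEdgeID % array(iEdge)
       end if
    end do
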
+ subroutine mpas_io_setup_vertex_block_fields(inputHandle, nReadVertices, readVertexStart, readingBlock, vertexDegree, indexToVertexID, xVertex, yVertex, zVertex, cellsOnVertex)!{{{
+ type (MPAS_IO_Handle_type) :: inputHandle
+ integer, intent(in) :: nReadVertices
+ integer, intent(in) :: readVertexStart
+ integer, intent(in) :: vertexDegree
+ type (block_type), pointer :: readingBlock
+ type (field1dInteger), pointer :: indexToVertexID
+ type (field1dReal), pointer :: xVertex
+ type (field1dReal), pointer :: yVertex
+ type (field1dReal), pointer :: zVertex
+ type (field2dInteger), pointer :: cellsOnVertex
+
+ integer :: i, nHalos
+ integer, dimension(:), pointer :: readIndices
+
+ nHalos = config_num_halos
+
+ ! Global vertex indices
+ allocate(indexToVertexID)
+ allocate(indexToVertexID % ioinfo)
+ indexToVertexID % ioinfo % fieldName = 'indexToVertexID'
+ indexToVertexID % ioinfo % start(1) = readVertexStart
+ indexToVertexID % ioinfo % count(1) = nReadVertices
+ allocate(indexToVertexID % array(nReadVertices))
+ allocate(readIndices(nReadVertices))
+ do i=1,nReadVertices
+ readIndices(i) = i + readVertexStart - 1
+ end do
+ call MPAS_io_inq_var(inputHandle, 'indexToVertexID', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'indexToVertexID', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'indexToVertexID', indexToVertexID % array, ierr)
+ indexToVertexID % dimSizes(1) = nReadVertices
+ indexToVertexID % block => readingBlock
+ call mpas_dmpar_init_mulithalo_exchange_list(indexToVertexID % sendList, nHalos+1)
+ call mpas_dmpar_init_mulithalo_exchange_list(indexToVertexID % recvList, nHalos+1)
+ call mpas_dmpar_init_mulithalo_exchange_list(indexToVertexID % copyList, nHalos+1)
+ nullify(indexToVertexID % next)
+
+#ifdef HAVE_ZOLTAN
+#ifdef _MPI
+ ! Vertex x-coordinates (in 3d Cartesian space)
+ allocate(xVertex)
+ allocate(xVertex % ioinfo)
+ xVertex % ioinfo % fieldName = 'xVertex'
+ xVertex % ioinfo % start(1) = readVertexStart
+ xVertex % ioinfo % count(1) = nReadVertices
+ allocate(xVertex % array(nReadVertices))
+ call MPAS_io_inq_var(inputHandle, 'xVertex', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'xVertex', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'xVertex', xVertex % array, ierr)
+ xVertex % dimSizes(1) = nReadVertices
+ xVertex % block => readingBlock
+ xVertex % sendList => indexToVertexID % sendList
+ xVertex % recvList => indexToVertexID % recvList
+ xVertex % copyList => indexToVertexID % copyList
+ nullify(xVertex % next)
+
+ ! Vertex y-coordinates (in 3d Cartesian space)
+ allocate(yVertex)
+ allocate(yVertex % ioinfo)
+ yVertex % ioinfo % fieldName = 'yVertex'
+ yVertex % ioinfo % start(1) = readVertexStart
+ yVertex % ioinfo % count(1) = nReadVertices
+ allocate(yVertex % array(nReadVertices))
+ call MPAS_io_inq_var(inputHandle, 'yVertex', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'yVertex', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'yVertex', yVertex % array, ierr)
+ yVertex % dimSizes(1) = nReadVertices
+ yVertex % block => readingBlock
+ yVertex % sendList => indexToVertexID % sendList
+ yVertex % recvList => indexToVertexID % recvList
+ yVertex % copyList => indexToVertexID % copyList
+ nullify(yVertex % next)
+
+ ! Vertex z-coordinates (in 3d Cartesian space)
+ allocate(zVertex)
+ allocate(zVertex % ioinfo)
+ zVertex % ioinfo % fieldName = 'zVertex'
+ zVertex % ioinfo % start(1) = readVertexStart
+ zVertex % ioinfo % count(1) = nReadVertices
+ allocate(zVertex % array(nReadVertices))
+ call MPAS_io_inq_var(inputHandle, 'zVertex', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'zVertex', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'zVertex', zVertex % array, ierr)
+ zVertex % dimSizes(1) = nReadVertices
+ zVertex % block => readingBlock
+ zVertex % sendList => indexToVertexID % sendList
+ zVertex % recvList => indexToVertexID % recvList
+ zVertex % copyList => indexToVertexID % copyList
+ nullify(zVertex % next)
+#endif
+#endif
+
+
+ ! Global indices of cells adjacent to each vertex
+ ! used for determining which vertices are owned by a block, where
+ ! iVtx is owned iff cellsOnVertex(1,iVtx) is an owned cell
+ allocate(cellsOnVertex)
+ allocate(cellsOnVertex % ioinfo)
+ cellsOnVertex % ioinfo % fieldName = 'cellsOnVertex'
+ cellsOnVertex % ioinfo % start(1) = 1
+ cellsOnVertex % ioinfo % start(2) = readVertexStart
+ cellsOnVertex % ioinfo % count(1) = vertexDegree
+ cellsOnVertex % ioinfo % count(2) = nReadVertices
+ allocate(cellsOnVertex % array(vertexDegree,nReadVertices))
+ call MPAS_io_inq_var(inputHandle, 'cellsOnVertex', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'cellsOnVertex', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'cellsOnVertex', cellsOnVertex % array, ierr)
+ cellsOnVertex % dimSizes(1) = vertexDegree
+ cellsOnVertex % dimSizes(2) = nReadVertices
+ cellsOnVertex % block => readingBlock
+ cellsOnVertex % sendList => indexToVertexID % sendList
+ cellsOnVertex % recvList => indexToVertexID % recvList
+ cellsOnVertex % copyList => indexToVertexID % copyList
+ nullify(cellsOnVertex % next)
+
+ deallocate(readIndices)
+
+ end subroutine mpas_io_setup_vertex_block_fields!}}}
+
end module mpas_io_input
Modified: branches/atmos_physics/src/framework/mpas_io_output.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_io_output.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/framework/mpas_io_output.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -22,8 +22,7 @@
contains
-
- subroutine mpas_output_state_init(output_obj, domain, stream, outputSuffix)
+ subroutine mpas_output_state_init(output_obj, domain, stream, outputSuffix)!{{{
implicit none
@@ -66,11 +65,10 @@
block_ptr % mesh &
)
- end subroutine mpas_output_state_init
+ end subroutine mpas_output_state_init!}}}
+ subroutine mpas_insert_string_suffix(stream, suffix, filename)!{{{
- subroutine mpas_insert_string_suffix(stream, suffix, filename)
-
implicit none
character (len=*), intent(in) :: stream
@@ -92,10 +90,9 @@
if (filename(i:i) == ':') filename(i:i) = '.'
end do
- end subroutine mpas_insert_string_suffix
+ end subroutine mpas_insert_string_suffix!}}}
-
- subroutine mpas_output_state_for_domain(output_obj, domain, itime)
+ subroutine mpas_output_state_for_domain(output_obj, domain, itime)!{{{
implicit none
@@ -103,127 +100,223 @@
type (domain_type), intent(inout) :: domain
integer, intent(in) :: itime
+ type(block_type), pointer :: block_ptr
+
+ integer :: nCells, nEdges, nVertices, vertexDegree
+ integer :: maxEdges, maxEdges2, nEdgesSolve, nCellsSolve, nVerticesSolve
integer :: ierr
integer :: i, j
- integer, dimension(:,:), pointer :: cellsOnCell, edgesOnCell, verticesOnCell, &
- cellsOnEdge, verticesOnEdge, edgesOnEdge, cellsOnVertex, edgesOnVertex
- integer, dimension(:,:), pointer :: cellsOnCell_save, edgesOnCell_save, verticesOnCell_save, &
- cellsOnEdge_save, verticesOnEdge_save, edgesOnEdge_save, &
- cellsOnVertex_save, edgesOnVertex_save
- type (field1dInteger) :: int1d
- type (field2dInteger) :: int2d
- type (field0dReal) :: real0d
- type (field1dReal) :: real1d
- type (field2dReal) :: real2d
- type (field3dReal) :: real3d
- type (field0dChar) :: char0d
- type (field1dChar) :: char1d
+ type (field2dInteger), pointer :: cellsOnCell_save, edgesOnCell_save, verticesOnCell_save, &
+ cellsOnEdge_save, verticesOnEdge_save, edgesOnEdge_save, &
+ cellsOnVertex_save, edgesOnVertex_save
+ type (field2dInteger), pointer :: cellsOnCell_ptr, edgesOnCell_ptr, verticesOnCell_ptr, &
+ cellsOnEdge_ptr, verticesOnEdge_ptr, edgesOnEdge_ptr, &
+ cellsOnVertex_ptr, edgesOnVertex_ptr
+
output_obj % time = itime
- allocate(cellsOnCell(domain % blocklist % mesh % maxEdges, domain % blocklist % mesh % nCellsSolve))
- allocate(edgesOnCell(domain % blocklist % mesh % maxEdges, domain % blocklist % mesh % nCellsSolve))
- allocate(verticesOnCell(domain % blocklist % mesh % maxEdges, domain % blocklist % mesh % nCellsSolve))
- allocate(cellsOnEdge(2, domain % blocklist % mesh % nEdgesSolve))
- allocate(verticesOnEdge(2, domain % blocklist % mesh % nEdgesSolve))
- allocate(edgesOnEdge(2 * domain % blocklist % mesh % maxEdges, domain % blocklist % mesh % nEdgesSolve))
- allocate(cellsOnVertex(domain % blocklist % mesh % vertexDegree, domain % blocklist % mesh % nVerticesSolve))
- allocate(edgesOnVertex(domain % blocklist % mesh % vertexDegree, domain % blocklist % mesh % nVerticesSolve))
-
-
!
! Convert connectivity information from local to global indices
+ ! Needs to be done block by block
!
- do i=1,domain % blocklist % mesh % nCellsSolve
- do j=1,domain % blocklist % mesh % nEdgesOnCell % array(i)
- cellsOnCell(j,i) = domain % blocklist % mesh % indexToCellID % array( &
- domain % blocklist % mesh % cellsOnCell % array(j,i))
- edgesOnCell(j,i) = domain % blocklist % mesh % indexToEdgeID % array( &
- domain % blocklist % mesh % edgesOnCell % array(j,i))
- verticesOnCell(j,i) = domain % blocklist % mesh % indexToVertexID % array( &
- domain % blocklist % mesh % verticesOnCell % array(j,i))
- end do
- do j=domain % blocklist % mesh % nEdgesOnCell % array(i)+1,domain % blocklist % mesh % maxEdges
- cellsOnCell(j,i) = domain % blocklist % mesh % indexToCellID % array( &
- domain % blocklist % mesh % nEdgesOnCell % array(i))
- edgesOnCell(j,i) = domain % blocklist % mesh % indexToEdgeID % array( &
- domain % blocklist % mesh % nEdgesOnCell % array(i))
- verticesOnCell(j,i) = domain % blocklist % mesh % indexToVertexID % array( &
- domain % blocklist % mesh % nEdgesOnCell % array(i))
- end do
- end do
- do i=1,domain % blocklist % mesh % nEdgesSolve
- cellsOnEdge(1,i) = domain % blocklist % mesh % indexToCellID % array(domain % blocklist % mesh % cellsOnEdge % array(1,i))
- cellsOnEdge(2,i) = domain % blocklist % mesh % indexToCellID % array(domain % blocklist % mesh % cellsOnEdge % array(2,i))
- verticesOnEdge(1,i) = domain % blocklist % mesh % indexToVertexID % array( &
- domain % blocklist % mesh % verticesOnEdge % array(1,i))
- verticesOnEdge(2,i) = domain % blocklist % mesh % indexToVertexID % array( &
- domain % blocklist % mesh % verticesOnEdge % array(2,i))
- do j=1,domain % blocklist % mesh % nEdgesOnEdge % array(i)
- edgesOnEdge(j,i) = domain % blocklist % mesh % indexToEdgeID % array( &
- domain % blocklist % mesh % edgesOnEdge % array(j,i))
- end do
- do j=domain % blocklist % mesh % nEdgesOnEdge % array(i)+1,2*domain % blocklist % mesh % maxEdges
- if(domain % blocklist % mesh % nEdgesOnEdge % array(i) .eq. 0) then
- edgesOnEdge(j,i) = domain % blocklist % mesh % nEdgesSolve + 1
- else
- edgesOnEdge(j,i) = domain % blocklist % mesh % indexToEdgeID % array( &
- domain % blocklist % mesh % nEdgesOnEdge % array(i))
- endif
- end do
- end do
- do i=1,domain % blocklist % mesh % nVerticesSolve
- do j=1,domain % blocklist % mesh % vertexDegree
- cellsOnVertex(j,i) = domain % blocklist % mesh % indexToCellID % array( &
- domain % blocklist % mesh % cellsOnVertex % array(j,i))
- edgesOnVertex(j,i) = domain % blocklist % mesh % indexToEdgeID % array( &
- domain % blocklist % mesh % edgesOnVertex % array(j,i))
- end do
- end do
+ ! Also, back up local indices to be copied back into blocks after output is complete.
+ !
+ allocate(cellsOnCell_save)
+ allocate(edgesOnCell_save)
+ allocate(verticesOnCell_save)
+ allocate(cellsOnEdge_save)
+ allocate(verticesOnEdge_save)
+ allocate(edgesOnEdge_save)
+ allocate(cellsOnVertex_save)
+ allocate(edgesOnVertex_save)
- cellsOnCell_save => domain % blocklist % mesh % cellsOnCell % array
- edgesOnCell_save => domain % blocklist % mesh % edgesOnCell % array
- verticesOnCell_save => domain % blocklist % mesh % verticesOnCell % array
- cellsOnEdge_save => domain % blocklist % mesh % cellsOnEdge % array
- verticesOnEdge_save => domain % blocklist % mesh % verticesOnEdge % array
- edgesOnEdge_save => domain % blocklist % mesh % edgesOnEdge % array
- cellsOnVertex_save => domain % blocklist % mesh % cellsOnVertex % array
- edgesOnVertex_save => domain % blocklist % mesh % edgesOnVertex % array
+ cellsOnCell_ptr => cellsOnCell_save
+ edgesOnCell_ptr => edgesOnCell_save
+ verticesOnCell_ptr => verticesOnCell_save
+ cellsOnEdge_ptr => cellsOnEdge_save
+ verticesOnEdge_ptr => verticesOnEdge_save
+ edgesOnEdge_ptr => edgesOnEdge_save
+ cellsOnVertex_ptr => cellsOnVertex_save
+ edgesOnVertex_ptr => edgesOnVertex_save
- domain % blocklist % mesh % cellsOnCell % array => cellsOnCell
- domain % blocklist % mesh % edgesOnCell % array => edgesOnCell
- domain % blocklist % mesh % verticesOnCell % array => verticesOnCell
- domain % blocklist % mesh % cellsOnEdge % array => cellsOnEdge
- domain % blocklist % mesh % verticesOnEdge % array => verticesOnEdge
- domain % blocklist % mesh % edgesOnEdge % array => edgesOnEdge
- domain % blocklist % mesh % cellsOnVertex % array => cellsOnVertex
- domain % blocklist % mesh % edgesOnVertex % array => edgesOnVertex
+ block_ptr => domain % blocklist
+ do while(associated(block_ptr))
+ maxEdges = block_ptr % mesh % maxEdges
+ maxEdges2 = block_ptr % mesh % maxEdges2
+ vertexDegree = block_ptr % mesh % vertexDegree
+ nCells = block_ptr % mesh % nCells
+ nEdges = block_ptr % mesh % nEdges
+ nVertices = block_ptr % mesh % nVertices
+ nCellsSolve = block_ptr % mesh % nCellsSolve
+ nEdgesSolve = block_ptr % mesh % nEdgesSolve
+ nVerticesSolve = block_ptr % mesh % nVerticesSolve
+ nullify(cellsOnCell_ptr % ioinfo)
+ cellsOnCell_ptr % array => block_ptr % mesh % cellsOnCell % array
+ allocate(block_ptr % mesh % cellsOnCell % array(maxEdges, nCells+1))
+
+ nullify(edgesOnCell_ptr % ioinfo)
+ edgesOnCell_ptr % array => block_ptr % mesh % edgesOnCell % array
+ allocate(block_ptr % mesh % edgesOnCell % array(maxEdges, nCells+1))
+
+ nullify(verticesOnCell_ptr % ioinfo)
+ verticesOnCell_ptr % array => block_ptr % mesh % verticesOnCell % array
+ allocate(block_ptr % mesh % verticesOnCell % array(maxEdges, nCells+1))
+
+ nullify(cellsOnEdge_ptr % ioinfo)
+ cellsOnEdge_ptr % array => block_ptr % mesh % cellsOnEdge % array
+ allocate(block_ptr % mesh % cellsOnEdge % array(2, nEdges+1))
+
+ nullify(verticesOnEdge_ptr % ioinfo)
+ verticesOnEdge_ptr % array => block_ptr % mesh % verticesOnEdge % array
+ allocate(block_ptr % mesh % verticesOnEdge % array(2, nEdges+1))
+
+ nullify(edgesOnEdge_ptr % ioinfo)
+ edgesOnEdge_ptr % array => block_ptr % mesh % edgesOnEdge % array
+ allocate(block_ptr % mesh % edgesOnEdge % array(maxEdges2, nEdges+1))
+
+ nullify(cellsOnVertex_ptr % ioinfo)
+ cellsOnVertex_ptr % array => block_ptr % mesh % cellsOnVertex % array
+ allocate(block_ptr % mesh % cellsOnVertex % array(vertexDegree, nVertices+1))
+
+ nullify(edgesOnVertex_ptr % ioinfo)
+ edgesOnVertex_ptr % array => block_ptr % mesh % edgesOnVertex % array
+ allocate(block_ptr % mesh % edgesOnVertex % array(vertexDegree, nVertices+1))
+
+ do i = 1, nCellsSolve
+ do j = 1, block_ptr % mesh % nEdgesOnCell % array(i)
+ block_ptr % mesh % cellsOnCell % array(j, i) = block_ptr % mesh % indexToCellID % array(cellsOnCell_ptr % array(j, i))
+ block_ptr % mesh % edgesOnCell % array(j, i) = block_ptr % mesh % indexToEdgeID % array(edgesOnCell_ptr % array(j, i))
+ block_ptr % mesh % verticesOnCell % array(j, i) = block_ptr % mesh % indexToVertexID % array(verticesOnCell_ptr % array(j, i))
+ end do
+
+ block_ptr % mesh % cellsOnCell % array(block_ptr % mesh % nEdgesOnCell % array(i) + 1:maxEdges, i) = nCells+1
+ block_ptr % mesh % edgesOnCell % array(block_ptr % mesh % nEdgesOnCell % array(i) + 1:maxEdges, i) = nEdges+1
+ block_ptr % mesh % verticesOnCell % array(block_ptr % mesh % nEdgesOnCell % array(i) + 1:maxEdges, i) = nVertices+1
+ end do
+
+ do i = 1, nEdgesSolve
+ block_ptr % mesh % cellsOnEdge % array(1, i) = block_ptr % mesh % indexToCellID % array(cellsOnEdge_ptr % array(1, i))
+ block_ptr % mesh % cellsOnEdge % array(2, i) = block_ptr % mesh % indexToCellID % array(cellsOnEdge_ptr % array(2, i))
+
+ block_ptr % mesh % verticesOnEdge % array(1, i) = block_ptr % mesh % indexToVertexID % array(verticesOnEdge_ptr % array(1,i))
+ block_ptr % mesh % verticesOnEdge % array(2, i) = block_ptr % mesh % indexToVertexID % array(verticesOnEdge_ptr % array(2,i))
+
+ do j = 1, block_ptr % mesh % nEdgesOnEdge % array(i)
+ block_ptr % mesh % edgesOnEdge % array(j, i) = block_ptr % mesh % indexToEdgeID % array(edgesOnEdge_ptr % array(j, i))
+ end do
+
+ block_ptr % mesh % edgesOnEdge % array(block_ptr % mesh % nEdgesOnEdge % array(i)+1:maxEdges2, i) = nEdges+1
+ end do
+
+ do i = 1, nVerticesSolve
+ do j = 1, vertexDegree
+ block_ptr % mesh % cellsOnVertex % array(j, i) = block_ptr % mesh % indexToCellID % array(cellsOnVertex_ptr % array(j, i))
+ block_ptr % mesh % edgesOnVertex % array(j, i) = block_ptr % mesh % indexToEdgeID % array(edgesOnVertex_ptr % array(j, i))
+ end do
+ end do
+
+ block_ptr => block_ptr % next
+ if(associated(block_ptr)) then
+ allocate(cellsOnCell_ptr % next)
+ allocate(edgesOnCell_ptr % next)
+ allocate(verticesOnCell_ptr % next)
+ allocate(cellsOnEdge_ptr % next)
+ allocate(verticesOnEdge_ptr % next)
+ allocate(edgesOnEdge_ptr % next)
+ allocate(cellsOnVertex_ptr % next)
+ allocate(edgesOnVertex_ptr % next)
+
+ cellsOnCell_ptr => cellsOnCell_ptr % next
+ edgesOnCell_ptr => edgesOnCell_ptr % next
+ verticesOnCell_ptr => verticesOnCell_ptr % next
+ cellsOnEdge_ptr => cellsOnEdge_ptr % next
+ verticesOnEdge_ptr => verticesOnEdge_ptr % next
+ edgesOnEdge_ptr => edgesOnEdge_ptr % next
+ cellsOnVertex_ptr => cellsOnVertex_ptr % next
+ edgesOnVertex_ptr => edgesOnVertex_ptr % next
+ end if
+
+ nullify(cellsOnCell_ptr % next)
+ nullify(edgesOnCell_ptr % next)
+ nullify(verticesOnCell_ptr % next)
+ nullify(cellsOnEdge_ptr % next)
+ nullify(verticesOnEdge_ptr % next)
+ nullify(edgesOnEdge_ptr % next)
+ nullify(cellsOnVertex_ptr % next)
+ nullify(edgesOnVertex_ptr % next)
+ end do
+
+ ! Write output file
call MPAS_writeStream(output_obj % io_stream, output_obj % time, ierr)
- domain % blocklist % mesh % cellsOnCell % array => cellsOnCell_save
- domain % blocklist % mesh % edgesOnCell % array => edgesOnCell_save
- domain % blocklist % mesh % verticesOnCell % array => verticesOnCell_save
- domain % blocklist % mesh % cellsOnEdge % array => cellsOnEdge_save
- domain % blocklist % mesh % verticesOnEdge % array => verticesOnEdge_save
- domain % blocklist % mesh % edgesOnEdge % array => edgesOnEdge_save
- domain % blocklist % mesh % cellsOnVertex % array => cellsOnVertex_save
- domain % blocklist % mesh % edgesOnVertex % array => edgesOnVertex_save
+ ! Convert global indices back to local indices, and deallocate all temporary arrays.
+ cellsOnCell_ptr => cellsOnCell_save
+ edgesOnCell_ptr => edgesOnCell_save
+ verticesOnCell_ptr => verticesOnCell_save
+ cellsOnEdge_ptr => cellsOnEdge_save
+ verticesOnEdge_ptr => verticesOnEdge_save
+ edgesOnEdge_ptr => edgesOnEdge_save
+ cellsOnVertex_ptr => cellsOnVertex_save
+ edgesOnVertex_ptr => edgesOnVertex_save
- deallocate(cellsOnCell)
- deallocate(edgesOnCell)
- deallocate(verticesOnCell)
- deallocate(cellsOnEdge)
- deallocate(verticesOnEdge)
- deallocate(edgesOnEdge)
- deallocate(cellsOnVertex)
- deallocate(edgesOnVertex)
+ block_ptr => domain % blocklist
+ do while(associated(block_ptr))
- end subroutine mpas_output_state_for_domain
+ deallocate(block_ptr % mesh % cellsOnCell % array)
+ deallocate(block_ptr % mesh % edgesOnCell % array)
+ deallocate(block_ptr % mesh % verticesOnCell % array)
+ deallocate(block_ptr % mesh % cellsOnEdge % array)
+ deallocate(block_ptr % mesh % verticesOnEdge % array)
+ deallocate(block_ptr % mesh % edgesOnEdge % array)
+ deallocate(block_ptr % mesh % cellsOnVertex % array)
+ deallocate(block_ptr % mesh % edgesOnVertex % array)
+ block_ptr % mesh % cellsOnCell % array => cellsOnCell_ptr % array
+ block_ptr % mesh % edgesOnCell % array => edgesOnCell_ptr % array
+ block_ptr % mesh % verticesOnCell % array => verticesOnCell_ptr % array
+ block_ptr % mesh % cellsOnEdge % array => cellsOnEdge_ptr % array
+ block_ptr % mesh % verticesOnEdge % array => verticesOnEdge_ptr % array
+ block_ptr % mesh % edgesOnEdge % array => edgesOnEdge_ptr % array
+ block_ptr % mesh % cellsOnVertex % array => cellsOnVertex_ptr % array
+ block_ptr % mesh % edgesOnVertex % array => edgesOnVertex_ptr % array
- subroutine mpas_output_state_finalize(output_obj, dminfo)
+ nullify(cellsOnCell_ptr % array)
+ nullify(edgesOnCell_ptr % array)
+ nullify(verticesOnCell_ptr % array)
+ nullify(cellsOnEdge_ptr % array)
+ nullify(verticesOnEdge_ptr % array)
+ nullify(edgesOnEdge_ptr % array)
+ nullify(cellsOnVertex_ptr % array)
+ nullify(edgesOnVertex_ptr % array)
+ block_ptr => block_ptr % next
+ cellsOnCell_ptr => cellsOnCell_ptr % next
+ edgesOnCell_ptr => edgesOnCell_ptr % next
+ verticesOnCell_ptr => verticesOnCell_ptr % next
+ cellsOnEdge_ptr => cellsOnEdge_ptr % next
+ verticesOnEdge_ptr => verticesOnEdge_ptr % next
+ edgesOnEdge_ptr => edgesOnEdge_ptr % next
+ cellsOnVertex_ptr => cellsOnVertex_ptr % next
+ edgesOnVertex_ptr => edgesOnVertex_ptr % next
+ end do
+
+ call mpas_deallocate_field(cellsOnCell_save)
+ call mpas_deallocate_field(edgesOnCell_save)
+ call mpas_deallocate_field(verticesOnCell_save)
+ call mpas_deallocate_field(cellsOnEdge_save)
+ call mpas_deallocate_field(verticesOnEdge_save)
+ call mpas_deallocate_field(edgesOnEdge_save)
+ call mpas_deallocate_field(cellsOnVertex_save)
+ call mpas_deallocate_field(edgesOnVertex_save)
+
+
+
+ end subroutine mpas_output_state_for_domain!}}}
+
+ subroutine mpas_output_state_finalize(output_obj, dminfo)!{{{
+
implicit none
type (io_output_object), intent(inout) :: output_obj
@@ -231,10 +324,9 @@
call mpas_io_output_finalize(output_obj, dminfo)
- end subroutine mpas_output_state_finalize
+ end subroutine mpas_output_state_finalize!}}}
-
- subroutine mpas_io_output_init( domain, output_obj, &
+ subroutine mpas_io_output_init( domain, output_obj, &!{{{
dminfo, &
mesh &
)
@@ -262,10 +354,9 @@
#include "add_output_atts.inc"
- end subroutine mpas_io_output_init
+ end subroutine mpas_io_output_init!}}}
-
- subroutine mpas_io_output_finalize(output_obj, dminfo)
+ subroutine mpas_io_output_finalize(output_obj, dminfo)!{{{
implicit none
@@ -276,6 +367,6 @@
call MPAS_closeStream(output_obj % io_stream, nferr)
- end subroutine mpas_io_output_finalize
+ end subroutine mpas_io_output_finalize!}}}
end module mpas_io_output
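
Note on the mpas_output_state_for_domain rewrite above: instead of building one set of global-index connectivity arrays from domain % blocklist (which assumed a single block), the routine now walks the whole block list, stashes each block's local connectivity arrays in a chain of field2dInteger save nodes, installs freshly allocated copies holding global indices (with unused entries pointed at the "garbage" elements nCells+1, nEdges+1, nVertices+1), writes the stream once, and finally restores the saved local arrays. A minimal sketch of that save/swap/restore pattern, assuming invented stand-in types block_t and saved_t (this is not MPAS code):

    program swap_restore_demo
       implicit none

       type saved_t                        ! stands in for the field2dInteger save nodes
          integer, dimension(:), pointer :: arr => null()
          type (saved_t), pointer :: next => null()
       end type saved_t

       type block_t                        ! stands in for block_type with one mesh array
          integer, dimension(:), pointer :: conn => null()
          type (block_t), pointer :: next => null()
       end type block_t

       type (block_t), pointer :: head, blk
       type (saved_t), pointer :: save_head, save_ptr
       integer :: i

       ! Two blocks, each with local connectivity 1..4
       allocate(head)
       allocate(head % next)
       blk => head
       do while (associated(blk))
          allocate(blk % conn(4))
          blk % conn = (/ (i, i=1,4) /)
          blk => blk % next
       end do

       ! Pass 1: stash each block's local array, install a global-index copy
       allocate(save_head)
       save_ptr => save_head
       blk => head
       do while (associated(blk))
          save_ptr % arr => blk % conn
          allocate(blk % conn(size(save_ptr % arr)))
          blk % conn = save_ptr % arr + 100   ! stand-in for the indexTo*ID lookup
          blk => blk % next
          if (associated(blk)) then
             allocate(save_ptr % next)
             save_ptr => save_ptr % next
          end if
       end do

       ! ... the single write covering all blocks would happen here ...

       ! Pass 2: free the temporary copies, restore the saved local arrays
       blk => head
       save_ptr => save_head
       do while (associated(blk))
          deallocate(blk % conn)
          blk % conn => save_ptr % arr
          blk => blk % next
          save_ptr => save_ptr % next
       end do

       write(*,*) 'first block restored:', head % conn   ! prints 1 2 3 4
    end program swap_restore_demo

The save list exists because the single MPAS_writeStream call happens only after every block has been converted, so one saved pointer per block must stay live across the whole loop; cleanup of the save nodes is omitted above, where the real code calls mpas_deallocate_field.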
Modified: branches/atmos_physics/src/framework/mpas_io_streams.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_io_streams.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/framework/mpas_io_streams.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -1406,16 +1406,17 @@
! Distribute field to multiple blocks
field_1dint_ptr => field_cursor % int1dField
i = 1
- if (trim(field_1dint_ptr % dimNames(1)) == 'nCells') then
- ownedSize = field_1dint_ptr % block % mesh % nCellsSolve
- else if (trim(field_1dint_ptr % dimNames(1)) == 'nEdges') then
- ownedSize = field_1dint_ptr % block % mesh % nEdgesSolve
- else if (trim(field_1dint_ptr % dimNames(1)) == 'nVertices') then
- ownedSize = field_1dint_ptr % block % mesh % nVerticesSolve
- else
- ownedSize = field_1dint_ptr % dimSizes(1)
- end if
do while (associated(field_1dint_ptr))
+ if (trim(field_1dint_ptr % dimNames(1)) == 'nCells') then
+ ownedSize = field_1dint_ptr % block % mesh % nCellsSolve
+ else if (trim(field_1dint_ptr % dimNames(1)) == 'nEdges') then
+ ownedSize = field_1dint_ptr % block % mesh % nEdgesSolve
+ else if (trim(field_1dint_ptr % dimNames(1)) == 'nVertices') then
+ ownedSize = field_1dint_ptr % block % mesh % nVerticesSolve
+ else
+ ownedSize = field_1dint_ptr % dimSizes(1)
+ end if
+
if (field_cursor % int1dField % isSuperArray) then
field_1dint_ptr % array(j) = int0d_temp
else
@@ -1483,16 +1484,17 @@
! Distribute field to multiple blocks
field_2dint_ptr => field_cursor % int2dField
i = 1
- if (trim(field_2dint_ptr % dimNames(2)) == 'nCells') then
- ownedSize = field_2dint_ptr % block % mesh % nCellsSolve
- else if (trim(field_2dint_ptr % dimNames(2)) == 'nEdges') then
- ownedSize = field_2dint_ptr % block % mesh % nEdgesSolve
- else if (trim(field_2dint_ptr % dimNames(2)) == 'nVertices') then
- ownedSize = field_2dint_ptr % block % mesh % nVerticesSolve
- else
- ownedSize = field_2dint_ptr % dimSizes(2)
- end if
do while (associated(field_2dint_ptr))
+ if (trim(field_2dint_ptr % dimNames(2)) == 'nCells') then
+ ownedSize = field_2dint_ptr % block % mesh % nCellsSolve
+ else if (trim(field_2dint_ptr % dimNames(2)) == 'nEdges') then
+ ownedSize = field_2dint_ptr % block % mesh % nEdgesSolve
+ else if (trim(field_2dint_ptr % dimNames(2)) == 'nVertices') then
+ ownedSize = field_2dint_ptr % block % mesh % nVerticesSolve
+ else
+ ownedSize = field_2dint_ptr % dimSizes(2)
+ end if
+
if (field_cursor % int2dField % isSuperArray) then
field_2dint_ptr % array(j,1:ownedSize) = int1d_temp(i:i+ownedSize-1)
else
@@ -1564,16 +1566,17 @@
! Distribute field to multiple blocks
field_3dint_ptr => field_cursor % int3dField
i = 1
- if (trim(field_3dint_ptr % dimNames(3)) == 'nCells') then
- ownedSize = field_3dint_ptr % block % mesh % nCellsSolve
- else if (trim(field_3dint_ptr % dimNames(3)) == 'nEdges') then
- ownedSize = field_3dint_ptr % block % mesh % nEdgesSolve
- else if (trim(field_3dint_ptr % dimNames(3)) == 'nVertices') then
- ownedSize = field_3dint_ptr % block % mesh % nVerticesSolve
- else
- ownedSize = field_3dint_ptr % dimSizes(3)
- end if
do while (associated(field_3dint_ptr))
+ if (trim(field_3dint_ptr % dimNames(3)) == 'nCells') then
+ ownedSize = field_3dint_ptr % block % mesh % nCellsSolve
+ else if (trim(field_3dint_ptr % dimNames(3)) == 'nEdges') then
+ ownedSize = field_3dint_ptr % block % mesh % nEdgesSolve
+ else if (trim(field_3dint_ptr % dimNames(3)) == 'nVertices') then
+ ownedSize = field_3dint_ptr % block % mesh % nVerticesSolve
+ else
+ ownedSize = field_3dint_ptr % dimSizes(3)
+ end if
+
if (field_cursor % int3dField % isSuperArray) then
field_3dint_ptr % array(j,:,1:ownedSize) = int2d_temp(:,i:i+ownedSize-1)
else
@@ -1663,16 +1666,18 @@
! Distribute field to multiple blocks
field_1dreal_ptr => field_cursor % real1dField
i = 1
- if (trim(field_1dreal_ptr % dimNames(1)) == 'nCells') then
- ownedSize = field_1dreal_ptr % block % mesh % nCellsSolve
- else if (trim(field_1dreal_ptr % dimNames(1)) == 'nEdges') then
- ownedSize = field_1dreal_ptr % block % mesh % nEdgesSolve
- else if (trim(field_1dreal_ptr % dimNames(1)) == 'nVertices') then
- ownedSize = field_1dreal_ptr % block % mesh % nVerticesSolve
- else
- ownedSize = field_1dreal_ptr % dimSizes(1)
- end if
+
do while (associated(field_1dreal_ptr))
+ if (trim(field_1dreal_ptr % dimNames(1)) == 'nCells') then
+ ownedSize = field_1dreal_ptr % block % mesh % nCellsSolve
+ else if (trim(field_1dreal_ptr % dimNames(1)) == 'nEdges') then
+ ownedSize = field_1dreal_ptr % block % mesh % nEdgesSolve
+ else if (trim(field_1dreal_ptr % dimNames(1)) == 'nVertices') then
+ ownedSize = field_1dreal_ptr % block % mesh % nVerticesSolve
+ else
+ ownedSize = field_1dreal_ptr % dimSizes(1)
+ end if
+
if (field_cursor % real1dField % isSuperArray) then
field_1dreal_ptr % array(j) = real0d_temp
else
@@ -1740,16 +1745,17 @@
! Distribute field to multiple blocks
field_2dreal_ptr => field_cursor % real2dField
i = 1
- if (trim(field_2dreal_ptr % dimNames(2)) == 'nCells') then
- ownedSize = field_2dreal_ptr % block % mesh % nCellsSolve
- else if (trim(field_2dreal_ptr % dimNames(2)) == 'nEdges') then
- ownedSize = field_2dreal_ptr % block % mesh % nEdgesSolve
- else if (trim(field_2dreal_ptr % dimNames(2)) == 'nVertices') then
- ownedSize = field_2dreal_ptr % block % mesh % nVerticesSolve
- else
- ownedSize = field_2dreal_ptr % dimSizes(2)
- end if
do while (associated(field_2dreal_ptr))
+ if (trim(field_2dreal_ptr % dimNames(2)) == 'nCells') then
+ ownedSize = field_2dreal_ptr % block % mesh % nCellsSolve
+ else if (trim(field_2dreal_ptr % dimNames(2)) == 'nEdges') then
+ ownedSize = field_2dreal_ptr % block % mesh % nEdgesSolve
+ else if (trim(field_2dreal_ptr % dimNames(2)) == 'nVertices') then
+ ownedSize = field_2dreal_ptr % block % mesh % nVerticesSolve
+ else
+ ownedSize = field_2dreal_ptr % dimSizes(2)
+ end if
+
if (field_cursor % real2dField % isSuperArray) then
field_2dreal_ptr % array(j,1:ownedSize) = real1d_temp(i:i+ownedSize-1)
else
@@ -1824,16 +1830,17 @@
! Distribute field to multiple blocks
field_3dreal_ptr => field_cursor % real3dField
i = 1
- if (trim(field_3dreal_ptr % dimNames(3)) == 'nCells') then
- ownedSize = field_3dreal_ptr % block % mesh % nCellsSolve
- else if (trim(field_3dreal_ptr % dimNames(3)) == 'nEdges') then
- ownedSize = field_3dreal_ptr % block % mesh % nEdgesSolve
- else if (trim(field_3dreal_ptr % dimNames(3)) == 'nVertices') then
- ownedSize = field_3dreal_ptr % block % mesh % nVerticesSolve
- else
- ownedSize = field_3dreal_ptr % dimSizes(3)
- end if
do while (associated(field_3dreal_ptr))
+ if (trim(field_3dreal_ptr % dimNames(3)) == 'nCells') then
+ ownedSize = field_3dreal_ptr % block % mesh % nCellsSolve
+ else if (trim(field_3dreal_ptr % dimNames(3)) == 'nEdges') then
+ ownedSize = field_3dreal_ptr % block % mesh % nEdgesSolve
+ else if (trim(field_3dreal_ptr % dimNames(3)) == 'nVertices') then
+ ownedSize = field_3dreal_ptr % block % mesh % nVerticesSolve
+ else
+ ownedSize = field_3dreal_ptr % dimSizes(3)
+ end if
+
if (field_cursor % real3dField % isSuperArray) then
!write(0,*) 'DEBUGGING : copying the temporary array'
field_3dreal_ptr % array(j,:,1:ownedSize) = real2d_temp(:,i:i+ownedSize-1)
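
The six hunks above all make the same fix: ownedSize used to be computed once, from the first block in the field's linked list, and then reused while slices of the read buffer were copied into every block. With multiple blocks per MPI task, each block generally owns a different number of cells, edges, or vertices, so the size must be re-evaluated inside the do-while loop as the running offset i advances. A toy illustration with made-up block sizes:

    program owned_size_demo
       implicit none
       integer, parameter :: nBlocks = 3
       integer, dimension(nBlocks) :: ownedSize = (/ 4, 2, 3 /)       ! per-block owned counts
       integer, dimension(9) :: temp = (/ 1, 2, 3, 4, 5, 6, 7, 8, 9 /) ! buffer read once for all blocks
       integer :: b, i, n

       i = 1
       do b = 1, nBlocks
          n = ownedSize(b)        ! must be re-evaluated for every block, exactly as
                                  ! the patch now does inside the do-while loop
          write(*,'(a,i0,a,4i3)') 'block ', b, ' receives', temp(i:i+n-1)
          i = i + n
       end do
    end program owned_size_demo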
Modified: branches/atmos_physics/src/framework/mpas_sort.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_sort.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/framework/mpas_sort.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -2,16 +2,16 @@
use mpas_kind_types
- interface quicksort
- module procedure mpas_quicksort_int
- module procedure mpas_quicksort_real
+ interface mpas_quicksort
+ module procedure mpas_quicksort_1dint
+ module procedure mpas_quicksort_1dreal
+ module procedure mpas_quicksort_2dint
+ module procedure mpas_quicksort_2dreal
end interface
-
contains
-
- recursive subroutine mpas_mergesort(array, d1, n1, n2)
+ recursive subroutine mpas_mergesort(array, d1, n1, n2)!{{{
implicit none
@@ -71,14 +71,137 @@
array(1:d1,n1:n2) = temp(1:d1,1:k-1)
- end subroutine mpas_mergesort
+ end subroutine mpas_mergesort!}}}
+ subroutine mpas_quicksort_1dint(nArray, array)!{{{
- subroutine mpas_quicksort_int(nArray, array)
+ implicit none
+ integer, intent(in) :: nArray
+ integer, dimension(nArray), intent(inout) :: array
+
+ integer :: i, j, top, l, r, pivot, s
+ integer :: pivot_value
+ integer, dimension(1) :: temp
+ integer, dimension(1000) :: lstack, rstack
+
+ if (nArray < 1) return
+
+ top = 1
+ lstack(top) = 1
+ rstack(top) = nArray
+
+ do while (top > 0)
+
+ l = lstack(top)
+ r = rstack(top)
+ top = top - 1
+
+ pivot = (l+r)/2
+
+ pivot_value = array(pivot)
+ temp(1) = array(pivot)
+ array(pivot) = array(r)
+ array(r) = temp(1)
+
+ s = l
+ do i=l,r-1
+ if (array(i) <= pivot_value) then
+ temp(1) = array(s)
+ array(s) = array(i)
+ array(i) = temp(1)
+ s = s + 1
+ end if
+ end do
+
+ temp(1) = array(s)
+ array(s) = array(r)
+ array(r) = temp(1)
+
+ if (s-1 > l) then
+ top = top + 1
+ if (top > 1000) write(0,*) 'Error: Quicksort exhausted its stack.'
+ lstack(top) = l
+ rstack(top) = s-1
+ end if
+
+ if (r > s+1) then
+ top = top + 1
+ if (top > 1000) write(0,*) 'Error: Quicksort exhausted its stack.'
+ lstack(top) = s+1
+ rstack(top) = r
+ end if
+ end do
+
+ end subroutine mpas_quicksort_1dint!}}}
+
+ subroutine mpas_quicksort_1dreal(nArray, array)!{{{
+
implicit none
integer, intent(in) :: nArray
+ real (kind=RKIND), dimension(nArray), intent(inout) :: array
+
+ integer :: i, j, top, l, r, pivot, s
+ real (kind=RKIND) :: pivot_value
+ real (kind=RKIND), dimension(1) :: temp
+ integer, dimension(1000) :: lstack, rstack
+
+ if (nArray < 1) return
+
+ top = 1
+ lstack(top) = 1
+ rstack(top) = nArray
+
+ do while (top > 0)
+
+ l = lstack(top)
+ r = rstack(top)
+ top = top - 1
+
+ pivot = (l+r)/2
+
+ pivot_value = array(pivot)
+ temp(1) = array(pivot)
+ array(pivot) = array(r)
+ array(r) = temp(1)
+
+ s = l
+ do i=l,r-1
+ if (array(i) <= pivot_value) then
+ temp(1) = array(s)
+ array(s) = array(i)
+ array(i) = temp(1)
+ s = s + 1
+ end if
+ end do
+
+ temp(1) = array(s)
+ array(s) = array(r)
+ array(r) = temp(1)
+
+ if (s-1 > l) then
+ top = top + 1
+ if (top > 1000) write(0,*) 'Error: Quicksort exhausted its stack.'
+ lstack(top) = l
+ rstack(top) = s-1
+ end if
+
+ if (r > s+1) then
+ top = top + 1
+ if (top > 1000) write(0,*) 'Error: Quicksort exhausted its stack.'
+ lstack(top) = s+1
+ rstack(top) = r
+ end if
+ end do
+
+ end subroutine mpas_quicksort_1dreal!}}}
+
+ subroutine mpas_quicksort_2dint(nArray, array)!{{{
+
+ implicit none
+
+ integer, intent(in) :: nArray
integer, dimension(2,nArray), intent(inout) :: array
integer :: i, j, top, l, r, pivot, s
@@ -140,11 +263,10 @@
end if
end do
- end subroutine mpas_quicksort_int
+ end subroutine mpas_quicksort_2dint!}}}
+ subroutine mpas_quicksort_2dreal(nArray, array)!{{{
- subroutine mpas_quicksort_real(nArray, array)
-
implicit none
integer, intent(in) :: nArray
@@ -209,11 +331,10 @@
end if
end do
- end subroutine mpas_quicksort_real
+ end subroutine mpas_quicksort_2dreal!}}}
+ integer function mpas_binary_search(array, d1, n1, n2, key)!{{{
- integer function mpas_binary_search(array, d1, n1, n2, key)
-
implicit none
integer, intent(in) :: d1, n1, n2, key
@@ -239,6 +360,6 @@
end if
end do
- end function mpas_binary_search
+ end function mpas_binary_search!}}}
end module mpas_sort
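
With the renamed generic interface, callers now write `call mpas_quicksort(n, array)` for any of the four array shapes. All four variants share one iterative scheme: the middle element is the pivot, it is swapped to the end of the range, a Lomuto-style partition sweeps [l, r-1], and the two remaining subranges are pushed onto the fixed-size lstack/rstack arrays instead of recursing (hence the guard when top exceeds 1000). A self-contained rendering of the 1-d integer version:

    program quicksort_demo
       implicit none
       integer, dimension(8) :: a = (/ 5, 2, 8, 1, 9, 3, 7, 4 /)

       call quicksort_1dint(size(a), a)
       write(*,'(8i3)') a              ! prints  1  2  3  4  5  7  8  9

    contains

       ! Same explicit-stack, Lomuto-partition scheme as mpas_quicksort_1dint above
       subroutine quicksort_1dint(n, array)
          integer, intent(in) :: n
          integer, dimension(n), intent(inout) :: array
          integer :: i, top, l, r, pivot, s, pivot_value, tmp
          integer, dimension(1000) :: lstack, rstack

          if (n < 1) return
          top = 1
          lstack(top) = 1
          rstack(top) = n

          do while (top > 0)
             l = lstack(top)
             r = rstack(top)
             top = top - 1

             pivot = (l + r) / 2            ! middle element as pivot ...
             pivot_value = array(pivot)
             tmp = array(pivot)             ! ... moved out of the way to array(r)
             array(pivot) = array(r)
             array(r) = tmp

             s = l                          ! Lomuto partition over [l, r-1]
             do i = l, r - 1
                if (array(i) <= pivot_value) then
                   tmp = array(s)
                   array(s) = array(i)
                   array(i) = tmp
                   s = s + 1
                end if
             end do
             tmp = array(s)                 ! pivot into its final slot s
             array(s) = array(r)
             array(r) = tmp

             if (s - 1 > l) then            ! push the left subrange
                top = top + 1
                lstack(top) = l
                rstack(top) = s - 1
             end if
             if (r > s + 1) then            ! push the right subrange
                top = top + 1
                lstack(top) = s + 1
                rstack(top) = r
             end if
          end do
       end subroutine quicksort_1dint
    end program quicksort_demo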
Modified: branches/atmos_physics/src/framework/mpas_timer.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_timer.F        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/framework/mpas_timer.F        2012-10-27 00:29:44 UTC (rev 2281)
@@ -1,20 +1,15 @@
module mpas_timer
+ use mpas_kind_types
use mpas_grid_types
- use mpas_dmpar
implicit none
save
-! private
#ifdef _PAPI
include 'f90papi.h'
#endif
-!#ifdef _MPI
-! include 'mpif.h'
-!#endif
-
type timer_node
character (len=StrKIND) :: timer_name
logical :: running, printable
@@ -38,6 +33,10 @@
contains
subroutine mpas_timer_start(timer_name, clear_timer, timer_ptr)!{{{
+# ifdef _MPI
+ use mpi
+# endif
+
character (len=*), intent (in) :: timer_name !< Input: name of timer, stored as name of timer
logical, optional, intent(in) :: clear_timer !< Input: flag to clear timer
type (timer_node), optional, pointer :: timer_ptr !< Output: pointer to store timer in module
@@ -47,6 +46,10 @@
integer :: clock, hz, usecs
+#ifdef MPAS_TAU
+ call tau_start(timer_name)
+#endif
+
timer_added = .false.
timer_found = .false.
@@ -159,6 +162,10 @@
end subroutine mpas_timer_start!}}}
subroutine mpas_timer_stop(timer_name, timer_ptr)!{{{
+# ifdef _MPI
+ use mpi
+# endif
+
character (len=*), intent(in) :: timer_name !< Input: name of timer to stop
type (timer_node), pointer, optional :: timer_ptr !< Input: pointer to timer, for stopping
@@ -167,6 +174,10 @@
real (kind=RKIND) :: time_temp
logical :: timer_found, string_equal, check_flag
integer :: clock, hz, usecs
+
+#ifdef MPAS_TAU
+ call tau_stop(timer_name)
+#endif
timer_found = .false.
@@ -250,9 +261,7 @@
tname = ''
do i=0,timer_ptr%levels+2
tname = tname//' '
-! write(*,'(a,$)') ' '
end do
-! tname = tname//timer_ptr%timer_name
if(timer_ptr%total_time == 0.0d0) then
timer_ptr%min_time = 0.0d0
@@ -315,6 +324,8 @@
end subroutine mpas_timer_init!}}}
subroutine mpas_timer_sync()!{{{
+ use mpas_dmpar
+
type (timer_node), pointer :: current
real (kind=RKIND) :: all_total_time, all_max_time, all_min_time, all_ave_time
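
The mpas_timer changes are mostly plumbing: the mpif.h include at module scope gives way to `use mpi` inside mpas_timer_start/stop under `_MPI`, mpas_dmpar is now used only where mpas_timer_sync needs it, and a build with -DMPAS_TAU forwards every named timer to TAU's tau_start/tau_stop. A minimal sketch of that hook pattern (demo_timer is an illustrative module, not MPAS code):

    module demo_timer
       implicit none
    contains
       subroutine demo_timer_start(timer_name)
          character (len=*), intent(in) :: timer_name
    #ifdef MPAS_TAU
          call tau_start(timer_name)   ! present only in builds configured for TAU
    #endif
          ! ... the usual system_clock / PAPI bookkeeping would follow here ...
          write(*,*) 'started timer: ', trim(timer_name)
       end subroutine demo_timer_start
    end module demo_timer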
Modified: branches/atmos_physics/src/registry/gen_inc.c
===================================================================
--- branches/atmos_physics/src/registry/gen_inc.c        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/registry/gen_inc.c        2012-10-27 00:29:44 UTC (rev 2281)
@@ -143,8 +143,8 @@
fortprintf(fd, " call mpas_dmpar_abort(dminfo)\n");
fortprintf(fd, " else if (ierr < 0) then\n");
fortprintf(fd, " write(0,*) \'Namelist record &%s not found; using default values for this namelist\'\'s variables\'\n",nls_ptr->record);
- fortprintf(fd, " rewind(funit)\n");
fortprintf(fd, " end if\n");
+ fortprintf(fd, " rewind(funit)\n");
dict_insert(dictionary, nls_ptr->record);
}
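
The rewind(funit) move in the hunk above is a real fix, not just tidying: previously the generated reader rewound the namelist unit only when a record was not found, so a record located earlier in the file than the one just read could be skipped entirely. Rewinding unconditionally before each record makes the reads independent of record order. A small self-contained demonstration (rec_a, rec_b, and demo.nml are invented for the example):

    program nml_rewind_demo
       implicit none
       integer :: funit, ierr, alpha, beta
       namelist /rec_a/ alpha
       namelist /rec_b/ beta

       alpha = -1
       beta  = -1

       open(newunit=funit, file='demo.nml', status='replace', action='readwrite')
       write(funit, '(a)') '&rec_b beta = 2 /'     ! rec_b comes FIRST in the file
       write(funit, '(a)') '&rec_a alpha = 1 /'
       rewind(funit)

       read(funit, nml=rec_a, iostat=ierr)    ! ok: the reader scans forward to rec_a
       rewind(funit)                          ! the unconditional rewind of the patch;
                                              ! drop it and the next read fails, since
                                              ! rec_b sits before rec_a in the file
       read(funit, nml=rec_b, iostat=ierr)

       close(funit, status='delete')
       write(*,*) 'alpha =', alpha, ' beta =', beta   ! expect 1 and 2
    end program nml_rewind_demo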
@@ -180,7 +180,7 @@
struct dimension * dim_ptr;
struct dimension_list * dimlist_ptr;
struct group_list * group_ptr;
- FILE * fd;
+ FILE * fd, *fd2;
char super_array[1024];
char array_class[1024];
char outer_dim[1024];
@@ -202,14 +202,18 @@
}
dim_ptr = dims;
while (dim_ptr) {
- if (dim_ptr->constant_value < 0 && !dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) fortprintf(fd, " integer :: %sSolve\n", dim_ptr->name_in_code);
- if (dim_ptr->constant_value < 0 && dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) fortprintf(fd, " integer :: %sSolve\n", dim_ptr->name_in_file);
+ if (dim_ptr->constant_value < 0 && !dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) {
+                 fortprintf(fd, " integer :: %sSolve\n", dim_ptr->name_in_code);
+                 fortprintf(fd, " integer, dimension(:), pointer :: %sArray\n", dim_ptr->name_in_code);
+         }
+ if (dim_ptr->constant_value < 0 && dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) {
+                 fortprintf(fd, " integer :: %sSolve\n", dim_ptr->name_in_file);
+         }
dim_ptr = dim_ptr->next;
}
fclose(fd);
-
/*
* Generate dummy dimension argument list
*/
@@ -232,7 +236,6 @@
fclose(fd);
-
/*
* Generate dummy dimension argument declaration list
*/
@@ -255,8 +258,76 @@
fclose(fd);
+ /*
+ * Generate dummy dimension argument declaration list
+ */
+ fd = fopen("dim_dummy_decls_inout.inc", "w");
+ dim_ptr = dims;
+ if (dim_ptr && dim_ptr->constant_value < 0 && !dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) {
+ fortprintf(fd, " integer, intent(inout) :: %s", dim_ptr->name_in_code);
+ dim_ptr = dim_ptr->next;
+ }
+ else if (dim_ptr && dim_ptr->constant_value < 0 && dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) {
+ fortprintf(fd, " integer, intent(inout) :: %s", dim_ptr->name_in_file);
+ dim_ptr = dim_ptr->next;
+ }
+ while (dim_ptr) {
+ if (dim_ptr->constant_value < 0 && !dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) fortprintf(fd, ", %s", dim_ptr->name_in_code);
+ if (dim_ptr->constant_value < 0 && dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) fortprintf(fd, ", %s", dim_ptr->name_in_file);
+ dim_ptr = dim_ptr->next;
+ }
+ fortprintf(fd, "\n");
+ fclose(fd);
+
/*
+ * Generate non-input dummy dimension argument declaration list
+ */
+ fd = fopen("dim_dummy_decls_noinput.inc", "w");
+ dim_ptr = dims;
+ if (dim_ptr && dim_ptr->constant_value < 0 && !dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) {
+ fortprintf(fd, " integer :: %s", dim_ptr->name_in_code);
+ dim_ptr = dim_ptr->next;
+ }
+ else if (dim_ptr && dim_ptr->constant_value < 0 && dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) {
+ fortprintf(fd, " integer :: %s", dim_ptr->name_in_file);
+ dim_ptr = dim_ptr->next;
+ }
+ while (dim_ptr) {
+ if (dim_ptr->constant_value < 0 && !dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) fortprintf(fd, ", %s", dim_ptr->name_in_code);
+ if (dim_ptr->constant_value < 0 && dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) fortprintf(fd, ", %s", dim_ptr->name_in_file);
+ dim_ptr = dim_ptr->next;
+ }
+ fortprintf(fd, "\n");
+
+ fclose(fd);
+
+
+
+ /*
+ * Generate dummy dimension assignment instructions
+ */
+ fd = fopen("dim_dummy_assigns.inc", "w");
+ dim_ptr = dims;
+ if (dim_ptr && dim_ptr->constant_value < 0 && !dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) {
+ fortprintf(fd, " %s = block %% mesh %% %s\n", dim_ptr->name_in_code, dim_ptr->name_in_code);
+ dim_ptr = dim_ptr->next;
+ }
+ else if (dim_ptr && dim_ptr->constant_value < 0 && dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) {
+ fortprintf(fd, " %s = block %% mesh %% %s\n", dim_ptr->name_in_file, dim_ptr->name_in_file);
+ dim_ptr = dim_ptr->next;
+ }
+ while (dim_ptr) {
+ if (dim_ptr->constant_value < 0 && !dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) fortprintf(fd, " %s = block %% mesh %% %s\n", dim_ptr->name_in_code, dim_ptr->name_in_code);
+ if (dim_ptr->constant_value < 0 && dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) fortprintf(fd, " %s = block %% mesh %% %s\n", dim_ptr->name_in_file, dim_ptr->name_in_file);
+ dim_ptr = dim_ptr->next;
+ }
+ fortprintf(fd, "\n");
+
+ fclose(fd);
+
+
+ /*
* Generate declarations of dimensions
*/
fd = fopen("dim_decls.inc", "w");
@@ -479,16 +550,71 @@
group_ptr = groups;
while (group_ptr) {
- if (group_ptr->vlist->var->ntime_levs > 1)
+ if (group_ptr->vlist->var->ntime_levs > 1) {
fortprintf(fd, " type (%s_multilevel_type), pointer :: %s\n", group_ptr->name, group_ptr->name);
- else
+ fortprintf(fd, " type (%s_type), pointer :: provis\n", group_ptr->name, group_ptr->name);
+         } else {
fortprintf(fd, " type (%s_type), pointer :: %s\n", group_ptr->name, group_ptr->name);
+         }
group_ptr = group_ptr->next;
}
fclose(fd);
+ /*
+ * Generate routines for allocating provisional types
+ */
+ fd = fopen("provis_alloc_routines.inc", "w");
+
+ group_ptr = groups;
+ while (group_ptr) {
+ if (group_ptr->vlist->var->ntime_levs > 1) {
+                 fortprintf(fd, " subroutine mpas_setup_provis_%ss(b)!{{{\n", group_ptr->name);
+                 fortprintf(fd, " type (block_type), pointer :: b\n");
+                 fortprintf(fd, " type (block_type), pointer :: block\n\n");
+                 fortprintf(fd, "#include \"dim_dummy_decls_noinput.inc\"\n\n");
+                 fortprintf(fd, " block => b\n");
+                 fortprintf(fd, " do while(associated(block))\n");
+                 fortprintf(fd, "#include \"dim_dummy_assigns.inc\"\n\n");
+ fortprintf(fd, " allocate(block %% provis)\n");
+ fortprintf(fd, " call mpas_allocate_%s(block, block %% provis, &\n", group_ptr->name);
+ fortprintf(fd, "#include \"dim_dummy_args.inc\"\n");
+ fortprintf(fd, " )\n\n");
+                 fortprintf(fd, " block => block %% next \n");
+                 fortprintf(fd, " end do\n\n");
+                 fortprintf(fd, " block => b\n");
+                 fortprintf(fd, " do while(associated(block))\n");
+ fortprintf(fd, " if(associated(block %% prev) .and. associated(block %% next)) then\n");
+ fortprintf(fd, " call mpas_create_%s_links(block %% provis, prev = block %% prev %% provis, next = block %% next %% provis)\n", group_ptr->name);
+ fortprintf(fd, " else if(associated(block %% prev)) then\n");
+ fortprintf(fd, " call mpas_create_%s_links(block %% provis, prev = block %% prev %% provis)\n", group_ptr->name);
+ fortprintf(fd, " else if(associated(block %% next)) then\n");
+ fortprintf(fd, " call mpas_create_%s_links(block %% provis, next = block %% next %% provis)\n", group_ptr->name);
+ fortprintf(fd, " else\n");
+ fortprintf(fd, " call mpas_create_%s_links(block %% provis)\n", group_ptr->name);
+ fortprintf(fd, " end if\n");
+                 fortprintf(fd, " block => block %% next \n");
+                 fortprintf(fd, " end do\n");
+                 fortprintf(fd, " end subroutine mpas_setup_provis_%ss!}}}\n\n", group_ptr->name);
+
+                 fortprintf(fd, " subroutine mpas_deallocate_provis_%ss(b)!{{{\n", group_ptr->name);
+                 fortprintf(fd, " type (block_type), pointer :: b\n");
+                 fortprintf(fd, " type (block_type), pointer :: block\n\n");
+                 fortprintf(fd, " block => b\n");
+                 fortprintf(fd, " do while(associated(block))\n");
+                 fortprintf(fd, " call mpas_deallocate_%s(block %% provis)\n", group_ptr->name);
+                 fortprintf(fd, " deallocate(block %% provis)\n");
+                 fortprintf(fd, " block => block %% next\n");
+                 fortprintf(fd, " end do\n");
+                 fortprintf(fd, " end subroutine mpas_deallocate_provis_%ss!}}}\n", group_ptr->name);
+         }
+ group_ptr = group_ptr->next;
+ }
+ fclose(fd);
+
+
+
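
Assembled for readability, the setup routine emitted above for a var group named, say, state reads roughly as follows; this is schematic, not compilable as shown, since the #include'd dimension declarations, assignments, and argument list are elided, and indentation is added. The matching mpas_deallocate_provis_states simply walks the list calling mpas_deallocate_state and deallocate.

    subroutine mpas_setup_provis_states(b)
       type (block_type), pointer :: b
       type (block_type), pointer :: block

       ! (dimension declarations from dim_dummy_decls_noinput.inc)

       block => b
       do while (associated(block))
          ! (dimension assignments from dim_dummy_assigns.inc)
          allocate(block % provis)
          call mpas_allocate_state(block, block % provis, &
               ! (arguments from dim_dummy_args.inc)
               )
          block => block % next
       end do

       block => b
       do while (associated(block))
          if (associated(block % prev) .and. associated(block % next)) then
             call mpas_create_state_links(block % provis, prev = block % prev % provis, &
                                          next = block % next % provis)
          else if (associated(block % prev)) then
             call mpas_create_state_links(block % provis, prev = block % prev % provis)
          else if (associated(block % next)) then
             call mpas_create_state_links(block % provis, next = block % next % provis)
          else
             call mpas_create_state_links(block % provis)
          end if
          block => block % next
       end do
    end subroutine mpas_setup_provis_states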
/* To be included in allocate_block */
fd = fopen("block_allocs.inc", "w");
group_ptr = groups;
@@ -687,6 +813,10 @@
fortprintf(fd, " %s %% %s %% fieldName = \'%s\'\n", group_ptr->name, var_ptr->name_in_code, var_ptr->name_in_file);
fortprintf(fd, " %s %% %s %% isSuperArray = .false.\n", group_ptr->name, var_ptr->name_in_code);
if (var_ptr->ndims > 0) {
+                          if(var_ptr->persistence == SCRATCH){
+                                 fortprintf(fd, " ! SCRATCH VARIABLE\n");
+                                 fortprintf(fd, " nullify(%s %% %s %% array)\n", group_ptr->name, var_ptr->name_in_code);
+                         } else if(var_ptr->persistence == PERSISTENT){
fortprintf(fd, " allocate(%s %% %s %% array(", group_ptr->name, var_ptr->name_in_code);
dimlist_ptr = var_ptr->dimlist;
if (!strncmp(dimlist_ptr->dim->name_in_file, "nCells", 1024) ||
@@ -717,6 +847,7 @@
else if (var_ptr->vtype == CHARACTER)
fortprintf(fd, " %s %% %s %% array = \'\'\n", group_ptr->name, var_ptr->name_in_code ); /* initialize field to zero */
+                         }
dimlist_ptr = var_ptr->dimlist;
i = 1;
while (dimlist_ptr) {
@@ -743,7 +874,7 @@
i++;
dimlist_ptr = dimlist_ptr->next;
}
- }
+                        }
if (var_ptr->timedim) fortprintf(fd, " %s %% %s %% hasTimeDimension = .true.\n", group_ptr->name, var_ptr->name_in_code);
else fortprintf(fd, " %s %% %s %% hasTimeDimension = .false.\n", group_ptr->name, var_ptr->name_in_code);
@@ -808,14 +939,18 @@
var_list_ptr2 = var_list_ptr;
var_list_ptr = var_list_ptr->next;
}
- fortprintf(fd, " deallocate(%s %% %s %% array)\n", group_ptr->name, var_list_ptr2->var->super_array);
+ fortprintf(fd, " if(associated(%s %% %s %% array)) then\n", group_ptr->name, var_list_ptr2->var->super_array);
+ fortprintf(fd, " deallocate(%s %% %s %% array)\n", group_ptr->name, var_list_ptr2->var->super_array);
+ fortprintf(fd, " end if\n");
fortprintf(fd, " deallocate(%s %% %s %% ioinfo)\n", group_ptr->name, var_list_ptr2->var->super_array);
fortprintf(fd, " call mpas_deallocate_attlist(%s %% %s %% attList)\n", group_ptr->name, var_list_ptr2->var->super_array);
fortprintf(fd, " deallocate(%s %% %s)\n\n", group_ptr->name, var_list_ptr2->var->super_array);
}
else {
if (var_ptr->ndims > 0) {
- fortprintf(fd, " deallocate(%s %% %s %% array)\n", group_ptr->name, var_ptr->name_in_code);
+ fortprintf(fd, " if(associated(%s %% %s %% array)) then\n", group_ptr->name, var_ptr->name_in_code);
+ fortprintf(fd, " deallocate(%s %% %s %% array)\n", group_ptr->name, var_ptr->name_in_code);
+ fortprintf(fd, " end if\n");
fortprintf(fd, " deallocate(%s %% %s %% ioinfo)\n", group_ptr->name, var_ptr->name_in_code);
fortprintf(fd, " call mpas_deallocate_attlist(%s %% %s %% attList)\n", group_ptr->name, var_ptr->name_in_code);
fortprintf(fd, " deallocate(%s %% %s)\n\n", group_ptr->name, var_ptr->name_in_code);
@@ -967,14 +1102,27 @@
/* subroutine to call link subroutine for every field type */
fortprintf(fd, " subroutine mpas_create_field_links(b)\n\n");
fortprintf(fd, " implicit none\n");
- fortprintf(fd, " type (block_type), pointer :: b\n\n");
+ fortprintf(fd, " type (block_type), pointer :: b\n");
+ fortprintf(fd, " type (block_type), pointer :: prev, next\n\n");
+ fortprintf(fd, " if(associated(b %% prev)) then\n");
+ fortprintf(fd, " prev => b %% prev\n");
+ fortprintf(fd, " else\n");
+ fortprintf(fd, " nullify(prev)\n");
+ fortprintf(fd, " end if\n");
+ fortprintf(fd, " if(associated(b %% next)) then\n");
+ fortprintf(fd, " next => b %% next\n");
+ fortprintf(fd, " else\n");
+ fortprintf(fd, " nullify(next)\n");
+ fortprintf(fd, " end if\n\n");
group_ptr = groups;
while (group_ptr)
{
var_list_ptr = group_ptr->vlist;
var_list_ptr = var_list_ptr->next;
+
+ if (!var_list_ptr) break;
+
var_ptr = var_list_ptr->var;
-
int ntime_levs = 1;
@@ -995,12 +1143,28 @@
{
for(i=1; i<=ntime_levs; i++)
{
- fortprintf(fd, " call mpas_create_%s_links(b %% %s %% time_levs(%i) %% %s)\n", group_ptr->name, group_ptr->name, i, group_ptr->name);
+                                fortprintf(fd, " if(associated(next) .and. associated(prev)) then\n");
+                                fortprintf(fd, " call mpas_create_%s_links(b %% %s %% time_levs(%i) %% %s, prev = prev %% %s %% time_levs(%i) %% %s, next = next %% %s %% time_levs(%i) %% %s)\n", group_ptr->name, group_ptr->name, i, group_ptr->name, i, group_ptr->name, group_ptr->name, i, group_ptr->name);
+                                fortprintf(fd, " else if(associated(next)) then\n");
+                                fortprintf(fd, " call mpas_create_%s_links(b %% %s %% time_levs(%i) %% %s, next = next %% %s %% time_levs(%i) %% %s)\n", group_ptr->name, group_ptr->name, i, group_ptr->name, group_ptr->name, i, group_ptr->name);
+                                fortprintf(fd, " else if(associated(prev)) then\n");
+                                fortprintf(fd, " call mpas_create_%s_links(b %% %s %% time_levs(%i) %% %s, prev = prev %% %s %% time_levs(%i) %% %s)\n", group_ptr->name, group_ptr->name, i, group_ptr->name, group_ptr->name, i, group_ptr->name);
+                                fortprintf(fd, " else\n");
+                                fortprintf(fd, " call mpas_create_%s_links(b %% %s %% time_levs(%i) %% %s)\n", group_ptr->name, group_ptr->name, i, group_ptr->name);
+                                fortprintf(fd, " end if\n\n");
}
}
else
{
- fortprintf(fd, " call mpas_create_%s_links(b %% %s)\n", group_ptr->name, group_ptr->name);
+                        fortprintf(fd, " if(associated(next) .and. associated(prev)) then\n");
+ fortprintf(fd, " call mpas_create_%s_links(b %% %s, prev = prev %% %s, next = next %% %s)\n", group_ptr->name, group_ptr->name, group_ptr->name, group_ptr->name);
+                        fortprintf(fd, " else if(associated(next)) then\n");
+ fortprintf(fd, " call mpas_create_%s_links(b %% %s, next = next %% %s)\n", group_ptr->name, group_ptr->name, group_ptr->name);
+                        fortprintf(fd, " else if(associated(prev)) then\n");
+ fortprintf(fd, " call mpas_create_%s_links(b %% %s, prev = prev %% %s)\n", group_ptr->name, group_ptr->name, group_ptr->name);
+                        fortprintf(fd, " else\n");
+ fortprintf(fd, " call mpas_create_%s_links(b %% %s)\n", group_ptr->name, group_ptr->name);
+                        fortprintf(fd, " end if\n\n");
}
}
else if (var_ptr->ndims > 0)
@@ -1012,12 +1176,28 @@
{
for(i=1; i<=ntime_levs; i++)
{
- fortprintf(fd, " call mpas_create_%s_links(b %% %s %% time_levs(%i) %% %s)\n", group_ptr->name, group_ptr->name, i, group_ptr->name);
+                                fortprintf(fd, " if(associated(next) .and. associated(prev)) then\n");
+                                fortprintf(fd, " call mpas_create_%s_links(b %% %s %% time_levs(%i) %% %s, prev = prev %% %s %% time_levs(%i) %% %s, next = next %% %s %% time_levs(%i) %% %s)\n", group_ptr->name, group_ptr->name, i, group_ptr->name, group_ptr->name, i, group_ptr->name, group_ptr->name, i, group_ptr->name);
+                                fortprintf(fd, " else if(associated(next)) then\n");
+                                fortprintf(fd, " call mpas_create_%s_links(b %% %s %% time_levs(%i) %% %s, next = next %% %s %% time_levs(%i) %% %s)\n", group_ptr->name, group_ptr->name, i, group_ptr->name, group_ptr->name, i, group_ptr->name);
+                                fortprintf(fd, " else if(associated(prev)) then\n");
+                                fortprintf(fd, " call mpas_create_%s_links(b %% %s %% time_levs(%i) %% %s, prev = prev %% %s %% time_levs(%i) %% %s)\n", group_ptr->name, group_ptr->name, i, group_ptr->name, group_ptr->name, i, group_ptr->name);
+                                fortprintf(fd, " else\n");
+                                fortprintf(fd, " call mpas_create_%s_links(b %% %s %% time_levs(%i) %% %s)\n", group_ptr->name, group_ptr->name, i, group_ptr->name);
+                                fortprintf(fd, " end if\n\n");
}
}
else
{
- fortprintf(fd, " call mpas_create_%s_links(b %% %s)\n", group_ptr->name, group_ptr->name);
+                         fortprintf(fd, " if(associated(next) .and. associated(prev)) then\n");
+                         fortprintf(fd, " call mpas_create_%s_links(b %% %s, prev = prev %% %s, next = next %% %s)\n", group_ptr->name, group_ptr->name, group_ptr->name, group_ptr->name);
+                         fortprintf(fd, " else if(associated(next)) then\n");
+                         fortprintf(fd, " call mpas_create_%s_links(b %% %s, next = next %% %s)\n", group_ptr->name, group_ptr->name, group_ptr->name);
+                         fortprintf(fd, " else if(associated(prev)) then\n");
+                         fortprintf(fd, " call mpas_create_%s_links(b %% %s, prev = prev %% %s)\n", group_ptr->name, group_ptr->name, group_ptr->name);
+                         fortprintf(fd, " else\n");
+                         fortprintf(fd, " call mpas_create_%s_links(b %% %s)\n", group_ptr->name, group_ptr->name);
+                         fortprintf(fd, " end if\n\n");
}
}
@@ -1029,9 +1209,10 @@
group_ptr = groups;
while (group_ptr) {
- fortprintf(fd, " subroutine mpas_create_%s_links(%s)\n\n", group_ptr->name, group_ptr->name);
+ fortprintf(fd, " subroutine mpas_create_%s_links(%s, prev, next)\n\n", group_ptr->name, group_ptr->name);
fortprintf(fd, " implicit none\n");
- fortprintf(fd, " type (%s_type), pointer :: %s\n\n", group_ptr->name, group_ptr->name);
+ fortprintf(fd, " type (%s_type), pointer :: %s\n", group_ptr->name, group_ptr->name);
+         fortprintf(fd, " type (%s_type), pointer, optional :: prev, next\n", group_ptr->name);
var_list_ptr = group_ptr->vlist;
while (var_list_ptr) {
@@ -1050,17 +1231,62 @@
fortprintf(fd, " %s %% %s %% sendList => %s %% %s %% block %% parinfo %% cellsToSend\n", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
fortprintf(fd, " %s %% %s %% recvList => %s %% %s %% block %% parinfo %% cellsToRecv\n", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
fortprintf(fd, " %s %% %s %% copyList => %s %% %s %% block %% parinfo %% cellsToCopy\n", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
+                                 fortprintf(fd, " if(present(prev)) then\n");
+                                 fortprintf(fd, " %s %% %s %% prev => prev %% %s\n", group_ptr->name, var_ptr2->super_array, var_ptr2->super_array);
+                                 fortprintf(fd, " else\n");
+                                 fortprintf(fd, " nullify(%s %% %s %% prev)\n", group_ptr->name, var_ptr2->super_array);
+                                 fortprintf(fd, " end if\n");
+                                 fortprintf(fd, " if(present(next)) then\n");
+                                 fortprintf(fd, " %s %% %s %% next => next %% %s\n", group_ptr->name, var_ptr2->super_array, var_ptr2->super_array);
+                                 fortprintf(fd, " else\n");
+                                 fortprintf(fd, " nullify(%s %% %s %% next)\n", group_ptr->name, var_ptr2->super_array);
+                                 fortprintf(fd, " end if\n\n");
}
else if (strncmp("nEdges",outer_dim,1024) == 0) {
fortprintf(fd, " %s %% %s %% sendList => %s %% %s %% block %% parinfo %% edgesToSend\n", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
fortprintf(fd, " %s %% %s %% recvList => %s %% %s %% block %% parinfo %% edgesToRecv\n", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
fortprintf(fd, " %s %% %s %% copyList => %s %% %s %% block %% parinfo %% edgesToCopy\n", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
+                                 fortprintf(fd, " if(present(prev)) then\n");
+                                 fortprintf(fd, " %s %% %s %% prev => prev %% %s\n", group_ptr->name, var_ptr2->super_array, var_ptr2->super_array);
+                                 fortprintf(fd, " else\n");
+                                 fortprintf(fd, " nullify(%s %% %s %% prev)\n", group_ptr->name, var_ptr2->super_array);
+                                 fortprintf(fd, " end if\n");
+                                 fortprintf(fd, " if(present(next)) then\n");
+                                 fortprintf(fd, " %s %% %s %% next => next %% %s\n", group_ptr->name, var_ptr2->super_array, var_ptr2->super_array);
+                                 fortprintf(fd, " else\n");
+                                 fortprintf(fd, " nullify(%s %% %s %% next)\n", group_ptr->name, var_ptr2->super_array);
+                                 fortprintf(fd, " end if\n\n");
}
else if (strncmp("nVertices",outer_dim,1024) == 0) {
fortprintf(fd, " %s %% %s %% sendList => %s %% %s %% block %% parinfo %% verticesToSend\n", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
fortprintf(fd, " %s %% %s %% recvList => %s %% %s %% block %% parinfo %% verticesToRecv\n", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
fortprintf(fd, " %s %% %s %% copyList => %s %% %s %% block %% parinfo %% verticesToCopy\n", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
- }
+                                 fortprintf(fd, " if(present(prev)) then\n");
+                                 fortprintf(fd, " %s %% %s %% prev => prev %% %s\n", group_ptr->name, var_ptr2->super_array, var_ptr2->super_array);
+                                 fortprintf(fd, " else\n");
+                                 fortprintf(fd, " nullify(%s %% %s %% prev)\n", group_ptr->name, var_ptr2->super_array);
+                                 fortprintf(fd, " end if\n");
+                                 fortprintf(fd, " if(present(next)) then\n");
+                                 fortprintf(fd, " %s %% %s %% next => next %% %s\n", group_ptr->name, var_ptr2->super_array, var_ptr2->super_array);
+                                 fortprintf(fd, " else\n");
+                                 fortprintf(fd, " nullify(%s %% %s %% next)\n", group_ptr->name, var_ptr2->super_array);
+                                 fortprintf(fd, " end if\n\n");
+ } else {
+                                 fortprintf(fd, " nullify(%s %% %s %% sendList)\n", group_ptr->name, var_ptr2->super_array);
+                                 fortprintf(fd, " nullify(%s %% %s %% recvList)\n", group_ptr->name, var_ptr2->super_array);
+                                 fortprintf(fd, " nullify(%s %% %s %% copyList)\n", group_ptr->name, var_ptr2->super_array);
+                                 fortprintf(fd, " if(present(prev)) then\n");
+                                 fortprintf(fd, " %s %% %s %% prev => prev %% %s\n", group_ptr->name, var_ptr2->super_array, var_ptr2->super_array);
+                                 fortprintf(fd, " else\n");
+                                 fortprintf(fd, " nullify(%s %% %s %% prev)\n", group_ptr->name, var_ptr2->super_array);
+                                 fortprintf(fd, " end if\n");
+                                 fortprintf(fd, " if(present(next)) then\n");
+                                 fortprintf(fd, " %s %% %s %% next => next %% %s\n", group_ptr->name, var_ptr2->super_array, var_ptr2->super_array);
+                                 fortprintf(fd, " else\n");
+                                 fortprintf(fd, " nullify(%s %% %s %% next)\n", group_ptr->name, var_ptr2->super_array);
+                                 fortprintf(fd, " end if\n\n");
+
+                         }
fortprintf(fd, "\n");
}
else
@@ -1073,17 +1299,61 @@
                          fortprintf(fd, " %s %% %s %% sendList => %s %% %s %% block %% parinfo %% cellsToSend\n", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
                          fortprintf(fd, " %s %% %s %% recvList => %s %% %s %% block %% parinfo %% cellsToRecv\n", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
                          fortprintf(fd, " %s %% %s %% copyList => %s %% %s %% block %% parinfo %% cellsToCopy\n", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
+                                 fortprintf(fd, " if(present(prev)) then\n");
+                                 fortprintf(fd, " %s %% %s %% prev => prev %% %s\n", group_ptr->name, var_ptr->name_in_code, var_ptr->name_in_code);
+                                 fortprintf(fd, " else\n");
+                                 fortprintf(fd, " nullify(%s %% %s %% prev)\n", group_ptr->name, var_ptr->name_in_code);
+                                 fortprintf(fd, " end if\n");
+                                 fortprintf(fd, " if(present(next)) then\n");
+                                 fortprintf(fd, " %s %% %s %% next => next %% %s\n", group_ptr->name, var_ptr->name_in_code, var_ptr->name_in_code);
+                                 fortprintf(fd, " else\n");
+                                 fortprintf(fd, " nullify(%s %% %s %% next)\n", group_ptr->name, var_ptr->name_in_code);
+                                 fortprintf(fd, " end if\n\n");
}
else if (strncmp("nEdges",outer_dim,1024) == 0) {
                          fortprintf(fd, " %s %% %s %% sendList => %s %% %s %% block %% parinfo %% edgesToSend\n", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
                          fortprintf(fd, " %s %% %s %% recvList => %s %% %s %% block %% parinfo %% edgesToRecv\n", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
                          fortprintf(fd, " %s %% %s %% copyList => %s %% %s %% block %% parinfo %% edgesToCopy\n", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
+                                 fortprintf(fd, " if(present(prev)) then\n");
+                                 fortprintf(fd, " %s %% %s %% prev => prev %% %s\n", group_ptr->name, var_ptr->name_in_code, var_ptr->name_in_code);
+                                 fortprintf(fd, " else\n");
+                                 fortprintf(fd, " nullify(%s %% %s %% prev)\n", group_ptr->name, var_ptr->name_in_code);
+                                 fortprintf(fd, " end if\n");
+                                 fortprintf(fd, " if(present(next)) then\n");
+                                 fortprintf(fd, " %s %% %s %% next => next %% %s\n", group_ptr->name, var_ptr->name_in_code, var_ptr->name_in_code);
+                                 fortprintf(fd, " else\n");
+                                 fortprintf(fd, " nullify(%s %% %s %% next)\n", group_ptr->name, var_ptr->name_in_code);
+                                 fortprintf(fd, " end if\n\n");
}
else if (strncmp("nVertices",outer_dim,1024) == 0) {
                          fortprintf(fd, " %s %% %s %% sendList => %s %% %s %% block %% parinfo %% verticesToSend\n", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
                          fortprintf(fd, " %s %% %s %% recvList => %s %% %s %% block %% parinfo %% verticesToRecv\n", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
                          fortprintf(fd, " %s %% %s %% copyList => %s %% %s %% block %% parinfo %% verticesToCopy\n", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
-                         }
+                                 fortprintf(fd, " if(present(prev)) then\n");
+                                 fortprintf(fd, " %s %% %s %% prev => prev %% %s\n", group_ptr->name, var_ptr->name_in_code, var_ptr->name_in_code);
+                                 fortprintf(fd, " else\n");
+                                 fortprintf(fd, " nullify(%s %% %s %% prev)\n", group_ptr->name, var_ptr->name_in_code);
+                                 fortprintf(fd, " end if\n");
+                                 fortprintf(fd, " if(present(next)) then\n");
+                                 fortprintf(fd, " %s %% %s %% next => next %% %s\n", group_ptr->name, var_ptr->name_in_code, var_ptr->name_in_code);
+                                 fortprintf(fd, " else\n");
+                                 fortprintf(fd, " nullify(%s %% %s %% next)\n", group_ptr->name, var_ptr->name_in_code);
+                                 fortprintf(fd, " end if\n\n");
+                         } else {
+                                 fortprintf(fd, " nullify(%s %% %s %% sendList)\n", group_ptr->name, var_ptr->name_in_code);
+                                 fortprintf(fd, " nullify(%s %% %s %% recvList)\n", group_ptr->name, var_ptr->name_in_code);
+                                 fortprintf(fd, " nullify(%s %% %s %% copyList)\n", group_ptr->name, var_ptr->name_in_code);
+                                 fortprintf(fd, " if(present(prev)) then\n");
+                                 fortprintf(fd, " %s %% %s %% prev => prev %% %s\n", group_ptr->name, var_ptr->name_in_code, var_ptr->name_in_code);
+                                 fortprintf(fd, " else\n");
+                                 fortprintf(fd, " nullify(%s %% %s %% prev)\n", group_ptr->name, var_ptr->name_in_code);
+                                 fortprintf(fd, " end if\n");
+                                 fortprintf(fd, " if(present(next)) then\n");
+                                 fortprintf(fd, " %s %% %s %% next => next %% %s\n", group_ptr->name, var_ptr->name_in_code, var_ptr->name_in_code);
+                                 fortprintf(fd, " else\n");
+                                 fortprintf(fd, " nullify(%s %% %s %% next)\n", group_ptr->name, var_ptr->name_in_code);
+                                 fortprintf(fd, " end if\n\n");
+                         }
                          fortprintf(fd, "\n");
         }
var_list_ptr = var_list_ptr->next;
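
For reference, the prev/next wiring added above generates Fortran of the following shape for each field. The group name "state" and field name "u" are illustrative stand-ins for the %s arguments, not names taken from this diff:

      if(present(prev)) then
         state % u % prev => prev % u
      else
         nullify(state % u % prev)
      end if
      if(present(next)) then
         state % u % next => next % u
      else
         nullify(state % u % next)
      end if

That is, when a neighboring block is supplied, each field is linked to the field of the same name in that block; a field created without neighbors simply has both pointers nullified.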
@@ -1107,7 +1377,7 @@
struct dimension_list * dimlist_ptr, * lastdim;
struct group_list * group_ptr;
struct dtable * dictionary;
- FILE * fd;
+ FILE * fd, *fd2;
char vtype[5];
char fname[32];
char super_array[1024];
@@ -1857,6 +2127,7 @@
* MGD NEW CODE
*/
fd = fopen("exchange_input_field_halos.inc", "w");
+ fd2 = fopen("non_decomp_copy_input_fields.inc", "w");
group_ptr = groups;
while (group_ptr) {
@@ -1866,16 +2137,19 @@
dimlist_ptr = var_ptr->dimlist;
i = 1;
+                 if(var_ptr->persistence == PERSISTENT){
while (dimlist_ptr) {
if (i == var_ptr->ndims) {
+
+                         if (var_ptr->ntime_levs > 1) {
+                                 snprintf(struct_deref, 1024, "domain %% blocklist %% %s %% time_levs(1) %% %s", group_ptr->name, group_ptr->name);
+                         } else {
+                                 snprintf(struct_deref, 1024, "domain %% blocklist %% %s", group_ptr->name);
+                         }
+
                          if (!strncmp(dimlist_ptr->dim->name_in_file, "nCells", 1024) ||
                              !strncmp(dimlist_ptr->dim->name_in_file, "nEdges", 1024) ||
                              !strncmp(dimlist_ptr->dim->name_in_file, "nVertices", 1024)) {
-
-                                 if (var_ptr->ntime_levs > 1)
-                                         snprintf(struct_deref, 1024, "domain %% blocklist %% %s %% time_levs(1) %% %s", group_ptr->name, group_ptr->name);
-                                 else
-                                         snprintf(struct_deref, 1024, "domain %% blocklist %% %s", group_ptr->name);
                                  if (strncmp(var_ptr->super_array, "-", 1024) != 0) {
                                          fortprintf(fd, " if ((%s %% %s %% ioinfo %% input .and. input_obj %% stream == STREAM_INPUT) .or. &\n", struct_deref, var_ptr->super_array);
@@ -1898,12 +2172,19 @@
                                          fortprintf(fd, " end if\n\n");
-                         }
+                         } else {
+                                 fortprintf(fd2, " if ((%s %% %s %% ioinfo %% input .and. input_obj %% stream == STREAM_INPUT) .or. &\n", struct_deref, var_ptr->name_in_code);
+                                 fortprintf(fd2, " (%s %% %s %% ioinfo %% restart .and. input_obj %% stream == STREAM_RESTART) .or. &\n", struct_deref, var_ptr->name_in_code);
+                                 fortprintf(fd2, " (%s %% %s %% ioinfo %% sfc .and. input_obj %% stream == STREAM_SFC)) then\n", struct_deref, var_ptr->name_in_code);
+                                 fortprintf(fd2, " call mpas_dmpar_copy_field(%s %% %s)\n", struct_deref, var_ptr->name_in_code);
+                                 fortprintf(fd2, " end if\n\n");
+                         }
}
i++;
dimlist_ptr = dimlist_ptr -> next;
}
+                 }
if (var_list_ptr) var_list_ptr = var_list_ptr->next;
}
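
The new fd2 stream writes non_decomp_copy_input_fields.inc. For a field whose outermost dimension is not nCells, nEdges, or nVertices, the generated entry looks roughly like the following, with "state" and "u" again standing in for the %s arguments:

      if ((domain % blocklist % state % u % ioinfo % input .and. input_obj % stream == STREAM_INPUT) .or. &
          (domain % blocklist % state % u % ioinfo % restart .and. input_obj % stream == STREAM_RESTART) .or. &
          (domain % blocklist % state % u % ioinfo % sfc .and. input_obj % stream == STREAM_SFC)) then
         call mpas_dmpar_copy_field(domain % blocklist % state % u)
      end if

Such non-decomposed fields have no halo to exchange; on input they are instead, it appears, replicated across blocks via mpas_dmpar_copy_field rather than routed through the halo-exchange path.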
@@ -1911,6 +2192,7 @@
}
fclose(fd);
+ fclose(fd2);
#ifdef LEGACY_CODE
Modified: branches/atmos_physics/src/registry/registry_types.h
===================================================================
--- branches/atmos_physics/src/registry/registry_types.h        2012-10-27 00:03:55 UTC (rev 2280)
+++ branches/atmos_physics/src/registry/registry_types.h        2012-10-27 00:29:44 UTC (rev 2281)
@@ -71,6 +71,7 @@
int timedim;
int ntime_levs;
int iostreams;
+ int decomposed;
struct dimension_list * dimlist;
struct variable * next;
};
</pre>