duda 2012-08-28 18:09:20 -0600 (Tue, 28 Aug 2012)

BRANCH COMMIT

Update the DCMIP branch to the current trunk (merge of /trunk/mpas revisions 2014-2130).

Index: branches/dcmip
===================================================================
--- branches/dcmip        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip        2012-08-29 00:09:20 UTC (rev 2133)
Property changes on: branches/dcmip
___________________________________________________________________
Modified: svn:mergeinfo
## -17,5 +17,7 ##
/branches/omp_blocks/ddt_reorg:1301-1414
/branches/omp_blocks/halo:1570-1638
/branches/omp_blocks/io:1639-1787
+/branches/omp_blocks/multiple_blocks:1803-2084
/branches/source_renaming:1082-1113
/branches/time_manager:924-962
+/trunk/mpas:2014-2130
\ No newline at end of property
Modified: branches/dcmip/Makefile
===================================================================
--- branches/dcmip/Makefile        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/Makefile        2012-08-29 00:09:20 UTC (rev 2133)
@@ -124,7 +124,7 @@
        "FFLAGS_OPT = -O3 -m64 -ffree-line-length-none -fdefault-real-8 -fconvert=big-endian -ffree-form" \
        "CFLAGS_OPT = -O3 -m64" \
        "LDFLAGS_OPT = -O3 -m64" \
-        "FFLAGS_DEBUG = -g -m64 -ffree-line-length-none -fdefault-real-8 -fconvert=big-endian -ffree-form -fbounds-check" \
+        "FFLAGS_DEBUG = -g -m64 -ffree-line-length-none -fdefault-real-8 -fconvert=big-endian -ffree-form -fbounds-check -fbacktrace" \
        "CFLAGS_DEBUG = -g -m64" \
        "LDFLAGS_DEBUG = -g -m64" \
        "CORE = $(CORE)" \
Modified: branches/dcmip/src/core_atmos_physics/mpas_atmphys_driver_radiation_sw.F
===================================================================
--- branches/dcmip/src/core_atmos_physics/mpas_atmphys_driver_radiation_sw.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/core_atmos_physics/mpas_atmphys_driver_radiation_sw.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -3,7 +3,6 @@
use mpas_grid_types
use mpas_timer
- use mpas_atmphys_manager
use mpas_atmphys_constants
use mpas_atmphys_manager, only: gmt,curr_julday,julday,year
use mpas_atmphys_camrad_init
Modified: branches/dcmip/src/core_atmos_physics/mpas_atmphys_todynamics.F
===================================================================
--- branches/dcmip/src/core_atmos_physics/mpas_atmphys_todynamics.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/core_atmos_physics/mpas_atmphys_todynamics.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -227,7 +227,8 @@
!local variables:
!-----------------
type(block_type),pointer :: block
- type (field2DReal):: tempField
+ type (field2DReal), pointer :: tempField
+ type (field2DReal), target :: tempFieldTarget
integer:: iCell,iEdge,k,j,nCells,nCellsSolve,nVertLevels
integer,dimension(:),pointer :: nEdgesOnCell
integer,dimension(:,:),pointer:: edgesOnCell
@@ -249,8 +250,8 @@
nEdgesOnCell => mesh % nEdgesOnCell % array
edge_normal => mesh % edgeNormalVectors % array
- allocate(Ux_tend_halo(nVertLevels,nCells))
- allocate(Uy_tend_halo(nVertLevels,nCells))
+ allocate(Ux_tend_halo(nVertLevels,nCells+1))
+ allocate(Uy_tend_halo(nVertLevels,nCells+1))
Ux_tend_halo(:,:) = 0.
Uy_tend_halo(:,:) = 0.
@@ -261,11 +262,15 @@
enddo
enddo
+ tempField => tempFieldTarget
tempField % block => block
tempField % dimSizes(1) = nVertLevels
tempField % dimSizes(2) = nCellsSolve
tempField % sendList => block % parinfo % cellsToSend
tempField % recvList => block % parinfo % cellsToRecv
+ tempField % copyList => block % parinfo % cellsToCopy
+ tempField % prev => null()
+ tempField % next => null()
tempField % array => Ux_tend_halo
call mpas_dmpar_exch_halo_field(tempField)
Modified: branches/dcmip/src/core_hyd_atmos/Registry
===================================================================
--- branches/dcmip/src/core_hyd_atmos/Registry        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/core_hyd_atmos/Registry        2012-08-29 00:09:20 UTC (rev 2133)
@@ -21,6 +21,7 @@
namelist logical sw_model config_monotonic true
namelist integer sw_model config_mp_physics 0
namelist real sw_model config_apvm_upwinding 0.5
+namelist integer sw_model config_num_halos 2
namelist integer dimensions config_nvertlevels 26
namelist character io config_input_name grid.nc
namelist character io config_output_name output.nc
Modified: branches/dcmip/src/core_hyd_atmos/mpas_atmh_time_integration.F
===================================================================
--- branches/dcmip/src/core_hyd_atmos/mpas_atmh_time_integration.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/core_hyd_atmos/mpas_atmh_time_integration.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -1420,7 +1420,8 @@
real (kind=RKIND), dimension(:), pointer :: dvEdge, dcEdge, areaCell
integer, dimension(:,:), pointer :: cellsOnEdge
- type (field3DReal) :: tempField
+ type (field3DReal), pointer :: tempField
+ type (field3DReal), target :: tempFieldTarget
real (kind=RKIND), dimension( s_old % num_scalars, grid % nEdges+1) :: h_flux
real (kind=RKIND), dimension(2, s_old % num_scalars, grid % nCells+1) :: v_flux, v_flux_upwind, s_update
@@ -1628,12 +1629,16 @@
end do ! end loop over cells to compute scale factor
+ tempField => tempFieldTarget
tempField % block => block
tempField % dimSizes(1) = 2
tempField % dimSizes(2) = num_scalars
tempField % dimSizes(3) = grid % nCells
tempField % sendList => block % parinfo % cellsToSend
tempField % recvList => block % parinfo % cellsToRecv
+ tempField % copyList => block % parinfo % cellsToCopy
+ tempField % prev => null()
+ tempField % next => null()
tempField % array => scale_in
call mpas_dmpar_exch_halo_field(tempField)
Modified: branches/dcmip/src/core_init_nhyd_atmos/Registry
===================================================================
--- branches/dcmip/src/core_init_nhyd_atmos/Registry        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/core_init_nhyd_atmos/Registry        2012-08-29 00:09:20 UTC (rev 2133)
@@ -7,6 +7,7 @@
namelist character nhyd_model config_stop_time none
namelist integer nhyd_model config_theta_adv_order 3
namelist real nhyd_model config_coef_3rd_order 0.25
+namelist integer nhyd_model config_num_halos 2
namelist character dcmip config_dcmip_case 2-0-0
namelist real dcmip config_planet_scale 1.0
namelist real dcmip config_rotation_rate_scale 1.0
Modified: branches/dcmip/src/core_init_nhyd_atmos/mpas_init_atm_test_cases.F
===================================================================
--- branches/dcmip/src/core_init_nhyd_atmos/mpas_init_atm_test_cases.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/core_init_nhyd_atmos/mpas_init_atm_test_cases.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -2297,7 +2297,8 @@
real (kind=RKIND), dimension(:,:), pointer :: v
real (kind=RKIND), dimension(:,:), pointer :: sorted_arr
- type (field1DReal):: tempField
+ type (field1DReal), pointer :: tempField
+ type (field1DReal), target :: tempFieldTarget
real(kind=RKIND), dimension(:), pointer :: hs, hs1
real(kind=RKIND) :: hm, zh, dzmin, dzmina, dzmina_global, dzminf, sm
@@ -3272,11 +3273,15 @@
end do
+ tempField => tempFieldTarget
tempField % block => block
tempField % dimSizes(1) = grid % nCells
tempField % sendList => parinfo % cellsToSend
tempField % recvList => parinfo % cellsToRecv
+ tempField % copyList => parinfo % cellsToCopy
tempField % array => hs
+ tempField % prev => null()
+ tempField % next => null()
call mpas_dmpar_exch_halo_field(tempField)
@@ -4092,7 +4097,7 @@
if (vert_level(k) == 200100.0) sorted_arr(1,k) = 99999.0
sorted_arr(2,k) = fg % t % array(k,iCell)
end do
- call quicksort(config_nfglevels, sorted_arr)
+ call mpas_quicksort(config_nfglevels, sorted_arr)
do k=1,grid%nVertLevels
target_z = 0.5 * (grid % zgrid % array(k,iCell) + grid % zgrid % array(k+1,iCell))
state % theta_m % array(k,iCell) = vertical_interp(target_z, config_nfglevels, sorted_arr, order=1, extrap=1)
@@ -4107,7 +4112,7 @@
if (vert_level(k) == 200100.0) sorted_arr(1,k) = 99999.0
sorted_arr(2,k) = fg % rh % array(k,iCell)
end do
- call quicksort(config_nfglevels, sorted_arr)
+ call mpas_quicksort(config_nfglevels, sorted_arr)
do k=1,grid%nVertLevels
target_z = 0.5 * (grid % zgrid % array(k,iCell) + grid % zgrid % array(k+1,iCell))
state % scalars % array(state % index_qv,k,iCell) = vertical_interp(target_z, config_nfglevels, sorted_arr, order=1, extrap=0)
@@ -4122,7 +4127,7 @@
if (vert_level(k) == 200100.0) sorted_arr(1,k) = 99999.0
sorted_arr(2,k) = fg % z % array(k,iCell)
end do
- call quicksort(config_nfglevels, sorted_arr)
+ call mpas_quicksort(config_nfglevels, sorted_arr)
do k=1,grid%nVertLevels
target_z = 0.5 * (grid % zgrid % array(k,iCell) + grid % zgrid % array(k+1,iCell))
fg % gfs_z % array(k,iCell) = vertical_interp(target_z, config_nfglevels, sorted_arr, order=1, extrap=1)
@@ -4140,7 +4145,7 @@
end if
sorted_arr(2,k) = log(fg % p % array(k,iCell))
end do
- call quicksort(config_nfglevels, sorted_arr)
+ call mpas_quicksort(config_nfglevels, sorted_arr)
do k=1,grid%nVertLevels
target_z = 0.5 * (grid % zgrid % array(k,iCell) + grid % zgrid % array(k+1,iCell))
diag % pressure % array(k,iCell) = exp(vertical_interp(target_z, config_nfglevels, sorted_arr, order=1, extrap=1))
@@ -4158,7 +4163,7 @@
! end if
! sorted_arr(2,k) = log(fg % p % array(k,iCell))
! end do
-! call quicksort(config_nfglevels, sorted_arr)
+! call mpas_quicksort(config_nfglevels, sorted_arr)
! do k=1,grid%nVertLevels+1
! target_z = grid % zgrid % array(k,iCell)
! fg % gfs_p % array(k,iCell) = exp(vertical_interp(target_z, config_nfglevels, sorted_arr, order=1, extrap=1))
@@ -4177,7 +4182,7 @@
if (vert_level(k) == 200100.0) sorted_arr(1,k) = 99999.0
sorted_arr(2,k) = fg % u % array(k,iEdge)
end do
- call quicksort(config_nfglevels, sorted_arr)
+ call mpas_quicksort(config_nfglevels, sorted_arr)
do k=1,grid%nVertLevels
target_z = 0.25 * (grid % zgrid % array(k,cellsOnEdge(1,iEdge)) + grid % zgrid % array(k+1,cellsOnEdge(1,iEdge)) + grid % zgrid % array(k,cellsOnEdge(2,iEdge)) + grid % zgrid % array(k+1,cellsOnEdge(2,iEdge)))
state % u % array(k,iEdge) = vertical_interp(target_z, config_nfglevels, sorted_arr, order=1, extrap=0)
@@ -4218,7 +4223,7 @@
end if
sorted_arr(2,k) = log(fg % p % array(k,iCell))
end do
- call quicksort(config_nfglevels, sorted_arr)
+ call mpas_quicksort(config_nfglevels, sorted_arr)
target_z = grid % zgrid % array(1,iCell)
fg % psfc % array(iCell) = exp(vertical_interp(target_z, config_nfglevels, sorted_arr, order=1, extrap=1))
@@ -4668,7 +4673,8 @@
integer :: iter, nsm
integer, dimension(:,:), pointer :: cellsOnCell
- type (field1DReal):: tempField
+ type (field1DReal), pointer :: tempField
+ type (field1DReal), target :: tempFieldTarget
type (block_type), pointer :: block
type (parallel_info), pointer :: parinfo
@@ -4974,11 +4980,15 @@
end do
+ tempField => tempFieldTarget
tempField % block => block
tempField % dimSizes(1) = grid % nCells
tempField % sendList => parinfo % cellsToSend
tempField % recvList => parinfo % cellsToRecv
+ tempField % copyList => parinfo % cellsToCopy
tempField % array => hs
+ tempField % prev => null()
+ tempField % next => null()
call mpas_dmpar_exch_halo_field(tempField)
@@ -5483,7 +5493,8 @@
real (kind=RKIND) :: es, qvs, xnutr, ptemp
integer :: iter, nsm, kz
- type (field1DReal):: tempField
+ type (field1DReal), pointer :: tempField
+ type (field1DReal), target :: tempFieldTarget
type (block_type), pointer :: block
type (parallel_info), pointer :: parinfo
@@ -5766,11 +5777,15 @@
end do
+ tempField => tempFieldTarget
tempField % block => block
tempField % dimSizes(1) = grid % nCells
tempField % sendList => parinfo % cellsToSend
tempField % recvList => parinfo % cellsToRecv
+ tempField % copyList => parinfo % cellsToCopy
tempField % array => hs
+ tempField % prev => null()
+ tempField % next => null()
call mpas_dmpar_exch_halo_field(tempField)
Modified: branches/dcmip/src/core_nhyd_atmos/Registry
===================================================================
--- branches/dcmip/src/core_nhyd_atmos/Registry        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/core_nhyd_atmos/Registry        2012-08-29 00:09:20 UTC (rev 2133)
@@ -34,6 +34,7 @@
namelist logical nhyd_model config_newpx false
namelist real nhyd_model config_apvm_upwinding 0.5
namelist logical nhyd_model config_h_ScaleWithMesh false
+namelist integer nhyd_model config_num_halos 2
namelist real damping config_zd 22000.0
namelist real damping config_xnutr 0.0
namelist character io config_input_name init.nc
Modified: branches/dcmip/src/core_nhyd_atmos/mpas_atm_mpas_core.F
===================================================================
--- branches/dcmip/src/core_nhyd_atmos/mpas_atm_mpas_core.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/core_nhyd_atmos/mpas_atm_mpas_core.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -58,6 +58,8 @@
call atm_simulation_clock_init(domain, dt, startTimeStamp)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(1) % state % u)
+
block => domain % blocklist
do while (associated(block))
call atm_mpas_init_block(domain % dminfo, block, block % mesh, dt)
@@ -65,6 +67,10 @@
block => block % next
end do
+ call mpas_dmpar_exch_halo_field(domain % blocklist % diag % pv_edge)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % diag % ru)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % diag % rw)
+
current_outfile_frames = 0
if (config_sfc_update_interval /= "none") then
@@ -172,20 +178,12 @@
type (block_type), intent(inout) :: block
type (mesh_type), intent(inout) :: mesh
real (kind=RKIND), intent(in) :: dt
-
- call mpas_dmpar_exch_halo_field(block % state % time_levs(1) % state % u)
if (.not. config_do_restart .or. (config_do_restart .and. config_do_DAcycling)) then
call atm_init_coupled_diagnostics( block % state % time_levs(1) % state, block % diag, mesh)
end if
call atm_compute_solve_diagnostics(dt, block % state % time_levs(1) % state, block % diag, mesh)
- call mpas_dmpar_exch_halo_field(block % diag % pv_edge)
-
- call mpas_dmpar_exch_halo_field(block % diag % ru)
-
- call mpas_dmpar_exch_halo_field(block % diag % rw)
-
call mpas_rbf_interp_initialize(mesh)
call mpas_init_reconstruct(mesh)
call mpas_reconstruct(mesh, block % state % time_levs(1) % state % u % array, &
Modified: branches/dcmip/src/core_nhyd_atmos/mpas_atm_time_integration.F
===================================================================
--- branches/dcmip/src/core_nhyd_atmos/mpas_atm_time_integration.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/core_nhyd_atmos/mpas_atm_time_integration.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -1433,7 +1433,8 @@
integer, dimension(:), pointer :: nAdvCellsForEdge
real (kind=RKIND), dimension(:,:), pointer :: adv_coefs, adv_coefs_3rd
- type (field2DReal) :: tempField
+ type (field2DReal), pointer :: tempField
+ type (field2DReal), target :: tempFieldTarget
real (kind=RKIND), dimension( grid % nVertLevels, grid % nCells ) :: scalar_old, scalar_new
real (kind=RKIND), dimension( grid % nVertLevels, grid % nCells ) :: s_max, s_min, s_update
@@ -1699,12 +1700,16 @@
!
! WCS_halo_opt_2 - communicate only first halo row in these next two exchanges
!
+ tempField => tempFieldTarget
tempField % block => block
tempField % dimSizes(1) = grid % nVertLevels
tempField % dimSizes(2) = grid % nCells
tempField % sendList => block % parinfo % cellsToSend
tempField % recvList => block % parinfo % cellsToRecv
+ tempField % copyList => block % parinfo % cellsToCopy
+ tempField % prev => null()
+ tempField % next => null()
tempField % array => scale_in
call mpas_dmpar_exch_halo_field(tempField, (/ 1 /))
Modified: branches/dcmip/src/core_ocean/Registry
===================================================================
--- branches/dcmip/src/core_ocean/Registry        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/core_ocean/Registry        2012-08-29 00:09:20 UTC (rev 2133)
@@ -13,6 +13,7 @@
namelist logical sw_model config_initial_stats false
namelist logical sw_model config_prescribe_velocity false
namelist logical sw_model config_prescribe_thickness false
+namelist integer sw_model config_num_halos 3
namelist character io config_input_name grid.nc
namelist character io config_output_name output.nc
namelist character io config_restart_name restart.nc
Modified: branches/dcmip/src/core_ocean/mpas_ocn_advection.F
===================================================================
--- branches/dcmip/src/core_ocean/mpas_ocn_advection.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/core_ocean/mpas_ocn_advection.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -274,12 +274,22 @@
if (ip1 > n-1) ip1 = 1
iEdge = grid % EdgesOnCell % array (i,iCell)
- xv1 = grid % xVertex % array(grid % verticesOnEdge % array (1,iedge))/grid % sphere_radius
- yv1 = grid % yVertex % array(grid % verticesOnEdge % array (1,iedge))/grid % sphere_radius
- zv1 = grid % zVertex % array(grid % verticesOnEdge % array (1,iedge))/grid % sphere_radius
- xv2 = grid % xVertex % array(grid % verticesOnEdge % array (2,iedge))/grid % sphere_radius
- yv2 = grid % yVertex % array(grid % verticesOnEdge % array (2,iedge))/grid % sphere_radius
- zv2 = grid % zVertex % array(grid % verticesOnEdge % array (2,iedge))/grid % sphere_radius
+
+ if(grid % on_a_sphere) then
+ xv1 = grid % xVertex % array(grid % verticesOnEdge % array (1,iedge))/grid % sphere_radius
+ yv1 = grid % yVertex % array(grid % verticesOnEdge % array (1,iedge))/grid % sphere_radius
+ zv1 = grid % zVertex % array(grid % verticesOnEdge % array (1,iedge))/grid % sphere_radius
+ xv2 = grid % xVertex % array(grid % verticesOnEdge % array (2,iedge))/grid % sphere_radius
+ yv2 = grid % yVertex % array(grid % verticesOnEdge % array (2,iedge))/grid % sphere_radius
+ zv2 = grid % zVertex % array(grid % verticesOnEdge % array (2,iedge))/grid % sphere_radius
+ else
+ xv1 = grid % xVertex % array(grid % verticesOnEdge % array (1,iedge))
+ yv1 = grid % yVertex % array(grid % verticesOnEdge % array (1,iedge))
+ zv1 = grid % zVertex % array(grid % verticesOnEdge % array (1,iedge))
+ xv2 = grid % xVertex % array(grid % verticesOnEdge % array (2,iedge))
+ yv2 = grid % yVertex % array(grid % verticesOnEdge % array (2,iedge))
+ zv2 = grid % zVertex % array(grid % verticesOnEdge % array (2,iedge))
+ end if
if ( grid % on_a_sphere ) then
call ocn_arc_bisect( xv1, yv1, zv1, &
Modified: branches/dcmip/src/core_ocean/mpas_ocn_equation_of_state.F
===================================================================
--- branches/dcmip/src/core_ocean/mpas_ocn_equation_of_state.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/core_ocean/mpas_ocn_equation_of_state.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -74,10 +74,13 @@
! Input: grid - grid metadata
! s - state: tracers
! k_displaced
- ! If k_displaced<=0, state % rho is returned with no displaced
- ! If k_displaced>0,the state % rhoDisplaced is returned, and is for
+ !
+ ! If k_displaced==0, state % rho is returned with no displacement
+ !
+ ! If k_displaced~=0,the state % rhoDisplaced is returned, and is for
! a parcel adiabatically displaced from its original level to level
- ! k_displaced. This does not effect the linear EOS.
+ ! k_displaced. When using the linear EOS, state % rhoDisplaced is
+ ! still filled, but depth (i.e. pressure) does not modify the output.
!
! Output: s - state: computed density
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -103,19 +106,19 @@
indexT = s % index_temperature
indexS = s % index_salinity
- if (linearEos) then
+ ! Choose to fill the array rho or rhoDisplaced
+ if (k_displaced == 0) then
rho => s % rho % array
+ else
+ rho => s % rhoDisplaced % array
+ endif
+ if (linearEos) then
+
call ocn_equation_of_state_linear_rho(grid, indexT, indexS, tracers, rho, err)
elseif (jmEos) then
- if(k_displaced == 0) then
- rho => s % rho % array
- else
- rho => s % rhoDisplaced % array
- endif
-
call ocn_equation_of_state_jm_rho(grid, k_displaced, displacement_type, indexT, indexS, tracers, rho, err)
endif
Modified: branches/dcmip/src/core_ocean/mpas_ocn_equation_of_state_linear.F
===================================================================
--- branches/dcmip/src/core_ocean/mpas_ocn_equation_of_state_linear.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/core_ocean/mpas_ocn_equation_of_state_linear.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -82,6 +82,13 @@
integer, intent(in) :: indexT, indexS
integer, intent(out) :: err
+ real (kind=RKIND), parameter :: rho_ref = 1025.022 ! kg / m^3
+ real (kind=RKIND), parameter :: alpha = 2.55e-1 ! kg / m^3 / K (dT/dRho)
+ real (kind=RKIND), parameter :: beta = 7.64e-1 ! kg / m^3 / psu (dS/dRho)
+ real (kind=RKIND), parameter :: T_ref = 19.0 ! K
+ real (kind=RKIND), parameter :: S_ref = 35.0 ! psu
+ real (kind=RKIND), parameter :: rho_prime_ref = rho_ref + alpha * T_ref - beta * S_ref
+
integer, dimension(:), pointer :: maxLevelCell
integer :: nCells, iCell, k
type (dm_info) :: dminfo
@@ -94,9 +101,8 @@
do iCell=1,nCells
do k=1,maxLevelCell(iCell)
! Linear equation of state
- rho(k,iCell) = 1000.0*( 1.0 &
- - 2.5e-4*tracers(indexT,k,iCell) &
- + 7.6e-4*tracers(indexS,k,iCell))
+ ! rho = rho_ref - alpha * (T - T_ref) + beta * (S - S_ref)
+ rho(k,iCell) = rho_prime_ref - alpha*tracers(indexT,k,iCell) + beta*tracers(indexS,k,iCell)
end do
end do
Modified: branches/dcmip/src/core_ocean/mpas_ocn_mpas_core.F
===================================================================
--- branches/dcmip/src/core_ocean/mpas_ocn_mpas_core.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/core_ocean/mpas_ocn_mpas_core.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -259,7 +259,7 @@
call ocn_diagnostic_solve(dt, block % state % time_levs(1) % state, mesh)
call mpas_timer_stop("diagnostic solve", initDiagSolveTimer)
- ! Compute velocity transport, used in advection terms of h and tracer tendancy
+ ! Compute velocity transport, used in advection terms of h and tracer tendency
block % state % time_levs(1) % state % uTransport % array(:,:) &
= block % state % time_levs(1) % state % u % array(:,:) &
+ block % state % time_levs(1) % state % uBolusGM % array(:,:)
@@ -382,7 +382,11 @@
call mpas_timer_stop("time integration", timeIntTimer)
! Move time level 2 fields back into time level 1 for next time step
- call mpas_shift_time_levels_state(domain % blocklist % state)
+ block_ptr => domain % blocklist
+ do while(associated(block_ptr))
+ call mpas_shift_time_levels_state(block_ptr % state)
+ block_ptr => block_ptr % next
+ end do
if (mpas_is_alarm_ringing(clock, outputAlarmID, ierr=ierr)) then
call mpas_reset_clock_alarm(clock, outputAlarmID, ierr=ierr)
Modified: branches/dcmip/src/core_ocean/mpas_ocn_tendency.F
===================================================================
--- branches/dcmip/src/core_ocean/mpas_ocn_tendency.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/core_ocean/mpas_ocn_tendency.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -949,7 +949,10 @@
h_tend_col(k) = - zstarWeight(k)*h(k,iCell)*div_hu_btr(iCell)
hSum = hSum + zstarWeight(k)*h(k,iCell)
end do
- h_tend_col = h_tend_col / hSum
+ if(hSum > 0.0) then
+ h_tend_col = h_tend_col / hSum
+ else
+ end if
! Vertical velocity through layer interface at top and
! bottom is zero.
Modified: branches/dcmip/src/core_ocean/mpas_ocn_time_average.F
===================================================================
--- branches/dcmip/src/core_ocean/mpas_ocn_time_average.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/core_ocean/mpas_ocn_time_average.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -116,14 +116,16 @@
acc_u => state % acc_u % array
acc_uVar => state % acc_uVar % array
- acc_ssh = acc_ssh / nAccumulate
- acc_sshVar = acc_sshVar / nAccumulate
- acc_uReconstructZonal = acc_uReconstructZonal / nAccumulate
- acc_uReconstructMeridional = acc_uReconstructMeridional / nAccumulate
- acc_uReconstructZonalVar = acc_uReconstructZonalVar / nAccumulate
- acc_uReconstructMeridionalVar = acc_uReconstructMeridionalVar / nAccumulate
- acc_u = acc_u / nAccumulate
- acc_uVar = acc_uVar / nAccumulate
+ if(nAccumulate > 0) then
+ acc_ssh = acc_ssh / nAccumulate
+ acc_sshVar = acc_sshVar / nAccumulate
+ acc_uReconstructZonal = acc_uReconstructZonal / nAccumulate
+ acc_uReconstructMeridional = acc_uReconstructMeridional / nAccumulate
+ acc_uReconstructZonalVar = acc_uReconstructZonalVar / nAccumulate
+ acc_uReconstructMeridionalVar = acc_uReconstructMeridionalVar / nAccumulate
+ acc_u = acc_u / nAccumulate
+ acc_uVar = acc_uVar / nAccumulate
+ end if
end subroutine ocn_time_average_normalize!}}}
end module ocn_time_average
Modified: branches/dcmip/src/core_ocean/mpas_ocn_time_integration_rk4.F
===================================================================
--- branches/dcmip/src/core_ocean/mpas_ocn_time_integration_rk4.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/core_ocean/mpas_ocn_time_integration_rk4.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -78,26 +78,23 @@
integer :: iCell, k, i, err
type (block_type), pointer :: block
- type (state_type), target :: provis
- type (state_type), pointer :: provis_ptr
integer :: rk_step
real (kind=RKIND), dimension(4) :: rk_weights, rk_substep_weights
- integer :: nCells
- real (kind=RKIND), dimension(:,:), pointer :: u, h, h_edge, vertViscTopOfEdge, vertDiffTopOfCell, ke_edge
+ integer :: nCells, nEdges, nVertLevels, num_tracers
+ real (kind=RKIND) :: coef
+ real (kind=RKIND), dimension(:,:), pointer :: &
+ u, h, h_edge, vertViscTopOfEdge, vertDiffTopOfCell, ke_edge
real (kind=RKIND), dimension(:,:,:), pointer :: tracers
- integer, dimension(:), pointer :: maxLevelCell
+ integer, dimension(:), pointer :: &
+ maxLevelCell, maxLevelEdgeTop
+ real (kind=RKIND), dimension(:), allocatable:: A,C,uTemp
+ real (kind=RKIND), dimension(:,:), allocatable:: tracersTemp
- block => domain % blocklist
- call mpas_allocate_state(block, provis, &
- block % mesh % nCells, block % mesh % nEdges, block % mesh % maxEdges, block % mesh % maxEdges2, &
- block % mesh % nVertices, block % mesh % vertexDegree, block % mesh % nVertLevels, block % mesh % nMonths )
+ call mpas_setup_provis_states(domain % blocklist)
- provis_ptr => provis
- call mpas_create_state_links(provis_ptr)
-
!
! Initialize time_levs(2) with state at current time
! Initialize first RK state
@@ -106,19 +103,18 @@
!
block => domain % blocklist
do while (associated(block))
+ block % state % time_levs(2) % state % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
+ block % state % time_levs(2) % state % h % array(:,:) = block % state % time_levs(1) % state % h % array(:,:)
+ do iCell=1,block % mesh % nCells ! couple tracers to h
+ do k=1,block % mesh % maxLevelCell % array(iCell)
+ block % state % time_levs(2) % state % tracers % array(:,k,iCell) = block % state % time_levs(1) % state % tracers % array(:,k,iCell) &
+ * block % state % time_levs(1) % state % h % array(k,iCell)
+ end do
+ end do
- block % state % time_levs(2) % state % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
- block % state % time_levs(2) % state % h % array(:,:) = block % state % time_levs(1) % state % h % array(:,:)
- do iCell=1,block % mesh % nCells ! couple tracers to h
- do k=1,block % mesh % maxLevelCell % array(iCell)
- block % state % time_levs(2) % state % tracers % array(:,k,iCell) = block % state % time_levs(1) % state % tracers % array(:,k,iCell) &
- * block % state % time_levs(1) % state % h % array(k,iCell)
- end do
- end do
+ call mpas_copy_state(block % provis, block % state % time_levs(1) % state)
- call mpas_copy_state(provis, block % state % time_levs(1) % state)
-
- block => block % next
+ block => block % next
end do
rk_weights(1) = dt/6.
@@ -140,10 +136,10 @@
! --- update halos for diagnostic variables
call mpas_timer_start("RK4-diagnostic halo update")
- call mpas_dmpar_exch_halo_field(provis % Vor_edge)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % provis % Vor_edge)
if (config_h_mom_eddy_visc4 > 0.0) then
- call mpas_dmpar_exch_halo_field(provis % divergence)
- call mpas_dmpar_exch_halo_field(provis % vorticity)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % provis % divergence)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % provis % vorticity)
end if
call mpas_timer_stop("RK4-diagnostic halo update")
@@ -154,21 +150,21 @@
do while (associated(block))
! mrp 111206 put ocn_wtop call at top for ALE
- call ocn_wtop(provis, provis, block % mesh)
+ call ocn_wtop(block % provis, block % provis, block % mesh)
if (.not.config_implicit_vertical_mix) then
- call ocn_vmix_coefs(block % mesh, provis, block % diagnostics, err)
+ call ocn_vmix_coefs(block % mesh, block % provis, block % diagnostics, err)
end if
- call ocn_tend_h(block % tend, provis, block % mesh)
- call ocn_tend_u(block % tend, provis, block % diagnostics, block % mesh)
+ call ocn_tend_h(block % tend, block % provis, block % mesh)
+ call ocn_tend_u(block % tend, block % provis, block % diagnostics, block % mesh)
! mrp 110718 filter btr mode out of u_tend
! still got h perturbations with just this alone. Try to set uBtr=0 after full u computation
if (config_rk_filter_btr_mode) then
- call ocn_filter_btr_mode_tend_u(block % tend, provis, block % mesh)
+ call ocn_filter_btr_mode_tend_u(block % tend, block % provis, block % mesh)
endif
- call ocn_tend_scalar(block % tend, provis, block % diagnostics, block % mesh, dt)
+ call ocn_tend_scalar(block % tend, block % provis, block % diagnostics, block % mesh, dt)
block => block % next
end do
call mpas_timer_stop("RK4-tendency computations")
@@ -188,47 +184,44 @@
block => domain % blocklist
do while (associated(block))
- provis % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:) &
- + rk_substep_weights(rk_step) * block % tend % u % array(:,:)
+ block % provis % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:) &
+ + rk_substep_weights(rk_step) * block % tend % u % array(:,:)
- provis % h % array(:,:) = block % state % time_levs(1) % state % h % array(:,:) &
- + rk_substep_weights(rk_step) * block % tend % h % array(:,:)
+ block % provis % h % array(:,:) = block % state % time_levs(1) % state % h % array(:,:) &
+ + rk_substep_weights(rk_step) * block % tend % h % array(:,:)
do iCell=1,block % mesh % nCells
do k=1,block % mesh % maxLevelCell % array(iCell)
- provis % tracers % array(:,k,iCell) = ( &
- block % state % time_levs(1) % state % h % array(k,iCell) * &
- block % state % time_levs(1) % state % tracers % array(:,k,iCell) &
- + rk_substep_weights(rk_step) * block % tend % tracers % array(:,k,iCell) &
- ) / provis % h % array(k,iCell)
+ block % provis % tracers % array(:,k,iCell) = ( block % state % time_levs(1) % state % h % array(k,iCell) * &
+ block % state % time_levs(1) % state % tracers % array(:,k,iCell) &
+ + rk_substep_weights(rk_step) * block % tend % tracers % array(:,k,iCell) &
+ ) / block % provis % h % array(k,iCell)
end do
end do
if (config_test_case == 1) then ! For case 1, wind field should be fixed
- provis % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
+ block % provis % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
end if
if (config_prescribe_velocity) then
- provis % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
+ block % provis % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
end if
if (config_prescribe_thickness) then
- provis % h % array(:,:) = block % state % time_levs(1) % state % h % array(:,:)
+ block % provis % h % array(:,:) = block % state % time_levs(1) % state % h % array(:,:)
end if
- call ocn_diagnostic_solve(dt, provis, block % mesh)
+ call ocn_diagnostic_solve(dt, block % provis, block % mesh)
- ! Compute velocity transport, used in advection terms of h and tracer tendancy
- provis % uTransport % array(:,:) &
- = provis % u % array(:,:) &
- + provis % uBolusGM % array(:,:)
+ ! Compute velocity transport, used in advection terms of h and tracer tendency
+ block % provis % uTransport % array(:,:) &
+ = block % provis % u % array(:,:) &
+ + block % provis % uBolusGM % array(:,:)
block => block % next
end do
end if
call mpas_timer_stop("RK4-update diagnostic variables")
-
-
!--- accumulate update (for RK4)
call mpas_timer_start("RK4-RK4 accumulate update")
@@ -243,8 +236,9 @@
do iCell=1,block % mesh % nCells
do k=1,block % mesh % maxLevelCell % array(iCell)
block % state % time_levs(2) % state % tracers % array(:,k,iCell) = &
- block % state % time_levs(2) % state % tracers % array(:,k,iCell) &
- + rk_weights(rk_step) * block % tend % tracers % array(:,k,iCell)
+ ( block % state % time_levs(2) % state % tracers % array(:,k,iCell) &
+ + rk_weights(rk_step) * block % tend % tracers % array(:,k,iCell) )&
+ / block % state % time_levs(2) % state % h % array(k, iCell)
end do
end do
@@ -262,66 +256,28 @@
! A little clean up at the end: decouple new scalar fields and compute diagnostics for new state
!
call mpas_timer_start("RK4-cleaup phase")
- block => domain % blocklist
- do while (associated(block))
+ if (config_implicit_vertical_mix) then
+ call mpas_timer_start("RK4-implicit vert mix")
+ block => domain % blocklist
+ do while(associated(block))
+ call ocn_vmix_implicit(dt, block % mesh, block % diagnostics, block % state % time_levs(2) % state, err)
+ block => block % next
+ end do
- u => block % state % time_levs(2) % state % u % array
- tracers => block % state % time_levs(2) % state % tracers % array
- h => block % state % time_levs(2) % state % h % array
- h_edge => block % state % time_levs(2) % state % h_edge % array
- ke_edge => block % state % time_levs(2) % state % ke_edge % array
- vertViscTopOfEdge => block % diagnostics % vertViscTopOfEdge % array
- vertDiffTopOfCell => block % diagnostics % vertDiffTopOfCell % array
- maxLevelCell => block % mesh % maxLevelCell % array
-
- nCells = block % mesh % nCells
+ ! Update halo on u and tracers, which were just updated for implicit vertical mixing. If not done,
+ ! this leads to lack of volume conservation. It is required because halo updates in RK4 are only
+ ! conducted on tendencies, not on the velocity and tracer fields. So this update is required to
+ ! communicate the change due to implicit vertical mixing across the boundary.
+ call mpas_timer_start("RK4-implicit vert mix halos")
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % u)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % tracers)
+ call mpas_timer_stop("RK4-implicit vert mix halos")
- do iCell=1,nCells
- do k=1,maxLevelCell(iCell)
- tracers(:,k,iCell) = tracers(:,k,iCell) / h(k,iCell)
- end do
- end do
-
- if (config_implicit_vertical_mix) then
- call mpas_timer_start("RK4-implicit vert mix")
-
- call ocn_vmix_coefs(block % mesh, block % state % time_levs(2) % state, block % diagnostics, err)
-
- !
- ! Implicit vertical solve for momentum
- !
- call ocn_vel_vmix_tend_implicit(block % mesh, dt, ke_edge, vertViscTopOfEdge, h, h_edge, u, err)
-
- ! mrp 110718 filter btr mode out of u
- if (config_rk_filter_btr_mode) then
- call ocn_filter_btr_mode_u(block % state % time_levs(2) % state, block % mesh)
- !block % tend % h % array(:,:) = 0.0 ! I should not need this
- endif
-
- !
- ! Implicit vertical solve for tracers
- !
-
- call ocn_tracer_vmix_tend_implicit(block % mesh, dt, vertDiffTopOfCell, h, tracers, err)
- call mpas_timer_stop("RK4-implicit vert mix")
- end if
-
- block => block % next
- end do
-
- ! Update halo on u and tracers, which weres just updated for implicit vertical mixing. If not done,
- ! this leads to lack of volume conservation. It is required because halo updates in RK4 are only
- ! conducted on tendencies, not on the velocity and tracer fields. So this update is required to
- ! communicate the change due to implicit vertical mixing across the boundary.
-
- if (config_implicit_vertical_mix) then
- call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % u)
- call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % tracers)
+ call mpas_timer_stop("RK4-implicit vert mix")
end if
block => domain % blocklist
do while (associated(block))
-
if (config_test_case == 1) then ! For case 1, wind field should be fixed
block % state % time_levs(2) % state % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
end if
@@ -336,7 +292,7 @@
call ocn_diagnostic_solve(dt, block % state % time_levs(2) % state, block % mesh)
- ! Compute velocity transport, used in advection terms of h and tracer tendancy
+ ! Compute velocity transport, used in advection terms of h and tracer tendency
block % state % time_levs(2) % state % uTransport % array(:,:) &
= block % state % time_levs(2) % state % u % array(:,:) &
+ block % state % time_levs(2) % state % uBolusGM % array(:,:)
@@ -365,7 +321,7 @@
end do
call mpas_timer_stop("RK4-cleaup phase")
- call mpas_deallocate_state(provis)
+ call mpas_deallocate_provis_states(domain % blocklist)
end subroutine ocn_time_integrator_rk4!}}}
Modified: branches/dcmip/src/core_ocean/mpas_ocn_time_integration_split.F
===================================================================
--- branches/dcmip/src/core_ocean/mpas_ocn_time_integration_split.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/core_ocean/mpas_ocn_time_integration_split.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -293,7 +293,7 @@
! uTranport = uBcl + uBolus
! This is u used in advective terms for h and tracers
- ! in tendancy calls in stage 3.
+ ! in tendency calls in stage 3.
block % state % time_levs(2) % state % uTransport % array(k,iEdge) &
= block % mesh % edgeMask % array(k,iEdge) &
*( block % state % time_levs(2) % state % uBcl % array(k,iEdge) &
@@ -419,7 +419,7 @@
flux = ((1.0-config_btr_gam1_uWt1) * block % state % time_levs(oldBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) &
+ config_btr_gam1_uWt1 * block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge)) &
* hSum
-
+
block % tend % ssh % array(cell1) = block % tend % ssh % array(cell1) - flux * block % mesh % dvEdge % array(iEdge)
block % tend % ssh % array(cell2) = block % tend % ssh % array(cell2) + flux * block % mesh % dvEdge % array(iEdge)
@@ -639,7 +639,7 @@
! uTranport = uBtr + uBcl + uBolus + uCorrection
! This is u used in advective terms for h and tracers
- ! in tendancy calls in stage 3.
+ ! in tendency calls in stage 3.
block % state % time_levs(2) % state % uTransport % array(k,iEdge) &
= block % mesh % edgeMask % array(k,iEdge) &
*( block % state % time_levs(2) % state % uBtr % array( iEdge) &
@@ -825,37 +825,29 @@
! END large iteration loop
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- block => domain % blocklist
- do while (associated(block))
+ if (config_implicit_vertical_mix) then
+ call mpas_timer_start("se implicit vert mix")
+ block => domain % blocklist
+ do while(associated(block))
+ call ocn_vmix_implicit(dt, block % mesh, block % diagnostics, block % state % time_levs(2) % state, err)
+ block => block % next
+ end do
+ ! Update halo on u and tracers, which were just updated for implicit vertical mixing. If not done,
+ ! this leads to lack of volume conservation. It is required because halo updates in stage 3 are only
+ ! conducted on tendencies, not on the velocity and tracer fields. So this update is required to
+ ! communicate the change due to implicit vertical mixing across the boundary.
+ call mpas_timer_start("se implicit vert mix halos")
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % u)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % tracers)
+ call mpas_timer_stop("se implicit vert mix halos")
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !
- ! Implicit vertical mixing, done after timestep is complete
- !
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ call mpas_timer_stop("se implicit vert mix")
+ end if
- u => block % state % time_levs(2) % state % u % array
- tracers => block % state % time_levs(2) % state % tracers % array
- h => block % state % time_levs(2) % state % h % array
- h_edge => block % state % time_levs(2) % state % h_edge % array
- ke_edge => block % state % time_levs(2) % state % ke_edge % array
- num_tracers = block % state % time_levs(2) % state % num_tracers
- vertViscTopOfEdge => block % diagnostics % vertViscTopOfEdge % array
- vertDiffTopOfCell => block % diagnostics % vertDiffTopOfCell % array
- maxLevelCell => block % mesh % maxLevelCell % array
- maxLevelEdgeTop => block % mesh % maxLevelEdgeTop % array
+ block => domain % blocklist
+ do while (associated(block))
- if (config_implicit_vertical_mix) then
- call ocn_vmix_coefs(block % mesh, block % state % time_levs(2) % state, block % diagnostics, err)
-
- ! Implicit vertical solve for momentum
- call ocn_vel_vmix_tend_implicit(block % mesh, dt, ke_edge, vertvisctopofedge, h, h_edge, u, err)
-
- ! Implicit vertical solve for tracers
- call ocn_tracer_vmix_tend_implicit(block % mesh, dt, vertdifftopofcell, h, tracers, err)
- end if
-
if (config_test_case == 1) then ! For case 1, wind field should be fixed
block % state % time_levs(2) % state % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
end if
@@ -870,12 +862,18 @@
call ocn_diagnostic_solve(dt, block % state % time_levs(2) % state, block % mesh)
+ ! Compute velocity transport, used in advection terms of h and tracer tendency
+ block % state % time_levs(2) % state % uTransport % array(:,:) &
+ = block % state % time_levs(2) % state % u % array(:,:) &
+ + block % state % time_levs(2) % state % uBolusGM % array(:,:)
+
call mpas_reconstruct(block % mesh, block % state % time_levs(2) % state % u % array, &
- block % state % time_levs(2) % state % uReconstructX % array, &
- block % state % time_levs(2) % state % uReconstructY % array, &
- block % state % time_levs(2) % state % uReconstructZ % array, &
- block % state % time_levs(2) % state % uReconstructZonal % array, &
- block % state % time_levs(2) % state % uReconstructMeridional % array)
+ block % state % time_levs(2) % state % uReconstructX % array, &
+ block % state % time_levs(2) % state % uReconstructY % array, &
+ block % state % time_levs(2) % state % uReconstructZ % array, &
+ block % state % time_levs(2) % state % uReconstructZonal % array, &
+ block % state % time_levs(2) % state % uReconstructMeridional % array &
+ )
!TDR
call mpas_reconstruct(block % mesh, block % mesh % u_src % array, &
@@ -887,12 +885,11 @@
)
!TDR
-
call ocn_time_average_accumulate(block % state % time_levs(2) % state, block % state % time_levs(1) % state)
-
block => block % next
end do
+
call mpas_timer_stop("se timestep", timer_main)
end subroutine ocn_time_integrator_split!}}}
Modified: branches/dcmip/src/core_ocean/mpas_ocn_tracer_advection.F
===================================================================
--- branches/dcmip/src/core_ocean/mpas_ocn_tracer_advection.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/core_ocean/mpas_ocn_tracer_advection.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -18,6 +18,8 @@
use mpas_kind_types
use mpas_grid_types
use mpas_configure
+ use mpas_sort
+ use mpas_hash
use mpas_ocn_tracer_advection_std
use mpas_ocn_tracer_advection_mono
@@ -58,10 +60,13 @@
integer, dimension(:,:), pointer :: cellsOnCell, cellsOnEdge, advCellsForEdge, highOrderAdvectionMask, lowOrderAdvectionMask, boundaryCell
integer, dimension(:), pointer :: nEdgesOnCell, nAdvCellsForEdge, maxLevelCell
- integer, dimension(:), pointer :: cell_list, ordered_cell_list
- integer :: cell1, cell2, iEdge, n, i, j, j_in, iCell, k, nVertLevels
+ integer, dimension(:), pointer :: cell_indices
+ integer, dimension(:,:), pointer :: sorted_cell_indices
+ integer :: cell1, cell2, iEdge, n, i, j, j_in, iCell, k, nVertLevels, nCells
logical :: addcell, highOrderAdvection
+ type (hashtable) :: cell_hash
+
deriv_two => grid % deriv_two % array
adv_coefs => grid % adv_coefs % array
adv_coefs_2nd => grid % adv_coefs_2nd % array
@@ -76,24 +81,21 @@
maxLevelCell => grid % maxLevelCell % array
nAdvCellsForEdge => grid % nAdvCellsForEdge % array
+ nCells = grid % nCells
nVertLevels = grid % nVertLevels
- allocate(cell_list(grid % maxEdges2 + 2))
- allocate(ordered_cell_list(grid % maxEdges2 + 2))
+ allocate(cell_indices(grid % maxEdges2 + 2))
+ allocate(sorted_cell_indices(2, grid % maxEdges2 + 2))
err = 0
highOrderAdvectionMask = 0
lowOrderAdvectionMask = 0
- if(config_horiz_tracer_adv_order == 2) then
-
- end if
do iEdge = 1, grid % nEdges
nAdvCellsForEdge(iEdge) = 0
cell1 = cellsOnEdge(1,iEdge)
cell2 = cellsOnEdge(2,iEdge)
-
do k = 1, nVertLevels
if (boundaryCell(k, cell1) == 1 .or. boundaryCell(k, cell2) == 1) then
@@ -108,131 +110,108 @@
!
! do only if this edge flux is needed to update owned cells
!
- if (cell1 <= grid%nCells .or. cell2 <= grid%nCells) then
+ if (cell1 <= grid % nCells .and. cell2 <= grid % nCells) then
+ ! Insert cellsOnEdge to list of advection cells
+ call mpas_hash_init(cell_hash)
+ call mpas_hash_insert(cell_hash, cell1)
+ call mpas_hash_insert(cell_hash, cell2)
+ cell_indices(1) = cell1
+ cell_indices(2) = cell2
+ sorted_cell_indices(1, 1) = grid % indexToCellID % array(cell1)
+ sorted_cell_indices(2, 1) = cell1
+ sorted_cell_indices(1, 2) = grid % indexToCellID % array(cell2)
+ sorted_cell_indices(2, 2) = cell2
+ n = 2
- cell_list(1) = cell1
- cell_list(2) = cell2
- n = 2
+ ! Build unique list of cells used for advection on edge
+ do i = 1, nEdgesOnCell(cell1)
+ if(.not. mpas_hash_search(cell_hash, cellsOnCell(i, cell1))) then
+ n = n + 1
+ cell_indices(n) = cellsOnCell(i, cell1)
+ sorted_cell_indices(1, n) = grid % indexToCellID % array(cellsOnCell(i, cell1))
+ sorted_cell_indices(2, n) = cellsOnCell(i, cell1)
+ call mpas_hash_insert(cell_hash, cellsOnCell(i, cell1))
+ end if
+ end do ! loop over i
- ! add cells surrounding cell 1. n is number of cells currently in list
- do i = 1, nEdgesOnCell(cell1)
- if(cellsOnCell(i,cell1) /= cell2) then
- n = n + 1
- cell_list(n) = cellsOnCell(i,cell1)
- end if
- end do
+ do i = 1, nEdgesOnCell(cell2)
+ if(.not. mpas_hash_search(cell_hash, cellsOnCell(i, cell2))) then
+ n = n + 1
+ cell_indices(n) = cellsOnCell(i, cell2)
+ sorted_cell_indices(1, n) = grid % indexToCellID % array(cellsOnCell(i, cell2))
+ sorted_cell_indices(2, n) = cellsOnCell(i, cell2)
+ call mpas_hash_insert(cell_hash, cellsOnCell(i, cell2))
+ end if
+ end do ! loop over i
- ! add cells surrounding cell 2 (brute force approach)
- do iCell = 1, nEdgesOnCell(cell2)
- addcell = .true.
- do i=1,n
- if(cell_list(i) == cellsOnCell(iCell,cell2)) addcell = .false.
- end do
- if(addcell) then
- n = n+1
- cell_list(n) = cellsOnCell(iCell,cell2)
- end if
- end do
+ call mpas_hash_destroy(cell_hash)
- ! order the list by increasing cell number (brute force approach)
+ call mpas_quicksort(n, sorted_cell_indices)
- do i=1,n
- ordered_cell_list(i) = grid % nCells + 2
- j_in = 1
- do j=1,n
- if(ordered_cell_list(i) > cell_list(j) ) then
- j_in = j
- ordered_cell_list(i) = cell_list(j)
- end if
- end do
-! ordered_cell_list(i) = cell_list(j_in)
- cell_list(j_in) = grid % nCells + 3
- end do
+ nAdvCellsForEdge(iEdge) = n
+ do iCell = 1, nAdvCellsForEdge(iEdge)
+ advCellsForEdge(iCell, iEdge) = sorted_cell_indices(2, iCell)
+ end do ! loop over iCell
- nAdvCellsForEdge(iEdge) = n
- do iCell = 1, nAdvCellsForEdge(iEdge)
- advCellsForEdge(iCell,iEdge) = ordered_cell_list(iCell)
- end do
+ adv_coefs(:,iEdge) = 0.
+ adv_coefs_2nd(:,iEdge) = 0.
+ adv_coefs_3rd(:,iEdge) = 0.
- ! we have the ordered list, now construct coefficients
+ k = mpas_binary_search(sorted_cell_indices, 2, 1, nAdvCellsForEdge(iEdge), grid % indexToCellID % array(cell1))
+ if(k <= nAdvCellsForEdge(iEdge)) then
+ adv_coefs(k, iEdge) = adv_coefs(k, iEdge) + deriv_two(1,1,iEdge)
+ adv_coefs_3rd(k, iEdge) = adv_coefs_3rd(k, iEdge) + deriv_two(1,1,iEdge)
+ end if
- adv_coefs(:,iEdge) = 0.
- adv_coefs_2nd(:,iEdge) = 0.
- adv_coefs_3rd(:,iEdge) = 0.
-
- ! pull together third and fourth order contributions to the flux
- ! first from cell1
+ do iCell = 1, nEdgesOnCell(cell1)
+ k = mpas_binary_search(sorted_cell_indices, 2, 1, nAdvCellsForEdge(iEdge), grid % indexToCellID % array(cellsOnCell(iCell,cell1)))
+ if(k <= nAdvCellsForEdge(iEdge)) then
+ adv_coefs(k, iEdge) = adv_coefs(k, iEdge) + deriv_two(iCell+1, 1, iEdge)
+ adv_coefs_3rd(k, iEdge) = adv_coefs_3rd(k, iEdge) + deriv_two(iCell+1, 1, iEdge)
+ end if
+ end do ! loop over iCell
- j_in = 0
- do j=1, n
- if( ordered_cell_list(j) == cell1 ) j_in = j
- end do
- adv_coefs (j_in,iEdge) = adv_coefs (j_in,iEdge) + deriv_two(1,1,iEdge)
- adv_coefs_3rd(j_in,iEdge) = adv_coefs_3rd(j_in,iEdge) + deriv_two(1,1,iEdge)
+ k = mpas_binary_search(sorted_cell_indices, 2, 1, nAdvCellsForEdge(iEdge), grid % indexToCellID % array(cell2))
+ if(k <= nAdvCellsForEdge(iEdge)) then
+ adv_coefs(k, iEdge) = adv_coefs(k, iEdge) + deriv_two(1,2,iEdge)
+ adv_coefs_3rd(k, iEdge) = adv_coefs_3rd(k, iEdge) + deriv_two(1,2,iEdge)
+ end if
- do iCell = 1, nEdgesOnCell(cell1)
- j_in = 0
- do j=1, n
- if( ordered_cell_list(j) == cellsOnCell(iCell,cell1) ) j_in = j
- end do
- adv_coefs (j_in,iEdge) = adv_coefs (j_in,iEdge) + deriv_two(iCell+1,1,iEdge)
- adv_coefs_3rd(j_in,iEdge) = adv_coefs_3rd(j_in,iEdge) + deriv_two(iCell+1,1,iEdge)
- end do
+ do iCell = 1, nEdgesOnCell(cell2)
+ k = mpas_binary_search(sorted_cell_indices, 2, 1, nAdvCellsForEdge(iEdge), grid % indexToCellID % array(cellsOnCell(iCell,cell2)))
+ if(k <= nAdvCellsForEdge(iEdge)) then
+ adv_coefs(k, iEdge) = adv_coefs(k, iEdge) + deriv_two(iCell+1, 2, iEdge)
+ adv_coefs_3rd(k, iEdge) = adv_coefs_3rd(k, iEdge) + deriv_two(iCell+1, 2, iEdge)
+ end if
+ end do ! loop over iCell
- ! pull together third and fourth order contributions to the flux
- ! now from cell2
+ do iCell = 1,nAdvCellsForEdge(iEdge)
+ adv_coefs (iCell,iEdge) = - (grid % dcEdge % array (iEdge) **2) * adv_coefs (iCell,iEdge) / 12.
+ adv_coefs_3rd(iCell,iEdge) = - (grid % dcEdge % array (iEdge) **2) * adv_coefs_3rd(iCell,iEdge) / 12.
+ end do ! loop over iCell
- j_in = 0
- do j=1, n
- if( ordered_cell_list(j) == cell2 ) j_in = j
- enddo
- adv_coefs (j_in,iEdge) = adv_coefs (j_in,iEdge) + deriv_two(1,2,iEdge)
- adv_coefs_3rd(j_in,iEdge) = adv_coefs_3rd(j_in,iEdge) - deriv_two(1,2,iEdge)
+ k = mpas_binary_search(sorted_cell_indices, 2, 1, nAdvCellsForEdge(iEdge), grid % indexToCellID % array(cell1))
+ if(k <= nAdvCellsForEdge(iEdge)) then
+ adv_coefs(k, iEdge) = adv_coefs(k, iEdge) + 0.5
+ adv_coefs_2nd(k, iEdge) = adv_coefs_2nd(k, iEdge) + 0.5
+ end if
- do iCell = 1, nEdgesOnCell(cell2)
- j_in = 0
- do j=1, n
- if( ordered_cell_list(j) == cellsOnCell(iCell,cell2) ) j_in = j
- enddo
- adv_coefs (j_in,iEdge) = adv_coefs (j_in,iEdge) + deriv_two(iCell+1,2,iEdge)
- adv_coefs_3rd(j_in,iEdge) = adv_coefs_3rd(j_in,iEdge) - deriv_two(iCell+1,2,iEdge)
- end do
+ k = mpas_binary_search(sorted_cell_indices, 2, 1, nAdvCellsForEdge(iEdge), grid % indexToCellID % array(cell2))
+ if(k <= nAdvCellsForEdge(iEdge)) then
+ adv_coefs(k, iEdge) = adv_coefs(k, iEdge) + 0.5
+ adv_coefs_2nd(k, iEdge) = adv_coefs_2nd(k, iEdge) + 0.5
+ end if
- do j = 1,n
- adv_coefs (j,iEdge) = - (grid % dcEdge % array (iEdge) **2) * adv_coefs (j,iEdge) / 12.
- adv_coefs_3rd(j,iEdge) = - (grid % dcEdge % array (iEdge) **2) * adv_coefs_3rd(j,iEdge) / 12.
- end do
-
- ! 2nd order centered contribution - place this in the main flux weights
-
- j_in = 0
- do j=1, n
- if( ordered_cell_list(j) == cell1 ) j_in = j
- enddo
- adv_coefs(j_in,iEdge) = adv_coefs(j_in,iEdge) + 0.5
- adv_coefs_2nd(j_in,iEdge) = adv_coefs_2nd(j_in,iEdge) + 0.5
-
- j_in = 0
- do j=1, n
- if( ordered_cell_list(j) == cell2 ) j_in = j
- enddo
- adv_coefs(j_in,iEdge) = adv_coefs(j_in,iEdge) + 0.5
- adv_coefs_2nd(j_in,iEdge) = adv_coefs_2nd(j_in,iEdge) + 0.5
-
- ! multiply by edge length - thus the flux is just dt*ru times the results of the vector-vector multiply
-
- do j=1,n
- adv_coefs (j,iEdge) = grid % dvEdge % array(iEdge) * adv_coefs (j,iEdge)
- adv_coefs_2nd(j,iEdge) = grid % dvEdge % array(iEdge) * adv_coefs_2nd(j,iEdge)
- adv_coefs_3rd(j,iEdge) = grid % dvEdge % array(iEdge) * adv_coefs_3rd(j,iEdge)
- end do
-
- end if ! only do for edges of owned-cells
-
+ do iCell=1,nAdvCellsForEdge(iEdge)
+ adv_coefs (iCell,iEdge) = grid % dvEdge % array(iEdge) * adv_coefs (iCell,iEdge)
+ adv_coefs_2nd(iCell,iEdge) = grid % dvEdge % array(iEdge) * adv_coefs_2nd(iCell,iEdge)
+ adv_coefs_3rd(iCell,iEdge) = grid % dvEdge % array(iEdge) * adv_coefs_3rd(iCell,iEdge)
+ end do ! loop over iCell
+ end if
end do ! end loop over edges
- deallocate(cell_list)
- deallocate(ordered_cell_list)
+ deallocate(cell_indices)
+ deallocate(sorted_cell_indices)
! If 2nd order advection, set masks appropriately.
if(config_horiz_tracer_adv_order == 2) then
Modified: branches/dcmip/src/core_ocean/mpas_ocn_vel_forcing_windstress.F
===================================================================
--- branches/dcmip/src/core_ocean/mpas_ocn_vel_forcing_windstress.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/core_ocean/mpas_ocn_vel_forcing_windstress.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -43,9 +43,7 @@
!--------------------------------------------------------------------
logical :: windStressOn
- real (kind=RKIND) :: rho_ref
-
!***********************************************************************
contains
@@ -125,7 +123,6 @@
edgeMask => grid % edgeMask % array
do iEdge=1,nEdgesSolve
-
! efficiency note: it would be nice to avoid this
! if within a do. This could be done with
! k = max(maxLevelEdgeTop(iEdge),1)
@@ -133,7 +130,7 @@
do k = 1,min(maxLevelEdgeTop(iEdge),1)
! forcing in top layer only
- tend(k,iEdge) = tend(k,iEdge) + edgeMask(k, iEdge) * (u_src(k,iEdge)/rho_ref/h_edge(k,iEdge))
+ tend(k,iEdge) = tend(k,iEdge) + edgeMask(k, iEdge) * (u_src(k,iEdge) / config_rho0 / h_edge(k,iEdge))
enddo
enddo
@@ -170,7 +167,6 @@
integer, intent(out) :: err !< Output: error flag
windStressOn = .true.
- rho_ref = 1000.0
err = 0
Modified: branches/dcmip/src/core_ocean/mpas_ocn_vmix.F
===================================================================
--- branches/dcmip/src/core_ocean/mpas_ocn_vmix.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/core_ocean/mpas_ocn_vmix.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -47,7 +47,8 @@
ocn_tracer_vmix_tend_explicit, &
ocn_vel_vmix_tend_implicit, &
ocn_tracer_vmix_tend_implicit, &
- ocn_vmix_init
+ ocn_vmix_init, &
+ ocn_vmix_implicit
!--------------------------------------------------------------------
!
@@ -576,6 +577,61 @@
!***********************************************************************
!
+! routine ocn_vmix_implicit
+!
+!> \brief Driver for implicit vertical mixing
+!> \author Doug Jacobsen
+!> \date 19 September 2011
+!> \version SVN:$Id$
+!> \details
+!> This routine is a driver for handling implicit vertical mixing
+!> of both momentum and tracers for a block. It's intended to reduce
+!> redundant code.
+!
+!-----------------------------------------------------------------------
+
+ subroutine ocn_vmix_implicit(dt, grid, diagnostics, state, err)!{{{
+ real (kind=RKIND), intent(in) :: dt
+ type (mesh_type), intent(in) :: grid
+ type (diagnostics_type), intent(inout) :: diagnostics
+ type (state_type), intent(inout) :: state
+ integer, intent(out) :: err
+
+ integer :: nCells
+ real (kind=RKIND), dimension(:,:), pointer :: u, h, h_edge, vertViscTopOfEdge, vertDiffTopOfCell, ke_edge
+ real (kind=RKIND), dimension(:,:,:), pointer :: tracers
+ integer, dimension(:), pointer :: maxLevelCell
+
+ err = 0
+
+ u => state % u % array
+ tracers => state % tracers % array
+ h => state % h % array
+ h_edge => state % h_edge % array
+ ke_edge => state % ke_edge % array
+ vertViscTopOfEdge => diagnostics % vertViscTopOfEdge % array
+ vertDiffTopOfCell => diagnostics % vertDiffTopOfCell % array
+ maxLevelCell => grid % maxLevelCell % array
+
+ nCells = grid % nCells
+
+ call ocn_vmix_coefs(grid, state, diagnostics, err)
+
+ !
+ ! Implicit vertical solve for momentum
+ !
+ call ocn_vel_vmix_tend_implicit(grid, dt, ke_edge, vertViscTopOfEdge, h, h_edge, u, err)
+
+ !
+ ! Implicit vertical solve for tracers
+ !
+
+ call ocn_tracer_vmix_tend_implicit(grid, dt, vertDiffTopOfCell, h, tracers, err)
+
+ end subroutine ocn_vmix_implicit!}}}
+
+!***********************************************************************
+!
! routine ocn_vmix_init
!
!> \brief Initializes ocean vertical mixing quantities
Modified: branches/dcmip/src/core_ocean/mpas_ocn_vmix_coefs_rich.F
===================================================================
--- branches/dcmip/src/core_ocean/mpas_ocn_vmix_coefs_rich.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/core_ocean/mpas_ocn_vmix_coefs_rich.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -327,7 +327,7 @@
maxLevelCell => grid % maxLevelCell % array
vertDiffTopOfCell = 0.0
- coef = -gravity/1000.0/2.0
+ coef = -gravity/config_rho0/2.0
do iCell = 1,nCells
do k = 2,maxLevelCell(iCell)
! mrp 110324 efficiency note: this if is inside iCell and k loops.
@@ -472,7 +472,7 @@
drhoTopOfCell = 0.0
do iCell=1,nCells
do k=2,maxLevelCell(iCell)
- drhoTopOfCell(k,iCell) = rho(k-1,iCell) - rhoDisplaced(k-1,iCell)
+ drhoTopOfCell(k,iCell) = rhoDisplaced(k-1,iCell) - rhoDisplaced(k,iCell)
end do
end do
@@ -517,7 +517,7 @@
! compute RiTopOfEdge using drhoTopOfEdge and du2TopOfEdge
! coef = -g/rho_0/2
RiTopOfEdge = 0.0
- coef = -gravity/1000.0/2.0
+ coef = -gravity/config_rho0/2.0
do iEdge = 1,nEdges
do k = 2,maxLevelEdgeTop(iEdge)
RiTopOfEdge(k,iEdge) = coef*drhoTopOfEdge(k,iEdge) &
@@ -529,7 +529,6 @@
! compute RiTopOfCell using drhoTopOfCell and du2TopOfCell
! coef = -g/rho_0/2
RiTopOfCell = 0.0
- coef = -gravity/1000.0/2.0
do iCell = 1,nCells
do k = 2,maxLevelCell(iCell)
RiTopOfCell(k,iCell) = coef*drhoTopOfCell(k,iCell) &
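For orientation, the coefficient touched in these hunks is the prefactor of the gradient Richardson number, and the change replaces the hard-coded reference density 1000.0 with the namelist value config_rho0 so all uses of rho_0 are consistent. The continuous quantity being approximated is the standard definition (not quoted from the code):

   $$ \mathrm{Ri} = -\frac{g}{\rho_0}\,\frac{\partial \rho/\partial z}{(\partial u/\partial z)^2} $$

The additional factor of 1/2 in coef = -gravity/config_rho0/2.0 belongs to the discrete form of the drho and du2 terms used here and is unchanged by this commit.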
Modified: branches/dcmip/src/core_sw/Registry
===================================================================
--- branches/dcmip/src/core_sw/Registry        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/core_sw/Registry        2012-08-29 00:09:20 UTC (rev 2133)
@@ -21,6 +21,7 @@
namelist logical sw_model config_wind_stress false
namelist logical sw_model config_bottom_drag false
namelist real sw_model config_apvm_upwinding 0.5
+namelist integer sw_model config_num_halos 2
namelist character io config_input_name grid.nc
namelist character io config_output_name output.nc
namelist character io config_restart_name restart.nc
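The new config_num_halos entry lets the shallow-water core request an arbitrary number of halo layers from the block creator. At run time it would be set in the sw_model namelist record. The short program below is only a sketch of that convention: MPAS generates its real namelist reader from the Registry, and the file name, unit number, and the assumption that the &sw_model record contains just this one entry are all illustrative.

   program read_num_halos
      implicit none
      integer :: config_num_halos
      namelist /sw_model/ config_num_halos

      config_num_halos = 2                    ! default from the Registry line above
      open(unit=11, file='namelist.input', status='old', action='read')
      read(11, nml=sw_model)                  ! e.g. "&sw_model  config_num_halos = 3  /"
      close(11)
      print *, 'config_num_halos = ', config_num_halos
   end program read_num_halos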
Modified: branches/dcmip/src/core_sw/mpas_sw_mpas_core.F
===================================================================
--- branches/dcmip/src/core_sw/mpas_sw_mpas_core.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/core_sw/mpas_sw_mpas_core.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -187,7 +187,11 @@
call mpas_timer_stop("time integration")
! Move time level 2 fields back into time level 1 for next time step
- call mpas_shift_time_levels_state(domain % blocklist % state)
+ block_ptr => domain % blocklist
+ do while(associated(block_ptr))
+ call mpas_shift_time_levels_state(block_ptr % state)
+ block_ptr => block_ptr % next
+ end do
!TODO: mpas_get_clock_ringing_alarms is probably faster than multiple mpas_is_alarm_ringing...
Modified: branches/dcmip/src/core_sw/mpas_sw_time_integration.F
===================================================================
--- branches/dcmip/src/core_sw/mpas_sw_time_integration.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/core_sw/mpas_sw_time_integration.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -70,122 +70,114 @@
real (kind=RKIND), dimension(4) :: rk_weights, rk_substep_weights
- block => domain % blocklist
- call mpas_allocate_state(block, provis, &
- block % mesh % nCells, block % mesh % nEdges, block % mesh % maxEdges, block % mesh % maxEdges2, &
- block % mesh % nVertices, block % mesh % vertexDegree, block % mesh % nVertLevels, &
- block % mesh % nTracers)
-
- provis_ptr => provis
- call mpas_create_state_links(provis_ptr)
+ call mpas_setup_provis_states(domain % blocklist)
+
+ !
+ ! Initialize time_levs(2) with state at current time
+ ! Initialize first RK state
+ ! Couple tracers time_levs(2) with h in time-levels
+ ! Initialize RK weights
+ !
+ block => domain % blocklist
+ do while (associated(block))
- !
- ! Initialize time_levs(2) with state at current time
- ! Initialize first RK state
- ! Couple tracers time_levs(2) with h in time-levels
- ! Initialize RK weights
- !
- block => domain % blocklist
- do while (associated(block))
+ block % state % time_levs(2) % state % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
+ block % state % time_levs(2) % state % h % array(:,:) = block % state % time_levs(1) % state % h % array(:,:)
+ do iCell=1,block % mesh % nCells ! couple tracers to h
+ do k=1,block % mesh % nVertLevels
+ block % state % time_levs(2) % state % tracers % array(:,k,iCell) = block % state % time_levs(1) % state % tracers % array(:,k,iCell) &
+ * block % state % time_levs(1) % state % h % array(k,iCell)
+ end do
+ end do
- block % state % time_levs(2) % state % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
- block % state % time_levs(2) % state % h % array(:,:) = block % state % time_levs(1) % state % h % array(:,:)
- do iCell=1,block % mesh % nCells ! couple tracers to h
- do k=1,block % mesh % nVertLevels
- block % state % time_levs(2) % state % tracers % array(:,k,iCell) = block % state % time_levs(1) % state % tracers % array(:,k,iCell) &
- * block % state % time_levs(1) % state % h % array(k,iCell)
- end do
- end do
+ call mpas_copy_state(block % provis, block % state % time_levs(1) % state)
- call mpas_copy_state(provis, block % state % time_levs(1) % state)
+ block => block % next
+ end do
- block => block % next
- end do
+ rk_weights(1) = dt/6.
+ rk_weights(2) = dt/3.
+ rk_weights(3) = dt/3.
+ rk_weights(4) = dt/6.
- rk_weights(1) = dt/6.
- rk_weights(2) = dt/3.
- rk_weights(3) = dt/3.
- rk_weights(4) = dt/6.
+ rk_substep_weights(1) = dt/2.
+ rk_substep_weights(2) = dt/2.
+ rk_substep_weights(3) = dt
+ rk_substep_weights(4) = 0.
- rk_substep_weights(1) = dt/2.
- rk_substep_weights(2) = dt/2.
- rk_substep_weights(3) = dt
- rk_substep_weights(4) = 0.
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ! BEGIN RK loop
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ do rk_step = 1, 4
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! BEGIN RK loop
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- do rk_step = 1, 4
+! --- update halos for diagnostic variables
-! --- update halos for diagnostic variables
+ call mpas_dmpar_exch_halo_field(domain % blocklist % provis % pv_edge)
- call mpas_dmpar_exch_halo_field(provis % pv_edge)
-
if (config_h_mom_eddy_visc4 > 0.0) then
- call mpas_dmpar_exch_halo_field(provis % divergence)
- call mpas_dmpar_exch_halo_field(provis % vorticity)
- end if
+ call mpas_dmpar_exch_halo_field(domain % blocklist % provis % divergence)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % provis % vorticity)
+ end if
-! --- compute tendencies
+! --- compute tendencies
- block => domain % blocklist
- do while (associated(block))
- call sw_compute_tend(block % tend, provis, block % mesh)
- call sw_compute_scalar_tend(block % tend, provis, block % mesh)
- call sw_enforce_boundary_edge(block % tend, block % mesh)
- block => block % next
- end do
+ block => domain % blocklist
+ do while (associated(block))
+ call sw_compute_tend(block % tend, block % provis, block % mesh)
+ call sw_compute_scalar_tend(block % tend, block % provis, block % mesh)
+ call sw_enforce_boundary_edge(block % tend, block % mesh)
+ block => block % next
+ end do
-! --- update halos for prognostic variables
+! --- update halos for prognostic variables
- call mpas_dmpar_exch_halo_field(domain % blocklist % tend % u)
- call mpas_dmpar_exch_halo_field(domain % blocklist % tend % h)
- call mpas_dmpar_exch_halo_field(domain % blocklist % tend % tracers)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % tend % u)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % tend % h)
+ call mpas_dmpar_exch_halo_field(domain % blocklist % tend % tracers)
-! --- compute next substep state
+! --- compute next substep state
- if (rk_step < 4) then
- block => domain % blocklist
- do while (associated(block))
- provis % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:) &
- + rk_substep_weights(rk_step) * block % tend % u % array(:,:)
- provis % h % array(:,:) = block % state % time_levs(1) % state % h % array(:,:) &
- + rk_substep_weights(rk_step) * block % tend % h % array(:,:)
- do iCell=1,block % mesh % nCells
- do k=1,block % mesh % nVertLevels
- provis % tracers % array(:,k,iCell) = ( &
- block % state % time_levs(1) % state % h % array(k,iCell) * &
- block % state % time_levs(1) % state % tracers % array(:,k,iCell) &
- + rk_substep_weights(rk_step) * block % tend % tracers % array(:,k,iCell) &
- ) / provis % h % array(k,iCell)
- end do
- end do
- if (config_test_case == 1) then ! For case 1, wind field should be fixed
- provis % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
- end if
- call sw_compute_solve_diagnostics(dt, provis, block % mesh)
- block => block % next
- end do
- end if
+ if (rk_step < 4) then
+ block => domain % blocklist
+ do while (associated(block))
+ block % provis % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:) &
+ + rk_substep_weights(rk_step) * block % tend % u % array(:,:)
+ block % provis % h % array(:,:) = block % state % time_levs(1) % state % h % array(:,:) &
+ + rk_substep_weights(rk_step) * block % tend % h % array(:,:)
+ do iCell=1,block % mesh % nCells
+ do k=1,block % mesh % nVertLevels
+ block % provis % tracers % array(:,k,iCell) = ( block % state % time_levs(1) % state % h % array(k,iCell) * &
+ block % state % time_levs(1) % state % tracers % array(:,k,iCell) &
+ + rk_substep_weights(rk_step) * block % tend % tracers % array(:,k,iCell) &
+ ) / block % provis % h % array(k,iCell)
+ end do
+ end do
+ if (config_test_case == 1) then ! For case 1, wind field should be fixed
+ block % provis % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
+ end if
+ call sw_compute_solve_diagnostics(dt, block % provis, block % mesh)
+ block => block % next
+ end do
+ end if
!--- accumulate update (for RK4)
- block => domain % blocklist
- do while (associated(block))
- block % state % time_levs(2) % state % u % array(:,:) = block % state % time_levs(2) % state % u % array(:,:) &
- + rk_weights(rk_step) * block % tend % u % array(:,:)
- block % state % time_levs(2) % state % h % array(:,:) = block % state % time_levs(2) % state % h % array(:,:) &
- + rk_weights(rk_step) * block % tend % h % array(:,:)
- do iCell=1,block % mesh % nCells
- do k=1,block % mesh % nVertLevels
- block % state % time_levs(2) % state % tracers % array(:,k,iCell) = &
- block % state % time_levs(2) % state % tracers % array(:,k,iCell) &
- + rk_weights(rk_step) * block % tend % tracers % array(:,k,iCell)
- end do
- end do
- block => block % next
- end do
+ block => domain % blocklist
+ do while (associated(block))
+ block % state % time_levs(2) % state % u % array(:,:) = block % state % time_levs(2) % state % u % array(:,:) &
+ + rk_weights(rk_step) * block % tend % u % array(:,:)
+ block % state % time_levs(2) % state % h % array(:,:) = block % state % time_levs(2) % state % h % array(:,:) &
+ + rk_weights(rk_step) * block % tend % h % array(:,:)
+ do iCell=1,block % mesh % nCells
+ do k=1,block % mesh % nVertLevels
+ block % state % time_levs(2) % state % tracers % array(:,k,iCell) = &
+ block % state % time_levs(2) % state % tracers % array(:,k,iCell) &
+ + rk_weights(rk_step) * block % tend % tracers % array(:,k,iCell)
+ end do
+ end do
+ block => block % next
+ end do
end do
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -223,7 +215,7 @@
block => block % next
end do
- call mpas_deallocate_state(provis)
+ call mpas_deallocate_provis_states(domain % blocklist)
end subroutine sw_rk4
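The per-block restructuring of sw_rk4 above changes only where the provisional state lives (block % provis instead of a single provis); the weights still implement classical RK4. For reference, the scheme being coded is

   $$ y^{n+1} = y^{n} + \frac{\Delta t}{6}\,(k_1 + 2k_2 + 2k_3 + k_4), \quad
      k_1 = f(y^{n}), \;
      k_2 = f\bigl(y^{n} + \tfrac{\Delta t}{2} k_1\bigr), \;
      k_3 = f\bigl(y^{n} + \tfrac{\Delta t}{2} k_2\bigr), \;
      k_4 = f\bigl(y^{n} + \Delta t\, k_3\bigr) $$

which matches rk_weights = (dt/6, dt/3, dt/3, dt/6) for the accumulation into time_levs(2) and rk_substep_weights = (dt/2, dt/2, dt, 0) for building each provisional state.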
Modified: branches/dcmip/src/framework/Makefile
===================================================================
--- branches/dcmip/src/framework/Makefile        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/framework/Makefile        2012-08-29 00:09:20 UTC (rev 2133)
@@ -16,6 +16,7 @@
mpas_hash.o \
mpas_sort.o \
mpas_block_decomp.o \
+         mpas_block_creator.o \
mpas_dmpar.o \
mpas_io.o \
mpas_io_streams.o \
@@ -41,7 +42,7 @@
mpas_grid_types.o: mpas_kind_types.o mpas_dmpar_types.o mpas_attlist.o
-mpas_dmpar.o: mpas_sort.o streams.o mpas_kind_types.o mpas_grid_types.o
+mpas_dmpar.o: mpas_sort.o streams.o mpas_kind_types.o mpas_grid_types.o mpas_hash.o
mpas_sort.o: mpas_kind_types.o
@@ -51,11 +52,13 @@
mpas_block_decomp.o: mpas_grid_types.o mpas_hash.o mpas_configure.o
+mpas_block_creator.o: mpas_dmpar.o mpas_hash.o mpas_sort.o mpas_configure.o
+
mpas_io.o: mpas_dmpar_types.o
mpas_io_streams.o: mpas_attlist.o mpas_grid_types.o mpas_timekeeping.o mpas_io.o
-mpas_io_input.o: mpas_grid_types.o mpas_dmpar.o mpas_block_decomp.o mpas_sort.o mpas_configure.o mpas_timekeeping.o mpas_io_streams.o $(ZOLTANOBJ)
+mpas_io_input.o: mpas_grid_types.o mpas_dmpar.o mpas_block_decomp.o mpas_block_creator.o mpas_sort.o mpas_configure.o mpas_timekeeping.o mpas_io_streams.o $(ZOLTANOBJ)
mpas_io_output.o: mpas_grid_types.o mpas_dmpar.o mpas_sort.o mpas_configure.o mpas_io_streams.o
Copied: branches/dcmip/src/framework/mpas_block_creator.F (from rev 2130, trunk/mpas/src/framework/mpas_block_creator.F)
===================================================================
--- branches/dcmip/src/framework/mpas_block_creator.F         (rev 0)
+++ branches/dcmip/src/framework/mpas_block_creator.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -0,0 +1,1243 @@
+!***********************************************************************
+!
+! mpas_block_creator
+!
+!> \brief This module is responsible for the initial creation and setup of the block data structures.
+!> \author Doug Jacobsen
+!> \date 05/31/12
+!> \version SVN:$Id$
+!> \details
+!> This module provides routines for the creation of blocks, with both an
+!> arbitrary number of blocks per processor and an arbitrary number of halos for
+!> each block. The provided routines also setup the exchange lists for each
+!> block.
+!
+!-----------------------------------------------------------------------
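The routines in this new module are intended to be driven in a fixed order from the input layer (mpas_io_input, whose dependency is added in the Makefile above). A hedged sketch of that ordering, using only routine names that appear in this file and omitting the argument lists, is:

   ! Illustrative ordering only; the real driver lives in mpas_io_input and
   ! also handles the analogous vertex fields.
   call mpas_block_creator_setup_blocks_and_0halo_cells(...)  ! blocks + owned cell IDs
   call mpas_block_creator_build_0halo_cell_fields(...)       ! connectivity of owned cells
   call mpas_block_creator_build_0_and_1halo_edge_fields(...) ! owned and first-halo edges
   call mpas_block_creator_build_cell_halos(...)              ! cell halos 1..config_num_halos
   call mpas_block_creator_build_edge_halos(...)               ! edge halos
   call mpas_block_creator_finalize_block_init(...)            ! allocate blocks, copy 0-halo indices
   call mpas_block_creator_reindex_block_fields(...)           ! global -> local connectivity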
+
+module mpas_block_creator
+
+ use mpas_dmpar
+ use mpas_dmpar_types
+ use mpas_block_decomp
+ use mpas_hash
+ use mpas_sort
+ use mpas_grid_types
+ use mpas_configure
+
+ contains
+
+!***********************************************************************
+!
+! routine mpas_block_creator_setup_blocks_and_0halo_cells
+!
+!> \brief Initializes the list of blocks, and determines 0 halo cell indices.
+!> \author Doug Jacobsen
+!> \date 05/31/12
+!> \version SVN:$Id$
+!> \details
+!> This routine sets up the linked list of blocks, and creates the
+!> indexToCellID field for the 0 halo. The information required to setup these
+!> structures is provided as input in cellList, blockID, blockStart, and
+!> blockCount.
+!
+!-----------------------------------------------------------------------
+
+ subroutine mpas_block_creator_setup_blocks_and_0halo_cells(domain, indexToCellID, cellList, blockID, blockStart, blockCount)!{{{
+ type (domain_type), pointer :: domain !< Input: Domain information
+ type (field1dInteger), pointer :: indexToCellID !< Input/Output: indexToCellID field
+ integer, dimension(:), intent(in) :: cellList !< Input: List of cell indices owned by this processor
+ integer, dimension(:), intent(in) :: blockID !< Input: List of block indices owned by this processor
+ integer, dimension(:), intent(in) :: blockStart !< Input: Indices of starting cell id in cellList for each block
+ integer, dimension(:), intent(in) :: blockCount !< Input: Number of cells from cellList owned by each block.
+
+ integer :: nHalos
+ type (block_type), pointer :: blockCursor
+ type (field1dInteger), pointer :: fieldCursor
+
+ integer :: i, iHalo
+ integer :: nBlocks
+
+ nBlocks = size(blockID)
+ nHalos = config_num_halos
+
+ ! Setup first block
+ allocate(domain % blocklist)
+ nullify(domain % blocklist % prev)
+ nullify(domain % blocklist % next)
+
+ ! Setup first block field
+ allocate(indexToCellID)
+ nullify(indexToCellID % next)
+
+ ! Loop over blocks
+ blockCursor => domain % blocklist
+ fieldCursor => indexToCellID
+ do i = 1, nBlocks
+ ! Initialize block information
+ blockCursor % blockID = blockID(i)
+ blockCursor % localBlockID = i - 1
+ blockCursor % domain => domain
+
+ ! Link to block, and setup array size
+ fieldCursor % block => blockCursor
+ fieldCursor % dimSizes(1) = blockCount(i)
+ nullify(fieldCursor % ioinfo)
+
+ ! Initialize exchange lists
+ call mpas_dmpar_init_mulithalo_exchange_list(fieldCursor % sendList, nHalos)
+ call mpas_dmpar_init_mulithalo_exchange_list(fieldCursor % recvList, nHalos)
+ call mpas_dmpar_init_mulithalo_exchange_list(fieldCursor % copyList, nHalos)
+
+ ! Allocate array, and copy indices into array
+ allocate(fieldCursor % array(fieldCursor % dimSizes(1)))
+ fieldCursor % array(:) = cellList(blockStart(i)+1:blockStart(i)+blockCount(i))
+ call mpas_quicksort(fieldCursor % dimSizes(1), fieldCursor % array)
+
+ ! Advance cursors, and create new blocks as needed
+ if(i < nBlocks) then
+ allocate(blockCursor % next)
+ allocate(fieldCursor % next)
+
+ blockCursor % next % prev => blockCursor
+
+ blockCursor => blockCursor % next
+ fieldCursor => fieldCursor % next
+ end if
+
+ ! Nullify next pointers
+ nullify(blockCursor % next)
+ nullify(fieldCursor % next)
+ end do
+ end subroutine mpas_block_creator_setup_blocks_and_0halo_cells!}}}
+
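The slice cellList(blockStart(i)+1 : blockStart(i)+blockCount(i)) above treats blockStart as 0-based offsets into cellList. A tiny self-contained example of that convention (all values made up for illustration):

   program block_partition_example
      implicit none
      integer, dimension(5) :: cellList   = (/ 12, 3, 27, 9, 15 /)
      integer, dimension(2) :: blockStart = (/ 0, 3 /)   ! 0-based offsets into cellList
      integer, dimension(2) :: blockCount = (/ 3, 2 /)   ! number of owned cells per block
      integer :: i

      do i = 1, 2
         ! block 1 owns cellList(1:3) = 12, 3, 27; block 2 owns cellList(4:5) = 9, 15
         print *, 'block', i, 'owns:', cellList(blockStart(i)+1 : blockStart(i)+blockCount(i))
      end do
   end program block_partition_example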
+!***********************************************************************
+!
+! routine mpas_block_creator_build_0halo_cell_fields
+!
+!> \brief Initializes 0 halo cell-based fields required to work out halos
+!> \author Doug Jacobsen
+!> \date 05/31/12
+!> \version SVN:$Id$
+!> \details
+!> This routine uses the previously setup 0 halo cell field, and the blocks of
+!> data read in by other routines to determine all of the connectivity for the 0
+!> halo cell fields on all blocks on a processor.
+!
+!-----------------------------------------------------------------------
+
+ subroutine mpas_block_creator_build_0halo_cell_fields(indexToCellIDBlock, nEdgesOnCellBlock, cellsOnCellBlock, verticesOnCellBlock, edgesOnCellBlock, indexToCellID_0Halo, nEdgesOnCell_0Halo, cellsOnCell_0Halo, verticesOnCell_0Halo, edgesOnCell_0Halo)!{{{
+ type(field1dInteger), pointer :: indexToCellIDBlock !< Input: Block of read in indexToCellID field
+ type(field1dInteger), pointer :: nEdgesOnCellBlock !< Input: Block of read in nEdgesOnCell field
+ type(field2dInteger), pointer :: cellsOnCellBlock !< Input: Block of read in cellsOnCell field
+ type(field2dInteger), pointer :: verticesOnCellBlock !< Input: Block of read in verticesOnCell field
+ type(field2dInteger), pointer :: edgesOnCellBlock !< Input: Block of read in edgesOnCellField
+
+ type(field1dInteger), pointer :: indexToCellID_0Halo !< Input: 0-Halo indices for indexToCellID field
+ type(field1dInteger), pointer :: nEdgesOnCell_0Halo !< Output: nEdgesOnCell field for 0-Halo cells
+ type(field2dInteger), pointer :: cellsOnCell_0Halo !< Output: cellsOnCell field for 0-Halo cells
+ type(field2dInteger), pointer :: verticesOnCell_0Halo !< Output: verticesOnCell field for 0-Halo cells
+ type(field2dInteger), pointer :: edgesOnCell_0Halo !< Output: edgesOnCell field for 0-Halo cells
+
+ type(field1dInteger), pointer :: indexCursor, nEdgesCursor
+ type(field2dInteger), pointer :: cellsOnCellCursor, verticesOnCellCursor, edgesOnCellCursor
+
+ integer, dimension(:), pointer :: sendingHaloLayers
+
+ type (mpas_exchange_list), pointer :: exchListPtr
+
+ integer :: nCellsInBlock, maxEdges, nHalos
+ integer :: i, iHalo
+
+ nHalos = config_num_halos
+
+ ! Only sending from halo layer 1 for setup
+ allocate(sendingHaloLayers(1))
+ sendingHaloLayers(1) = 1
+
+ maxEdges = cellsOnCellBlock % dimSizes(1)
+
+ ! Build exchange list from the block of read in data to each block's index fields.
+ call mpas_dmpar_get_exch_list(1, indexToCellIDBlock, indexToCellID_0Halo)
+
+ ! Setup header fields if at least 1 block exists
+ allocate(nEdgesOnCell_0Halo)
+ nullify(nEdgesOnCell_0Halo % next)
+
+ allocate(cellsOnCell_0Halo)
+ nullify(cellsOnCell_0Halo % next)
+
+ allocate(verticesOnCell_0Halo)
+ nullify(verticesOnCell_0Halo % next)
+
+ allocate(edgesOnCell_0Halo)
+ nullify(edgesOnCell_0Halo % next)
+
+ ! Loop over blocks
+ indexCursor => indexToCellID_0Halo
+ nEdgesCursor => nEdgesOnCell_0Halo
+ cellsOnCellCursor => cellsOnCell_0Halo
+ verticesOnCellCursor => verticesOnCell_0Halo
+ edgesOnCellCursor => edgesOnCell_0Halo
+ do while(associated(indexCursor))
+ nCellsInBlock = indexCursor % dimSizes(1)
+
+ ! Link to block structure
+ nEdgesCursor % block => indexCursor % block
+ cellsOnCellCursor % block => indexCursor % block
+ verticesOnCellCursor % block => indexCursor % block
+ edgesOnCellCursor % block => indexCursor % block
+
+ ! Nullify ioinfo, since this data is not read in
+ nullify(nEdgesCursor % ioinfo)
+ nullify(cellsOnCellCursor % ioinfo)
+ nullify(verticesOnCellCursor % ioinfo)
+ nullify(edgesOnCellCursor % ioinfo)
+
+ ! Setup array sizes
+ nEdgesCursor % dimSizes(1) = nCellsInBlock
+ cellsOnCellCursor % dimSizes(1) = maxEdges
+ cellsOnCellCursor % dimSizes(2) = nCellsInBlock
+ verticesOnCellCursor % dimSizes(1) = maxEdges
+ verticesOnCellCursor % dimSizes(2) = nCellsInBlock
+ edgesOnCellCursor % dimSizes(1) = maxEdges
+ edgesOnCellCursor % dimSizes(2) = nCellsInBlock
+
+ ! Link exchange lists
+ nEdgesCursor % sendList => indexCursor % sendList
+ nEdgesCursor % recvList => indexCursor % recvList
+ nEdgesCursor % copyList => indexCursor % copyList
+ cellsOnCellCursor % sendList => indexCursor % sendList
+ cellsOnCellCursor % recvList => indexCursor % recvList
+ cellsOnCellCursor % copyList => indexCursor % copyList
+ verticesOnCellCursor % sendList => indexCursor % sendList
+ verticesOnCellCursor % recvList => indexCursor % recvList
+ verticesOnCellCursor % copyList => indexCursor % copyList
+ edgesOnCellCursor % sendList => indexCursor % sendList
+ edgesOnCellCursor % recvList => indexCursor % recvList
+ edgesOnCellCursor % copyList => indexCursor % copyList
+
+ ! Allocate arrays
+ allocate(nEdgesCursor % array(nEdgesCursor % dimSizes(1)))
+ allocate(cellsOnCellCursor % array(cellsOnCellCursor % dimSizes(1), cellsOnCellCursor % dimSizes(2)))
+ allocate(verticesOnCellCursor % array(verticesOnCellCursor % dimSizes(1), verticesOnCellCursor % dimSizes(2)))
+ allocate(edgesOnCellCursor % array(edgesOnCellCursor % dimSizes(1), edgesOnCellCursor % dimSizes(2)))
+
+ ! Create new blocks and advance cursors as needed
+ indexCursor => indexCursor % next
+ if(associated(indexCursor)) then
+ allocate(nEdgesCursor % next)
+ allocate(cellsOnCellCursor % next)
+ allocate(verticesOnCellCursor % next)
+ allocate(edgesOnCellCursor % next)
+
+ nEdgesCursor => nEdgesCursor % next
+ cellsOnCellCursor => cellsOnCellCursor % next
+ verticesOnCellCursor => verticesOnCellCursor % next
+ edgesOnCellCursor => edgesOnCellCursor % next
+
+ end if
+
+ ! Nullify next pointers
+ nullify(nEdgesCursor % next)
+ nullify(cellsOnCellCursor % next)
+ nullify(verticesOnCellCursor % next)
+ nullify(edgesOnCellCursor % next)
+ end do ! indexCursor loop over blocks
+
+ ! Communicate data from read in blocks to each block's fields
+ call mpas_dmpar_alltoall_field(nEdgesOnCellBlock, nEdgesOnCell_0Halo, sendingHaloLayers)
+ call mpas_dmpar_alltoall_field(cellsOnCellBlock, cellsOnCell_0Halo, sendingHaloLayers)
+ call mpas_dmpar_alltoall_field(verticesOnCellBlock, verticesOnCell_0Halo, sendingHaloLayers)
+ call mpas_dmpar_alltoall_field(edgesOnCellBlock, edgesOnCell_0Halo, sendingHaloLayers)
+ end subroutine mpas_block_creator_build_0halo_cell_fields!}}}
+
+!***********************************************************************
+!
+! routine mpas_block_creator_build_0_and_1halo_edge_fields
+!
+!> \brief Initializes 0 and 1 halo edge-based fields required to work out halos
+!> \author Doug Jacobsen
+!> \date 05/31/12
+!> \version SVN:$Id$
+!> \details
+!> This routine uses the previously setup 0 halo cell fields, and the blocks of
+!> data read in by other routines to determine which edges are in a block's
+!> 0 and 1 halos for all blocks on a processor.
+!> NOTE: This routine can be used on either vertices or edges
+!
+!-----------------------------------------------------------------------
+
+ subroutine mpas_block_creator_build_0_and_1halo_edge_fields(indexToEdgeIDBlock, cellsOnEdgeBlock, indexToCellID_0Halo, nEdgesOnCell_0Halo, edgesOnCell_0Halo, indexToEdgeID_0Halo, cellsOnEdge_0Halo, nEdgesSolve)!{{{
+ type (field1dInteger), pointer :: indexToEdgeIDBlock !< Input: indexToEdgeID read in field
+ type (field2dInteger), pointer :: cellsOnEdgeBlock !< Input: cellsOnEdge read in field
+ type (field1dInteger), pointer :: indexToCellID_0Halo !< Input: indexToCellID field on 0 halo
+ type (field1dInteger), pointer :: nEdgesOnCell_0Halo !< Input: nEdgesOnCell field on 0 halo
+ type (field2dInteger), pointer :: edgesOnCell_0Halo !< Input: edgesOnCell field on 0 and 1 halos
+ type (field1dInteger), pointer :: indexToEdgeID_0Halo !< Output: indexToEdgeID field on 0 and 1 halos
+ type (field2dInteger), pointer :: cellsOnEdge_0Halo !< Output: CellsOnEdge field on 0 and 1 halos
+ type (field1dInteger), pointer :: nEdgesSolve !< Output: Array with max index to edges in halos
+
+ type (field0dInteger), pointer :: offSetField, edgeLimitField
+ type (field1dInteger), pointer :: haloIndices
+
+ type (field0dInteger), pointer :: offSetCursor, edgeLimitCursor
+ type (field1dInteger), pointer :: indexToCellCursor, indexToEdgeCursor, nEdgesCursor, haloCursor, nEdgesSolveCursor
+ type (field2dInteger), pointer :: edgesOnCellCursor, cellsOnEdgeCursor, cellsOnCellCursor
+
+ type (mpas_exchange_list), pointer :: exchListPtr
+
+ integer, dimension(:), pointer :: localEdgeList
+ integer, dimension(:), pointer :: sendingHaloLayers
+ integer :: nEdgesLocal, nCellsInBlock, maxEdges, edgeDegree, nHalos
+ integer :: haloStart
+ integer :: iBlock, i, j, k
+
+ ! Setup sendingHaloLayers
+ allocate(sendingHaloLayers(1))
+ sendingHaloLayers(1) = 1
+
+ ! Get dimension information
+ maxEdges = edgesOnCell_0Halo % dimSizes(1)
+ edgeDegree = cellsOnEdgeBlock % dimSizes(1)
+ nHalos = config_num_halos
+
+ ! Setup initial block for each field
+ allocate(cellsOnEdge_0Halo)
+ allocate(indexToEdgeID_0Halo)
+
+ nullify(cellsOnEdge_0Halo % next)
+ nullify(indexToEdgeID_0Halo % next)
+
+ ! Loop over blocks
+ indexToCellCursor => indexToCellID_0Halo
+ edgesOnCellCursor => edgesOnCell_0Halo
+ nEdgesCursor => nEdgesOnCell_0Halo
+ indexToEdgeCursor => indexToEdgeID_0Halo
+ cellsOnEdgeCursor => cellsOnEdge_0Halo
+ do while(associated(indexToCellCursor))
+ ! Determine number of cells in block
+ nCellsInBlock = indexToCellCursor % dimSizes(1)
+
+ ! Determine all edges in block
+ call mpas_block_decomp_all_edges_in_block(maxEdges, nCellsInBlock, nEdgesCursor % array, edgesOnCellCursor % array, nEdgesLocal, localEdgeList)
+
+ ! Setup indexToEdge block
+ indexToEdgeCursor % block => indexToCellCursor % block
+ nullify(indexToEdgeCursor % ioinfo)
+ indexToEdgeCursor % dimSizes(1) = nEdgesLocal
+ allocate(indexToEdgeCursor % array(indexToEdgeCursor % dimSizes(1)))
+ indexToEdgeCursor % array(:) = localEdgeList(:)
+
+ ! Setup cellsOnEdge block
+ cellsOnEdgeCursor % block => indexToCellCursor % block
+ nullify(cellsOnEdgeCursor % ioinfo)
+ cellsOnEdgeCursor % dimSizes(1) = edgeDegree
+ cellsOnEdgeCursor % dimSizes(2) = nEdgesLocal
+ allocate(cellsOnEdgeCursor % array(cellsOnEdgeCursor % dimSizes(1), cellsOnEdgeCursor % dimSizes(2)))
+
+ ! Setup exchange lists
+ call mpas_dmpar_init_mulithalo_exchange_list(indexToEdgeCursor % sendList, nHalos+1)
+ call mpas_dmpar_init_mulithalo_exchange_list(indexToEdgeCursor % recvList, nHalos+1)
+ call mpas_dmpar_init_mulithalo_exchange_list(indexToEdgeCursor % copyList, nHalos+1)
+
+ ! Link exchange lists
+ cellsOnEdgeCursor % sendList => indexToEdgeCursor % sendList
+ cellsOnEdgeCursor % recvList => indexToEdgeCursor % recvList
+ cellsOnEdgeCursor % copyList => indexToEdgeCursor % copyList
+
+ ! Remove localEdgeList array
+ deallocate(localEdgeList)
+
+ ! Advance cursors, and create new blocks if needed
+ indexToCellCursor => indexToCellCursor % next
+ edgesOnCellCursor => edgesOnCellCursor % next
+ nEdgesCursor => nEdgesCursor % next
+ if(associated(indexToCellCursor)) then
+ allocate(indexToEdgeCursor % next)
+ indexToEdgeCursor => indexToEdgeCursor % next
+
+ allocate(cellsOnEdgeCursor % next)
+ cellsOnEdgeCursor => cellsOnEdgeCursor % next
+ end if
+
+ ! Nullify next pointers
+ nullify(indexToEdgeCursor % next)
+ nullify(cellsOnEdgeCursor % next)
+ end do ! indexToCursor loop over blocks
+
+ ! Build exchange lists from read-in blocks to owned blocks.
+ call mpas_dmpar_get_exch_list(1, indexToEdgeIDBlock, indexToEdgeID_0Halo)
+
+ ! Perform all to all to get owned block data
+ call mpas_dmpar_alltoall_field(cellsOnEdgeBlock, cellsOnEdge_0Halo, sendingHaloLayers)
+
+ ! Setup first block's fields if there is at least 1 block.
+ if(associated(indexToEdgeID_0Halo)) then
+ allocate(haloIndices)
+ allocate(offSetField)
+ allocate(edgeLimitField)
+ allocate(nEdgesSolve)
+ else
+ nullify(haloIndices)
+ nullify(offSetField)
+ nullify(edgeLimitField)
+ nullify(nEdgesSolve)
+ end if
+
+ ! Loop over blocks
+ indexToEdgeCursor => indexToEdgeID_0Halo
+ cellsOnEdgeCursor => cellsOnEdge_0Halo
+ indexToCellCursor => indexToCellID_0Halo
+ haloCursor => haloIndices
+ offSetCursor => offSetField
+ edgeLimitCursor => edgeLimitField
+ nEdgesSolveCursor => nEdgesSolve
+ do while(associated(indexToEdgeCursor))
+ ! Determine 0 and 1 halo edges
+ call mpas_block_decomp_partitioned_edge_list(indexToCellCursor % dimSizes(1), indexToCellCursor % array, &
+ edgeDegree, indexToEdgeCursor % dimSizes(1), cellsOnEdgeCursor % array, &
+ indexToEdgeCursor % array, haloStart)
+
+ ! Link blocks
+ haloCursor % block => indexToEdgeCursor % block
+ offSetCursor % block => indexToEdgeCursor % block
+ edgeLimitCursor % block => indexToEdgeCursor % block
+ nEdgesSolveCursor % block => indexToEdgeCursor % block
+
+ ! Nullify io info
+ nullify(haloCursor % ioinfo)
+ nullify(offSetCursor % ioinfo)
+ nullify(edgeLimitCursor % ioinfo)
+ nullify(nEdgesSolveCursor % ioinfo)
+
+ ! Setup haloIndices
+ haloCursor % dimSizes(1) = indexToEdgeCursor % dimSizes(1) - (haloStart-1)
+ allocate(haloCursor % array(haloCursor % dimSizes(1)))
+ haloCursor % array(:) = indexToEdgeCursor % array(haloStart:indexToEdgeCursor % dimSizes(1))
+
+ ! Link exchange lists
+ haloCursor % sendList => indexToEdgeCursor % sendList
+ haloCursor % recvList => indexToEdgeCursor % recvList
+ haloCursor % copyList => indexToEdgeCursor % copyList
+
+ ! Determine offSet and limit on 0 halo edges for exchange list creation
+ offSetCursor % scalar = haloStart - 1
+ edgeLimitCursor % scalar = haloStart - 1
+
+ ! Setup nEdgesSolve
+ nEdgesSolveCursor % dimSizes(1) = nHalos+2
+ allocate(nEdgesSolveCursor % array(nEdgesSolve % dimSizes(1)))
+ nEdgesSolveCursor % array = -1
+ nEdgesSolveCursor % array(1) = haloStart - 1
+ nEdgesSolveCursor % array(2) = indexToEdgeCursor % dimSizes(1)
+
+ ! Advance cursors, and create new blocks if needed
+ indexToEdgeCursor => indexToEdgeCursor % next
+ cellsOnEdgeCursor => cellsOnEdgeCursor % next
+ indexToCellCursor => indexToCellCursor % next
+ if(associated(indexToEdgeCursor)) then
+ allocate(haloCursor % next)
+ haloCursor => haloCursor % next
+
+ allocate(offSetCursor % next)
+ offSetCursor => offSetCursor % next
+
+ allocate(edgeLimitCursor % next)
+ edgeLimitCursor => edgeLimitCursor % next
+
+ allocate(nEdgesSolveCursor % next)
+ nEdgesSolveCursor => nEdgesSolveCursor % next
+ end if
+
+ ! Nullify next pointers
+ nullify(haloCursor % next)
+ nullify(offSetCursor % next)
+ nullify(edgeLimitCursor % next)
+ nullify(nEdgesSolveCursor % next)
+ end do
+
+ ! Create exchange lists from 0 halo to 1 halo edges
+ call mpas_dmpar_get_exch_list(1, indexToEdgeID_0Halo, haloIndices, offSetField, edgeLimitField)
+
+ ! Deallocate fields that are not needed anymore.
+ call mpas_deallocate_field(haloIndices)
+ call mpas_deallocate_field(offSetField)
+ call mpas_deallocate_field(edgeLimitField)
+ deallocate(sendingHaloLayers)
+
+ end subroutine mpas_block_creator_build_0_and_1halo_edge_fields!}}}
+
+!***********************************************************************
+!
+! routine mpas_block_creator_build_cell_halos
+!
+!> \brief Builds cell halos
+!> \author Doug Jacobsen
+!> \date 05/31/12
+!> \version SVN:$Id$
+!> \details
+!> This routine uses the previously setup 0 halo cell fields to determine
+!> which cells fall in each halo layer for a block. During this process, each
+!> halo's exchange lists are created. This process is performed for all blocks on
+!> a processor.
+!
+!-----------------------------------------------------------------------
+
+ subroutine mpas_block_creator_build_cell_halos(indexToCellID, nEdgesOnCell, cellsOnCell, verticesOnCell, edgesOnCell, nCellsSolve)!{{{
+ type (field1dInteger), pointer :: indexToCellID !< Input/Output: indexToCellID field for all halos
+ type (field1dInteger), pointer :: nEdgesOnCell !< Input/Output: nEdgesOnCell field for all halos
+ type (field2dInteger), pointer :: cellsOnCell !< Input/Output: cellsOnCell field for all halos
+ type (field2dInteger), pointer :: verticesOnCell !< Input/Output: verticesOnCell field for all halos
+ type (field2dInteger), pointer :: edgesOnCell !< Input/Output: edgesOnCell field for all halos
+ type (field1dInteger), pointer :: nCellsSolve !< Output: Field with indices to end of each halo
+
+ type (dm_info), pointer :: dminfo
+
+ type (field1dInteger), pointer :: haloIndices
+
+ type (field0dInteger), pointer :: offSetCursor, cellLimitCursor
+ type (field1dInteger), pointer :: indexCursor, nEdgesCursor, haloCursor, nCellsSolveCursor
+ type (field2dInteger), pointer :: cellsOnCellCursor, verticesOnCellCursor, edgesOnCellCursor
+
+ type (field0dInteger), pointer :: offSetField
+ type (field0dInteger), pointer :: cellLimitField
+
+ integer, dimension(:), pointer :: sendingHaloLayers
+ integer, dimension(:), pointer :: field1dArrayHolder
+ integer, dimension(:,:), pointer :: field2dArrayHolder
+
+ type (graph), pointer :: blockGraph, blockGraphWithHalo
+
+ type (mpas_exchange_list), pointer :: exchListPtr
+
+ integer :: nHalos, nCellsInBlock, nCellsInHalo, maxEdges
+ integer :: iHalo, iBlock, i
+
+ nHalos = config_num_halos
+ dminfo => indexToCellID % block % domain % dminfo
+ allocate(sendingHaloLayers(1))
+
+ ! Setup header fields
+ allocate(nCellsSolve)
+ allocate(cellLimitField)
+ allocate(offSetField)
+
+ nullify(nCellsSolve % next)
+ nullify(cellLimitField % next)
+ nullify(offSetField % next)
+
+ ! Loop over blocks
+ offSetCursor => offsetField
+ cellLimitCursor => cellLimitField
+ indexCursor => indexToCellID
+ nCellsSolveCursor => nCellsSolve
+ do while (associated(indexCursor))
+ ! Setup offset
+ offSetCursor % scalar = indexCursor % dimSizes(1)
+ offSetCursor % block => indexCursor % block
+ nullify(offSetCursor % ioinfo)
+
+ ! Setup nCellsSolve
+ nCellsSolveCursor % dimSizes(1) = nHalos+1
+ allocate(nCellsSolveCursor % array(nCellsSolveCursor % dimSizes(1)))
+ nCellsSolveCursor % array(1) = indexCursor % dimSizes(1)
+ nCellsSolveCursor % block => indexCursor % block
+ nullify(nCellsSolveCursor % ioinfo)
+
+ ! Setup owned cellLimit
+ cellLimitCursor % scalar = indexCursor % dimSizes(1)
+ cellLimitCursor % block => indexCursor % block
+ nullify(cellLimitCursor % ioinfo)
+
+ ! Advance cursors and create new blocks if needed
+ indexCursor => indexCursor % next
+ if(associated(indexCursor)) then
+ allocate(offSetCursor % next)
+ offSetCursor => offSetCursor % next
+
+ allocate(nCellsSolveCursor % next)
+ nCellsSolveCursor => nCellsSolveCursor % next
+
+ allocate(cellLimitCursor % next)
+ cellLimitCursor => cellLimitCursor % next
+ end if
+
+ ! Nullify next pointers
+ nullify(offSetCursor % next)
+ nullify(nCellsSolveCursor % next)
+ nullify(cellLimitCursor % next)
+ end do
+
+ ! Loop over halos
+ do iHalo = 1, nHalos
+ ! Sending halo layer is the current halo
+ sendingHaloLayers(1) = iHalo
+
+ if(associated(indexToCellID)) then
+ allocate(haloIndices)
+ nullify(haloIndices % next)
+ else
+ nullify(haloIndices)
+ end if
+
+ ! Loop over blocks
+ indexCursor => indexToCellID
+ nEdgesCursor => nEdgesOnCell
+ cellsOnCellCursor => cellsOnCell
+ verticesOnCellCursor => verticesOnCell
+ edgesOnCellCursor => edgesOnCell
+ haloCursor => haloIndices
+ offSetCursor => offSetField
+ do while(associated(indexCursor))
+ ! Determine block dimensions
+ nCellsInBlock = indexCursor % dimSizes(1)
+ maxEdges = cellsOnCellCursor % dimSizes(1)
+
+ ! Setup offSet
+ offSetCursor % scalar = nCellsInBlock
+
+ ! Setup block graphs
+ allocate(blockGraphWithHalo)
+ allocate(blockGraph)
+ allocate(blockGraph % vertexID(nCellsInBlock))
+ allocate(blockGraph % nAdjacent(nCellsInBlock))
+ allocate(blockGraph % adjacencyList(maxEdges, nCellsInBlock))
+
+ blockGraph % nVertices = nCellsInBlock
+ blockGraph % nVerticesTotal = nCellsInBlock
+ blockGraph % maxDegree = maxEdges
+ blockGraph % ghostStart = nCellsInBlock + 1
+
+ blockGraph % vertexID(:) = indexCursor % array(:)
+ blockGraph % nAdjacent(:) = nEdgesCursor % array(:)
+ blockGraph % adjacencyList(:,:) = cellsOnCellCursor % array(:,:)
+
+ ! Determine all cell IDs with the next halo added
+ call mpas_block_decomp_add_halo(dminfo, blockGraph, blockGraphWithHalo)
+
+ ! Setup haloIndices
+ haloCursor % dimSizes(1) = blockGraphWithHalo % nVerticesTotal - blockGraphWithHalo % nVertices
+ allocate(haloCursor % array(haloCursor % dimSizes(1)))
+ haloCursor % array(:) = blockGraphWithHalo % vertexID(blockGraphWithHalo % nVertices+1:blockGraphWithHalo % nVerticesTotal)
+ call mpas_quicksort(haloCursor % dimSizes(1), haloCursor % array)
+ haloCursor % sendList => indexCursor % sendList
+ haloCursor % recvList => indexCursor % recvList
+ haloCursor % copyList => indexCursor % copyList
+ haloCursor % block => indexCursor % block
+ nullify(haloCursor % ioinfo)
+
+ ! Deallocate block graphs
+ deallocate(blockGraphWithHalo % vertexID)
+ deallocate(blockGraphWithHalo % nAdjacent)
+ deallocate(blockGraphWithHalo % adjacencyList)
+ deallocate(blockGraphWithHalo)
+
+ deallocate(blockGraph % vertexID)
+ deallocate(blockGraph % nAdjacent)
+ deallocate(blockGraph % adjacencyList)
+ deallocate(blockGraph)
+
+ ! Advance cursors and create new block if needed
+ indexCursor => indexCursor % next
+ nEdgesCursor => nEdgesCursor % next
+ cellsOnCellCursor => cellsOnCellCursor % next
+ verticesOnCellCursor => verticesOnCellCursor % next
+ edgesOnCellCursor => edgesOnCellCursor % next
+ offSetCursor => offSetCursor % next
+ if(associated(indexCursor)) then
+ allocate(haloCursor % next)
+ haloCursor => haloCursor % next
+ end if
+ ! Nullify next pointer
+ nullify(haloCursor % next)
+ end do ! indexCursor loop over blocks
+
+ ! Create exchange lists for current halo layer
+ call mpas_dmpar_get_exch_list(iHalo, indexToCellID, haloIndices, offSetField, cellLimitField)
+
+ ! Loop over blocks
+ indexCursor => indexToCellID
+ nEdgesCursor => nEdgesOnCell
+ cellsOnCellCursor => cellsOnCell
+ verticesOnCellCursor => verticesOnCell
+ edgesOnCellCursor => edgesOnCell
+ haloCursor => haloIndices
+ nCellsSolveCursor => nCellsSolve
+ do while(associated(indexCursor))
+ ! Determine block dimensions
+ nCellsInBlock = indexCursor % dimSizes(1)
+ nCellsInHalo = haloCursor % dimSizes(1)
+
+ ! Setup new layer's nCellsSolve
+ nCellsSolveCursor % array(iHalo+1) = nCellsInBlock + nCellsInHalo
+
+ ! Copy cell indices into indexToCellID field
+ field1dArrayHolder => indexCursor % array
+ indexCursor % dimSizes(1) = nCellsSolveCursor % array(iHalo+1)
+ allocate(indexCursor % array(indexCursor % dimSizes(1)))
+ indexCursor % array(1:nCellsInBlock) = field1dArrayHolder(:)
+ indexCursor % array(nCellsInBlock+1:nCellsSolveCursor % array(iHalo+1)) = haloCursor % array(1:nCellsInHalo)
+ deallocate(field1dArrayHolder)
+
+ ! Allocate space in nEdgesOnCell
+ field1dArrayHolder => nEdgesCursor % array
+ nEdgesCursor % dimSizes(1) = nCellsSolveCursor % array(iHalo+1)
+ allocate(nEdgesCursor % array(nEdgesCursor % dimSizes(1)))
+ nEdgesCursor % array = -1
+ nEdgesCursor % array(1:nCellsInBlock) = field1dArrayHolder(:)
+ deallocate(field1dArrayHolder)
+
+ ! Allocate space in cellsOnCell
+ field2dArrayHolder => cellsOnCellCursor % array
+ cellsOnCellCursor % dimSizes(2) = nCellsSolveCursor % array(iHalo+1)
+ allocate(cellsOnCellCursor % array(cellsOnCellCursor % dimSizes(1), cellsOnCellCursor % dimSizes(2)))
+ cellsOnCellCursor % array = -1
+ cellsOnCellCursor % array(:,1:nCellsInBlock) = field2dArrayHolder(:,:)
+ deallocate(field2dArrayHolder)
+
+ ! Allocate space in verticesOnCell
+ field2dArrayHolder => verticesOnCellCursor % array
+ verticesOnCellCursor % dimSizes(2) = nCellsSolveCursor % array(iHalo+1)
+ allocate(verticesOnCellCursor % array(verticesOnCellCursor % dimSizes(1), verticesOnCellCursor % dimSizes(2)))
+ verticesOnCellCursor % array = -1
+ verticesOnCellCursor % array(:,1:nCellsInBlock) = field2dArrayHolder(:,:)
+ deallocate(field2dArrayHolder)
+
+ ! Allocate space in edgesOnCell
+ field2dArrayHolder => edgesOnCellCursor % array
+ edgesOnCellCursor % dimSizes(2) = nCellsSolveCursor % array(iHalo+1)
+ allocate(edgesOnCellCursor % array(edgesOnCellCursor % dimSizes(1), edgesOnCellCursor % dimSizes(2)))
+ edgesOnCellCursor % array = -1
+ edgesOnCellCursor % array(:,1:nCellsInBlock) = field2dArrayHolder(:,:)
+ deallocate(field2dArrayHolder)
+
+ indexCursor => indexCursor % next
+ nEdgesCursor => nEdgesCursor % next
+ cellsOnCellCursor => cellsOnCellCursor % next
+ verticesOnCellCursor => verticesOnCellCursor % next
+ edgesOnCellCursor => edgesOnCellCursor % next
+ haloCursor => haloCursor % next
+ nCellsSolveCursor => nCellsSolveCursor % next
+ end do
+
+ ! Perform allToAll communications
+ call mpas_dmpar_alltoall_field(indexToCellID, indexToCellID, sendingHaloLayers)
+ call mpas_dmpar_alltoall_field(nEdgesOnCell, nEdgesOnCell, sendingHaloLayers)
+ call mpas_dmpar_alltoall_field(cellsOnCell, cellsOnCell, sendingHaloLayers)
+ call mpas_dmpar_alltoall_field(verticesOnCell, verticesOnCell, sendingHaloLayers)
+ call mpas_dmpar_alltoall_field(edgesOnCell, edgesOnCell, sendingHaloLayers)
+
+ ! Deallocate haloindices field
+ call mpas_deallocate_field(haloIndices)
+ end do ! iHalo loop over nHalos
+
+ ! Deallocate array and field.
+ deallocate(sendingHaloLayers)
+ call mpas_deallocate_field(offSetField)
+
+ end subroutine mpas_block_creator_build_cell_halos!}}}
+
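After this routine, each block's nCellsSolve % array holds cumulative counts: element 1 is the number of owned (0-halo) cells and element iHalo+1 is the count through halo layer iHalo. A sketch of how a loop bound would use those counts follows; nCellsSolveArray stands in for one block's copy of the array and is not a name from this commit.

   ! Illustrative fragment only.
   do iCell = 1, nCellsSolveArray(1)      ! owned (0-halo) cells only
      ! work that must not touch halo cells
   end do

   do iCell = 1, nCellsSolveArray(2)      ! owned cells plus halo layer 1
      ! work that may read one halo layer
   end do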
+!***********************************************************************
+!
+! routine mpas_block_creator_build_edge_halos
+!
+!> \brief Builds edge halos
+!> \author Doug Jacobsen
+!> \date 05/31/12
+!> \version SVN:$Id$
+!> \details
+!> This routine uses the previously setup 0 and 1 edge fields and 0 halo cell fields to determine
+!> which edges fall in each halo layer for a block. During this process, each
+!> halo's exchange lists are created. This process is performed for all blocks on
+!> a processor.
+!> NOTE: This routine can be used on either vertices or edges
+!
+!-----------------------------------------------------------------------
+
+ subroutine mpas_block_creator_build_edge_halos(indexToCellID, nEdgesOnCell, nCellsSolve, edgesOnCell, indexToEdgeID, cellsOnEdge, nEdgesSolve)!{{{
+ type (field1dInteger), pointer :: indexToCellID !< Input: indexToCellID field for all halos
+ type (field1dInteger), pointer :: nEdgesOnCell !< Input: nEdgesOnCell field for all halos
+ type (field1dInteger), pointer :: nCellsSolve !< Input: nCellsSolve field for all halos
+ type (field2dInteger), pointer :: edgesOnCell !< Input/Output: edgesOnCell field for all halos
+ type (field1dInteger), pointer :: indexToEdgeID !< Input/Output: indexToEdgeID field for halos 0 and 1, but output for all halos
+ type (field2dInteger), pointer :: cellsOnEdge !< Output: cellsOnEdge field for all halos
+ type (field1dInteger), pointer :: nEdgesSolve !< Input/Output: nEdgesSolve field for halos 0 and 1, but output for all halos
+
+ type (field0dInteger), pointer :: offSetField, edgeLimitField
+ type (field1dInteger), pointer :: haloIndices
+
+ type (field0dInteger), pointer :: offSetCursor, edgeLimitCursor
+ type (field1dInteger), pointer :: indexToCellCursor, nEdgesCursor, nCellsSolveCursor, indexToEdgeCursor, nEdgesSolveCursor, haloCursor
+ type (field2dInteger), pointer :: edgesOnCellCursor, cellsOnEdgeCursor
+
+ integer, dimension(:), pointer :: sendingHaloLayers
+ integer, dimension(:), pointer :: array1dHolder, localEdgeList
+ integer, dimension(:,:), pointer :: array2dHolder
+
+ integer :: iHalo, iBlock, i, j, k
+ integer :: nHalos, nBlocks, nCellsInBlock, nEdgesLocal, haloStart, haloEnd, haloSize
+ integer :: maxEdges, edgeDegree
+
+ type (hashtable), dimension(:), pointer :: edgeList
+
+ ! Determine dimensions
+ nHalos = config_num_halos
+ maxEdges = edgesOnCell % dimSizes(1)
+ edgeDegree = cellsOnEdge % dimSizes(1)
+
+ ! Allocate some needed arrays and fields
+ allocate(sendingHaloLayers(1))
+
+ allocate(haloIndices)
+ allocate(offSetField)
+ allocate(edgeLimitField)
+
+ nullify(haloIndices % next)
+ nullify(offSetField % next)
+ nullify(edgeLimitField % next)
+
+ ! Determine number of blocks, and setup field lists
+ ! Loop over blocks
+ nBlocks = 0
+ indexToEdgeCursor => indexToEdgeID
+ haloCursor => haloIndices
+ offSetCursor => offSetField
+ edgeLimitCursor => edgeLimitField
+ nEdgesSolveCursor => nEdgesSolve
+ do while(associated(indexToEdgeCursor))
+ nBlocks = nBlocks + 1
+
+ ! Setup edgeLimit and offSet
+ edgeLimitCursor % scalar = nEdgesSolveCursor % array(1)
+ offSetCursor % scalar = nEdgesSolveCursor % array(2)
+
+ ! Link blocks
+ edgeLimitCursor % block => indexToEdgeCursor % block
+ offSetCursor % block => indexToEdgeCursor % block
+ haloCursor % block => indexToEdgeCursor % block
+
+ ! Nullify ioinfo
+ nullify(edgeLimitCursor % ioinfo)
+ nullify(offSetCursor % ioinfo)
+ nullify(haloCursor % ioinfo)
+
+ ! Link exchange lists
+ haloCursor % sendList => indexToEdgeCursor % sendList
+ haloCursor % recvList => indexToEdgeCursor % recvList
+ haloCursor % copyList => indexToEdgeCursor % copyList
+
+ ! Advance cursors and create new blocks if needed
+ indexToEdgeCursor => indexToEdgeCursor % next
+ nEdgesSolveCursor => nEdgesSolveCursor % next
+ if(associated(indexToEdgeCursor)) then
+ allocate(haloCursor % next)
+ haloCursor => haloCursor % next
+
+ allocate(offSetCursor % next)
+ offSetCursor => offSetCursor % next
+
+ allocate(edgeLimitCursor % next)
+ edgeLimitCursor => edgeLimitCursor % next
+ end if
+
+ ! Nullify next pointers
+ nullify(haloCursor % next)
+ nullify(offSetCursor % next)
+ nullify(edgeLimitCursor % next)
+ end do
+
+ ! Allocate and initialize hashtables
+ allocate(edgeList(nBlocks))
+ do iBlock = 1, nBlocks
+ call mpas_hash_init(edgeList(iBlock))
+ end do
+
+ ! Build unique 0 and 1 halo list for each block
+ indexToEdgeCursor => indexToEdgeID
+ do while(associated(indexToEdgeCursor))
+ iBlock = indexToEdgeCursor % block % localBlockID + 1
+
+ do i = 1, indexToEdgeCursor % dimSizes(1)
+ if(.not. mpas_hash_search(edgeList(iBlock), indexToEdgeCursor % array(i))) then
+ call mpas_hash_insert(edgeList(iBlock), indexToEdgeCursor % array(i))
+ end if
+ end do
+
+ indexToEdgeCursor => indexToEdgeCursor % next
+ end do
+
+ ! Append new unique edge IDs to the indexToEdgeID field.
+ do iHalo = 3, nHalos+2
+ sendingHaloLayers(1) = iHalo-1
+
+ ! Loop over blocks
+ indexToEdgeCursor => indexToEdgeID
+ nEdgesCursor => nEdgesOnCell
+ nCellsSolveCursor => nCellsSolve
+ edgesOnCellCursor => edgesOnCell
+ nEdgesSolveCursor => nEdgesSolve
+ haloCursor => haloIndices
+ offSetCursor => offSetField
+ do while(associated(indexToEdgeCursor))
+ iBlock = indexToEdgeCursor % block % localBlockID+1
+ nCellsInBlock = nCellsSolveCursor % array(iHalo-1)
+ offSetCursor % scalar = nEdgesSolveCursor % array(iHalo-1)
+
+ ! Determine all edges in block
+ call mpas_block_decomp_all_edges_in_block(maxEdges, nCellsInBlock, nEdgesCursor % array, edgesOnCellCursor % array, nEdgesLocal, localEdgeList)
+
+ nEdgesSolveCursor % array(iHalo) = nEdgesLocal
+ haloSize = nEdgesLocal - nEdgesSolveCursor % array(iHalo-1)
+ haloCursor % dimSizes(1) = haloSize
+
+ allocate(haloCursor % array(haloCursor % dimSizes(1)))
+
+ ! Add all edges to the block's hash table, and record which ones are new (i.e., belong to the new halo layer)
+ j = 1
+ do i = 1, nEdgesLocal
+ if(.not. mpas_hash_search(edgeList(iBlock), localEdgeList(i))) then
+ call mpas_hash_insert(edgeList(iBlock), localEdgeList(i))
+ haloCursor % array(j) = localEdgeList(i)
+ j = j + 1
+ end if
+ end do
+
+ deallocate(localEdgeList)
+
+ ! Advance Cursors
+ indexToEdgeCursor => indexToEdgeCursor % next
+ nEdgesCursor => nEdgesCursor % next
+ nCellsSolveCursor => nCellsSolveCursor % next
+ edgesOnCellCursor => edgesOnCellCursor % next
+ nEdgesSolveCursor => nEdgesSolveCursor % next
+ haloCursor => haloCursor % next
+ offSetCursor => offSetCursor % next
+ end do
+
+ ! Build the current layer's exchange list
+ call mpas_dmpar_get_exch_list(iHalo-1, indexToEdgeID, haloIndices, offSetField, edgeLimitField)
+
+ ! Loop over blocks
+ indexToEdgeCursor => indexToEdgeID
+ cellsOnEdgeCursor => cellsOnEdge
+ nEdgesSolveCursor => nEdgesSolve
+ haloCursor => haloIndices
+ do while(associated(indexToEdgeCursor))
+ ! Copy in new halo indices
+ array1dHolder => indexToEdgeCursor % array
+ indexToEdgeCursor % dimSizes(1) = nEdgesSolveCursor % array(iHalo)
+ allocate(indexToEdgeCursor % array(indexToEdgeCursor % dimSizes(1)))
+ indexToEdgeCursor % array(1:nEdgesSolveCursor % array(iHalo-1)) = array1dHolder(:)
+ indexToEdgeCursor % array(nEdgesSolveCursor % array(iHalo-1)+1:nEdgesSolveCursor % array(iHalo)) = haloCursor % array(:)
+ deallocate(array1dHolder)
+
+ ! Allocate space in cellsOnEdge
+ array2dHolder => cellsOnEdgeCursor % array
+ cellsOnEdgeCursor % dimSizes(2) = nEdgesSolveCursor % array(iHalo)
+ allocate(cellsOnEdgeCursor % array(cellsOnEdgeCursor % dimSizes(1), cellsOnEdgeCursor % dimSizes(2)))
+ cellsOnEdgeCursor % array(:,1:nEdgesSolveCursor % array(iHalo-1)) = array2dHolder(:,:)
+ deallocate(array2dHolder)
+
+ ! Deallocate haloCursor array
+ deallocate(haloCursor % array)
+
+ ! Advance cursors
+ indexToEdgeCursor => indexToEdgeCursor % next
+ cellsOnEdgeCursor => cellsOnEdgeCursor % next
+ nEdgesSolveCursor => nEdgesSolveCursor % next
+ haloCursor => haloCursor % next
+ end do
+
+ ! Perform allToAll communication
+ call mpas_dmpar_alltoall_field(cellsOnEdge, cellsOnEdge, sendingHaloLayers)
+ end do
+
+ ! Deallocate fields, hashtables, and arrays
+ call mpas_deallocate_field(haloIndices)
+ call mpas_deallocate_field(edgeLimitField)
+ call mpas_deallocate_field(offSetField)
+ do iBlock=1,nBlocks
+ call mpas_hash_destroy(edgeList(iBlock))
+ end do
+ deallocate(edgeList)
+ deallocate(sendingHaloLayers)
+
+
+ end subroutine mpas_block_creator_build_edge_halos!}}}
+
+!***********************************************************************
+!
+! routine mpas_block_creator_finalize_block_init
+!
+!> \brief Finalize block creation
+!> \author Doug Jacobsen
+!> \date 05/31/12
+!> \version SVN:$Id$
+!> \details
+!> This routine finalizes the block initialization process. It calls
+!> mpas_allocate_block to allocate space for all fields in a block. Then the 0
+!> halo indices for each element and the exchange lists are copied into the
+!> appropriate block. A halo update is required after this routine is called
+!> to make sure all data in a block is valid.
+!
+!-----------------------------------------------------------------------
+
+ subroutine mpas_block_creator_finalize_block_init(blocklist, & !{{{
+#include "dim_dummy_args.inc"
+ , nCellsSolve, nEdgesSolve, nVerticesSolve, indexToCellID, indexToEdgeID, indexToVertexID)
+ type (block_type), pointer :: blocklist !< Input/Output: Linked List of blocks
+#include "dim_dummy_decls_inout.inc"
+ type (field1dInteger), pointer :: nCellsSolve !< Input: nCellsSolve field information
+ type (field1dInteger), pointer :: nEdgesSolve !< Input: nEdgesSolve field information
+ type (field1dInteger), pointer :: nVerticesSolve !< Input: nVerticesSolve field information
+ type (field1dInteger), pointer :: indexToCellID !< Input: indexToCellID field information
+ type (field1dInteger), pointer :: indexToEdgeID !< Input: indexToEdgeID field information
+ type (field1dInteger), pointer :: indexToVertexID !< Input: indexToVertexID field information
+
+ type (domain_type), pointer :: domain
+
+ type (block_type), pointer :: block_ptr
+ type (field1dInteger), pointer :: nCellsCursor, nEdgesCursor, nVerticesCursor
+ type (field1dInteger), pointer :: indexToCellCursor, indexToEdgeCursor, indexToVertexCursor
+
+ integer :: nHalos
+ integer :: nCellsSolve_0Halo, nVerticesSolve_0Halo, nEdgesSolve_0Halo
+ integer :: blockID, localBlockID
+
+ nHalos = config_num_halos
+ domain => blocklist % domain
+
+ ! Loop over blocks
+ block_ptr => blocklist
+ nCellsCursor => nCellsSolve
+ nEdgesCursor => nEdgesSolve
+ nVerticesCursor => nVerticesSolve
+ indexToCellCursor => indexToCellID
+ indexToEdgeCursor => indexToEdgeID
+ indexToVertexCursor => indexToVertexID
+ do while(associated(block_ptr))
+ ! Determine block dimensions
+ nCells = nCellsCursor % array(nHalos+1)
+ nEdges = nEdgesCursor % array(nHalos+2)
+ nVertices = nVerticesCursor % array(nHalos+2)
+
+ nCellsSolve_0Halo = nCellsCursor % array(1)
+ nEdgesSolve_0Halo = nEdgesCursor % array(1)
+ nVerticesSolve_0Halo = nVerticesCursor % array(1)
+
+ ! Determine block IDs
+ blockID = block_ptr % blockID
+ localBlockID = block_ptr % localBlockID
+
+ ! Allocate fields in block
+ call mpas_allocate_block(nHalos, block_ptr, domain, blockID, &
+#include "dim_dummy_args.inc"
+ )
+
+ allocate(block_ptr % mesh % nCellsArray(0:nHalos))
+ allocate(block_ptr % mesh % nEdgesArray(0:nHalos+1))
+ allocate(block_ptr % mesh % nVerticesArray(0:nHalos+1))
+
+ block_ptr % mesh % nCellsArray(:) = nCellsCursor % array(:)
+ block_ptr % mesh % nEdgesArray(:) = nEdgesCursor % array(:)
+ block_ptr % mesh % nVerticesArray(:) = nVerticesCursor % array(:)
+
+ ! Set block's local id
+ block_ptr % localBlockID = localBlockID
+
+ ! Set block's *Solve dimensions
+ block_ptr % mesh % nCellsSolve = nCellsSolve_0Halo
+ block_ptr % mesh % nEdgesSolve = nEdgesSolve_0Halo
+ block_ptr % mesh % nVerticesSolve = nVerticesSolve_0Halo
+
+ ! Set block's 0 halo indices
+ block_ptr % mesh % indexToCellID % array(1:nCellsSolve_0Halo) = indexToCellCursor % array(1:nCellsSolve_0Halo)
+ block_ptr % mesh % indexToEdgeID % array(1:nEdgesSolve_0Halo) = indexToEdgeCursor % array(1:nEdgesSolve_0Halo)
+ block_ptr % mesh % indexToVertexID % array(1:nVerticesSolve_0Halo) = indexToVertexCursor % array(1:nVerticesSolve_0Halo)
+
+ ! Set block's exchange lists and nullify unneeded exchange lists
+ block_ptr % parinfo % cellsToSend => indexToCellCursor % sendList
+ block_ptr % parinfo % cellsToRecv => indexToCellCursor % recvList
+ block_ptr % parinfo % cellsToCopy => indexToCellCursor % copyList
+ nullify(indexToCellCursor % sendList)
+ nullify(indexToCellCursor % recvList)
+ nullify(indexToCellCursor % copyList)
+
+ block_ptr % parinfo % edgesToSend => indexToEdgeCursor % sendList
+ block_ptr % parinfo % edgesToRecv => indexToEdgeCursor % recvList
+ block_ptr % parinfo % edgesToCopy => indexToEdgeCursor % copyList
+ nullify(indexToEdgeCursor % sendList)
+ nullify(indexToEdgeCursor % recvList)
+ nullify(indexToEdgeCursor % copyList)
+
+ block_ptr % parinfo % verticesToSend => indexToVertexCursor % sendList
+ block_ptr % parinfo % verticesToRecv => indexToVertexCursor % recvList
+ block_ptr % parinfo % verticesToCopy => indexToVertexCursor % copyList
+ nullify(indexToVertexCursor % sendList)
+ nullify(indexToVertexCursor % recvList)
+ nullify(indexToVertexCursor % copyList)
+
+ ! Advance cursors
+ block_ptr => block_ptr % next
+ nCellsCursor => nCellsCursor % next
+ nEdgesCursor => nEdgesCursor % next
+ nVerticesCursor => nVerticesCursor % next
+ indexToCellCursor => indexToCellCursor % next
+ indexToEdgeCursor => indexToEdgeCursor % next
+ indexToVertexCursor => indexToVertexCursor % next
+ end do
+
+ ! Link fields between blocks
+ block_ptr => blocklist
+ do while(associated(block_ptr))
+ call mpas_create_field_links(block_ptr)
+
+ block_ptr => block_ptr % next
+ end do
+ end subroutine mpas_block_creator_finalize_block_init!}}}
+
+!***********************************************************************
+!
+! routine mpas_block_creator_reindex_block_fields
+!
+!> \brief Reindex mesh connectivity arrays
+!> \author Doug Jacobsen
+!> \date 05/31/12
+!> \version SVN:$Id$
+!> \details
+!> This routine re-indexes the connectivity arrays for the mesh data
+!> structure. Prior to this routine, all indices are given as global indices (which
+!> can later be found in the indexTo* arrays). After this routine is called,
+!> indices are given as local indices (1:nCells+1, etc.).
+!
+!-----------------------------------------------------------------------
+
+ subroutine mpas_block_creator_reindex_block_fields(blocklist)!{{{
+ type (block_type), pointer :: blocklist !< Input/Output: Linked list of blocks
+
+ type (block_type), pointer :: block_ptr
+
+ integer :: i, j, k
+ integer, dimension(:,:), pointer :: cellIDSorted, edgeIDSorted, vertexIDSorted
+
+ ! Loop over blocks
+ block_ptr => blocklist
+ do while(associated(block_ptr))
+ !
+ ! Rename cells, edges, and vertices in cellsOnCell, edgesOnCell, etc. to local indices
+ !
+ allocate(cellIDSorted(2, block_ptr % mesh % nCells))
+ allocate(edgeIDSorted(2, block_ptr % mesh % nEdges))
+ allocate(vertexIDSorted(2, block_ptr % mesh % nVertices))
+
+ do i=1,block_ptr % mesh % nCells
+ cellIDSorted(1,i) = block_ptr % mesh % indexToCellID % array(i)
+ cellIDSorted(2,i) = i
+ end do
+ call mpas_quicksort(block_ptr % mesh % nCells, cellIDSorted)
+
+ do i=1,block_ptr % mesh % nEdges
+ edgeIDSorted(1,i) = block_ptr % mesh % indexToEdgeID % array(i)
+ edgeIDSorted(2,i) = i
+ end do
+ call mpas_quicksort(block_ptr % mesh % nEdges, edgeIDSorted)
+
+ do i=1,block_ptr % mesh % nVertices
+ vertexIDSorted(1,i) = block_ptr % mesh % indexToVertexID % array(i)
+ vertexIDSorted(2,i) = i
+ end do
+ call mpas_quicksort(block_ptr % mesh % nVertices, vertexIDSorted)
+
+
+ do i=1,block_ptr % mesh % nCells
+ do j=1,block_ptr % mesh % nEdgesOnCell % array(i)
+ k = mpas_binary_search(cellIDSorted, 2, 1, block_ptr % mesh % nCells, &
+ block_ptr % mesh % cellsOnCell % array(j,i))
+ if (k <= block_ptr % mesh % nCells) then
+ block_ptr % mesh % cellsOnCell % array(j,i) = cellIDSorted(2,k)
+ else
+ block_ptr % mesh % cellsOnCell % array(j,i) = block_ptr % mesh % nCells + 1
+ end if
+
+ k = mpas_binary_search(edgeIDSorted, 2, 1, block_ptr % mesh % nEdges, &
+ block_ptr % mesh % edgesOnCell % array(j,i))
+ if (k <= block_ptr % mesh % nEdges) then
+ block_ptr % mesh % edgesOnCell % array(j,i) = edgeIDSorted(2,k)
+ else
+ block_ptr % mesh % edgesOnCell % array(j,i) = block_ptr % mesh % nEdges + 1
+ end if
+
+ k = mpas_binary_search(vertexIDSorted, 2, 1, block_ptr % mesh % nVertices, &
+ block_ptr % mesh % verticesOnCell % array(j,i))
+ if (k <= block_ptr % mesh % nVertices) then
+ block_ptr % mesh % verticesOnCell % array(j,i) = vertexIDSorted(2,k)
+ else
+ block_ptr % mesh % verticesOnCell % array(j,i) = block_ptr % mesh % nVertices + 1
+ end if
+ end do
+ end do
+
+ do i=1,block_ptr % mesh % nEdges
+ do j=1,2
+
+ k = mpas_binary_search(cellIDSorted, 2, 1, block_ptr % mesh % nCells, &
+ block_ptr % mesh % cellsOnEdge % array(j,i))
+ if (k <= block_ptr % mesh % nCells) then
+ block_ptr % mesh % cellsOnEdge % array(j,i) = cellIDSorted(2,k)
+ else
+ block_ptr % mesh % cellsOnEdge % array(j,i) = block_ptr % mesh % nCells + 1
+ end if
+
+ k = mpas_binary_search(vertexIDSorted, 2, 1, block_ptr % mesh % nVertices, &
+ block_ptr % mesh % verticesOnEdge % array(j,i))
+ if (k <= block_ptr % mesh % nVertices) then
+ block_ptr % mesh % verticesOnEdge % array(j,i) = vertexIDSorted(2,k)
+ else
+ block_ptr % mesh % verticesOnEdge % array(j,i) = block_ptr % mesh % nVertices + 1
+ end if
+
+ end do
+
+ do j=1,block_ptr % mesh % nEdgesOnEdge % array(i)
+
+ k = mpas_binary_search(edgeIDSorted, 2, 1, block_ptr % mesh % nEdges, &
+ block_ptr % mesh % edgesOnEdge % array(j,i))
+ if (k <= block_ptr % mesh % nEdges) then
+ block_ptr % mesh % edgesOnEdge % array(j,i) = edgeIDSorted(2,k)
+ else
+ block_ptr % mesh % edgesOnEdge % array(j,i) = block_ptr % mesh % nEdges + 1
+ end if
+ end do
+ end do
+
+ do i=1,block_ptr % mesh % nVertices
+ do j=1,block_ptr % mesh % vertexDegree
+
+ k = mpas_binary_search(cellIDSorted, 2, 1, block_ptr % mesh % nCells, &
+ block_ptr % mesh % cellsOnVertex % array(j,i))
+ if (k <= block_ptr % mesh % nCells) then
+ block_ptr % mesh % cellsOnVertex % array(j,i) = cellIDSorted(2,k)
+ else
+ block_ptr % mesh % cellsOnVertex % array(j,i) = block_ptr % mesh % nCells + 1
+ end if
+
+ k = mpas_binary_search(edgeIDSorted, 2, 1, block_ptr % mesh % nEdges, &
+ block_ptr % mesh % edgesOnVertex % array(j,i))
+ if (k <= block_ptr % mesh % nEdges) then
+ block_ptr % mesh % edgesOnVertex % array(j,i) = edgeIDSorted(2,k)
+ else
+ block_ptr % mesh % edgesOnVertex % array(j,i) = block_ptr % mesh % nEdges + 1
+ end if
+ end do
+ end do
+
+ deallocate(cellIDSorted)
+ deallocate(edgeIDSorted)
+ deallocate(vertexIDSorted)
+
+ block_ptr => block_ptr % next
+ end do
+
+ end subroutine mpas_block_creator_reindex_block_fields!}}}
+
+end module mpas_block_creator
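! --------------------------------------------------------------------------------------
! Editorial sketch (not part of the commit): a minimal, self-contained illustration of the
! global-to-local reindexing performed by mpas_block_creator_reindex_block_fields above.
! (globalID, localIndex) pairs are sorted on the global ID, each connectivity entry is
! binary-searched, and misses are pointed at the "garbage" element nCells+1.  All names,
! sizes, and values below are hypothetical and not taken from the MPAS source.
! --------------------------------------------------------------------------------------
program reindex_sketch
   implicit none
   integer, parameter :: nCells = 5
   integer, dimension(nCells)   :: indexToCellID = (/ 21, 13, 15, 1, 5 /)  ! local -> global
   integer, dimension(2,nCells) :: cellIDSorted
   integer, dimension(3)        :: cellsOnCell = (/ 13, 5, 42 /)           ! global neighbor IDs
   integer :: i, k

   do i = 1, nCells
      cellIDSorted(1,i) = indexToCellID(i)   ! key: global ID
      cellIDSorted(2,i) = i                  ! payload: local index
   end do
   call sort2(cellIDSorted, nCells)

   do i = 1, size(cellsOnCell)
      k = bsearch(cellIDSorted, nCells, cellsOnCell(i))
      if (k <= nCells) then
         cellsOnCell(i) = cellIDSorted(2,k)  ! found: replace global ID with local index
      else
         cellsOnCell(i) = nCells + 1         ! not on this block: point at garbage cell
      end if
   end do
   print *, 'reindexed cellsOnCell:', cellsOnCell   ! expected: 2 5 6

contains

   subroutine sort2(a, n)   ! simple insertion sort on column 1
      integer, intent(inout) :: a(2,*)
      integer, intent(in) :: n
      integer :: i, j, key(2)
      do i = 2, n
         key = a(:,i); j = i - 1
         do while (j >= 1)
            if (a(1,j) <= key(1)) exit
            a(:,j+1) = a(:,j); j = j - 1
         end do
         a(:,j+1) = key
      end do
   end subroutine sort2

   integer function bsearch(a, n, key_val)   ! index of key_val in a(1,:), or n+1 if absent
      integer, intent(in) :: a(2,*), n, key_val
      integer :: lo, hi, mid
      lo = 1; hi = n; bsearch = n + 1
      do while (lo <= hi)
         mid = (lo + hi) / 2
         if (a(1,mid) == key_val) then
            bsearch = mid; return
         else if (a(1,mid) < key_val) then
            lo = mid + 1
         else
            hi = mid - 1
         end if
      end do
   end function bsearch
end program reindex_sketch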
Modified: branches/dcmip/src/framework/mpas_block_decomp.F
===================================================================
--- branches/dcmip/src/framework/mpas_block_decomp.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/framework/mpas_block_decomp.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -1,8 +1,11 @@
module mpas_block_decomp
use mpas_dmpar
+ use mpas_dmpar_types
use mpas_hash
use mpas_sort
+ use mpas_grid_types
+ use mpas_configure
type graph
integer :: nVerticesTotal
@@ -45,6 +48,10 @@
integer, dimension(:), pointer :: local_nvertices
character (len=StrKIND) :: filename
+ logical :: no_blocks
+
+ no_blocks = .false.
+
if(config_number_of_blocks == 0) then
total_blocks = dminfo % nProcs
else
@@ -139,72 +146,84 @@
call mpas_dmpar_scatter_ints(dminfo, dminfo % nprocs, local_nvertices(dminfo % my_proc_id + 1), &
global_start, local_nvertices, global_block_list, local_block_list)
end if
-
- allocate(sorted_local_cell_list(2, local_nvertices(dminfo % my_proc_id + 1)))
- allocate(block_id(blocks_per_proc))
- allocate(block_start(blocks_per_proc))
- allocate(block_count(blocks_per_proc))
- do i = 1, blocks_per_proc
- block_start = 0
- block_count = 0
- end do
+ if(blocks_per_proc == 0) then
+ no_blocks = .true.
+ blocks_per_proc = 1
+ end if
- do i = 1,local_nvertices(dminfo % my_proc_id +1)
- call mpas_get_local_block_id(dminfo, local_block_list(i), local_block_id)
-
- block_id(local_block_id+1) = local_block_list(i)
-
- sorted_local_cell_list(1, i) = local_block_list(i)
- sorted_local_cell_list(2, i) = local_cell_list(i)
-
- block_count(local_block_id+1) = block_count(local_block_id+1) + 1
- end do
+ if(no_blocks) then
+ allocate(block_id(blocks_per_proc))
+ allocate(block_start(blocks_per_proc))
+ allocate(block_count(blocks_per_proc))
- call quicksort(local_nvertices(dminfo % my_proc_id + 1), sorted_local_cell_list)
+ block_id(1) = config_number_of_blocks + 1
+ block_start(1) = 0
+ block_count(1) = 0
+ else
+ allocate(sorted_local_cell_list(2, local_nvertices(dminfo % my_proc_id + 1)))
+ allocate(block_id(blocks_per_proc))
+ allocate(block_start(blocks_per_proc))
+ allocate(block_count(blocks_per_proc))
+
+ do i = 1, blocks_per_proc
+ block_start = 0
+ block_count = 0
+ end do
+
+ do i = 1,local_nvertices(dminfo % my_proc_id +1)
+ call mpas_get_local_block_id(dminfo, local_block_list(i), local_block_id)
+
+ block_id(local_block_id+1) = local_block_list(i)
+
+ sorted_local_cell_list(1, i) = local_block_list(i)
+ sorted_local_cell_list(2, i) = local_cell_list(i)
+
+ block_count(local_block_id+1) = block_count(local_block_id+1) + 1
+ end do
+
+ call mpas_quicksort(local_nvertices(dminfo % my_proc_id + 1), sorted_local_cell_list)
+
+ do i = 1, local_nvertices(dminfo % my_proc_id+1)
+ local_cell_list(i) = sorted_local_cell_list(2, i)
+ end do
+
+ do i = 2,blocks_per_proc
+ block_start(i) = block_start(i-1) + block_count(i-1)
+ end do
- do i = 1, local_nvertices(dminfo % my_proc_id+1)
- local_cell_list(i) = sorted_local_cell_list(2, i)
- end do
+ deallocate(sorted_local_cell_list)
+ deallocate(local_block_list)
+ deallocate(local_nvertices)
+ deallocate(global_start)
+ deallocate(global_cell_list)
+ deallocate(global_block_list)
+ end if
+ else
- do i = 2,blocks_per_proc
- block_start(i) = block_start(i-1) + block_count(i-1)
- end do
-
- !dwj 01/31/12 debugging multiple blocks
-! do i=1,local_nvertices(dminfo % my_proc_id +1)
-! call mpas_get_local_block_id(dminfo, sorted_local_cell_list(1, i), local_block_id)
-! write(*,*) sorted_local_cell_list(1, i), local_block_id, sorted_local_cell_list(2,i)
-! end do
-
- deallocate(sorted_local_cell_list)
- deallocate(local_block_list)
- deallocate(local_nvertices)
- deallocate(global_start)
- deallocate(global_cell_list)
- deallocate(global_block_list)
- else
- allocate(local_cell_list(partial_global_graph_info % nVerticesTotal))
- allocate(block_id(1))
- allocate(block_start(1))
- allocate(block_count(1))
- block_id(1) = 0
- block_start(1) = 0
- block_count(1) = size(local_cell_list)
- do i=1,size(local_cell_list)
- local_cell_list(i) = i
- end do
+ if (dminfo % my_proc_id == IO_NODE) then
+ allocate(local_cell_list(partial_global_graph_info % nVerticesTotal))
+ allocate(block_id(1))
+ allocate(block_start(1))
+ allocate(block_count(1))
+ block_id(1) = 0
+ block_start(1) = 0
+ block_count(1) = size(local_cell_list)
+ do i=1,size(local_cell_list)
+ local_cell_list(i) = i
+ end do
+ else
+ allocate(local_cell_list(1))
+ allocate(block_id(1))
+ allocate(block_start(1))
+ allocate(block_count(1))
+ local_cell_list(1) = 0
+ block_id(1) = config_number_of_blocks + 1
+ block_start(1) = 0
+ block_count(1) = 0
+ end if
end if
- !dwj 01/31/12 debugging multiple blocks
-! write(*,*) 'Blocks per proc = ', blocks_per_proc, 'total_blocks = ', total_blocks
-
-! do i=1,blocks_per_proc
-! write(*,*) block_id(i), block_start(i), block_count(i)
-! end do
-
-! call mpas_dmpar_abort(dminfo)
-
end subroutine mpas_block_decomp_cells_for_proc!}}}
subroutine mpas_block_decomp_partitioned_edge_list(nCells, cellIDList, maxCells, nEdges, cellsOnEdge, edgeIDList, ghostEdgeStart)!{{{
@@ -395,17 +414,37 @@
integer, intent(out) :: blocks_per_proc !< Output: Number of blocks proc_number computes on
integer :: blocks_per_proc_min, even_blocks, remaining_blocks
+ integer :: i, owning_proc, local_block_id
- blocks_per_proc_min = total_blocks / dminfo % nProcs
- remaining_blocks = total_blocks - (blocks_per_proc_min * dminfo % nProcs)
- even_blocks = total_blocks - remaining_blocks
+ if(.not. explicitDecomp) then
+ if(total_blocks > dminfo % nProcs) then
+ blocks_per_proc_min = total_blocks / dminfo % nProcs
+ remaining_blocks = total_blocks - (blocks_per_proc_min * dminfo % nProcs)
+ even_blocks = total_blocks - remaining_blocks
+
+ blocks_per_proc = blocks_per_proc_min
+
+ if(proc_number < remaining_blocks) then
+ blocks_per_proc = blocks_per_proc + 1
+ end if
+ else
+ if(dminfo % my_proc_id < total_blocks) then
+ blocks_per_proc = 1
+ else
+ blocks_per_proc = 0
+ end if
+ end if
+ else
+ blocks_per_proc = 0
+ do i = 1, total_blocks
+ call mpas_get_owning_proc(dminfo, i, owning_proc)
+ if(owning_proc == proc_number) then
+ call mpas_get_local_block_id(dminfo, i, local_block_id)
+ blocks_per_proc = max(blocks_per_proc, local_block_id)
+ end if
+ end do
+ end if
- blocks_per_proc = blocks_per_proc_min
-
- if(proc_number .le. remaining_blocks) then
- block_per_proc = blocks_per_proc + 1
- endif
-
end subroutine mpas_get_blocks_per_proc!}}}
subroutine mpas_get_local_block_id(dminfo, global_block_number, local_block_number)!{{{
@@ -416,14 +455,18 @@
integer :: blocks_per_proc_min, even_blocks, remaining_blocks
if(.not.explicitDecomp) then
- blocks_per_proc_min = total_blocks / dminfo % nProcs
- remaining_blocks = total_blocks - (blocks_per_proc_min * dminfo % nProcs)
- even_blocks = total_blocks - remaining_blocks
-
- if(global_block_number > even_blocks) then
- local_block_number = blocks_per_proc_min
+ if(total_blocks > dminfo % nProcs) then
+ blocks_per_proc_min = total_blocks / dminfo % nProcs
+ remaining_blocks = total_blocks - (blocks_per_proc_min * dminfo % nProcs)
+ even_blocks = total_blocks - remaining_blocks
+
+ if(global_block_number > even_blocks) then
+ local_block_number = blocks_per_proc_min
+ else
+ local_block_number = mod(global_block_number, blocks_per_proc_min)
+ end if
else
- local_block_number = mod(global_block_number, blocks_per_proc_min)
+ local_block_number = 0
end if
else
local_block_number = block_local_id_list(global_block_number+1)
@@ -438,14 +481,18 @@
integer :: blocks_per_proc_min, even_blocks, remaining_blocks
if(.not.explicitDecomp) then
- blocks_per_proc_min = total_blocks / dminfo % nProcs
- remaining_blocks = total_blocks - (blocks_per_proc_min * dminfo % nProcs)
- even_blocks = total_blocks - remaining_blocks
+ if(total_blocks >= dminfo % nProcs) then
+ blocks_per_proc_min = total_blocks / dminfo % nProcs
+ remaining_blocks = total_blocks - (blocks_per_proc_min * dminfo % nProcs)
+ even_blocks = total_blocks - remaining_blocks
- if(global_block_number > even_blocks) then
- owning_proc = global_block_number - even_blocks
+ if(global_block_number > even_blocks) then
+ owning_proc = global_block_number - even_blocks
+ else
+ owning_proc = global_block_number / blocks_per_proc_min
+ end if
else
- owning_proc = global_block_number / blocks_per_proc_min
+ owning_proc = global_block_number
end if
else
owning_proc = block_proc_list(global_block_number+1)
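! --------------------------------------------------------------------------------------
! Editorial sketch (not part of the commit): the round-robin distribution that the updated
! mpas_get_blocks_per_proc applies when no explicit decomposition is given.  With
! total_blocks > nProcs, every task gets total_blocks/nProcs blocks and the first
! remaining_blocks tasks get one extra; otherwise each task gets at most one block.
! total_blocks and nProcs below are hypothetical values chosen for illustration.
! --------------------------------------------------------------------------------------
program blocks_per_proc_sketch
   implicit none
   integer, parameter :: total_blocks = 10, nProcs = 4
   integer :: proc, blocks_per_proc_min, remaining_blocks, blocks_per_proc

   blocks_per_proc_min = total_blocks / nProcs                       ! 2
   remaining_blocks    = total_blocks - blocks_per_proc_min * nProcs ! 2

   do proc = 0, nProcs - 1
      if (total_blocks > nProcs) then
         blocks_per_proc = blocks_per_proc_min
         if (proc < remaining_blocks) blocks_per_proc = blocks_per_proc + 1
      else if (proc < total_blocks) then
         blocks_per_proc = 1     ! at most one block per task when blocks <= procs
      else
         blocks_per_proc = 0     ! this task owns no blocks
      end if
      write(*,'(a,i0,a,i0,a)') 'proc ', proc, ' computes on ', blocks_per_proc, ' block(s)'
   end do
end program blocks_per_proc_sketch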
Modified: branches/dcmip/src/framework/mpas_dmpar.F
===================================================================
--- branches/dcmip/src/framework/mpas_dmpar.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/framework/mpas_dmpar.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -3,6 +3,7 @@
use mpas_dmpar_types
use mpas_grid_types
use mpas_sort
+ use mpas_hash
#ifdef _MPI
include 'mpif.h'
@@ -18,7 +19,6 @@
integer, parameter :: IO_NODE = 0
integer, parameter :: BUFSIZE = 6000
-
interface mpas_dmpar_alltoall_field
module procedure mpas_dmpar_alltoall_field1d_integer
module procedure mpas_dmpar_alltoall_field2d_integer
@@ -50,12 +50,26 @@
private :: mpas_dmpar_exch_halo_field2d_real
private :: mpas_dmpar_exch_halo_field3d_real
+ interface mpas_dmpar_copy_field
+ module procedure mpas_dmpar_copy_field1d_integer
+ module procedure mpas_dmpar_copy_field2d_integer
+ module procedure mpas_dmpar_copy_field3d_integer
+ module procedure mpas_dmpar_copy_field1d_real
+ module procedure mpas_dmpar_copy_field2d_real
+ module procedure mpas_dmpar_copy_field3d_real
+ end interface
+ private :: mpas_dmpar_copy_field1d_integer
+ private :: mpas_dmpar_copy_field2d_integer
+ private :: mpas_dmpar_copy_field3d_integer
+ private :: mpas_dmpar_copy_field1d_real
+ private :: mpas_dmpar_copy_field2d_real
+ private :: mpas_dmpar_copy_field3d_real
+
contains
+ subroutine mpas_dmpar_init(dminfo, mpi_comm)!{{{
- subroutine mpas_dmpar_init(dminfo, mpi_comm)
-
implicit none
type (dm_info), intent(inout) :: dminfo
@@ -94,11 +108,10 @@
dminfo % using_external_comm = .false.
#endif
- end subroutine mpas_dmpar_init
+ end subroutine mpas_dmpar_init!}}}
+ subroutine mpas_dmpar_finalize(dminfo)!{{{
- subroutine mpas_dmpar_finalize(dminfo)
-
implicit none
type (dm_info), intent(inout) :: dminfo
@@ -111,11 +124,10 @@
end if
#endif
- end subroutine mpas_dmpar_finalize
+ end subroutine mpas_dmpar_finalize!}}}
+ subroutine mpas_dmpar_abort(dminfo)!{{{
- subroutine mpas_dmpar_abort(dminfo)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -128,11 +140,10 @@
stop
- end subroutine mpas_dmpar_abort
+ end subroutine mpas_dmpar_abort!}}}
+ subroutine mpas_dmpar_global_abort(mesg)!{{{
- subroutine mpas_dmpar_global_abort(mesg)
-
implicit none
character (len=*), intent(in) :: mesg
@@ -147,11 +158,10 @@
write(0,*) trim(mesg)
stop
- end subroutine mpas_dmpar_global_abort
+ end subroutine mpas_dmpar_global_abort!}}}
+ subroutine mpas_dmpar_bcast_int(dminfo, i)!{{{
- subroutine mpas_dmpar_bcast_int(dminfo, i)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -163,11 +173,10 @@
call MPI_Bcast(i, 1, MPI_INTEGERKIND, IO_NODE, dminfo % comm, mpi_ierr)
#endif
- end subroutine mpas_dmpar_bcast_int
+ end subroutine mpas_dmpar_bcast_int!}}}
+ subroutine mpas_dmpar_bcast_ints(dminfo, n, iarray)!{{{
- subroutine mpas_dmpar_bcast_ints(dminfo, n, iarray)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -180,11 +189,10 @@
call MPI_Bcast(iarray, n, MPI_INTEGERKIND, IO_NODE, dminfo % comm, mpi_ierr)
#endif
- end subroutine mpas_dmpar_bcast_ints
+ end subroutine mpas_dmpar_bcast_ints!}}}
+ subroutine mpas_dmpar_bcast_real(dminfo, r)!{{{
- subroutine mpas_dmpar_bcast_real(dminfo, r)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -196,11 +204,10 @@
call MPI_Bcast(r, 1, MPI_REALKIND, IO_NODE, dminfo % comm, mpi_ierr)
#endif
- end subroutine mpas_dmpar_bcast_real
+ end subroutine mpas_dmpar_bcast_real!}}}
+ subroutine mpas_dmpar_bcast_reals(dminfo, n, rarray)!{{{
- subroutine mpas_dmpar_bcast_reals(dminfo, n, rarray)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -213,11 +220,10 @@
call MPI_Bcast(rarray, n, MPI_REALKIND, IO_NODE, dminfo % comm, mpi_ierr)
#endif
- end subroutine mpas_dmpar_bcast_reals
+ end subroutine mpas_dmpar_bcast_reals!}}}
+ subroutine mpas_dmpar_bcast_logical(dminfo, l)!{{{
- subroutine mpas_dmpar_bcast_logical(dminfo, l)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -244,11 +250,10 @@
end if
#endif
- end subroutine mpas_dmpar_bcast_logical
+ end subroutine mpas_dmpar_bcast_logical!}}}
+ subroutine mpas_dmpar_bcast_char(dminfo, c)!{{{
- subroutine mpas_dmpar_bcast_char(dminfo, c)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -260,11 +265,10 @@
call MPI_Bcast(c, len(c), MPI_CHARACTER, IO_NODE, dminfo % comm, mpi_ierr)
#endif
- end subroutine mpas_dmpar_bcast_char
+ end subroutine mpas_dmpar_bcast_char!}}}
+ subroutine mpas_dmpar_sum_int(dminfo, i, isum)!{{{
- subroutine mpas_dmpar_sum_int(dminfo, i, isum)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -279,11 +283,10 @@
isum = i
#endif
- end subroutine mpas_dmpar_sum_int
+ end subroutine mpas_dmpar_sum_int!}}}
+ subroutine mpas_dmpar_sum_real(dminfo, r, rsum)!{{{
- subroutine mpas_dmpar_sum_real(dminfo, r, rsum)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -298,11 +301,10 @@
rsum = r
#endif
- end subroutine mpas_dmpar_sum_real
+ end subroutine mpas_dmpar_sum_real!}}}
+ subroutine mpas_dmpar_min_int(dminfo, i, imin)!{{{
- subroutine mpas_dmpar_min_int(dminfo, i, imin)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -317,11 +319,10 @@
imin = i
#endif
- end subroutine mpas_dmpar_min_int
+ end subroutine mpas_dmpar_min_int!}}}
+ subroutine mpas_dmpar_min_real(dminfo, r, rmin)!{{{
- subroutine mpas_dmpar_min_real(dminfo, r, rmin)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -336,11 +337,10 @@
rmin = r
#endif
- end subroutine mpas_dmpar_min_real
+ end subroutine mpas_dmpar_min_real!}}}
+ subroutine mpas_dmpar_max_int(dminfo, i, imax)!{{{
- subroutine mpas_dmpar_max_int(dminfo, i, imax)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -355,11 +355,10 @@
imax = i
#endif
- end subroutine mpas_dmpar_max_int
+ end subroutine mpas_dmpar_max_int!}}}
+ subroutine mpas_dmpar_max_real(dminfo, r, rmax)!{{{
- subroutine mpas_dmpar_max_real(dminfo, r, rmax)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -374,11 +373,10 @@
rmax = r
#endif
- end subroutine mpas_dmpar_max_real
+ end subroutine mpas_dmpar_max_real!}}}
+ subroutine mpas_dmpar_sum_int_array(dminfo, nElements, inArray, outArray)!{{{
- subroutine mpas_dmpar_sum_int_array(dminfo, nElements, inArray, outArray)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -394,10 +392,9 @@
outArray = inArray
#endif
- end subroutine mpas_dmpar_sum_int_array
+ end subroutine mpas_dmpar_sum_int_array!}}}
-
- subroutine mpas_dmpar_min_int_array(dminfo, nElements, inArray, outArray)
+ subroutine mpas_dmpar_min_int_array(dminfo, nElements, inArray, outArray)!{{{
implicit none
@@ -414,11 +411,10 @@
outArray = inArray
#endif
- end subroutine mpas_dmpar_min_int_array
+ end subroutine mpas_dmpar_min_int_array!}}}
+ subroutine mpas_dmpar_max_int_array(dminfo, nElements, inArray, outArray)!{{{
- subroutine mpas_dmpar_max_int_array(dminfo, nElements, inArray, outArray)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -434,11 +430,10 @@
outArray = inArray
#endif
- end subroutine mpas_dmpar_max_int_array
+ end subroutine mpas_dmpar_max_int_array!}}}
+ subroutine mpas_dmpar_sum_real_array(dminfo, nElements, inArray, outArray)!{{{
- subroutine mpas_dmpar_sum_real_array(dminfo, nElements, inArray, outArray)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -454,11 +449,10 @@
outArray = inArray
#endif
- end subroutine mpas_dmpar_sum_real_array
+ end subroutine mpas_dmpar_sum_real_array!}}}
+ subroutine mpas_dmpar_min_real_array(dminfo, nElements, inArray, outArray)!{{{
- subroutine mpas_dmpar_min_real_array(dminfo, nElements, inArray, outArray)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -474,11 +468,10 @@
outArray = inArray
#endif
- end subroutine mpas_dmpar_min_real_array
+ end subroutine mpas_dmpar_min_real_array!}}}
+ subroutine mpas_dmpar_max_real_array(dminfo, nElements, inArray, outArray)!{{{
- subroutine mpas_dmpar_max_real_array(dminfo, nElements, inArray, outArray)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -494,11 +487,10 @@
outArray = inArray
#endif
- end subroutine mpas_dmpar_max_real_array
+ end subroutine mpas_dmpar_max_real_array!}}}
+ subroutine mpas_dmpar_scatter_ints(dminfo, nprocs, noutlist, displs, counts, inlist, outlist)!{{{
- subroutine mpas_dmpar_scatter_ints(dminfo, nprocs, noutlist, displs, counts, inlist, outlist)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -513,10 +505,9 @@
call MPI_Scatterv(inlist, counts, displs, MPI_INTEGERKIND, outlist, noutlist, MPI_INTEGERKIND, IO_NODE, dminfo % comm, mpi_ierr)
#endif
- end subroutine mpas_dmpar_scatter_ints
+ end subroutine mpas_dmpar_scatter_ints!}}}
-
- subroutine mpas_dmpar_get_index_range(dminfo, &
+ subroutine mpas_dmpar_get_index_range(dminfo, &!{{{
global_start, global_end, &
local_start, local_end)
@@ -529,10 +520,9 @@
local_start = nint(real(dminfo % my_proc_id) * real(global_end - global_start + 1) / real(dminfo % nprocs)) + 1
local_end = nint(real(dminfo % my_proc_id + 1) * real(global_end - global_start + 1) / real(dminfo % nprocs))
- end subroutine mpas_dmpar_get_index_range
+ end subroutine mpas_dmpar_get_index_range!}}}
-
- subroutine mpas_dmpar_compute_index_range(dminfo, &
+ subroutine mpas_dmpar_compute_index_range(dminfo, &!{{{
local_start, local_end, &
global_start, global_end)
@@ -566,1630 +556,4078 @@
end if
- end subroutine mpas_dmpar_compute_index_range
+ end subroutine mpas_dmpar_compute_index_range!}}}
+ ! ----- NEW ROUTINES BELOW ----- !
- subroutine mpas_dmpar_get_owner_list(dminfo, &
- nOwnedList, nNeededList, &
- ownedList, neededList, &
- sendList, recvList, inOffset)
+subroutine mpas_dmpar_get_exch_list(haloLayer, ownedListField, neededListField, offsetListField, ownedLimitField)!{{{
implicit none
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: nOwnedList, nNeededList
- integer, dimension(nOwnedList), intent(in) :: ownedList
- integer, dimension(nNeededList), intent(in) :: neededList
- type (exchange_list), pointer :: sendList
- type (exchange_list), pointer :: recvList
- integer, optional :: inOffset
+ integer, intent(in) :: haloLayer
+ type (field1dInteger), pointer :: ownedListField, neededListField
+ type (field0dInteger), pointer, optional :: offsetListField
+ type (field0dInteger), pointer, optional :: ownedLimitField
- integer :: i, j, k, kk
+ type (dm_info), pointer :: dminfo
+
+ integer :: i, j, k, kk, iBlock
integer :: totalSize, nMesgRecv, nMesgSend, recvNeighbor, sendNeighbor, currentProc, offset
- integer :: numToSend, numToRecv
- integer, dimension(nOwnedList) :: recipientList
- integer, dimension(2,nOwnedList) :: ownedListSorted
+ integer :: totalSent, totalRecv
+ integer, allocatable, dimension(:) :: numToSend, numToRecv
+ integer, allocatable, dimension(:) :: ownedList, ownedListIndex, ownedBlock, neededList, neededListIndex, neededBlock
+ integer, allocatable, dimension(:) :: offsetList, ownedLimitList
+ integer, allocatable, dimension(:,:) :: ownedListSorted, ownedBlockSorted, recipientList
integer, allocatable, dimension(:) :: ownerListIn, ownerListOut
- type (exchange_list), pointer :: sendListPtr, recvListPtr
+ integer, allocatable, dimension(:) :: packingOrder
+ type (mpas_exchange_list), pointer :: exchListPtr, exchListPtr2
+ type (field1dInteger), pointer :: fieldCursor, fieldCursor2
+ type (field0dInteger), pointer :: offsetCursor, ownedLimitCursor
+ integer :: nOwnedBlocks, nNeededBlocks
+ integer :: nOwnedList, nNeededList
integer :: mpi_ierr, mpi_rreq, mpi_sreq
+ type (hashtable) :: neededHash
+ integer :: nUniqueNeededList
+ integer, dimension(:,:), pointer :: uniqueSortedNeededList
+
+ !
+ ! *** NB: This code assumes that block % blockID values are local block IDs and are in the range [1, numBlocks]
+ ! where numBlocks is the number of blocks owned by each task
+ !
+
+
+ ! For the ownedListField:
+ ! - ownedList contains a list of the global indices owned by all blocks
+ ! - ownedListIndex contains a list of the block-local indices of the global indices owned by all blocks
+ ! - ownedBlock contains the local block ID associated with each index
+ !
+ ! Example:
+ ! ownedList := ( 21 13 15 01 05 06 33 42 44 45 ) ! Global indices from all blocks on this task
+ ! ownedListIndex := ( 1 2 3 4 1 2 3 4 5 6 ) ! Local indices of global indices on each block
+ ! ownedBlock := ( 1 1 1 1 2 2 2 2 2 2 ) ! Local block ID associated with each global index
+ !
+
+ ! For the neededListField:
+ ! similar to the ownedListField...
+
+ dminfo => ownedListField % block % domain % dminfo
+
+ !
+ ! Determine total number of owned blocks on this task
+ !
+ nOwnedBlocks = 0
+ fieldCursor => ownedListField
+ do while (associated(fieldCursor))
+ nOwnedBlocks = nOwnedBlocks + 1
+ if(associated(fieldCursor % sendList % halos(haloLayer) % exchList)) then
+ call mpas_dmpar_destroy_exchange_list(fieldCursor % sendList % halos(haloLayer) % exchList)
+ end if
+
+ if(associated(fieldCursor % copyList % halos(haloLayer) % exchList)) then
+ call mpas_dmpar_destroy_exchange_list(fieldCursor % copyList % halos(haloLayer) % exchList)
+ end if
+ fieldCursor => fieldCursor % next
+ end do
+
+ !
+ ! Determine total number of needed indices on this task
+ !
+ nNeededList = 0
+ nNeededBlocks = 0
+ fieldCursor => neededListField
+ do while (associated(fieldCursor))
+ nNeededBlocks = nNeededBlocks + 1
+ nNeededList = nNeededList + fieldCursor % dimSizes(1)
+ if(associated(fieldCursor % recvList % halos(haloLayer) % exchList)) then
+ call mpas_dmpar_destroy_exchange_list(fieldCursor % recvList % halos(haloLayer) % exchList)
+ end if
+
+ fieldCursor => fieldCursor % next
+ end do
+
+ !
+ ! Determine unique list of needed elements.
+ !
+ nUniqueNeededList = 0
+ call mpas_hash_init(neededHash)
+ fieldCursor => neededListField
+ do while (associated(fieldCursor))
+ do i = 1, fieldCursor % dimSizes(1)
+ if(.not. mpas_hash_search(neededHash, fieldCursor % array(i))) then
+ nUniqueNeededList = nUniqueNeededList + 1
+ call mpas_hash_insert(neededHash, fieldCursor % array(i))
+ end if
+ end do
+ fieldCursor => fieldCursor % next
+ end do
+
+ kk = mpas_hash_size(neededHash)
+
+ nUniqueNeededList = mpas_hash_size(neededHash)
+ allocate(uniqueSortedNeededList(2,nUniqueNeededList))
+ allocate(packingOrder(nUniqueNeededList))
+ call mpas_hash_destroy(neededHash)
+ call mpas_hash_init(neededHash)
+
+ j = 0
+ fieldCursor => neededListField
+ do while (associated(fieldCursor))
+ do i = 1, fieldCursor % dimSizes(1)
+ if(.not. mpas_hash_search(neededHash, fieldCursor % array(i))) then
+ j = j +1
+ uniqueSortedNeededList(1, j) = fieldCursor % array(i)
+ uniqueSortedNeededList(2, j) = fieldCursor % block % localBlockID
+ call mpas_hash_insert(neededHash, fieldCursor % array(i))
+ end if
+ end do
+ fieldCursor => fieldCursor % next
+ end do
+
+ kk = mpas_hash_size(neededHash)
+
+ call mpas_hash_destroy(neededHash)
+ call mpas_quicksort(nUniqueNeededList, uniqueSortedNeededList)
+
+ !
+ ! Get list of index offsets for all blocks
+ !
+ allocate(offsetList(nNeededBlocks))
+ if (present(offsetListField)) then
+ offsetCursor => offsetListField
+ do while (associated(offsetCursor))
+ offsetList(offsetCursor % block % localBlockID+1) = offsetCursor % scalar
+ offsetCursor => offsetCursor % next
+ end do
+ else
+ offsetList(:) = 0
+ end if
+
+ !
+ ! Get list of bounds limit for owned elements
+ !
+ allocate(ownedLimitList(nOwnedBlocks))
+ if(present(ownedLimitField)) then
+ ownedLimitCursor => ownedLimitField
+ do while(associated(ownedLimitCursor))
+ ownedLimitList(ownedLimitCursor % block % localBlockID+1) = ownedLimitCursor % scalar
+ ownedLimitCursor => ownedLimitCursor % next
+ end do
+ else
+ fieldCursor => ownedListField
+ do while(associated(fieldCursor))
+ ownedLimitList(fieldCursor % block % localBlockID+1) = fieldCursor % dimSizes(1)
+ fieldCursor => fieldCursor % next
+ end do
+ end if
+
+ !
+ ! Determine total number of owned indices on this task, and
+ ! initialize output send and recv lists for ownedListField
+ !
+ nOwnedList = 0
+ fieldCursor => ownedListField
+ do while (associated(fieldCursor))
+ iBlock = fieldcursor % block % localBlockID + 1
+ nOwnedList = nOwnedList + ownedLimitList(iBlock)
+ fieldCursor => fieldCursor % next
+ end do
+
#ifdef _MPI
- allocate(sendList)
- allocate(recvList)
- nullify(sendList % next)
- nullify(recvList % next)
- sendListPtr => sendList
- recvListPtr => recvList
+ !
+ ! Gather list of all owned indices and their associated blocks on this task
+ !
+ allocate(ownedList(nOwnedList))
+ allocate(ownedBlock(nOwnedList))
+ ownedBlock = -1
+ ownedList = -1
+ fieldCursor => ownedListField
+ i = 1
+ do while (associated(fieldCursor))
+ iBlock = fieldCursor % block % localBlockID + 1
+ ownedList(i:i+ownedLimitList(iBlock)-1) = fieldCursor % array(1:ownedLimitList(iBlock))
+ ownedBlock(i:i+ownedLimitList(iBlock)-1) = fieldCursor % block % localBlockID
+ i = i + ownedLimitList(iBlock)
+ fieldCursor => fieldCursor % next
+ end do
- offset = 0
- if(present(inOffset)) then
- offset = inOffset
- end if
-
+ !
+ ! Gather list of all needed indices and their associated blocks on this task
+ !
+ allocate(neededList(nNeededList))
+ allocate(neededBlock(nNeededList))
+ fieldCursor => neededListField
+ i = 1
+ do while (associated(fieldCursor))
+ neededList(i:i+fieldCursor % dimSizes(1)-1) = fieldCursor % array(:)
+ neededBlock(i:i+fieldCursor % dimSizes(1)-1) = fieldCursor % block % localBlockID
+ i = i + fieldCursor % dimSizes(1)
+ fieldCursor => fieldCursor % next
+ end do
+
+ !
+ ! Obtain sorted list of global indices owned by this task and the associated local indices and block IDs
+ !
+ allocate(ownedListIndex(nOwnedList))
+ allocate(ownedListSorted(2,nOwnedList))
+ allocate(recipientList(2,nOwnedList))
+ j = 1
+ k = 1
do i=1,nOwnedList
- ownedListSorted(1,i) = ownedList(i)
- ownedListSorted(2,i) = i
+ ownedListSorted(1,i) = ownedList(i)
+ if (i > 1) then
+ if(ownedBlock(i) /= ownedBlock(i-1)) k = 1
+ end if
+ ownedListIndex(i) = k
+ ownedListSorted(2,i) = j
+ j = j + 1
+ k = k + 1
end do
- call quicksort(nOwnedList, ownedListSorted)
+ call mpas_quicksort(nOwnedList, ownedListSorted)
- call MPI_Allreduce(nNeededList, totalSize, 1, MPI_INTEGERKIND, MPI_MAX, dminfo % comm, mpi_ierr)
+ allocate(ownedBlockSorted(2,nOwnedList))
+ do i=1,nOwnedList
+ ownedBlockSorted(1,i) = ownedList(i)
+ ownedBlockSorted(2,i) = ownedBlock(i)
+ end do
+ call mpas_quicksort(nOwnedList, ownedBlockSorted)
+
+ allocate(neededListIndex(nNeededList))
+ j = 1
+ do i=1,nNeededList
+ if (i > 1) then
+ if(neededBlock(i) /= neededBlock(i-1)) j = 1
+ end if
+ neededListIndex(i) = j
+ j = j + 1
+ end do
+
+ !
+ ! Set totalSize to the maximum number of items in any task's needed list
+ !
+ call MPI_Allreduce(nUniqueNeededList, totalSize, 1, MPI_INTEGERKIND, MPI_MAX, dminfo % comm, mpi_ierr)
+
allocate(ownerListIn(totalSize))
allocate(ownerListOut(totalSize))
- nMesgRecv = nNeededList
- ownerListIn(1:nNeededList) = neededList(1:nNeededList)
+ nMesgSend = nUniqueNeededList
+ nMesgRecv = nUniqueNeededList
+ ownerListOut(1:nUniqueNeededList) = uniqueSortedNeededList(1,1:nUniqueNeededList)
recvNeighbor = mod(dminfo % my_proc_id + dminfo % nprocs - 1, dminfo % nprocs)
sendNeighbor = mod(dminfo % my_proc_id + 1, dminfo % nprocs)
- do i=1, dminfo % nprocs
+ allocate(numToSend(nOwnedBlocks))
+ allocate(numToRecv(nNeededBlocks))
- recipientList(:) = -1
- numToSend = 0
+ ! Initial send of data to neighbors.
+ if(dminfo % nProcs == 1) then
+ ownerListIn = ownerListOut
+ else
+ call MPI_Irecv(nMesgRecv, 1, MPI_INTEGERKIND, recvNeighbor, recvNeighbor, dminfo % comm, mpi_rreq, mpi_ierr)
+ call MPI_Isend(nMesgSend, 1, MPI_INTEGERKIND, sendNeighbor, dminfo % my_proc_id, dminfo % comm, mpi_sreq, mpi_ierr)
+ call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr)
+ call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr)
+ call MPI_Irecv(ownerListIn, nMesgRecv, MPI_INTEGERKIND, recvNeighbor, recvNeighbor, dminfo % comm, mpi_rreq, mpi_ierr)
+ call MPI_Isend(ownerListOut, nMesgSend, MPI_INTEGERKIND, sendNeighbor, dminfo % my_proc_id, dminfo % comm, mpi_sreq, mpi_ierr)
+ call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr)
+ call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr)
+ end if
- currentProc = mod(dminfo % my_proc_id + dminfo % nprocs - i + 1, dminfo % nprocs)
- do j=1,nMesgRecv
- if (ownerListIn(j) > 0) then
- k = mpas_binary_search(ownedListSorted, 2, 1, nOwnedList, ownerListIn(j))
- if (k <= nOwnedList) then
- ownerListOut(j) = -1 * dminfo % my_proc_id
- numToSend = numToSend + 1
- recipientList(ownedListSorted(2,k)) = numToSend
- else
- ownerListOut(j) = ownerListIn(j)
- end if
+ !
+ ! For each processor (not including ourselves), mark the indices that we will provide to
+ ! that processor in ownerListOut, and build a send list for that processor if we
+ ! need to send any indices
+ !
+ do i=2, dminfo % nprocs
+ recipientList = -1
+ numToSend(:) = 0
+ totalSent = 0
+
+ currentProc = mod(dminfo % my_proc_id + dminfo % nprocs - i + 1, dminfo % nprocs)
+ do j=1,nMesgRecv
+ if (ownerListIn(j) > 0) then
+ k = mpas_binary_search(ownedListSorted, 2, 1, nOwnedList, ownerListIn(j))
+ if (k <= nOwnedList) then
+ iBlock = ownedBlock(ownedListSorted(2,k)) + 1
+ numToSend(iBlock) = numToSend(iBlock) + 1
+ totalSent = totalSent + 1
+
+ ! recipientList(1,:) represents the index in the srcList to place this data
+ recipientList(1,ownedListSorted(2,k)) = numToSend(iBlock)
+ ! recipientList(2,:) represents the index in the buffer to place this data
+ recipientList(2,ownedListSorted(2,k)) = totalSent
+
+ ownerListOut(j) = -1 * dminfo % my_proc_id
else
- ownerListOut(j) = ownerListIn(j)
+ ownerListOut(j) = ownerListIn(j)
end if
- end do
+ else
+ ownerListOut(j) = ownerListIn(j)
+ end if
+ end do
- if (numToSend > 0) then
- allocate(sendListPtr % next)
- sendListPtr => sendListPtr % next
- sendListPtr % procID = currentProc
- sendListPtr % blockID = currentProc ! Currently, we have just one block per task, so blockID = procID
- sendListPtr % nlist = numToSend
- allocate(sendListPtr % list(numToSend))
- nullify(sendListPtr % next)
+ fieldCursor => ownedListField
+ do while (associated(fieldCursor))
+ iBlock = fieldCursor % block % localBlockID + 1
+
+ if (numToSend(iBlock) > 0) then
+ ! Find end of send list
+ if(.not.associated(fieldCursor % sendList % halos(haloLayer) % exchList)) then
+ allocate(fieldCursor % sendList % halos(haloLayer) % exchList)
+ exchListPtr => fieldCursor % sendList % halos(haloLayer) % exchList
+ nullify(exchListPtr % next)
+ else
+ exchListPtr => fieldCursor % sendList % halos(haloLayer) % exchList
+ exchListPtr2 => fieldCursor % sendList % halos(haloLayer) % exchList % next
+ do while(associated(exchListPtr2))
+ exchListPtr => exchListPtr % next
+ exchListPtr2 => exchListPtr % next
+ end do
+
+ allocate(exchListPtr % next)
+ exchListPtr => exchListPtr % next
+ nullify(exchListPtr % next)
+ end if
+
+ exchListPtr % endPointID = currentProc
+ exchListPtr % nlist = numToSend(iBlock)
+ allocate(exchListPtr % srcList(numToSend(iBlock)))
+ allocate(exchListPtr % destList(numToSend(iBlock)))
+ exchListPtr % srcList = -1
+ exchListPtr % destList = -1
+
kk = 1
do j=1,nOwnedList
- if (recipientList(j) /= -1) then
- sendListPtr % list(recipientList(j)) = j
+ if (recipientList(1,j) /= -1) then
+ if(ownedBlock(j) == fieldCursor % block % localBlockID) then
+ exchListPtr % srcList(recipientList(1,j)) = ownedListIndex(j)
+ exchListPtr % destList(recipientList(1,j)) = recipientList(2,j)
kk = kk + 1
- end if
+ end if
+ end if
end do
- end if
+ end if
- nMesgSend = nMesgRecv
- call MPI_Irecv(nMesgRecv, 1, MPI_INTEGERKIND, recvNeighbor, i, dminfo % comm, mpi_rreq, mpi_ierr)
- call MPI_Isend(nMesgSend, 1, MPI_INTEGERKIND, sendNeighbor, i, dminfo % comm, mpi_sreq, mpi_ierr)
- call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr)
- call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr)
- call MPI_Irecv(ownerListIn, nMesgRecv, MPI_INTEGERKIND, recvNeighbor, i, dminfo % comm, mpi_rreq, mpi_ierr)
- call MPI_Isend(ownerListOut, nMesgSend, MPI_INTEGERKIND, sendNeighbor, i, dminfo % comm, mpi_sreq, mpi_ierr)
- call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr)
- call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr)
+ fieldCursor => fieldCursor % next
+ end do
+
+ nMesgSend = nMesgRecv
+ call MPI_Irecv(nMesgRecv, 1, MPI_INTEGERKIND, recvNeighbor, recvNeighbor, dminfo % comm, mpi_rreq, mpi_ierr)
+ call MPI_Isend(nMesgSend, 1, MPI_INTEGERKIND, sendNeighbor, dminfo % my_proc_id, dminfo % comm, mpi_sreq, mpi_ierr)
+ call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr)
+ call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr)
+ call MPI_Irecv(ownerListIn, nMesgRecv, MPI_INTEGERKIND, recvNeighbor, recvNeighbor, dminfo % comm, mpi_rreq, mpi_ierr)
+ call MPI_Isend(ownerListOut, nMesgSend, MPI_INTEGERKIND, sendNeighbor, dminfo % my_proc_id, dminfo % comm, mpi_sreq, mpi_ierr)
+ call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr)
+ call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr)
end do
+ !
+ ! With our needed list returned to us, build receive lists based on which indices were
+ ! marked by other tasks
+ !
do i=0, dminfo % nprocs - 1
+ if(i == dminfo % my_proc_id) cycle
- numToRecv = 0
- do j=1,nNeededList
- if (ownerListIn(j) == -i) numToRecv = numToRecv + 1
- end do
- if (numToRecv > 0) then
- allocate(recvListPtr % next)
- recvListPtr => recvListPtr % next
- recvListPtr % procID = i
- recvListPtr % blockID = i ! Currently, we have just one block per task, so blockID = procID
- recvListPtr % nlist = numToRecv
- allocate(recvListPtr % list(numToRecv))
- nullify(recvListPtr % next)
- kk = 1
- do j=1,nNeededList
- if (ownerListIn(j) == -i) then
- recvListPtr % list(kk) = j + offset
+ numToRecv(:) = 0
+ packingOrder = 0
+
+ k = 0
+ do j=1,nUniqueNeededList
+ if (ownerListIn(j) == -i) then
+ k = k + 1
+ packingOrder(j) = k
+ end if
+ end do
+
+ fieldCursor => neededListField
+ do while (associated(fieldCursor))
+ do j = 1, fieldCursor % dimSizes(1)
+ k = mpas_binary_search(uniqueSortedNeededList, 2, 1, nUniqueNeededList, fieldCursor % array(j))
+ if(k <= nUniqueNeededList) then
+ if(ownerListIn(k) == -i) then
+ iBlock = fieldCursor % block % localBlockID + 1
+ numToRecv(iBlock) = numToRecv(iBlock) + 1
+ end if
+ end if
+ end do
+ fieldCursor => fieldCursor % next
+ end do
+
+ fieldCursor => neededListField
+ do while (associated(fieldCursor))
+ iBlock = fieldCursor % block % localBlockID + 1
+
+ if (numToRecv(iBlock) > 0) then
+ if(.not.associated(fieldCursor % recvList % halos(haloLayer) % exchList)) then
+ allocate(fieldCursor % recvList % halos(haloLayer) % exchList)
+ exchListPtr => fieldCursor % recvList % halos(haloLayer) % exchList
+ nullify(exchListPtr % next)
+ else
+ ! Find end of recv list
+ exchListPtr => fieldCursor % recvList % halos(haloLayer) % exchList
+ exchListPtr2 => fieldCursor % recvList % halos(haloLayer) % exchList % next
+ do while(associated(exchListPtr2))
+ exchListPtr => exchListPtr % next
+ exchListPtr2 => exchListPtr % next
+ end do
+
+ allocate(exchListPtr % next)
+ exchListPtr => exchListPtr % next
+ nullify(exchListPtr % next)
+ end if
+
+ exchListPtr % endPointID = i
+ exchListPtr % nlist = numToRecv(iBlock)
+ allocate(exchListPtr % srcList(exchListPtr % nList))
+ allocate(exchListPtr % destList(exchListPtr % nList))
+ exchListPtr % srcList = -1
+ exchListPtr % destList = -1
+
+ kk = 0
+ do j=1,fieldCursor % dimSizes(1)
+ k = mpas_binary_search(uniqueSortedNeededList, 2, 1, nUniqueNeededList, fieldCursor % array(j))
+ if(k <= nUniqueNeededList) then
+ if (ownerListIn(k) == -i) then
kk = kk + 1
- end if
+ exchListPtr % srcList(kk) = packingOrder(k)
+ exchListPtr % destList(kk) = j + offsetList(iBlock)
+ end if
+ end if
end do
- end if
+ end if
+ fieldCursor => fieldCursor % next
+ end do
end do
+ !
+ ! Free up memory
+ !
+ deallocate(numToSend)
+ deallocate(numToRecv)
+ deallocate(ownedList)
+ deallocate(ownedListIndex)
+ deallocate(ownedBlock)
+ deallocate(neededList)
+ deallocate(neededListIndex)
+ deallocate(neededBlock)
+ deallocate(ownedListSorted)
+ deallocate(ownedBlockSorted)
+ deallocate(recipientList)
deallocate(ownerListIn)
deallocate(ownerListOut)
+ deallocate(uniqueSortedNeededList)
+ deallocate(packingOrder)
+#endif
- sendListPtr => sendList
- sendList => sendList % next
- deallocate(sendListPtr)
+ ! Build Copy Lists
+ allocate(numToSend(1))
+ fieldCursor => ownedListField
+ do while (associated(fieldCursor))
+ iBlock = fieldCursor % block % localBlockID + 1
+ nOwnedList = ownedLimitList(iBlock)
+ allocate(ownedListSorted(2, nOwnedList))
+ allocate(recipientList(2, nOwnedList))
- recvListPtr => recvList
- recvList => recvList % next
- deallocate(recvListPtr)
+ do i = 1, nOwnedList
+ ownedListSorted(1, i) = fieldCursor % array(i)
+ ownedListSorted(2, i) = i
+ end do
-#else
- allocate(recvList)
- recvList % procID = dminfo % my_proc_id
- recvList % blockID = dminfo % my_proc_id ! Currently, we have just one block per task, so blockID = procID
- recvList % nlist = nNeededList
- allocate(recvList % list(nNeededList))
- nullify(recvList % next)
- do j=1,nNeededList
- recvList % list(j) = j
- end do
+ call mpas_quicksort(nOwnedList, ownedListSorted)
- allocate(sendList)
- sendList % procID = dminfo % my_proc_id
- sendList % blockID = dminfo % my_proc_id ! Currently, we have just one block per task, so blockID = procID
- sendList % nlist = nOwnedList
- allocate(sendList % list(nOwnedList))
- nullify(sendList % next)
- do j=1,nOwnedList
- sendList % list(j) = j
+ fieldCursor2 => neededListField
+ do while(associated(fieldCursor2))
+ if(associated(fieldCursor, fieldCursor2)) then
+ fieldCursor2 => fieldCursor2 % next
+ cycle
+ end if
+
+ numToSend = 0
+ recipientList = -1
+
+ do i = 1, fieldCursor2 % dimSizes(1)
+ k = mpas_binary_search(ownedListSorted, 2, 1, nOwnedList, fieldCursor2 % array(i))
+ if (k <= nOwnedList) then
+ numToSend(1) = numToSend(1) + 1
+ ! recipientList(1,:) represents the needed block id
+ recipientList(1,ownedListSorted(2,k)) = fieldCursor2 % block % localBlockID
+ ! recipientList(2,:) represents the index in the buffer to place this data
+ recipientList(2,ownedListSorted(2,k)) = i
+ end if
+ end do
+
+ if(numToSend(1) > 0) then
+ if(.not.associated(fieldCursor % copyList % halos(haloLayer) % exchList)) then
+ allocate(fieldCursor % copyList % halos(haloLayer) % exchList)
+ exchListPtr => fieldCursor % copyList % halos(haloLayer) % exchList
+ nullify(exchListPtr % next)
+ else
+ ! Find end of copy list
+ exchListPtr => fieldCursor % copyList % halos(haloLayer) % exchList
+ exchListPtr2 => fieldCursor % copyList % halos(haloLayer) % exchList % next
+ do while(associated(exchListPtr2))
+ exchListPtr => exchListPtr % next
+ exchListPtr2 => exchListPtr % next
+ end do
+
+ allocate(exchListPtr % next)
+ exchListPtr => exchListPtr % next
+ nullify(exchListPtr % next)
+ end if
+
+ exchListPtr % endPointID = fieldCursor2 % block % localBlockID
+ exchListPtr % nlist = numToSend(1)
+ allocate(exchListPtr % srcList(numToSend(1)))
+ allocate(exchListPtr % destList(numToSend(1)))
+ exchListPtr % srcList = -1
+ exchListPtr % destList = -1
+
+ kk = 1
+ do j=1,nOwnedList
+ if(recipientList(1,j) == fieldCursor2 % block % localBlockID) then
+ exchListPtr % srcList(kk) = j
+ exchListPtr % destList(kk) = recipientList(2,j) + offSetList(fieldCursor2 % block % localBlockID+1)
+ kk = kk + 1
+ end if
+ end do
+ end if
+ fieldCursor2 => fieldCursor2 % next
+ end do
+
+ deallocate(recipientList)
+ deallocate(ownedListSorted)
+ fieldCursor => fieldCursor % next
end do
-#endif
+ deallocate(numToSend)
+ deallocate(offSetList)
- end subroutine mpas_dmpar_get_owner_list
+ end subroutine mpas_dmpar_get_exch_list!}}}
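! --------------------------------------------------------------------------------------
! Editorial sketch (not part of the commit): the ring schedule used by
! mpas_dmpar_get_exch_list to circulate each task's needed-index list.  Each task sends to
! my_proc_id+1 and receives from my_proc_id-1 (mod nprocs); before loop iteration i it holds
! the list that originated on proc mod(my_proc_id + nprocs - i + 1, nprocs), which is the
! currentProc computed in the routine.  nprocs below is a hypothetical value.
! --------------------------------------------------------------------------------------
program ring_schedule_sketch
   implicit none
   integer, parameter :: nprocs = 4
   integer :: my_proc_id, sendNeighbor, recvNeighbor, i, currentProc

   do my_proc_id = 0, nprocs - 1
      recvNeighbor = mod(my_proc_id + nprocs - 1, nprocs)
      sendNeighbor = mod(my_proc_id + 1, nprocs)
      write(*,'(a,i0,a,i0,a,i0)') 'proc ', my_proc_id, ' sends to ', sendNeighbor, &
                                  ' and receives from ', recvNeighbor
      do i = 2, nprocs
         currentProc = mod(my_proc_id + nprocs - i + 1, nprocs)   ! owner of the list in hand
         write(*,'(a,i0,a,i0)') '   iteration ', i, ': examining needed list from proc ', currentProc
      end do
   end do
end program ring_schedule_sketch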
- subroutine mpas_dmpar_alltoall_field1d_integer(dminfo, arrayIn, arrayOut, nOwnedList, nNeededList, sendList, recvList)
+ subroutine mpas_dmpar_alltoall_field1d_integer(fieldIn, fieldout, haloLayersIn)!{{{
- implicit none
+ implicit none
- type (dm_info), intent(in) :: dminfo
- integer, dimension(*), intent(in) :: arrayIn
- integer, dimension(*), intent(inout) :: arrayOut
- integer, intent(in) :: nOwnedList, nNeededList
- type (exchange_list), pointer :: sendList, recvList
+ type (field1dInteger), pointer :: fieldIn
+ type (field1dInteger), pointer :: fieldOut
+ integer, dimension(:), pointer, optional :: haloLayersIn
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer :: i
+ type (field1dInteger), pointer :: fieldInPtr, fieldOutPtr
+ type (mpas_exchange_list), pointer :: exchListPtr, exchListPtr2
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
+ type (dm_info), pointer :: dminfo
+ logical :: comm_list_found
+
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: nAdded, bufferOffset
+ integer :: mpi_ierr
+ integer :: iHalo, iBuffer, i
+ integer :: nHaloLayers
+ integer, dimension(:), pointer :: haloLayers
+
+ dminfo => fieldIn % block % domain % dminfo
+
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
+ else
+ nHaloLayers = size(fieldIn % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
+ end if
+
#ifdef _MPI
+ nullify(sendList)
+ nullify(recvList)
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID == dminfo % my_proc_id) exit
- sendListPtr => sendListPtr % next
- end do
+ ! Setup receive lists.
+ do iHalo = 1, nHaloLayers
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(recvList)) then
+ allocate(recvList)
+ nullify(recvList % next)
+ commListPtr => recvList
+ else
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID == dminfo % my_proc_id) exit
- recvListPtr => recvListPtr % next
- end do
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = 0
+ end if
- if (associated(recvListPtr) .and. associated(sendListPtr)) then
- do i=1,recvListPtr % nlist
- arrayOut(recvListPtr % list(i)) = arrayIn(sendListPtr % list(i))
+ exchListPtr => exchListPtr % next
end do
- end if
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- allocate(recvListPtr % ibuffer(recvListPtr % nlist))
- call MPI_Irecv(recvListPtr % ibuffer, recvListPtr % nlist, MPI_INTEGERKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
- end do
+ ! Determine size of receive list buffers.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = nAdded
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- allocate(sendListPtr % ibuffer(sendListPtr % nlist))
- call mpas_pack_send_buf1d_integer(nOwnedList, arrayIn, sendListPtr, 1, sendListPtr % nlist, &
- sendListPtr % ibuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % ibuffer, sendListPtr % nlist, MPI_INTEGERKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
- end do
+ commListPtr => commListPtr % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- call mpas_unpack_recv_buf1d_integer(nNeededList, arrayOut, recvListPtr, 1, recvListPtr % nlist, &
- recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % ibuffer)
- end if
- recvListPtr => recvListPtr % next
- end do
+ ! Allocate buffers for receives, and initiate MPI_Irecv calls.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % ibuffer(commListPtr % nList))
+ nullify(commListPtr % rbuffer)
+ commListPtr % ibuffer = 0
+ call MPI_Irecv(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % ibuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
+ ! Setup send lists, and determine the size of their buffers.
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(sendList)) then
+ allocate(sendList)
+ nullify(sendList % next)
+ commListPtr => sendList
+ else
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
-#else
- if (nOwnedList /= nNeededList) then
- write(0,*) 'Error in dmpar_alltoall_field: For non-dmpar, ', &
- 'arrayIn and arrayOut dims must match.'
- call mpas_dmpar_abort(dminfo)
- else
- arrayOut(1:nNeededList) = arrayIn(1:nOwnedList)
- end if
+ ! Allocate send buffers, copy data into them, and initiate MPI_Isend calls.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % ibuffer(commListPtr % nList))
+ nullify(commListPtr % rbuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ iBuffer = exchListPtr % destList(i) + bufferOffset
+ commListPtr % ibuffer(iBuffer) = fieldInPtr % array(exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+
+ call MPI_Isend(commListPtr % ibuffer, commListPtr % nlist, MPI_INTEGERKIND, &
+ commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
+
+ commListPtr => commListPtr % next
+ end do
+
+#endif
+
+ ! Handle local copies. Without MPI, these are the only exchanges performed.
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldOutPtr % array(exchListPtr % destList(i)) = fieldInPtr % array(exchListPtr % srcList(i))
+ end do
+ end if
+ fieldOutPtr => fieldOutPtr % next
+ end do
+
+ exchListPtr => exchListPtr % next
+ end do
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
+
+#ifdef _MPI
+ ! Wait for MPI_Irecv's to finish, and unpack data.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ iBuffer = exchListPtr % srcList(i) + bufferOffset
+ fieldOutPtr % array(exchListPtr % destList(i)) = commListPtr % ibuffer(iBuffer)
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! Wait for MPI_Isend's to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
+
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
#endif
- end subroutine mpas_dmpar_alltoall_field1d_integer
+ deallocate(haloLayers)
+ end subroutine mpas_dmpar_alltoall_field1d_integer!}}}
- subroutine mpas_dmpar_alltoall_field2d_integer(dminfo, arrayIn, arrayOut, dim1, nOwnedList, nNeededList, sendList, recvList)
+ subroutine mpas_dmpar_alltoall_field2d_integer(fieldIn, fieldout, haloLayersIn)!{{{
- implicit none
+ implicit none
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1, nOwnedList, nNeededList
- integer, dimension(dim1,*), intent(in) :: arrayIn
- integer, dimension(dim1,*), intent(inout) :: arrayOut
- type (exchange_list), pointer :: sendList, recvList
+ type (field2dInteger), pointer :: fieldIn
+ type (field2dInteger), pointer :: fieldOut
+ integer, dimension(:), pointer, optional :: haloLayersIn
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer :: i, d2
+ type (field2dInteger), pointer :: fieldInPtr, fieldOutPtr
+ type (mpas_exchange_list), pointer :: exchListPtr, exchListPtr2
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
+ type (dm_info), pointer :: dminfo
+ logical :: comm_list_found
+
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: nAdded, bufferOffset
+ integer :: mpi_ierr
+ integer :: iHalo, iBuffer, i, j
+ integer :: nHaloLayers
+ integer, dimension(:), pointer :: haloLayers
+
+ dminfo => fieldIn % block % domain % dminfo
+
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
+ else
+ nHaloLayers = size(fieldIn % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
+ end if
+
#ifdef _MPI
+ nullify(sendList)
+ nullify(recvList)
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID == dminfo % my_proc_id) exit
- sendListPtr => sendListPtr % next
- end do
+ ! Setup receive lists
+ do iHalo = 1, nHaloLayers
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(recvList)) then
+ allocate(recvList)
+ nullify(recvList % next)
+ commListPtr => recvList
+ else
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID == dminfo % my_proc_id) exit
- recvListPtr => recvListPtr % next
- end do
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+
+ commListPtr % procID = exchListPtr % endPointID
+ end if
- if (associated(recvListPtr) .and. associated(sendListPtr)) then
- do i=1,recvListPtr % nlist
- arrayOut(:,recvListPtr % list(i)) = arrayIn(:,sendListPtr % list(i))
+ exchListPtr => exchListPtr % next
end do
- end if
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * recvListPtr % nlist
- allocate(recvListPtr % ibuffer(d2))
- call MPI_Irecv(recvListPtr % ibuffer, d2, MPI_INTEGERKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
- end do
+ ! Determine size of receive list buffers.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = bufferOffset
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * sendListPtr % nlist
- allocate(sendListPtr % ibuffer(d2))
- call mpas_pack_send_buf2d_integer(1, dim1, nOwnedList, arrayIn, sendListPtr, 1, d2, &
- sendListPtr % ibuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % ibuffer, d2, MPI_INTEGERKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
- end do
+ commListPtr => commListPtr % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d2 = dim1 * recvListPtr % nlist
- call mpas_unpack_recv_buf2d_integer(1, dim1, nNeededList, arrayOut, recvListPtr, 1, d2, &
- recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % ibuffer)
- end if
- recvListPtr => recvListPtr % next
- end do
+ ! Allocate buffers for receives, and initiate mpi_irecv calls.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % ibuffer(commListPtr % nList))
+ nullify(commListPtr % rbuffer)
+ call MPI_Irecv(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % ibuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
+ ! Setup send lists, and determine the size of their buffers.
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldInPtr % dimSizes(1)
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(sendList)) then
+ allocate(sendList)
+ nullify(sendList % next)
+ commListPtr => sendList
+ else
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldInPtr % dimSizes(1)
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
-#else
- if (nOwnedList /= nNeededList) then
- write(0,*) 'Error in dmpar_alltoall_field: For non-dmpar, ', &
- 'arrayIn and arrayOut dims must match.'
- call mpas_dmpar_abort(dminfo)
- else
- arrayOut(:,1:nNeededList) = arrayIn(:,1:nOwnedList)
- end if
+ ! Allocate sendLists, copy data into buffer, and initiate mpi_isends
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % ibuffer(commListPtr % nList))
+ nullify(commListPtr % rbuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldInPtr % dimSizes(1)
+ iBuffer = (exchListPtr % destList(i)-1) * fieldInPtr % dimSizes(1) + j + bufferOffset
+ commListPtr % ibuffer(iBuffer) = fieldInPtr % array(j, exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
+ end do
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+
+ call MPI_Isend(commListPtr % ibuffer, commListPtr % nlist, MPI_INTEGERKIND, &
+ commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
+
+ commListPtr => commListPtr % next
+ end do
+#endif
+
+ ! Handle Local Copies. Only local copies if no MPI
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldOutPtr % array(:, exchListPtr % destList(i)) = fieldInPtr % array(:, exchListPtr % srcList(i))
+ end do
+ end if
+ fieldOutPtr => fieldOutPtr % next
+ end do
+
+ exchListPtr => exchListPtr % next
+ end do
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
+
+#ifdef _MPI
+ ! Wait for MPI_Irecv's to finish, and unpack data.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldOutPtr % dimSizes(1)
+ iBuffer = (exchListPtr % srcList(i)-1) * fieldOutPtr % dimSizes(1) + j + bufferOffset
+ fieldOutPtr % array(j, exchListPtr % destList(i)) = commListPtr % ibuffer(iBuffer)
+ end do
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! Wait for MPI_Isend's to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
+
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
#endif
- end subroutine mpas_dmpar_alltoall_field2d_integer
+ deallocate(haloLayers)
+ end subroutine mpas_dmpar_alltoall_field2d_integer!}}}
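!
! Index arithmetic sketch for the 2-d pack/unpack above (a worked example
! with assumed sizes, not values taken from this commit): with
! dimSizes(1) = 3 vertical levels, bufferOffset = 0, and an exchange-list
! entry whose destList(i) = 2, the send buffer positions are
!
!   iBuffer = (2-1)*3 + j + 0  =  4, 5, 6    for j = 1, 2, 3
!
! so each list entry occupies a contiguous run of dimSizes(1) values, and
! bufferOffset shifts successive halo layers past one another in the buffer.
!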
- subroutine mpas_dmpar_alltoall_field1d_real(dminfo, arrayIn, arrayOut, nOwnedList, nNeededList, sendList, recvList)
+ subroutine mpas_dmpar_alltoall_field3d_integer(fieldIn, fieldout, haloLayersIn)!{{{
- implicit none
+ implicit none
- type (dm_info), intent(in) :: dminfo
- real (kind=RKIND), dimension(*), intent(in) :: arrayIn
- real (kind=RKIND), dimension(*), intent(inout) :: arrayOut
- integer, intent(in) :: nOwnedList, nNeededList
- type (exchange_list), pointer :: sendList, recvList
+ type (field3dInteger), pointer :: fieldIn
+ type (field3dInteger), pointer :: fieldOut
+ integer, dimension(:), pointer, optional :: haloLayersIn
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer :: i
+ type (field3dInteger), pointer :: fieldInPtr, fieldOutPtr
+ type (mpas_exchange_list), pointer :: exchListPtr, exchListPtr2
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
+ type (dm_info), pointer :: dminfo
+ logical :: comm_list_found
+
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: nAdded, bufferOffset
+ integer :: mpi_ierr
+ integer :: iHalo, iBuffer, i, j, k
+ integer :: nHaloLayers
+ integer, dimension(:), pointer :: haloLayers
+
+ dminfo => fieldIn % block % domain % dminfo
+
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
+ else
+ nHaloLayers = size(fieldIn % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
+ end if
+
#ifdef _MPI
+ nullify(sendList)
+ nullify(recvList)
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID == dminfo % my_proc_id) exit
- sendListPtr => sendListPtr % next
- end do
+ ! Setup receive lists.
+ do iHalo = 1, nHaloLayers
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2)
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(recvList)) then
+ allocate(recvList)
+ nullify(recvList % next)
+ commListPtr => recvList
+ else
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID == dminfo % my_proc_id) exit
- recvListPtr => recvListPtr % next
- end do
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2)
+ end if
- if (associated(recvListPtr) .and. associated(sendListPtr)) then
- do i=1,recvListPtr % nlist
- arrayOut(recvListPtr % list(i)) = arrayIn(sendListPtr % list(i))
+ exchListPtr => exchListPtr % next
end do
- end if
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- allocate(recvListPtr % rbuffer(recvListPtr % nlist))
- call MPI_Irecv(recvListPtr % rbuffer, recvListPtr % nlist, MPI_REALKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
- end do
+ ! Determine size of receive list buffers
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = bufferOffset
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- allocate(sendListPtr % rbuffer(sendListPtr % nlist))
- call mpas_pack_send_buf1d_real(nOwnedList, arrayIn, sendListPtr, 1, sendListPtr % nlist, &
- sendListPtr % rbuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % rbuffer, sendListPtr % nlist, MPI_REALKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
- end do
+ commListPtr => commListPtr % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- call mpas_unpack_recv_buf1d_real(nNeededList, arrayOut, recvListPtr, 1, recvListPtr % nlist, &
- recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % rbuffer)
- end if
- recvListPtr => recvListPtr % next
- end do
+ ! Allocate buffers for receives, and initiate mpi_irecv calls.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % ibuffer(commListPtr % nList))
+ nullify(commListPtr % rbuffer)
+ call MPI_Irecv(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % rbuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
+ ! Setup send lists, and determine the size of their buffers.
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2)
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(sendList)) then
+ allocate(sendList)
+ nullify(sendList % next)
+ commListPtr => sendList
+ else
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2)
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
-#else
- if (nOwnedList /= nNeededList) then
- write(0,*) 'Error in dmpar_alltoall_field: For non-dmpar, ', &
- 'arrayIn and arrayOut dims must match.'
- call mpas_dmpar_abort(dminfo)
- else
- arrayOut(1:nNeededList) = arrayIn(1:nOwnedList)
- end if
+ ! Allocate sendLists, copy data into buffer, and initiate mpi_isends
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % ibuffer(commListPtr % nList))
+ nullify(commListPtr % rbuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldInPtr % dimSizes(2)
+ do k = 1, fieldInPtr % dimSizes(1)
+ iBuffer = (exchListPtr % destList(i)-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) + (j-1) * fieldInPtr % dimSizes(1) + k + bufferOffset
+ commListPtr % ibuffer(iBuffer) = fieldInPtr % array(k, j, exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
+ end do
+ end do
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+
+ call MPI_Isend(commListPtr % ibuffer, commListPtr % nlist, MPI_INTEGERKIND, &
+ commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
+
+ commListPtr => commListPtr % next
+ end do
+
+#endif
+
+ ! Handle Local Copies. Only local copies if no MPI
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldOutPtr % array(:, :, exchListPtr % destList(i)) = fieldInPtr % array(:, :, exchListPtr % srcList(i))
+ end do
+ end if
+ fieldOutPtr => fieldOutPtr % next
+ end do
+
+ exchListPtr => exchListPtr % next
+ end do
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
+
+#ifdef _MPI
+ ! Wait for MPI_Irecv's to finish, and unpack data.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldOutPtr % dimSizes(2)
+ do k = 1, fieldOutPtr % dimSizes(1)
+ iBuffer = (exchListPtr % srcList(i)-1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(1) + (j-1) * fieldOutPtr % dimSizes(1) + k + bufferOffset
+ fieldOutPtr % array(k, j, exchListPtr % destList(i)) = commListPtr % ibuffer(iBuffer)
+ end do
+ end do
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! Wait for MPI_Isend's to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
+
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
#endif
- end subroutine mpas_dmpar_alltoall_field1d_real
+ deallocate(haloLayers)
+ end subroutine mpas_dmpar_alltoall_field3d_integer!}}}
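!
! Packing-order note for the 3-d variant above: the innermost loop covers
! dimSizes(1), the middle loop dimSizes(2), and the outer loop the
! exchange-list entries, so entry i contributes dimSizes(1)*dimSizes(2)
! contiguous values starting at (destList(i)-1)*dimSizes(1)*dimSizes(2)+1
! within its halo layer's slice of the buffer; the unpack loops mirror this
! using srcList on the receiving side.
!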
- subroutine mpas_dmpar_alltoall_field2d_real(dminfo, arrayIn, arrayOut, dim1, nOwnedList, nNeededList, sendList, recvList)
+ subroutine mpas_dmpar_alltoall_field1d_real(fieldIn, fieldout, haloLayersIn)!{{{
- implicit none
+ implicit none
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1, nOwnedList, nNeededList
- real (kind=RKIND), dimension(dim1,*), intent(in) :: arrayIn
- real (kind=RKIND), dimension(dim1,*), intent(inout) :: arrayOut
- type (exchange_list), pointer :: sendList, recvList
+ type (field1dReal), pointer :: fieldIn
+ type (field1dReal), pointer :: fieldOut
+ integer, dimension(:), pointer, optional :: haloLayersIn
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer :: i, d2
+ type (field1dReal), pointer :: fieldInPtr, fieldOutPtr
+ type (mpas_exchange_list), pointer :: exchListPtr, exchListPtr2
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
+ type (dm_info), pointer :: dminfo
+ logical :: comm_list_found
+
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: nAdded, bufferOffset
+ integer :: mpi_ierr
+ integer :: iHalo, iBuffer, i
+ integer :: nHaloLayers
+ integer, dimension(:), pointer :: haloLayers
+
+ dminfo => fieldIn % block % domain % dminfo
+
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
+ else
+ nHaloLayers = size(fieldIn % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
+ end if
+
#ifdef _MPI
+ nullify(sendList)
+ nullify(recvList)
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID == dminfo % my_proc_id) exit
- sendListPtr => sendListPtr % next
- end do
+ ! Setup receive lists.
+ do iHalo = 1, nHaloLayers
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(recvList)) then
+ allocate(recvList)
+ nullify(recvList % next)
+ commListPtr => recvList
+ else
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID == dminfo % my_proc_id) exit
- recvListPtr => recvListPtr % next
- end do
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+
+ commListPtr % procID = exchListPtr % endPointID
+ end if
- if (associated(recvListPtr) .and. associated(sendListPtr)) then
- do i=1,recvListPtr % nlist
- arrayOut(:,recvListPtr % list(i)) = arrayIn(:,sendListPtr % list(i))
+ exchListPtr => exchListPtr % next
end do
- end if
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * recvListPtr % nlist
- allocate(recvListPtr % rbuffer(d2))
- call MPI_Irecv(recvListPtr % rbuffer, d2, MPI_REALKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
- end do
+ ! Determine size of receive list buffers
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = bufferOffset
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * sendListPtr % nlist
- allocate(sendListPtr % rbuffer(d2))
- call mpas_pack_send_buf2d_real(1, dim1, nOwnedList, arrayIn, sendListPtr, 1, d2, &
- sendListPtr % rbuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % rbuffer, d2, MPI_REALKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
- end do
+ commListPtr => commListPtr % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d2 = dim1 * recvListPtr % nlist
- call mpas_unpack_recv_buf2d_real(1, dim1, nNeededList, arrayOut, recvListPtr, 1, d2, &
- recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % rbuffer)
- end if
- recvListPtr => recvListPtr % next
- end do
+ ! Allocate buffers for receives, and initiate mpi_irecv calls.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_realKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % rbuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
+ ! Setup send lists, and determine the size of their buffers.
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(sendList)) then
+ allocate(sendList)
+ nullify(sendList % next)
+ commListPtr => sendList
+ else
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
-#else
- if (nOwnedList /= nNeededList) then
- write(0,*) 'Error in dmpar_alltoall_field: For non-dmpar, ', &
- 'arrayIn and arrayOut dims must match.'
- call mpas_dmpar_abort(dminfo)
- else
- arrayOut(:,1:nNeededList) = arrayIn(:,1:nOwnedList)
- end if
-#endif
+ ! Allocate sendLists, copy data into buffer, and initiate mpi_isends
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ iBuffer = exchListPtr % destList(i) + bufferOffset
+ commListPtr % rbuffer(iBuffer) = fieldInPtr % array(exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
- end subroutine mpas_dmpar_alltoall_field2d_real
+ call MPI_Isend(commListPtr % rbuffer, commListPtr % nlist, MPI_realKIND, &
+ commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
+
+#endif
+
+ ! Handle Local Copies. Only local copies if no MPI
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldOutPtr % array(exchListPtr % destList(i)) = fieldInPtr % array(exchListPtr % srcList(i))
+ end do
+ end if
+ fieldOutPtr => fieldOutPtr % next
+ end do
- subroutine mpas_dmpar_alltoall_field3d_real(dminfo, arrayIn, arrayOut, dim1, dim2, nOwnedList, nNeededList, sendList, recvList)
+ exchListPtr => exchListPtr % next
+ end do
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
- implicit none
+#ifdef _MPI
+ ! Wait for MPI_Irecv's to finish, and unpack data.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1, dim2, nOwnedList, nNeededList
- real (kind=RKIND), dimension(dim1,dim2,*), intent(in) :: arrayIn
- real (kind=RKIND), dimension(dim1,dim2,*), intent(inout) :: arrayOut
- type (exchange_list), pointer :: sendList, recvList
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ iBuffer = exchListPtr % srcList(i) + bufferOffset
+ fieldOutPtr % array(exchListPtr % destList(i)) = commListPtr % rbuffer(iBuffer)
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer :: i, d3
+ commListPtr => commListPtr % next
+ end do
+ ! Wait for MPI_Isend's to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
+
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
+#endif
+
+ deallocate(haloLayers)
+
+ end subroutine mpas_dmpar_alltoall_field1d_real!}}}
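!
! The real-valued variants mirror the integer ones above, using rbuffer and
! MPI_REALKIND in place of ibuffer and MPI_INTEGERKIND.  A sketch of a
! whole-list exchange (fieldSrc and fieldDst are illustrative heads of
! per-block field1dReal lists; with no halo argument, every halo layer is
! exchanged):
!
!   call mpas_dmpar_alltoall_field1d_real(fieldSrc, fieldDst)
!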
+
+ subroutine mpas_dmpar_alltoall_field2d_real(fieldIn, fieldout, haloLayersIn)!{{{
+
+ implicit none
+
+ type (field2dReal), pointer :: fieldIn
+ type (field2dReal), pointer :: fieldOut
+ integer, dimension(:), pointer, optional :: haloLayersIn
+
+ type (field2dReal), pointer :: fieldInPtr, fieldOutPtr
+ type (mpas_exchange_list), pointer :: exchListPtr, exchListPtr2
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
+ type (dm_info), pointer :: dminfo
+
+ logical :: comm_list_found
+
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: nAdded, bufferOffset
+ integer :: mpi_ierr
+ integer :: iHalo, iBuffer, i, j
+ integer :: nHaloLayers
+ integer, dimension(:), pointer :: haloLayers
+
+ dminfo => fieldIn % block % domain % dminfo
+
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
+ else
+ nHaloLayers = size(fieldIn % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
+ end if
+
#ifdef _MPI
+ nullify(sendList)
+ nullify(recvList)
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID == dminfo % my_proc_id) exit
- sendListPtr => sendListPtr % next
- end do
+ ! Setup receive lists, and determine the size of their buffers.
+ do iHalo = 1, nHaloLayers
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(recvList)) then
+ allocate(recvList)
+ nullify(recvList % next)
+ commListPtr => recvList
+ else
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID == dminfo % my_proc_id) exit
- recvListPtr => recvListPtr % next
- end do
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+
+ commListPtr % procID = exchListPtr % endPointID
+ end if
- if (associated(recvListPtr) .and. associated(sendListPtr)) then
- do i=1,recvListPtr % nlist
- arrayOut(:,:,recvListPtr % list(i)) = arrayIn(:,:,sendListPtr % list(i))
+ exchListPtr => exchListPtr % next
end do
- end if
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- d3 = dim1 * dim2 * recvListPtr % nlist
- allocate(recvListPtr % rbuffer(d3))
- call MPI_Irecv(recvListPtr % rbuffer, d3, MPI_REALKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
- end do
+ ! Determine size of receive list buffers.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = bufferOffset
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- d3 = dim1 * dim2 * sendListPtr % nlist
- allocate(sendListPtr % rbuffer(d3))
- call mpas_pack_send_buf3d_real(1, dim1, 1, dim2, nOwnedList, arrayIn, sendListPtr, 1, d3, &
- sendListPtr % rbuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % rbuffer, d3, MPI_REALKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
- end do
+ commListPtr => commListPtr % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d3 = dim1 * dim2 * recvListPtr % nlist
- call mpas_unpack_recv_buf3d_real(1, dim1, 1, dim2, nNeededList, arrayOut, recvListPtr, 1, d3, &
- recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % rbuffer)
- end if
- recvListPtr => recvListPtr % next
- end do
+ ! Allocate buffers for receives, and initiate mpi_irecv calls.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_realKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % rbuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
+ ! Setup send lists, and determine the size of their buffers.
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldInPtr % dimSizes(1)
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(sendList)) then
+ allocate(sendList)
+ nullify(sendList % next)
+ commListPtr => sendList
+ else
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldInPtr % dimSizes(1)
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
-#else
- if (nOwnedList /= nNeededList) then
- write(0,*) 'Error in dmpar_alltoall_field: For non-dmpar, ', &
- 'arrayIn and arrayOut dims must match.'
- call mpas_dmpar_abort(dminfo)
- else
- arrayOut(:,:,1:nNeededList) = arrayIn(:,:,1:nOwnedList)
- end if
-#endif
+ ! Allocate sendLists, copy data into buffer, and initiate mpi_isends
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldInPtr % dimSizes(1)
+ iBuffer = (exchListPtr % destList(i)-1) * fieldInPtr % dimSizes(1) + j + bufferOffset
+ commListPtr % rbuffer(iBuffer) = fieldInPtr % array(j, exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
+ end do
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
- end subroutine mpas_dmpar_alltoall_field3d_real
+ call MPI_Isend(commListPtr % rbuffer, commListPtr % nlist, MPI_realKIND, &
+ commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
-
- subroutine mpas_pack_send_buf1d_integer(nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
+ commListPtr => commListPtr % next
+ end do
- implicit none
+#endif
- integer, intent(in) :: nField, nBuffer, startPackIdx
- integer, dimension(*), intent(in) :: field
- type (exchange_list), intent(in) :: sendList
- integer, dimension(nBuffer), intent(out) :: buffer
- integer, intent(inout) :: nPacked, lastPackedIdx
+ ! Handle Local Copies. Only local copies if no MPI
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldOutPtr % array(:, exchListPtr % destList(i)) = fieldInPtr % array(:, exchListPtr % srcList(i))
+ end do
+ end if
+ fieldOutPtr => fieldOutPtr % next
+ end do
+
+ exchListPtr => exchListPtr % next
+ end do
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
- integer :: i
+#ifdef _MPI
+ ! Wait for MPI_Irecv's to finish, and unpack data.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- nPacked = 0
- do i=startPackIdx, sendList % nlist
- nPacked = nPacked + 1
- if (nPacked > nBuffer) then
- nPacked = nPacked - 1
- lastPackedIdx = i - 1
- return
- end if
- buffer(nPacked) = field(sendList % list(i))
- end do
- lastPackedIdx = sendList % nlist
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldOutPtr % dimSizes(1)
+ iBuffer = (exchListPtr % srcList(i)-1) * fieldOutPtr % dimSizes(1) + j + bufferOffset
+ fieldOutPtr % array(j, exchListPtr % destList(i)) = commListPtr % rbuffer(iBuffer)
+ end do
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
- end subroutine mpas_pack_send_buf1d_integer
+ commListPtr => commListPtr % next
+ end do
+ ! Wait for MPI_Isend's to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
- subroutine mpas_pack_send_buf2d_integer(ds, de, nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
+#endif
- implicit none
+ deallocate(haloLayers)
- integer, intent(in) :: ds, de, nField, nBuffer, startPackIdx
- integer, dimension(ds:de,*), intent(in) :: field
- type (exchange_list), intent(in) :: sendList
- integer, dimension(nBuffer), intent(out) :: buffer
- integer, intent(inout) :: nPacked, lastPackedIdx
+ end subroutine mpas_dmpar_alltoall_field2d_real!}}}
- integer :: i, n
+ subroutine mpas_dmpar_alltoall_field3d_real(fieldIn, fieldout, haloLayersIn)!{{{
- n = de-ds+1
+ implicit none
- if (n > nBuffer) then
- write(0,*) 'packSendBuf2dInteger: Not enough space in buffer', &
- ' to fit a single slice.'
- return
- end if
+ type (field3dReal), pointer :: fieldIn
+ type (field3dReal), pointer :: fieldOut
+ integer, dimension(:), pointer, optional :: haloLayersIn
- nPacked = 0
- do i=startPackIdx, sendList % nlist
- nPacked = nPacked + n
- if (nPacked > nBuffer) then
- nPacked = nPacked - n
- lastPackedIdx = i - 1
- return
- end if
- buffer(nPacked-n+1:nPacked) = field(ds:de,sendList % list(i))
- end do
- lastPackedIdx = sendList % nlist
+ type (field3dReal), pointer :: fieldInPtr, fieldOutPtr
+ type (mpas_exchange_list), pointer :: exchListPtr, exchListPtr2
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
+ type (dm_info), pointer :: dminfo
- end subroutine mpas_pack_send_buf2d_integer
+ logical :: comm_list_found
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: nAdded, bufferOffset
+ integer :: mpi_ierr
+ integer :: iHalo, iBuffer, i, j, k
+ integer :: nHaloLayers
+ integer, dimension(:), pointer :: haloLayers
- subroutine mpas_pack_send_buf3d_integer(d1s, d1e, d2s, d2e, nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
+ dminfo => fieldIn % block % domain % dminfo
- implicit none
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
+ else
+ nHaloLayers = size(fieldIn % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
+ end if
- integer, intent(in) :: d1s, d1e, d2s, d2e, nField, nBuffer, startPackIdx
- integer, dimension(d1s:d1e,d2s:d2e,*), intent(in) :: field
- type (exchange_list), intent(in) :: sendList
- integer, dimension(nBuffer), intent(out) :: buffer
- integer, intent(inout) :: nPacked, lastPackedIdx
+#ifdef _MPI
+ nullify(sendList)
+ nullify(recvList)
- integer :: i, j, k, n
+ ! Setup receive lists.
+ do iHalo = 1, nHaloLayers
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2)
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(recvList)) then
+ allocate(recvList)
+ nullify(recvList % next)
+ commListPtr => recvList
+ else
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
- n = (d1e-d1s+1) * (d2e-d2s+1)
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2)
+ end if
- if (n > nBuffer) then
- write(0,*) 'packSendBuf3dInteger: Not enough space in buffer', &
- ' to fit a single slice.'
- return
- end if
-
- nPacked = 0
- do i=startPackIdx, sendList % nlist
- nPacked = nPacked + n
- if (nPacked > nBuffer) then
- nPacked = nPacked - n
- lastPackedIdx = i - 1
- return
- end if
- k = nPacked-n+1
- do j=d2s,d2e
- buffer(k:k+d1e-d1s) = field(d1s:d1e,j,sendList % list(i))
- k = k + d1e-d1s+1
+ exchListPtr => exchListPtr % next
end do
- end do
- lastPackedIdx = sendList % nlist
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ end do
- end subroutine mpas_pack_send_buf3d_integer
+ ! Determine size of receive list buffers.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = bufferOffset
- subroutine mpas_pack_send_buf1d_real(nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
+ commListPtr => commListPtr % next
+ end do
- implicit none
+ ! Allocate buffers for receives, and initiate mpi_irecv calls.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_realKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
- integer, intent(in) :: nField, nBuffer, startPackIdx
- real (kind=RKIND), dimension(*), intent(in) :: field
- type (exchange_list), intent(in) :: sendList
- real (kind=RKIND), dimension(nBuffer), intent(out) :: buffer
- integer, intent(inout) :: nPacked, lastPackedIdx
+ ! Setup send lists, and determine the size of their buffers.
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2)
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(sendList)) then
+ allocate(sendList)
+ nullify(sendList % next)
+ commListPtr => sendList
+ else
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2)
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
- integer :: i
+ ! Allocate sendLists, copy data into buffer, and initiate mpi_isends
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldInPtr % dimSizes(2)
+ do k = 1, fieldInPtr % dimSizes(1)
+ iBuffer = (exchListPtr % destList(i)-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) + (j-1) * fieldInPtr % dimSizes(1) + k + bufferOffset
+ commListPtr % rbuffer(iBuffer) = fieldInPtr % array(k, j, exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
+ end do
+ end do
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
- nPacked = 0
- do i=startPackIdx, sendList % nlist
- nPacked = nPacked + 1
- if (nPacked > nBuffer) then
- nPacked = nPacked - 1
- lastPackedIdx = i - 1
- return
- end if
- buffer(nPacked) = field(sendList % list(i))
- end do
- lastPackedIdx = sendList % nlist
+ call MPI_Isend(commListPtr % rbuffer, commListPtr % nlist, MPI_realKIND, &
+ commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
- end subroutine mpas_pack_send_buf1d_real
+ commListPtr => commListPtr % next
+ end do
+#endif
- subroutine mpas_pack_send_buf2d_real(ds, de, nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
+ ! Handle Local Copies. Only local copies if no MPI
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldOutPtr % array(:, :, exchListPtr % destList(i)) = fieldInPtr % array(:, :, exchListPtr % srcList(i))
+ end do
+ end if
+ fieldOutPtr => fieldOutPtr % next
+ end do
+
+ exchListPtr => exchListPtr % next
+ end do
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
- implicit none
+#ifdef _MPI
+ ! Wait for MPI_Irecv's to finish, and unpack data.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- integer, intent(in) :: ds, de, nField, nBuffer, startPackIdx
- real (kind=RKIND), dimension(ds:de,*), intent(in) :: field
- type (exchange_list), intent(in) :: sendList
- real (kind=RKIND), dimension(nBuffer), intent(out) :: buffer
- integer, intent(inout) :: nPacked, lastPackedIdx
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldOutPtr % dimSizes(2)
+ do k = 1, fieldOutPtr % dimSizes(1)
+ iBuffer = (exchListPtr % srcList(i)-1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(1) + (j-1) * fieldOutPtr % dimSizes(1) + k + bufferOffset
+ fieldOutPtr % array(k, j, exchListPtr % destList(i)) = commListPtr % rbuffer(iBuffer)
+ end do
+ end do
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
- integer :: i, n
+ commListPtr => commListPtr % next
+ end do
- n = de-ds+1
+ ! Wait for MPI_Isend's to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
- if (n > nBuffer) then
- write(0,*) 'packSendBuf2dReal: Not enough space in buffer', &
- ' to fit a single slice.'
- return
- end if
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
+#endif
- nPacked = 0
- do i=startPackIdx, sendList % nlist
- nPacked = nPacked + n
- if (nPacked > nBuffer) then
- nPacked = nPacked - n
- lastPackedIdx = i - 1
- return
- end if
- buffer(nPacked-n+1:nPacked) = field(ds:de,sendList % list(i))
- end do
- lastPackedIdx = sendList % nlist
+ deallocate(haloLayers)
- end subroutine mpas_pack_send_buf2d_real
+ end subroutine mpas_dmpar_alltoall_field3d_real!}}}
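The pack and unpack loops above flatten each (k, j, cell) entry of a 3-D field into a 1-D buffer with the index (list(i)-1)*dimSizes(1)*dimSizes(2) + (j-1)*dimSizes(1) + k + bufferOffset. Below is a minimal, self-contained sketch of that round trip; the array sizes and the list values are illustrative stand-ins, not MPAS data structures (the real code uses destList when packing and srcList when unpacking).

      program buffer_index_demo
         implicit none
         integer, parameter :: d1 = 3, d2 = 4          ! stand-ins for dimSizes(1), dimSizes(2)
         integer, parameter :: nList = 2               ! entries in the exchange list
         integer, dimension(nList), parameter :: list = (/ 2, 5 /)  ! stand-in for srcList/destList
         real, dimension(d1, d2, 6) :: field, copy
         real, dimension(d1*d2*5) :: buffer            ! sized for the largest list entry (5)
         integer :: i, j, k, iBuffer, bufferOffset

         call random_number(field)
         copy = 0.0
         bufferOffset = 0                              ! single halo layer, so no offset

         ! Pack: same index arithmetic as the send loops above.
         do i = 1, nList
            do j = 1, d2
               do k = 1, d1
                  iBuffer = (list(i)-1)*d1*d2 + (j-1)*d1 + k + bufferOffset
                  buffer(iBuffer) = field(k, j, list(i))
               end do
            end do
         end do

         ! Unpack: identical arithmetic recovers the original values.
         do i = 1, nList
            do j = 1, d2
               do k = 1, d1
                  iBuffer = (list(i)-1)*d1*d2 + (j-1)*d1 + k + bufferOffset
                  copy(k, j, list(i)) = buffer(iBuffer)
               end do
            end do
         end do

         write(0,*) 'round trip ok: ', all(copy(:,:,list) == field(:,:,list))
      end program buffer_index_demo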
- subroutine mpas_pack_send_buf3d_real(d1s, d1e, d2s, d2e, nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
+ subroutine mpas_dmpar_exch_halo_field1d_integer(field, haloLayersIn)!{{{
implicit none
- integer, intent(in) :: d1s, d1e, d2s, d2e, nField, nBuffer, startPackIdx
- real (kind=RKIND), dimension(d1s:d1e,d2s:d2e,*), intent(in) :: field
- type (exchange_list), intent(in) :: sendList
- real (kind=RKIND), dimension(nBuffer), intent(out) :: buffer
- integer, intent(inout) :: nPacked, lastPackedIdx
+ type (field1DInteger), pointer :: field
+ integer, dimension(:), intent(in), optional :: haloLayersIn
- integer :: i, j, k, n
+ type (dm_info), pointer :: dminfo
+ type (field1DInteger), pointer :: fieldCursor, fieldCursor2
+ type (mpas_exchange_list), pointer :: exchListPtr
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
+ integer :: mpi_ierr
+ integer :: nHaloLayers, iHalo, i
+ integer :: bufferOffset, nAdded
+ integer, dimension(:), pointer :: haloLayers
- n = (d1e-d1s+1) * (d2e-d2s+1)
+ logical :: comm_list_found
- if (n > nBuffer) then
- write(0,*) 'packSendBuf3dReal: Not enough space in buffer', &
- ' to fit a single slice.'
- return
+ dminfo => field % block % domain % dminfo
+
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
+ else
+ nHaloLayers = size(field % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
end if
- nPacked = 0
- do i=startPackIdx, sendList % nlist
- nPacked = nPacked + n
- if (nPacked > nBuffer) then
- nPacked = nPacked - n
- lastPackedIdx = i - 1
- return
- end if
- k = nPacked-n+1
- do j=d2s,d2e
- buffer(k:k+d1e-d1s) = field(d1s:d1e,j,sendList % list(i))
- k = k + d1e-d1s+1
- end do
- end do
- lastPackedIdx = sendList % nlist
+#ifdef _MPI
+ ! Allocate communication lists, and set up dead header node.
+ allocate(sendList)
+ nullify(sendList % next)
+ sendList % procID = -1
+ sendList % nList = 0
- end subroutine mpas_pack_send_buf3d_real
+ allocate(recvList)
+ nullify(recvList % next)
+ recvList % procID = -1
+ recvList % nList = 0
+ dminfo = field % block % domain % dminfo
- subroutine mpas_unpack_recv_buf1d_integer(nField, field, recvList, startUnpackIdx, nBuffer, buffer, nUnpacked, lastUnpackedIdx)
+ ! Determine size of buffers for communication lists
+ fieldCursor => field
+ do while(associated(fieldCursor))
- implicit none
+ ! Need to aggregate across halo layers
+ do iHalo = 1, nHaloLayers
+
+ ! Determine size from send lists
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
- integer, intent(in) :: nField, nBuffer, startUnpackIdx
- integer, dimension(*), intent(inout) :: field
- type (exchange_list), intent(in) :: recvList
- integer, dimension(nBuffer), intent(in) :: buffer
- integer, intent(inout) :: nUnpacked, lastUnpackedIdx
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList
+ exit
+ end if
- integer :: i
+ commListPtr => commListPtr % next
+ end do
- nUnpacked = 0
- do i=startUnpackIdx, recvList % nlist
- nUnpacked = nUnpacked + 1
- if (nUnpacked > nBuffer) then
- nUnpacked = nUnpacked - 1
- lastUnpackedIdx = i - 1
- return
- end if
- field(recvList % list(i)) = buffer(nUnpacked)
- end do
- lastUnpackedIdx = recvList % nlist
+ if(.not. comm_list_found) then
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
- end subroutine mpas_unpack_recv_buf1d_integer
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList
+ end if
+ exchListPtr => exchListPtr % next
+ end do
- subroutine mpas_unpack_recv_buf2d_integer(ds, de, nField, field, recvList, startUnpackIdx, nBuffer, buffer, nUnpacked, lastUnpackedIdx)
+ ! Setup recv lists
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
- implicit none
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ exit
+ end if
- integer, intent(in) :: ds, de, nField, nBuffer, startUnpackIdx
- integer, dimension(ds:de,*), intent(inout) :: field
- type (exchange_list), intent(in) :: recvList
- integer, dimension(nBuffer), intent(in) :: buffer
- integer, intent(inout) :: nUnpacked, lastUnpackedIdx
+ commListPtr => commListPtr % next
+ end do
- integer :: i, n
+ if(.not. comm_list_found) then
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
- n = de-ds+1
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
+ end if
- nUnpacked = 0
- do i=startUnpackIdx, recvList % nlist
- nUnpacked = nUnpacked + n
- if (nUnpacked > nBuffer) then
- nUnpacked = nUnpacked - n
- lastUnpackedIdx = i - 1
- return
- end if
- field(ds:de,recvList % list(i)) = buffer(nUnpacked-n+1:nUnpacked)
+ exchListPtr => exchListPtr % next
+ end do
+ end do
+
+ fieldCursor => fieldCursor % next
end do
- lastUnpackedIdx = recvList % nlist
- end subroutine mpas_unpack_recv_buf2d_integer
+ ! Remove the dead head pointer on send and recv list
+ commListPtr => sendList
+ sendList => sendList % next
+ deallocate(commListPtr)
+ commListPtr => recvList
+ recvList => recvList % next
+ deallocate(commListPtr)
- subroutine mpas_unpack_recv_buf3d_integer(d1s, d1e, d2s, d2e, nField, field, recvList, startUnpackIdx, nBuffer, buffer, &
- nUnpacked, lastUnpackedIdx)
+ ! Determine size of receive lists
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = bufferOffset
- implicit none
+ commListPtr => commListPtr % next
+ end do
- integer, intent(in) :: d1s, d1e, d2s, d2e, nField, nBuffer, startUnpackIdx
- integer, dimension(d1s:d1e,d2s:d2e,*), intent(inout) :: field
- type (exchange_list), intent(in) :: recvList
- integer, dimension(nBuffer), intent(in) :: buffer
- integer, intent(inout) :: nUnpacked, lastUnpackedIdx
- integer :: i, j, k, n
+ ! Allocate space in recv lists, and initiate mpi_irecv calls
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % ibuffer(commListPtr % nList))
+ nullify(commListPtr % rbuffer)
+ call MPI_Irecv(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
- n = (d1e-d1s+1) * (d2e-d2s+1)
-
- nUnpacked = 0
- do i=startUnpackIdx, recvList % nlist
- nUnpacked = nUnpacked + n
- if (nUnpacked > nBuffer) then
- nUnpacked = nUnpacked - n
- lastUnpackedIdx = i - 1
- return
- end if
- k = nUnpacked-n+1
- do j=d2s,d2e
- field(d1s:d1e,j,recvList % list(i)) = buffer(k:k+d1e-d1s)
- k = k + d1e-d1s+1
- end do
+ commListPtr => commListPtr % next
end do
- lastUnpackedIdx = recvList % nlist
- end subroutine mpas_unpack_recv_buf3d_integer
+ ! Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % ibuffer(commListPtr % nList))
+ nullify(commListPtr % rbuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ commListPtr % ibuffer(exchListPtr % destList(i) + bufferOffset) = fieldCursor % array(exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
+ end if
- subroutine mpas_dmpar_exch_halo_field1d_integer(field, haloLayers)
+ exchListPtr => exchListPtr % next
+ end do
- implicit none
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
- type (field1DInteger), intent(inout) :: field
- integer, dimension(:), intent(in), optional :: haloLayers
+ call MPI_Isend(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
- type (dm_info) :: dminfo
- type (exchange_list), pointer :: sendList, recvList
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer, dimension(size(field % dimSizes)) :: dims
+ commListPtr => commListPtr % next
+ end do
+#endif
-#ifdef _MPI
+ ! Handle local copy. If MPI is off, then only local copies are performed.
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ do iHalo = 1, nHaloLayers
+ exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList
- dminfo = field % block % domain % dminfo
- dims = field % dimSizes
+ do while(associated(exchListPtr))
+ fieldCursor2 => field
+ do while(associated(fieldCursor2))
+ if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldCursor2 % array(exchListPtr % destList(i)) = fieldCursor % array(exchListPtr % srcList(i))
+ end do
+ end if
+
+ fieldCursor2 => fieldCursor2 % next
+ end do
- call mpas_aggregate_exchange_lists(dminfo % my_proc_id, haloLayers, field % sendList, field % recvList, sendList, recvList)
+ exchListPtr => exchListPtr % next
+ end do
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- allocate(recvListPtr % ibuffer(recvListPtr % nlist))
- call MPI_Irecv(recvListPtr % ibuffer, recvListPtr % nlist, MPI_INTEGERKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
+ fieldCursor => fieldCursor % next
end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- allocate(sendListPtr % ibuffer(sendListPtr % nlist))
- call mpas_pack_send_buf1d_integer(dims(1), field % array, sendListPtr, 1, sendListPtr % nlist, sendListPtr % ibuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % ibuffer, sendListPtr % nlist, MPI_INTEGERKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
+
+#ifdef _MPI
+
+ ! Wait for mpi_irecv to finish, and unpack data from buffer
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ fieldCursor % array(exchListPtr % destList(i)) = commListPtr % ibuffer(exchListPtr % srcList(i) + bufferOffset)
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr => commListPtr % next
end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- call mpas_unpack_recv_buf1d_integer(dims(1), field % array, recvListPtr, 1, recvListPtr % nlist, recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % ibuffer)
- end if
- recvListPtr => recvListPtr % next
+ ! wait for mpi_isend to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % ibuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
- call mpas_destroy_exchange_list(sendList)
- call mpas_destroy_exchange_list(recvList)
-
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
#endif
- end subroutine mpas_dmpar_exch_halo_field1d_integer
+ deallocate(haloLayers)
+ end subroutine mpas_dmpar_exch_halo_field1d_integer!}}}
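The send and receive communication lists above are built as singly linked lists that start from a throwaway dead header node (procID = -1, nList = 0); once every neighbour processor has been appended, the header is popped and deallocated. A simplified sketch of that pattern follows, with comm_node standing in for mpas_communication_list and made-up processor IDs and counts.

      program dead_header_demo
         implicit none

         type comm_node                                ! simplified stand-in for mpas_communication_list
            integer :: procID = -1
            integer :: nList = 0
            type (comm_node), pointer :: next => null()
         end type comm_node

         type (comm_node), pointer :: sendList, cursor, tail
         integer :: p

         ! Dead header node: appends never have to special-case an empty list.
         allocate(sendList)
         sendList % procID = -1
         sendList % nList = 0
         nullify(sendList % next)

         ! Append one node per destination processor (values are illustrative).
         do p = 3, 7, 2
            tail => sendList
            do while (associated(tail % next))
               tail => tail % next
            end do
            allocate(tail % next)
            tail => tail % next
            nullify(tail % next)
            tail % procID = p
            tail % nList = 10 * p
         end do

         ! Pop and discard the dead header, as the halo-exchange routines do.
         cursor => sendList
         sendList => sendList % next
         deallocate(cursor)

         ! Walk and release the remaining real nodes.
         do while (associated(sendList))
            write(0,*) 'procID =', sendList % procID, ' nList =', sendList % nList
            cursor => sendList
            sendList => sendList % next
            deallocate(cursor)
         end do
      end program dead_header_demo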
- subroutine mpas_dmpar_exch_halo_field2d_integer(field, haloLayers)
+ subroutine mpas_dmpar_exch_halo_field2d_integer(field, haloLayersIn)!{{{
implicit none
- type (field2DInteger), intent(inout) :: field
- integer, dimension(:), intent(in), optional :: haloLayers
+ type (field2DInteger), pointer :: field
+ integer, dimension(:), intent(in), optional :: haloLayersIn
- type (dm_info) :: dminfo
- type (exchange_list), pointer :: sendList, recvList
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ type (dm_info), pointer :: dminfo
+ type (field2DInteger), pointer :: fieldCursor, fieldCursor2
+ type (mpas_exchange_list), pointer :: exchListPtr
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
integer :: mpi_ierr
- integer :: d2
- integer, dimension(size(field % dimSizes)) :: dims
+ integer :: nHaloLayers, iHalo, i, j
+ integer :: bufferOffset, nAdded
+ integer, dimension(:), pointer :: haloLayers
+ logical :: comm_list_found
+
+ dminfo => field % block % domain % dminfo
+
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
+ else
+ nHaloLayers = size(field % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
+ end if
+
#ifdef _MPI
+ ! Allocate communication lists, and set up dead header nodes
+ allocate(sendList)
+ nullify(sendList % next)
+ sendList % procID = -1
+ sendList % nList = 0
+ allocate(recvList)
+ nullify(recvList % next)
+ recvList % procID = -1
+ recvList % nList = 0
+
dminfo = field % block % domain % dminfo
- dims = field % dimSizes
- call mpas_aggregate_exchange_lists(dminfo % my_proc_id, haloLayers, field % sendList, field % recvList, sendList, recvList)
+ ! Determine size of buffers for communication lists
+ fieldCursor => field
+ do while(associated(fieldCursor))
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- d2 = dims(1) * recvListPtr % nlist
- allocate(recvListPtr % ibuffer(d2))
- call MPI_Irecv(recvListPtr % ibuffer, d2, MPI_INTEGERKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
- end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- d2 = dims(1) * sendListPtr % nlist
- allocate(sendListPtr % ibuffer(d2))
- call mpas_pack_send_buf2d_integer(1, dims(1), dims(2), field % array, sendListPtr, 1, d2, sendListPtr % ibuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % ibuffer, d2, MPI_INTEGERKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
- end do
+ ! Need to aggregate across halo layers
+ do iHalo = 1, nHaloLayers
+
+ ! Determine size from send lists
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d2 = dims(1) * recvListPtr % nlist
- call mpas_unpack_recv_buf2d_integer(1, dims(1), dims(2), field % array, recvListPtr, 1, d2, recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % ibuffer)
- end if
- recvListPtr => recvListPtr % next
- end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % ibuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldCursor % dimSizes(1)
+ exit
+ end if
- call mpas_destroy_exchange_list(sendList)
- call mpas_destroy_exchange_list(recvList)
+ commListPtr => commListPtr % next
+ end do
-#endif
+ if(.not. comm_list_found) then
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
- end subroutine mpas_dmpar_exch_halo_field2d_integer
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldCursor % dimSizes(1)
+ end if
+ exchListPtr => exchListPtr % next
+ end do
- subroutine mpas_dmpar_exch_halo_field3d_integer(field, haloLayers)
+ ! Setup recv lists
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
- implicit none
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ exit
+ end if
- type (field3DInteger), intent(inout) :: field
- integer, dimension(:), intent(in), optional :: haloLayers
+ commListPtr => commListPtr % next
+ end do
- type (dm_info) :: dminfo
- type (exchange_list), pointer :: sendList, recvList
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer :: d3
- integer, dimension(size(field % dimSizes)) :: dims
+ if(.not. comm_list_found) then
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
-#ifdef _MPI
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
+ end if
- dminfo = field % block % domain % dminfo
- dims = field % dimSizes
+ exchListPtr => exchListPtr % next
+ end do
+ end do
- call mpas_aggregate_exchange_lists(dminfo % my_proc_id, haloLayers, field % sendList, field % recvList, sendList, recvList)
+ fieldCursor => fieldCursor % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- d3 = dims(1) * dims(2) * recvListPtr % nlist
- allocate(recvListPtr % ibuffer(d3))
- call MPI_Irecv(recvListPtr % ibuffer, d3, MPI_INTEGERKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
+ ! Remove the dead head pointer on send and recv list
+ commListPtr => sendList
+ sendList => sendList % next
+ deallocate(commListPtr)
+
+ commListPtr => recvList
+ recvList => recvList % next
+ deallocate(commListPtr)
+
+ ! Determine size of receive list buffers
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = bufferOffset
+
+ commListPtr => commListPtr % next
end do
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- d3 = dims(1) * dims(2) * sendListPtr % nlist
- allocate(sendListPtr % ibuffer(d3))
- call mpas_pack_send_buf3d_integer(1, dims(1), 1, dims(2), dims(3), field % array, sendListPtr, 1, d3, &
- sendListPtr % ibuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % ibuffer, d3, MPI_INTEGERKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
+ ! Allocate space in recv lists, and initiate mpi_irecv calls
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % ibuffer(commListPtr % nList))
+ nullify(commListPtr % rbuffer)
+ call MPI_Irecv(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+
+ commListPtr => commListPtr % next
end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d3 = dims(1) * dims(2) * recvListPtr % nlist
- call mpas_unpack_recv_buf3d_integer(1, dims(1), 1, dims(2), dims(3), field % array, recvListPtr, 1, d3, &
- recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % ibuffer)
- end if
- recvListPtr => recvListPtr % next
+ ! Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % ibuffer(commListPtr % nList))
+ nullify(commListPtr % rbuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldCursor % dimSizes(1)
+ commListPtr % ibuffer((exchListPtr % destList(i)-1) * fieldCursor % dimSizes(1) + j + bufferOffset) = fieldCursor % array(j, exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
+ end do
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+
+ call MPI_Isend(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
end do
+#endif
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % ibuffer)
- end if
- sendListPtr => sendListPtr % next
+ ! Handle local copy. If MPI is off, then only local copies are performed.
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ do iHalo = 1, nHaloLayers
+ exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList
+
+ do while(associated(exchListPtr))
+ fieldCursor2 => field
+ do while(associated(fieldCursor2))
+ if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldCursor2 % array(:, exchListPtr % destList(i)) = fieldCursor % array(:, exchListPtr % srcList(i))
+ end do
+ end if
+
+ fieldCursor2 => fieldCursor2 % next
+ end do
+
+ exchListPtr => exchListPtr % next
+ end do
+ end do
+
+ fieldCursor => fieldCursor % next
end do
- call mpas_destroy_exchange_list(sendList)
- call mpas_destroy_exchange_list(recvList)
+#ifdef _MPI
+ ! Wait for mpi_irecv to finish, and unpack data from buffer
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldCursor % dimSizes(1)
+ fieldCursor % array(j, exchListPtr % destList(i)) = commListPtr % ibuffer((exchListPtr % srcList(i)-1)*fieldCursor % dimSizes(1) + j + bufferOffset)
+ end do
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr => commListPtr % next
+ end do
+
+ ! wait for mpi_isend to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
+
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
#endif
- end subroutine mpas_dmpar_exch_halo_field3d_integer
+ deallocate(haloLayers)
-
- subroutine mpas_unpack_recv_buf1d_real(nField, field, recvList, startUnpackIdx, nBuffer, buffer, nUnpacked, lastUnpackedIdx)
+ end subroutine mpas_dmpar_exch_halo_field2d_integer!}}}
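Across halo layers, each layer occupies its own contiguous segment of a neighbour's buffer: after a layer is packed or unpacked, bufferOffset advances by nAdded, and the final bufferOffset becomes the buffer length stored in commListPtr % nList. The sketch below lays out the segments for a 2-D field using made-up per-layer counts; the real routines derive nAdded from maxval(srcList) times the inner dimension rather than from a fixed count.

      program halo_offset_demo
         implicit none
         integer, parameter :: nHaloLayers = 2
         integer, parameter :: dim1 = 3                ! stand-in for dimSizes(1) of a 2-D field
         ! Illustrative per-layer element counts for one neighbour processor.
         integer, dimension(nHaloLayers), parameter :: nPerLayer = (/ 4, 6 /)
         integer :: iHalo, bufferOffset, nAdded, firstIdx, lastIdx

         bufferOffset = 0
         do iHalo = 1, nHaloLayers
            ! Each element of a 2-D field contributes dim1 buffer entries,
            ! so this layer's segment holds nPerLayer(iHalo)*dim1 values.
            nAdded = nPerLayer(iHalo) * dim1
            firstIdx = bufferOffset + 1
            lastIdx  = bufferOffset + nAdded
            write(0,'(a,i0,a,i0,a,i0)') 'halo layer ', iHalo, ': buffer slots ', firstIdx, ' .. ', lastIdx
            bufferOffset = bufferOffset + nAdded
         end do
         write(0,'(a,i0)') 'total buffer length (commListPtr % nList) = ', bufferOffset
      end program halo_offset_demo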
+ subroutine mpas_dmpar_exch_halo_field3d_integer(field, haloLayersIn)!{{{
+
implicit none
- integer, intent(in) :: nField, nBuffer, startUnpackIdx
- real (kind=RKIND), dimension(*), intent(inout) :: field
- type (exchange_list), intent(in) :: recvList
- real (kind=RKIND), dimension(nBuffer), intent(in) :: buffer
- integer, intent(inout) :: nUnpacked, lastUnpackedIdx
+ type (field3DInteger), pointer :: field
+ integer, dimension(:), intent(in), optional :: haloLayersIn
- integer :: i
+ type (dm_info), pointer :: dminfo
+ type (field3DInteger), pointer :: fieldCursor, fieldCursor2
+ type (mpas_exchange_list), pointer :: exchListPtr
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
+ integer :: mpi_ierr
+ integer :: nHaloLayers, iHalo, i, j, k
+ integer :: bufferOffset, nAdded
+ integer, dimension(:), pointer :: haloLayers
- nUnpacked = 0
- do i=startUnpackIdx, recvList % nlist
- nUnpacked = nUnpacked + 1
- if (nUnpacked > nBuffer) then
- nUnpacked = nUnpacked - 1
- lastUnpackedIdx = i - 1
- return
- end if
- field(recvList % list(i)) = buffer(nUnpacked)
- end do
- lastUnpackedIdx = recvList % nlist
+ logical :: comm_list_found
- end subroutine mpas_unpack_recv_buf1d_real
+ dminfo => field % block % domain % dminfo
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
+ else
+ nHaloLayers = size(field % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
+ end if
- subroutine mpas_unpack_recv_buf2d_real(ds, de, nField, field, recvList, startUnpackIdx, nBuffer, buffer, nUnpacked, lastUnpackedIdx)
+#ifdef _MPI
+ ! Allocate communication lists, and set up dead header nodes
+ allocate(sendList)
+ nullify(sendList % next)
+ sendList % procID = -1
+ sendList % nList = 0
- implicit none
+ allocate(recvList)
+ nullify(recvList % next)
+ recvList % procID = -1
+ recvList % nList = 0
- integer, intent(in) :: ds, de, nField, nBuffer, startUnpackIdx
- real (kind=RKIND), dimension(ds:de,*), intent(inout) :: field
- type (exchange_list), intent(in) :: recvList
- real (kind=RKIND), dimension(nBuffer), intent(in) :: buffer
- integer, intent(inout) :: nUnpacked, lastUnpackedIdx
+ dminfo = field % block % domain % dminfo
- integer :: i, n
+ ! Determine size of buffers for communication lists
+ fieldCursor => field
+ do while(associated(fieldCursor))
- n = de-ds+1
+ ! Need to aggregate across halo layers
+ do iHalo = 1, nHaloLayers
+
+ ! Determine size from send lists
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
- nUnpacked = 0
- do i=startUnpackIdx, recvList % nlist
- nUnpacked = nUnpacked + n
- if (nUnpacked > nBuffer) then
- nUnpacked = nUnpacked - n
- lastUnpackedIdx = i - 1
- return
- end if
- field(ds:de,recvList % list(i)) = buffer(nUnpacked-n+1:nUnpacked)
- end do
- lastUnpackedIdx = recvList % nlist
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2)
+ exit
+ end if
- end subroutine mpas_unpack_recv_buf2d_real
+ commListPtr => commListPtr % next
+ end do
+ if(.not. comm_list_found) then
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
- subroutine mpas_unpack_recv_buf3d_real(d1s, d1e, d2s, d2e, nField, field, recvList, startUnpackIdx, nBuffer, buffer, &
- nUnpacked, lastUnpackedIdx)
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2)
+ end if
- implicit none
+ exchListPtr => exchListPtr % next
+ end do
- integer, intent(in) :: d1s, d1e, d2s, d2e, nField, nBuffer, startUnpackIdx
- real (kind=RKIND), dimension(d1s:d1e,d2s:d2e,*), intent(inout) :: field
- type (exchange_list), intent(in) :: recvList
- real (kind=RKIND), dimension(nBuffer), intent(in) :: buffer
- integer, intent(inout) :: nUnpacked, lastUnpackedIdx
+ ! Setup recv lists
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
- integer :: i, j, k, n
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ exit
+ end if
- n = (d1e-d1s+1) * (d2e-d2s+1)
+ commListPtr => commListPtr % next
+ end do
- nUnpacked = 0
- do i=startUnpackIdx, recvList % nlist
- nUnpacked = nUnpacked + n
- if (nUnpacked > nBuffer) then
- nUnpacked = nUnpacked - n
- lastUnpackedIdx = i - 1
- return
- end if
- k = nUnpacked-n+1
- do j=d2s,d2e
- field(d1s:d1e,j,recvList % list(i)) = buffer(k:k+d1e-d1s)
- k = k + d1e-d1s+1
- end do
+ if(.not. comm_list_found) then
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+ end do
+
+ fieldCursor => fieldCursor % next
end do
- lastUnpackedIdx = recvList % nlist
- end subroutine mpas_unpack_recv_buf3d_real
+ ! Remove the dead head pointer on send and recv list
+ commListPtr => sendList
+ sendList => sendList % next
+ deallocate(commListPtr)
+ commListPtr => recvList
+ recvList => recvList % next
+ deallocate(commListPtr)
- subroutine mpas_dmpar_exch_halo_field1d_real(field, haloLayers)
+ ! Determine size of recv lists
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = bufferOffset
- implicit none
+ commListPtr => commListPtr % next
+ end do
- type (field1DReal), intent(inout) :: field
- integer, dimension(:), intent(in), optional :: haloLayers
+ ! Allocate space in recv lists, and initiate mpi_irecv calls
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % ibuffer(commListPtr % nList))
+ nullify(commListPtr % rbuffer)
+ call MPI_Irecv(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
- type (dm_info) :: dminfo
- type (exchange_list), pointer :: sendList, recvList
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer, dimension(size(field % dimSizes)) :: dims
+ commListPtr => commListPtr % next
+ end do
-#ifdef _MPI
+ ! Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % ibuffer(commListPtr % nList))
+ nullify(commListPtr % rbuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldCursor % dimSizes(2)
+ do k = 1, fieldCursor % dimSizes(1)
+ commListPtr % ibuffer((exchListPtr % destList(i)-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) &
+ + (j-1) * fieldCursor % dimSizes(1) + k + bufferOffset) = fieldCursor % array(k, j, exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
+ end do
+ end do
+ end if
- dminfo = field % block % domain % dminfo
- dims = field % dimSizes
+ exchListPtr => exchListPtr % next
+ end do
- call mpas_aggregate_exchange_lists(dminfo % my_proc_id, haloLayers, field % sendList, field % recvList, sendList, recvList)
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- allocate(recvListPtr % rbuffer(recvListPtr % nlist))
- call MPI_Irecv(recvListPtr % rbuffer, recvListPtr % nlist, MPI_REALKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
+ call MPI_Isend(commListPtr % ibuffer, commListPtr % nList, MPI_INTEGERKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
end do
+#endif
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- allocate(sendListPtr % rbuffer(sendListPtr % nlist))
- call mpas_pack_send_buf1d_real(dims(1), field % array, sendListPtr, 1, sendListPtr % nlist, sendListPtr % rbuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % rbuffer, sendListPtr % nlist, MPI_REALKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
+ ! Handle local copy. If MPI is off, then only local copies are performed.
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ do iHalo = 1, nHaloLayers
+ exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList
+
+ do while(associated(exchListPtr))
+ fieldCursor2 => field
+ do while(associated(fieldCursor2))
+ if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldCursor2 % array(:, :, exchListPtr % destList(i)) = fieldCursor % array(:, :, exchListPtr % srcList(i))
+ end do
+ end if
+
+ fieldCursor2 => fieldCursor2 % next
+ end do
+
+ exchListPtr => exchListPtr % next
+ end do
+ end do
+
+ fieldCursor => fieldCursor % next
end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- call mpas_unpack_recv_buf1d_real(dims(1), field % array, recvListPtr, 1, recvListPtr % nlist, recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % rbuffer)
- end if
- recvListPtr => recvListPtr % next
+#ifdef _MPI
+
+ ! Wait for mpi_irecv to finish, and unpack data from buffer
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldCursor % dimSizes(2)
+ do k = 1, fieldCursor % dimSizes(1)
+ fieldCursor % array(k, j, exchListPtr % destList(i)) = commListPtr % ibuffer((exchListPtr % srcList(i)-1)*fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) &
+ + (j-1)*fieldCursor % dimSizes(1) + k + bufferOffset)
+ end do
+ end do
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr => commListPtr % next
end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % rbuffer)
- end if
- sendListPtr => sendListPtr % next
+
+ ! wait for mpi_isend to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
end do
- call mpas_destroy_exchange_list(sendList)
- call mpas_destroy_exchange_list(recvList)
-
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
#endif
- end subroutine mpas_dmpar_exch_halo_field1d_real
+ deallocate(haloLayers)
+ end subroutine mpas_dmpar_exch_halo_field3d_integer!}}}
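Every halo-exchange routine takes an optional haloLayersIn argument: when it is omitted, all layers in field % sendList % halos are exchanged; otherwise only the listed layers are. A small sketch of the same optional-argument handling is below; the total layer count of 3 is chosen arbitrarily as a stand-in for size(field % sendList % halos).

      program optional_halos_demo
         implicit none
         integer, dimension(2) :: someLayers = (/ 1, 2 /)

         call report_layers()                ! no argument: default to all layers
         call report_layers(someLayers)      ! explicit subset

      contains

         subroutine report_layers(haloLayersIn)
            integer, dimension(:), intent(in), optional :: haloLayersIn
            integer, parameter :: nAllHalos = 3         ! stand-in for size(field % sendList % halos)
            integer, dimension(:), pointer :: haloLayers
            integer :: nHaloLayers, iHalo

            if (present(haloLayersIn)) then
               nHaloLayers = size(haloLayersIn)
               allocate(haloLayers(nHaloLayers))
               haloLayers(:) = haloLayersIn(:)
            else
               nHaloLayers = nAllHalos
               allocate(haloLayers(nHaloLayers))
               do iHalo = 1, nHaloLayers
                  haloLayers(iHalo) = iHalo
               end do
            end if

            write(0,*) 'exchanging halo layers: ', haloLayers
            deallocate(haloLayers)
         end subroutine report_layers

      end program optional_halos_demo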
- subroutine mpas_dmpar_exch_halo_field2d_real(field, haloLayers)
+ subroutine mpas_dmpar_exch_halo_field1d_real(field, haloLayersIn)!{{{
implicit none
- type (field2DReal), intent(inout) :: field
- integer, dimension(:), intent(in), optional :: haloLayers
+ type (field1dReal), pointer :: field
+ integer, dimension(:), intent(in), optional :: haloLayersIn
- type (dm_info) :: dminfo
- type (exchange_list), pointer :: sendList, recvList
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ type (dm_info), pointer :: dminfo
+ type (field1dReal), pointer :: fieldCursor, fieldCursor2
+ type (mpas_exchange_list), pointer :: exchListPtr
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
integer :: mpi_ierr
- integer :: d2
- integer, dimension(size(field % dimSizes)) :: dims
+ integer :: nHaloLayers, iHalo, i
+ integer :: bufferOffset, nAdded
+ integer, dimension(:), pointer :: haloLayers
+ logical :: comm_list_found
+ dminfo => field % block % domain % dminfo
+
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
+ else
+ nHaloLayers = size(field % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
+ end if
+
#ifdef _MPI
+ ! Allocate communication lists, and set up dead header nodes
+ allocate(sendList)
+ nullify(sendList % next)
+ sendList % procID = -1
+ sendList % nList = 0
+ allocate(recvList)
+ nullify(recvList % next)
+ recvList % procID = -1
+ recvList % nList = 0
+
dminfo = field % block % domain % dminfo
- dims = field % dimSizes
- call mpas_aggregate_exchange_lists(dminfo % my_proc_id, haloLayers, field % sendList, field % recvList, sendList, recvList)
+ ! Determine size of buffers for communication lists
+ fieldCursor => field
+ do while(associated(fieldCursor))
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- d2 = dims(1) * recvListPtr % nlist
- allocate(recvListPtr % rbuffer(d2))
- call MPI_Irecv(recvListPtr % rbuffer, d2, MPI_REALKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
+ ! Need to aggregate across halo layers
+ do iHalo = 1, nHaloLayers
+
+ ! Determine size from send lists
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ if(.not. comm_list_found) then
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ ! Setup recv lists
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ if(.not. comm_list_found) then
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+ end do
+
+ fieldCursor => fieldCursor % next
end do
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- d2 = dims(1) * sendListPtr % nlist
- allocate(sendListPtr % rbuffer(d2))
- call mpas_pack_send_buf2d_real(1, dims(1), dims(2), field % array, sendListPtr, 1, d2, sendListPtr % rbuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % rbuffer, d2, MPI_REALKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
+ ! Remove the dead head pointer on send and recv list
+ commListPtr => sendList
+ sendList => sendList % next
+ deallocate(commListPtr)
+
+ commListPtr => recvList
+ recvList => recvList % next
+ deallocate(commListPtr)
+
+ ! Determine size of recv lists
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = bufferOffset
+
+ commListPtr => commListPtr % next
end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d2 = dims(1) * recvListPtr % nlist
- call mpas_unpack_recv_buf2d_real(1, dims(1), dims(2), field % array, recvListPtr, 1, d2, recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % rbuffer)
- end if
- recvListPtr => recvListPtr % next
+
+ ! Allocate space in recv lists, and initiate mpi_irecv calls
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+
+ commListPtr => commListPtr % next
end do
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % rbuffer)
- end if
- sendListPtr => sendListPtr % next
+ ! Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ commListPtr % rbuffer(exchListPtr % destList(i) + bufferOffset) = fieldCursor % array(exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+
+ call MPI_Isend(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
end do
+#endif
- call mpas_destroy_exchange_list(sendList)
- call mpas_destroy_exchange_list(recvList)
+ ! Handle local copy. If MPI is off, then only local copies are performed.
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ do iHalo = 1, nHaloLayers
+ exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ fieldCursor2 => field
+ do while(associated(fieldCursor2))
+ if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldCursor2 % array(exchListPtr % destList(i)) = fieldCursor % array(exchListPtr % srcList(i))
+ end do
+ end if
+
+ fieldCursor2 => fieldCursor2 % next
+ end do
+
+ exchListPtr => exchListPtr % next
+ end do
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+
+#ifdef _MPI
+
+ ! Wait for mpi_irecv to finish, and unpack data from buffer
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ fieldCursor % array(exchListPtr % destList(i)) = commListPtr % rbuffer(exchListPtr % srcList(i) + bufferOffset)
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr => commListPtr % next
+ end do
+
+ ! wait for mpi_isend to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
+
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
#endif
- end subroutine mpas_dmpar_exch_halo_field2d_real
+ deallocate(haloLayers)
+ end subroutine mpas_dmpar_exch_halo_field1d_real!}}}
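The copyList path handles multiple blocks that live on the same task: for each copy exchange list whose endPointID matches another block's localBlockID, values move array-to-array with no MPI calls. A sketch with two plain arrays standing in for two blocks' field storage and made-up index lists:

      program local_copy_demo
         implicit none
         ! Two blocks on the same task, each with 5 owned cells plus 2 halo cells.
         real, dimension(7) :: block0 = (/ 10., 11., 12., 13., 14., 0., 0. /)
         real, dimension(7) :: block1 = (/ 20., 21., 22., 23., 24., 0., 0. /)
         ! Copy exchange list from block 0 to block 1 (illustrative indices):
         ! block 1's halo cells 6 and 7 are copies of block 0's owned cells 2 and 4.
         integer, dimension(2), parameter :: srcList  = (/ 2, 4 /)
         integer, dimension(2), parameter :: destList = (/ 6, 7 /)
         integer :: i

         do i = 1, size(srcList)
            block1(destList(i)) = block0(srcList(i))
         end do

         write(0,*) 'block 1 halo after local copy: ', block1(6:7)
      end program local_copy_demo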
- subroutine mpas_dmpar_exch_halo_field3d_real(field, haloLayers)
+ subroutine mpas_dmpar_exch_halo_field2d_real(field, haloLayersIn)!{{{
implicit none
- type (field3DReal), intent(inout) :: field
- integer, dimension(:), intent(in), optional :: haloLayers
+ type (field2dReal), pointer :: field
+ integer, dimension(:), intent(in), optional :: haloLayersIn
- type (dm_info) :: dminfo
- type (exchange_list), pointer :: sendList, recvList
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ type (dm_info), pointer :: dminfo
+ type (field2dReal), pointer :: fieldCursor, fieldCursor2
+ type (mpas_exchange_list), pointer :: exchListPtr
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
integer :: mpi_ierr
- integer :: d3
- integer, dimension(size(field % dimSizes)) :: dims
+ integer :: nHaloLayers, iHalo, i, j
+ integer :: bufferOffset, nAdded
+ integer, dimension(:), pointer :: haloLayers
+ logical :: comm_list_found
+
+ dminfo => field % block % domain % dminfo
+
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
+ else
+ nHaloLayers = size(field % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
+ end if
+
#ifdef _MPI
+ ! Allocate communication lists, and set up dead header nodes
+ allocate(sendList)
+ nullify(sendList % next)
+ sendList % procID = -1
+ sendList % nList = 0
+ allocate(recvList)
+ nullify(recvList % next)
+ recvList % procID = -1
+ recvList % nList = 0
+
dminfo = field % block % domain % dminfo
- dims = field % dimSizes
- call mpas_aggregate_exchange_lists(dminfo % my_proc_id, haloLayers, field % sendList, field % recvList, sendList, recvList)
+ ! Determine size of buffers for communication lists
+ fieldCursor => field
+ do while(associated(fieldCursor))
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- d3 = dims(1) * dims(2) * recvListPtr % nlist
- allocate(recvListPtr % rbuffer(d3))
- call MPI_Irecv(recvListPtr % rbuffer, d3, MPI_REALKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
+ ! Need to aggregate across halo layers
+ do iHalo = 1, nHaloLayers
+
+ ! Determine size from send lists
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldCursor % dimSizes(1)
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ if(.not. comm_list_found) then
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldCursor % dimSizes(1)
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ ! Setup recv lists
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldCursor % dimSizes(1)
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ if(.not. comm_list_found) then
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldCursor % dimSizes(1)
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+ end do
+
+ fieldCursor => fieldCursor % next
end do
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- d3 = dims(1) * dims(2) * sendListPtr % nlist
- allocate(sendListPtr % rbuffer(d3))
- call mpas_pack_send_buf3d_real(1, dims(1), 1, dims(2), dims(3), field % array, sendListPtr, 1, d3, &
- sendListPtr % rbuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % rbuffer, d3, MPI_REALKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
+ ! Remove the dead head pointer on send and recv list
+ commListPtr => sendList
+ sendList => sendList % next
+ deallocate(commListPtr)
+
+ commListPtr => recvList
+ recvList => recvList % next
+ deallocate(commListPtr)
+
+ ! Determine size of recv lists
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = bufferOffset
+
+ commListPtr => commListPtr % next
end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d3 = dims(1) * dims(2) * recvListPtr % nlist
- call mpas_unpack_recv_buf3d_real(1, dims(1), 1, dims(2), dims(3), field % array, recvListPtr, 1, d3, &
- recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % rbuffer)
- end if
- recvListPtr => recvListPtr % next
+ ! Allocate space in recv lists, and initiate mpi_irecv calls
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+
+ commListPtr => commListPtr % next
end do
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % rbuffer)
- end if
- sendListPtr => sendListPtr % next
+ ! Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldCursor % dimSizes(1)
+ commListPtr % rbuffer((exchListPtr % destList(i)-1) * fieldCursor % dimSizes(1) + j + bufferOffset) = fieldCursor % array(j, exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
+ end do
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+
+ call MPI_Isend(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
end do
+#endif
- call mpas_destroy_exchange_list(sendList)
- call mpas_destroy_exchange_list(recvList)
+ ! Handle local copy. If MPI is off, then only local copies are performed.
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ do iHalo = 1, nHaloLayers
+ exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ fieldCursor2 => field
+ do while(associated(fieldCursor2))
+ if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldCursor2 % array(:, exchListPtr % destList(i)) = fieldCursor % array(:, exchListPtr % srcList(i))
+ end do
+ end if
+
+ fieldCursor2 => fieldCursor2 % next
+ end do
+
+ exchListPtr => exchListPtr % next
+ end do
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+
+#ifdef _MPI
+
+ ! Wait for mpi_irecv to finish, and unpack data from buffer
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldCursor % dimSizes(1)
+ fieldCursor % array(j, exchListPtr % destList(i)) = commListPtr % rbuffer((exchListPtr % srcList(i)-1)*fieldCursor % dimSizes(1) + j + bufferOffset)
+ end do
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr => commListPtr % next
+ end do
+
+ ! Wait for mpi_isend to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
+
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
#endif
- end subroutine mpas_dmpar_exch_halo_field3d_real
+ deallocate(haloLayers)
+ end subroutine mpas_dmpar_exch_halo_field2d_real!}}}
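The buffer sizing above follows one pattern for every destination rank: scan the per-process communication list for a node whose procID matches the exchange list's endPointID, accumulate the buffer length if it is found, and otherwise append a new node at the tail (both lists start from a throw-away head node that is removed before the buffers are allocated). The following is a minimal, standalone sketch of that search-or-append idiom; the comm_node type and the sample data are illustrative only and are not part of mpas_dmpar.

program comm_list_sketch
   implicit none

   ! Illustrative stand-in for the per-process communication list:
   ! one node per destination rank, accumulating the buffer length.
   type :: comm_node
      integer :: procID = -1
      integer :: nList  = 0
      type (comm_node), pointer :: next => null()
   end type comm_node

   type (comm_node), pointer :: head, cursor, tail
   integer :: ranks(5), lengths(5), i
   logical :: found

   ranks   = (/ 3, 1, 3, 2, 1 /)     ! endPointID of each exchange-list entry
   lengths = (/ 10, 4, 6, 8, 2 /)    ! nList * dimSizes(1) of each entry

   ! Throw-away head node, as in the halo-exchange routines above
   allocate(head)

   do i = 1, 5
      found = .false.
      cursor => head
      do while (associated(cursor))
         if (cursor % procID == ranks(i)) then
            cursor % nList = cursor % nList + lengths(i)
            found = .true.
            exit
         end if
         cursor => cursor % next
      end do

      if (.not. found) then
         ! Walk to the tail and append a new node for this rank
         tail => head
         do while (associated(tail % next))
            tail => tail % next
         end do
         allocate(tail % next)
         tail % next % procID = ranks(i)
         tail % next % nList  = lengths(i)
      end if
   end do

   ! Drop the throw-away head before use, as the routines above do
   cursor => head
   head => head % next
   deallocate(cursor)

   cursor => head
   do while (associated(cursor))
      write(*,*) 'rank', cursor % procID, 'buffer length', cursor % nList
      cursor => cursor % next
   end do
end program comm_list_sketch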
- subroutine mpas_aggregate_exchange_lists(myProcID, haloLayersIn, sendListArray, recvListArray, aggregateSendList, aggregateRecvList)
+ subroutine mpas_dmpar_exch_halo_field3d_real(field, haloLayersIn)!{{{
implicit none
- !--- in variables ---!
- integer, intent(in) :: myProcID
- integer, dimension(:), intent(in), target, optional :: haloLayersIn
- type (exchange_list), dimension(:), pointer :: sendListArray, recvListArray
-
- !--- out variabls ---!
- type (exchange_list), pointer :: aggregateSendList, aggregateRecvList
+ type (field3dReal), pointer :: field
+ integer, dimension(:), intent(in), optional :: haloLayersIn
- !--- local variables ---!
- integer :: i, j
+ type (dm_info), pointer :: dminfo
+ type (field3dReal), pointer :: fieldCursor, fieldCursor2
+ type (mpas_exchange_list), pointer :: exchListPtr
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
+ integer :: mpi_ierr
+ integer :: nHaloLayers, iHalo, i, j, k
+ integer :: bufferOffset, nAdded
integer, dimension(:), pointer :: haloLayers
- type (exchange_list), pointer :: inListPtr, aggListPtr
- logical :: blockAdded
- logical :: listInitilized
- if (present(haloLayersIn)) then
- haloLayers => haloLayersIn
+ logical :: comm_list_found
+
+ dminfo => field % block % domain % dminfo
+
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
else
- allocate(haloLayers(size(sendListArray)))
- do i=1, size(haloLayers)
- haloLayers(i) = i
- end do
+ nHaloLayers = size(field % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
end if
- nullify(aggregateSendList)
- nullify(aggregateRecvList)
+#ifdef _MPI
+ ! Allocate communication lists, and setup dead header nodes
+ allocate(sendList)
+ nullify(sendList % next)
+ sendList % procID = -1
+ sendList % nList = 0
- do i=1, size(haloLayers)
+ allocate(recvList)
+ nullify(recvList % next)
+ recvList % procID = -1
+ recvList % nList = 0
- inListPtr => sendListArray(haloLayers(i)) % next
- do while(associated(inListPtr))
+ dminfo = field % block % domain % dminfo
- blockAdded = .false.
- aggListPtr => aggregateSendList
-
- do while(associated(aggListPtr))
- if(inListPtr % blockID == aggListPtr % blockID) then
- if(inListPtr % procID .ne. myProcID) then
- call mpas_merge_integer_arrays(aggListPtr % list, aggListPtr % nlist, inListPtr % list)
- end if
- blockAdded = .true.
- exit
- end if
- aggListPtr => aggListPtr % next
+ ! Determine size of buffers for communication lists
+ fieldCursor => field
+ do while(associated(fieldCursor))
+
+ ! Need to aggregate across halo layers
+ do iHalo = 1, nHaloLayers
+
+ ! Determine size from send lists
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2)
+ exit
+ end if
+
+ commListPtr => commListPtr % next
end do
- if(.not. blockAdded) then
-
- if (.not. associated(aggregateSendList)) then
- allocate(aggregateSendList)
- nullify(aggregateSendList % next)
- aggListPtr => aggregateSendList
- else
- aggListPtr => aggregateSendList
- do while(associated(aggListPtr % next))
- aggListPtr => aggListPtr % next
- end do
- allocate(aggListPtr % next)
- aggListPtr => aggListPtr % next
- end if
+ if(.not. comm_list_found) then
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
- nullify(aggListPtr % next)
- aggListPtr % procID = inListPtr % procID
- aggListPtr % blockID = inListPtr % blockID
- aggListPtr % nlist = inListPtr % nlist
- allocate(aggListPtr % list(inListPtr % nlist))
- aggListPtr % list = inListPtr % list
- aggListPtr % reqID = inListPtr % reqID
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2)
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ ! Setup recv lists
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ if(.not. comm_list_found) then
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
end if
- inListPtr => inListPtr % next
- end do
+ exchListPtr => exchListPtr % next
+ end do
+ end do
+ fieldCursor => fieldCursor % next
+ end do
- inListPtr => recvListArray(haloLayers(i)) % next
- do while(associated(inListPtr))
+ ! Remove the dead head pointer on send and recv lists
+ commListPtr => sendList
+ sendList => sendList % next
+ deallocate(commListPtr)
- blockAdded = .false.
- aggListPtr => aggregateRecvList
- do while(associated(aggListPtr))
- if(inListPtr % blockID == aggListPtr % blockID) then
- if(inListPtr % procID .ne. myProcID) then
- call mpas_merge_integer_arrays(aggListPtr % list, aggListPtr % nlist, inListPtr % list)
- end if
- blockAdded = .true.
- exit
- end if
- aggListPtr => aggListPtr % next
+ commListPtr => recvList
+ recvList => recvList % next
+ deallocate(commListPtr)
+
+ ! Determine size of recv lists
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2))
+ end if
+ exchListPtr => exchListPtr % next
end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = bufferOffset
- if(.not. blockAdded) then
+ commListPtr => commListPtr % next
+ end do
- if (.not. associated(aggregateRecvList)) then
- allocate(aggregateRecvList)
- nullify(aggregateRecvList % next)
- aggListPtr => aggregateRecvList
- else
- aggListPtr => aggregateRecvList
- do while(associated(aggListPtr % next))
- aggListPtr => aggListPtr % next
+ ! Allocate space in recv lists, and initiate mpi_irecv calls
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldCursor % dimSizes(2)
+ do k = 1, fieldCursor % dimSizes(1)
+ commListPtr % rbuffer((exchListPtr % destList(i)-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) &
+ + (j-1) * fieldCursor % dimSizes(1) + k + bufferOffset) = fieldCursor % array(k, j, exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
end do
+ end do
+ end if
- allocate(aggListPtr % next)
- aggListPtr => aggListPtr % next
- nullify(aggListPtr % next)
- end if
-
- aggListPtr % procID = inListPtr % procID
- aggListPtr % blockID = inListPtr % blockID
- aggListPtr % nlist = inListPtr % nlist
- allocate(aggListPtr % list(inListPtr % nlist))
- aggListPtr % list = inListPtr % list
- aggListPtr % reqID = inListPtr % reqID
+ exchListPtr => exchListPtr % next
+ end do
- end if
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
- inListPtr => inListPtr % next
- end do
+ call MPI_Isend(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
+#endif
+ ! Handle local copy. If MPI is off, then only local copies are performed.
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ do iHalo = 1, nHaloLayers
+ exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList
+
+ do while(associated(exchListPtr))
+ fieldCursor2 => field
+ do while(associated(fieldCursor2))
+ if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldCursor2 % array(:, :, exchListPtr % destList(i)) = fieldCursor % array(:, :, exchListPtr % srcList(i))
+ end do
+ end if
+
+ fieldCursor2 => fieldCursor2 % next
+ end do
+
+ exchListPtr => exchListPtr % next
+ end do
+ end do
+
+ fieldCursor => fieldCursor % next
end do
- if (.not. present(haloLayersIn)) then
- deallocate(haloLayers)
- end if
+#ifdef _MPI
- end subroutine mpas_aggregate_exchange_lists
+ ! Wait for mpi_irecv to finish, and unpack data from buffer
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldCursor % dimSizes(2)
+ do k = 1, fieldCursor % dimSizes(1)
+ fieldCursor % array(k, j, exchListPtr % destList(i)) = commListPtr % rbuffer((exchListPtr % srcList(i)-1)*fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) &
+ + (j-1)*fieldCursor % dimSizes(1) + k + bufferOffset)
+ end do
+ end do
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr => commListPtr % next
+ end do
+ ! Wait for mpi_isend to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
- subroutine mpas_destroy_exchange_list(exchangeList)
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
+#endif
- implicit none
+ deallocate(haloLayers)
- !--- in variables ---!
- type (exchange_list), pointer :: exchangeList
+ end subroutine mpas_dmpar_exch_halo_field3d_real!}}}
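Both exchange routines flatten field data into 1-D MPI buffers; for the 3-D case the pack and unpack loops linearize (k, j, element) as (destList(i)-1)*dimSizes(1)*dimSizes(2) + (j-1)*dimSizes(1) + k, plus a bufferOffset that separates halo layers and fields sharing a buffer. Below is a self-contained round-trip check of that index arithmetic for a single field and a single halo layer (so bufferOffset is omitted); the array sizes and index lists are made up, and the sender's destList is assumed to line up with the receiver's srcList, which is what the exchange lists guarantee in the routines above.

program pack_index_sketch
   implicit none
   integer, parameter :: d1 = 3, d2 = 4      ! stand-ins for dimSizes(1:2)
   integer, parameter :: nSend = 2           ! entries in a send exchange list
   integer :: srcList(nSend), destList(nSend)
   real :: field(d1, d2, 5)                  ! last dimension indexed by cell
   real :: buffer(d1*d2*nSend)
   real :: unpacked(d1, d2, 5)
   integer :: i, j, k, idx

   srcList  = (/ 2, 5 /)    ! local elements to pack on the sender
   destList = (/ 1, 2 /)    ! positions in the remote buffer, 1-based
   call random_number(field)
   unpacked = 0.0

   ! Pack: same linearization as the field3d send loop above
   do i = 1, nSend
      do j = 1, d2
         do k = 1, d1
            idx = (destList(i)-1)*d1*d2 + (j-1)*d1 + k
            buffer(idx) = field(k, j, srcList(i))
         end do
      end do
   end do

   ! Unpack on the "receiver"; there the buffer position comes from the recv
   ! exchange list's srcList, which matches the sender's destList used here
   do i = 1, nSend
      do j = 1, d2
         do k = 1, d1
            idx = (destList(i)-1)*d1*d2 + (j-1)*d1 + k
            unpacked(k, j, destList(i)) = buffer(idx)
         end do
      end do
   end do

   write(*,*) 'max pack/unpack error:', &
      maxval(abs(unpacked(:, :, destList) - field(:, :, srcList)))
end program pack_index_sketch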
- !--- local variables ---!
- type (exchange_list), pointer :: exchangeListPtr
+ subroutine mpas_dmpar_init_mulithalo_exchange_list(exchList, nHalos)!{{{
+ type (mpas_multihalo_exchange_list), pointer :: exchList
+ integer, intent(in) :: nHalos
- do while (associated(exchangeList))
- exchangeListPtr => exchangeList % next
+ integer :: i
- deallocate(exchangeList % list)
- deallocate(exchangeList)
- exchangeList => exchangeListPtr
- end do
+ allocate(exchList)
+ allocate(exchList % halos(nHalos))
+ do i = 1, nHalos
+ nullify(exchList % halos(i) % exchList)
+ end do
+ end subroutine mpas_dmpar_init_mulithalo_exchange_list!}}}
- end subroutine mpas_destroy_exchange_list
+ subroutine mpas_dmpar_destroy_mulithalo_exchange_list(exchList)!{{{
+ type (mpas_multihalo_exchange_list), pointer :: exchList
+ integer :: nHalos
+ integer :: i
- subroutine mpas_merge_integer_arrays(mergeArray, nMergeArray, dataToAppend)
+ nHalos = size(exchList % halos)
- implicit none
+ do i = 1, nHalos
+ call mpas_dmpar_destroy_exchange_list(exchList % halos(i) % exchList)
+ end do
- !--- inout variables ---!
- integer, dimension(:), pointer :: mergeArray
- integer, intent(inout) :: nMergeArray
+ deallocate(exchList % halos)
+ deallocate(exchList)
+ nullify(exchList)
+ end subroutine mpas_dmpar_destroy_mulithalo_exchange_list!}}}
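A sketch of how a caller might use the two routines above to manage a block's exchange lists; the wrapper subroutine and the use of parallel_info members are illustrative assumptions, and only the init/destroy calls and the halos(:) % exchList layout come from this commit.

subroutine setup_cell_exchange_lists(parinfo)
   use mpas_dmpar
   use mpas_grid_types
   implicit none
   type (parallel_info), pointer :: parinfo
   integer, parameter :: nHaloLayers = 2

   ! Create one (initially empty) exchange-list head per halo layer
   call mpas_dmpar_init_mulithalo_exchange_list(parinfo % cellsToSend, nHaloLayers)
   call mpas_dmpar_init_mulithalo_exchange_list(parinfo % cellsToRecv, nHaloLayers)
   call mpas_dmpar_init_mulithalo_exchange_list(parinfo % cellsToCopy, nHaloLayers)

   ! ... fill halos(iHalo) % exchList for each list here ...

   ! Matching teardown, once the block is destroyed:
   !   call mpas_dmpar_destroy_mulithalo_exchange_list(parinfo % cellsToSend)
end subroutine setup_cell_exchange_lists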
- !--- in variables ---!
- integer, dimension(:), pointer :: dataToAppend
+ subroutine mpas_dmpar_destroy_communication_list(commList)!{{{
+ type (mpas_communication_list), pointer :: commList
+ type (mpas_communication_list), pointer :: commListPtr
- !--- local variables ---!
- integer :: nDataToAppend, newSize
- integer, dimension(nMergeArray) :: mergeArrayCopy
-
-
- nDataToAppend = size(dataToAppend)
- newSize = nMergeArray + nDataToAppend
- mergeArrayCopy = mergeArray
- deallocate(mergeArray)
- allocate(mergeArray(newSize))
- mergeArray(1:nMergeArray) = mergeArrayCopy
- mergeArray(nMergeArray+1:newSize) = dataToAppend
- nMergeArray = newSize
+ commListPtr => commList
+ do while(associated(commListPtr))
+ if(associated(commList % next)) then
+ commList => commList % next
+ else
+ nullify(commList)
+ end if
- end subroutine mpas_merge_integer_arrays
+ if(associated(commListPtr % ibuffer)) then
+ deallocate(commListPtr % ibuffer)
+ end if
+ if(associated(commListPtr % rbuffer)) then
+ deallocate(commListPtr % rbuffer)
+ end if
+ deallocate(commListPtr)
+ commListPtr => commList
+ end do
+
+ end subroutine mpas_dmpar_destroy_communication_list!}}}
+
+ subroutine mpas_dmpar_destroy_exchange_list(exchList)!{{{
+ type (mpas_exchange_list), pointer :: exchList
+ type (mpas_exchange_list), pointer :: exchListPtr
+
+ exchListPtr => exchList
+ do while(associated(exchList))
+ if(associated(exchList % next)) then
+ exchList => exchList % next
+ else
+ nullify(exchList)
+ end if
+
+ if(associated(exchListPtr % srcList)) then
+ deallocate(exchListPtr % srcList)
+ end if
+
+ if(associated(exchListPtr % destList)) then
+ deallocate(exchListPtr % destList)
+ end if
+
+ deallocate(exchListPtr)
+ exchListPtr => exchList
+ end do
+
+ end subroutine mpas_dmpar_destroy_exchange_list!}}}
+
+ subroutine mpas_dmpar_copy_field1d_integer(field)!{{{
+ type (field1dInteger), pointer :: field
+ type (field1dInteger), pointer :: fieldCursor
+
+ if(associated(field % next)) then
+ fieldCursor => field % next
+ do while(associated(fieldCursor))
+ fieldCursor % array = field % array
+ fieldCursor => fieldCursor % next
+ end do
+ end if
+ end subroutine mpas_dmpar_copy_field1d_integer!}}}
+
+ subroutine mpas_dmpar_copy_field2d_integer(field)!{{{
+ type (field2dInteger), pointer :: field
+ type (field2dInteger), pointer :: fieldCursor
+
+ if(associated(field % next)) then
+ fieldCursor => field % next
+ do while(associated(fieldCursor))
+ fieldCursor % array = field % array
+ fieldCursor => fieldCursor % next
+ end do
+ end if
+ end subroutine mpas_dmpar_copy_field2d_integer!}}}
+
+ subroutine mpas_dmpar_copy_field3d_integer(field)!{{{
+ type (field3dInteger), pointer :: field
+ type (field3dInteger), pointer :: fieldCursor
+
+ if(associated(field % next)) then
+ fieldCursor => field % next
+ do while(associated(fieldCursor))
+ fieldCursor % array = field % array
+ fieldCursor => fieldCursor % next
+ end do
+ end if
+ end subroutine mpas_dmpar_copy_field3d_integer!}}}
+
+ subroutine mpas_dmpar_copy_field1d_real(field)!{{{
+ type (field1dReal), pointer :: field
+ type (field1dReal), pointer :: fieldCursor
+
+
+ if(associated(field % next)) then
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ fieldCursor % array(:) = field % array(:)
+ fieldCursor => fieldCursor % next
+ end do
+ end if
+ end subroutine mpas_dmpar_copy_field1d_real!}}}
+
+ subroutine mpas_dmpar_copy_field2d_real(field)!{{{
+ type (field2dReal), pointer :: field
+ type (field2dReal), pointer :: fieldCursor
+
+ if(associated(field % next)) then
+ fieldCursor => field % next
+ do while(associated(fieldCursor))
+ fieldCursor % array = field % array
+ fieldCursor => fieldCursor % next
+ end do
+ end if
+ end subroutine mpas_dmpar_copy_field2d_real!}}}
+
+ subroutine mpas_dmpar_copy_field3d_real(field)!{{{
+ type (field3dReal), pointer :: field
+ type (field3dReal), pointer :: fieldCursor
+
+ if(associated(field % next)) then
+ fieldCursor => field % next
+ do while(associated(fieldCursor))
+ fieldCursor % array = field % array
+ fieldCursor => fieldCursor % next
+ end do
+ end if
+ end subroutine mpas_dmpar_copy_field3d_real!}}}
+
end module mpas_dmpar
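The mpas_dmpar_copy_fieldNd_* helpers above take the head of a field's per-block linked list and overwrite every later block's array with the head block's data, which is useful for fields that are read or computed once per task and then replicated across blocks. (The 1-D real variant starts its cursor at the head field itself rather than at field % next, so the head array is also, harmlessly, copied onto itself; the net effect is the same as the other variants.) A hedged usage sketch, with an invented field name:

subroutine replicate_scalar_field(latCell)
   use mpas_dmpar
   use mpas_grid_types
   implicit none
   ! Head of a per-block field list; every block is assumed to have
   ! allocated its array with the same extent.
   type (field1dReal), pointer :: latCell

   ! After the head block's array has been filled (e.g. by an I/O read),
   ! push the same values into the array of every other block on this task.
   call mpas_dmpar_copy_field1d_real(latCell)
end subroutine replicate_scalar_field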
Modified: branches/dcmip/src/framework/mpas_dmpar_types.F
===================================================================
--- branches/dcmip/src/framework/mpas_dmpar_types.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/framework/mpas_dmpar_types.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -3,21 +3,50 @@
use mpas_kind_types
type dm_info
- integer :: nprocs, my_proc_id, comm, info
- logical :: using_external_comm
+ integer :: nprocs, my_proc_id, comm, info
+ logical :: using_external_comm
end type dm_info
type exchange_list
- integer :: procID
- integer :: blockID
- integer :: nlist
- integer, dimension(:), pointer :: list
- type (exchange_list), pointer :: next
- real (kind=RKIND), dimension(:), pointer :: rbuffer
- integer, dimension(:), pointer :: ibuffer
- integer :: reqID
+ integer :: procID
+ integer :: blockID
+ integer :: nlist
+ integer, dimension(:), pointer :: list
+ type (mpas_exchange_list), pointer :: next
+ real (kind=RKIND), dimension(:), pointer :: rbuffer
+ integer, dimension(:), pointer :: ibuffer
+ integer :: reqID
end type exchange_list
+ type mpas_exchange_list
+ integer :: endPointID
+ integer :: nlist
+ integer, dimension(:), pointer :: srcList
+ integer, dimension(:), pointer :: destList
+ type (mpas_exchange_list), pointer :: next
+
+ end type mpas_exchange_list
+
+ type mpas_exchange_list_pointer
+ type (mpas_exchange_list), pointer :: exchList
+ end type mpas_exchange_list_pointer
+
+ type mpas_multihalo_exchange_list
+ type (mpas_exchange_list_pointer), dimension(:), pointer :: halos
+ end type mpas_multihalo_exchange_list
+
+
+ type mpas_communication_list
+ integer :: procID
+ integer :: nlist
+ real (kind=RKIND), dimension(:), pointer :: rbuffer
+ integer, dimension(:), pointer :: ibuffer
+ integer :: reqID
+ type (mpas_communication_list), pointer :: next
+
+ end type mpas_communication_list
+
+
end module mpas_dmpar_types
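To make the relationship between the new types concrete: an mpas_multihalo_exchange_list owns one mpas_exchange_list_pointer per halo layer, each of which heads a linked list of mpas_exchange_list entries keyed by endPointID, while mpas_communication_list nodes (one per destination rank, carrying the flat rbuffer/ibuffer) are built transiently inside the exchange routines. The snippet below hand-builds a two-halo send list with a single entry; the numbers are arbitrary and the program exists only to show how the components nest.

program dmpar_types_sketch
   use mpas_dmpar_types
   implicit none
   type (mpas_multihalo_exchange_list), pointer :: sendList
   type (mpas_exchange_list), pointer :: entry

   allocate(sendList)
   allocate(sendList % halos(2))          ! two halo layers

   ! One hand-built entry in halo 1: send 3 owned elements to endpoint 7
   allocate(sendList % halos(1) % exchList)
   entry => sendList % halos(1) % exchList
   entry % endPointID = 7
   entry % nList = 3
   allocate(entry % srcList(3), entry % destList(3))
   entry % srcList  = (/ 4, 9, 11 /)      ! local indices to pack
   entry % destList = (/ 1, 2, 3 /)       ! positions in the remote buffer
   nullify(entry % next)
   nullify(sendList % halos(2) % exchList)
end program dmpar_types_sketch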
Modified: branches/dcmip/src/framework/mpas_framework.F
===================================================================
--- branches/dcmip/src/framework/mpas_framework.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/framework/mpas_framework.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -13,18 +13,19 @@
contains
- subroutine mpas_framework_init(dminfo, domain)
+ subroutine mpas_framework_init(dminfo, domain, mpi_comm)
implicit none
type (dm_info), pointer :: dminfo
type (domain_type), pointer :: domain
+ integer, intent(in), optional :: mpi_comm
integer :: pio_num_iotasks
integer :: pio_stride
allocate(dminfo)
- call mpas_dmpar_init(dminfo)
+ call mpas_dmpar_init(dminfo, mpi_comm)
call mpas_read_namelist(dminfo)
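The new optional mpi_comm argument lets a driver that has already initialized MPI hand the framework an existing communicator instead of letting mpas_dmpar_init create its own. A hypothetical coupled driver might look like the sketch below; only the extra argument is from this commit, while the surrounding program is an illustrative assumption (including the expectation that the framework will not finalize an externally supplied communicator).

program coupled_driver
   use mpas_framework
   use mpas_grid_types
   use mpas_dmpar_types
   implicit none
   include 'mpif.h'

   type (dm_info), pointer :: dminfo
   type (domain_type), pointer :: domain
   integer :: ierr, my_comm

   call MPI_Init(ierr)
   call MPI_Comm_dup(MPI_COMM_WORLD, my_comm, ierr)

   ! Hand the duplicated communicator to MPAS rather than letting it call MPI_Init
   call mpas_framework_init(dminfo, domain, mpi_comm=my_comm)
   ! ... run the model ...

   call MPI_Finalize(ierr)
end program coupled_driver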
Modified: branches/dcmip/src/framework/mpas_grid_types.F
===================================================================
--- branches/dcmip/src/framework/mpas_grid_types.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/framework/mpas_grid_types.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -44,9 +44,9 @@
type (field3DReal), pointer :: prev, next
! Halo communication lists
- type (exchange_list), dimension(:), pointer :: sendList
- type (exchange_list), dimension(:), pointer :: recvList
- type (exchange_list), dimension(:), pointer :: copyList
+ type (mpas_multihalo_exchange_list), pointer :: sendList
+ type (mpas_multihalo_exchange_list), pointer :: recvList
+ type (mpas_multihalo_exchange_list), pointer :: copyList
end type field3DReal
@@ -73,9 +73,9 @@
type (field2DReal), pointer :: prev, next
! Halo communication lists
- type (exchange_list), dimension(:), pointer :: sendList
- type (exchange_list), dimension(:), pointer :: recvList
- type (exchange_list), dimension(:), pointer :: copyList
+ type (mpas_multihalo_exchange_list), pointer :: sendList
+ type (mpas_multihalo_exchange_list), pointer :: recvList
+ type (mpas_multihalo_exchange_list), pointer :: copyList
end type field2DReal
@@ -102,9 +102,9 @@
type (field1DReal), pointer :: prev, next
! Halo communication lists
- type (exchange_list), dimension(:), pointer :: sendList
- type (exchange_list), dimension(:), pointer :: recvList
- type (exchange_list), dimension(:), pointer :: copyList
+ type (mpas_multihalo_exchange_list), pointer :: sendList
+ type (mpas_multihalo_exchange_list), pointer :: recvList
+ type (mpas_multihalo_exchange_list), pointer :: copyList
end type field1DReal
@@ -129,9 +129,9 @@
type (field0DReal), pointer :: prev, next
! Halo communication lists
- type (exchange_list), dimension(:), pointer :: sendList
- type (exchange_list), dimension(:), pointer :: recvList
- type (exchange_list), dimension(:), pointer :: copyList
+ type (mpas_multihalo_exchange_list), pointer :: sendList
+ type (mpas_multihalo_exchange_list), pointer :: recvList
+ type (mpas_multihalo_exchange_list), pointer :: copyList
end type field0DReal
@@ -158,9 +158,9 @@
type (field3DInteger), pointer :: prev, next
! Halo communication lists
- type (exchange_list), dimension(:), pointer :: sendList
- type (exchange_list), dimension(:), pointer :: recvList
- type (exchange_list), dimension(:), pointer :: copyList
+ type (mpas_multihalo_exchange_list), pointer :: sendList
+ type (mpas_multihalo_exchange_list), pointer :: recvList
+ type (mpas_multihalo_exchange_list), pointer :: copyList
end type field3DInteger
@@ -187,9 +187,9 @@
type (field2DInteger), pointer :: prev, next
! Halo communication lists
- type (exchange_list), dimension(:), pointer :: sendList
- type (exchange_list), dimension(:), pointer :: recvList
- type (exchange_list), dimension(:), pointer :: copyList
+ type (mpas_multihalo_exchange_list), pointer :: sendList
+ type (mpas_multihalo_exchange_list), pointer :: recvList
+ type (mpas_multihalo_exchange_list), pointer :: copyList
end type field2DInteger
@@ -216,9 +216,9 @@
type (field1DInteger), pointer :: prev, next
! Halo communication lists
- type (exchange_list), dimension(:), pointer :: sendList
- type (exchange_list), dimension(:), pointer :: recvList
- type (exchange_list), dimension(:), pointer :: copyList
+ type (mpas_multihalo_exchange_list), pointer :: sendList
+ type (mpas_multihalo_exchange_list), pointer :: recvList
+ type (mpas_multihalo_exchange_list), pointer :: copyList
end type field1DInteger
@@ -243,9 +243,9 @@
type (field0DInteger), pointer :: prev, next
! Halo communication lists
- type (exchange_list), dimension(:), pointer :: sendList
- type (exchange_list), dimension(:), pointer :: recvList
- type (exchange_list), dimension(:), pointer :: copyList
+ type (mpas_multihalo_exchange_list), pointer :: sendList
+ type (mpas_multihalo_exchange_list), pointer :: recvList
+ type (mpas_multihalo_exchange_list), pointer :: copyList
end type field0DInteger
@@ -272,9 +272,9 @@
type (field1DChar), pointer :: prev, next
! Halo communication lists
- type (exchange_list), dimension(:), pointer :: sendList
- type (exchange_list), dimension(:), pointer :: recvList
- type (exchange_list), dimension(:), pointer :: copyList
+ type (mpas_multihalo_exchange_list), pointer :: sendList
+ type (mpas_multihalo_exchange_list), pointer :: recvList
+ type (mpas_multihalo_exchange_list), pointer :: copyList
end type field1DChar
@@ -299,9 +299,9 @@
type (field0DChar), pointer :: prev, next
! Halo communication lists
- type (exchange_list), dimension(:), pointer :: sendList
- type (exchange_list), dimension(:), pointer :: recvList
- type (exchange_list), dimension(:), pointer :: copyList
+ type (mpas_multihalo_exchange_list), pointer :: sendList
+ type (mpas_multihalo_exchange_list), pointer :: recvList
+ type (mpas_multihalo_exchange_list), pointer :: copyList
end type field0DChar
@@ -325,17 +325,17 @@
! Type for storing (possibly architecture specific) information concerning to parallelism
type parallel_info
- type (exchange_list), dimension(:), pointer :: cellsToSend ! List of types describing which cells to send to other blocks
- type (exchange_list), dimension(:), pointer :: cellsToRecv ! List of types describing which cells to receive from other blocks
- type (exchange_list), dimension(:), pointer :: cellsToCopy ! List of types describing which cells to copy from other blocks
+ type (mpas_multihalo_exchange_list), pointer :: cellsToSend ! List of types describing which cells to send to other blocks
+ type (mpas_multihalo_exchange_list), pointer :: cellsToRecv ! List of types describing which cells to receive from other blocks
+ type (mpas_multihalo_exchange_list), pointer :: cellsToCopy ! List of types describing which cells to copy from other blocks
- type (exchange_list), dimension(:), pointer :: edgesToSend ! List of types describing which edges to send to other blocks
- type (exchange_list), dimension(:), pointer :: edgesToRecv ! List of types describing which edges to receive from other blocks
- type (exchange_list), dimension(:), pointer :: edgesToCopy ! List of types describing which edges to copy from other blocks
+ type (mpas_multihalo_exchange_list), pointer :: edgesToSend ! List of types describing which edges to send to other blocks
+ type (mpas_multihalo_exchange_list), pointer :: edgesToRecv ! List of types describing which edges to receive from other blocks
+ type (mpas_multihalo_exchange_list), pointer :: edgesToCopy ! List of types describing which edges to copy from other blocks
- type (exchange_list), dimension(:), pointer :: verticesToSend ! List of types describing which vertices to send to other blocks
- type (exchange_list), dimension(:), pointer :: verticesToRecv ! List of types describing which vertices to receive from other blocks
- type (exchange_list), dimension(:), pointer :: verticesToCopy ! List of types describing which vertices to copy from other blocks
+ type (mpas_multihalo_exchange_list), pointer :: verticesToSend ! List of types describing which vertices to send to other blocks
+ type (mpas_multihalo_exchange_list), pointer :: verticesToRecv ! List of types describing which vertices to receive from other blocks
+ type (mpas_multihalo_exchange_list), pointer :: verticesToCopy ! List of types describing which vertices to copy from other blocks
end type parallel_info
@@ -363,7 +363,20 @@
type (dm_info), pointer :: dminfo
end type domain_type
+ interface mpas_deallocate_field
+ module procedure mpas_deallocate_field0d_integer
+ module procedure mpas_deallocate_field1d_integer
+ module procedure mpas_deallocate_field2d_integer
+ module procedure mpas_deallocate_field3d_integer
+ module procedure mpas_deallocate_field0d_real
+ module procedure mpas_deallocate_field1d_real
+ module procedure mpas_deallocate_field2d_real
+ module procedure mpas_deallocate_field3d_real
+ module procedure mpas_deallocate_field0d_char
+ module procedure mpas_deallocate_field1d_char
+ end interface
+
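With this generic interface, callers can free any of the supported field kinds through a single name and let the compiler dispatch to the matching specific routine. A small illustrative helper (the field names are invented):

subroutine free_diagnostics(theta, indexToCellID, runLabel)
   use mpas_grid_types
   implicit none
   type (field3dReal),    pointer :: theta
   type (field1dInteger), pointer :: indexToCellID
   type (field0dChar),    pointer :: runLabel

   ! Each call resolves to the matching specific routine
   ! (mpas_deallocate_field3d_real, _field1d_integer, _field0d_char),
   ! which walks the per-block list and frees ioinfo and array storage.
   call mpas_deallocate_field(theta)
   call mpas_deallocate_field(indexToCellID)
   call mpas_deallocate_field(runLabel)
end subroutine free_diagnostics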
contains
@@ -381,41 +394,26 @@
end subroutine mpas_allocate_domain
- subroutine mpas_allocate_block(b, dom, blockID, &
+ subroutine mpas_allocate_block(nHaloLayers, b, dom, blockID, &
#include "dim_dummy_args.inc"
)
implicit none
+ integer, intent(in) :: nHaloLayers
type (block_type), pointer :: b
type (domain_type), pointer :: dom
integer, intent(in) :: blockID
#include "dim_dummy_decls.inc"
- integer, parameter :: nHaloLayers = 2
integer :: i
b % blockID = blockID
- nullify(b % prev)
- nullify(b % next)
-
allocate(b % parinfo)
- allocate(b % parinfo % cellsToSend(nHaloLayers))
- allocate(b % parinfo % cellsToRecv(nHaloLayers))
- allocate(b % parinfo % cellsToCopy(nHaloLayers))
-
- allocate(b % parinfo % edgesToSend(nHaloLayers + 1)) ! first index is owned-cell edges
- allocate(b % parinfo % edgesToRecv(nHaloLayers + 1)) ! first index is owned-cell edges
- allocate(b % parinfo % edgesToCopy(nHaloLayers + 1)) ! first index is owned-cell edges
-
- allocate(b % parinfo % verticesToSend(nHaloLayers + 1)) ! first index is owned-cell vertices
- allocate(b % parinfo % verticesToRecv(nHaloLayers + 1)) ! first index is owned-cell vertices
- allocate(b % parinfo % verticesToCopy(nHaloLayers + 1)) ! first index is owned-cell vertices
-
b % domain => dom
#include "block_allocs.inc"
@@ -425,9 +423,11 @@
#include "group_alloc_routines.inc"
+#include "provis_alloc_routines.inc"
- subroutine mpas_deallocate_domain(dom)
+ subroutine mpas_deallocate_domain(dom)!{{{
+
implicit none
type (domain_type), pointer :: dom
@@ -442,10 +442,268 @@
deallocate(dom)
- end subroutine mpas_deallocate_domain
+ end subroutine mpas_deallocate_domain!}}}
+ subroutine mpas_deallocate_field0d_integer(f)!{{{
+ type (field0dInteger), pointer :: f
+ type (field0dInteger), pointer :: f_cursor
- subroutine mpas_deallocate_block(b)
+ f_cursor => f
+
+ do while(associated(f_cursor))
+ if(associated(f % next)) then
+ f => f % next
+ else
+ nullify(f)
+ end if
+
+ if(associated(f_cursor % ioinfo)) then
+ deallocate(f_cursor % ioinfo)
+ end if
+
+ deallocate(f_cursor)
+ f_cursor => f
+ end do
+
+ end subroutine mpas_deallocate_field0d_integer!}}}
+
+ subroutine mpas_deallocate_field1d_integer(f)!{{{
+ type (field1dInteger), pointer :: f
+ type (field1dInteger), pointer :: f_cursor
+
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(associated(f % next)) then
+ f => f % next
+ else
+ nullify(f)
+ end if
+
+ if(associated(f_cursor % ioinfo)) then
+ deallocate(f_cursor % ioinfo)
+ end if
+
+ if(associated(f_cursor % array)) then
+ deallocate(f_cursor % array)
+ end if
+
+ deallocate(f_cursor)
+
+ f_cursor => f
+ end do
+
+ end subroutine mpas_deallocate_field1d_integer!}}}
+
+ subroutine mpas_deallocate_field2d_integer(f)!{{{
+ type (field2dInteger), pointer :: f
+ type (field2dInteger), pointer :: f_cursor
+
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(associated(f % next)) then
+ f => f % next
+ else
+ nullify(f)
+ end if
+
+ if(associated(f_cursor % ioinfo)) then
+ deallocate(f_cursor % ioinfo)
+ end if
+
+ if(associated(f_cursor % array)) then
+ deallocate(f_cursor % array)
+ end if
+
+ deallocate(f_cursor)
+
+ f_cursor => f
+ end do
+
+ end subroutine mpas_deallocate_field2d_integer!}}}
+
+ subroutine mpas_deallocate_field3d_integer(f)!{{{
+ type (field3dInteger), pointer :: f
+ type (field3dInteger), pointer :: f_cursor
+
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(associated(f % next)) then
+ f => f % next
+ else
+ nullify(f)
+ end if
+
+ if(associated(f_cursor % ioinfo)) then
+ deallocate(f_cursor % ioinfo)
+ end if
+
+ if(associated(f_cursor % array)) then
+ deallocate(f_cursor % array)
+ end if
+
+ deallocate(f_cursor)
+
+ f_cursor => f
+ end do
+
+ end subroutine mpas_deallocate_field3d_integer!}}}
+
+ subroutine mpas_deallocate_field0d_real(f)!{{{
+ type (field0dReal), pointer :: f
+ type (field0dReal), pointer :: f_cursor
+
+ f_cursor => f
+
+ do while(associated(f_cursor))
+ if(associated(f % next)) then
+ f => f % next
+ else
+ nullify(f)
+ end if
+
+ if(associated(f_cursor % ioinfo)) then
+ deallocate(f_cursor % ioinfo)
+ end if
+
+ deallocate(f_cursor)
+
+ f_cursor => f
+ end do
+
+ end subroutine mpas_deallocate_field0d_real!}}}
+
+ subroutine mpas_deallocate_field1d_real(f)!{{{
+ type (field1dReal), pointer :: f
+ type (field1dReal), pointer :: f_cursor
+
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(associated(f % next)) then
+ f => f % next
+ else
+ nullify(f)
+ end if
+
+ if(associated(f_cursor % ioinfo)) then
+ deallocate(f_cursor % ioinfo)
+ end if
+
+ if(associated(f_cursor % array)) then
+ deallocate(f_cursor % array)
+ end if
+
+ deallocate(f_cursor)
+
+ f_cursor => f
+ end do
+
+ end subroutine mpas_deallocate_field1d_real!}}}
+
+ subroutine mpas_deallocate_field2d_real(f)!{{{
+ type (field2dReal), pointer :: f
+ type (field2dReal), pointer :: f_cursor
+
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(associated(f % next)) then
+ f => f % next
+ else
+ nullify(f)
+ end if
+
+ if(associated(f_cursor % ioinfo)) then
+ deallocate(f_cursor % ioinfo)
+ end if
+
+ if(associated(f_cursor % array)) then
+ deallocate(f_cursor % array)
+ end if
+
+ deallocate(f_cursor)
+
+ f_cursor => f
+ end do
+
+ end subroutine mpas_deallocate_field2d_real!}}}
+
+ subroutine mpas_deallocate_field3d_real(f)!{{{
+ type (field3dReal), pointer :: f
+ type (field3dReal), pointer :: f_cursor
+
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(associated(f % next)) then
+ f => f % next
+ else
+ nullify(f)
+ end if
+
+ if(associated(f_cursor % ioinfo)) then
+ deallocate(f_cursor % ioinfo)
+ end if
+
+ if(associated(f_cursor % array)) then
+ deallocate(f_cursor % array)
+ end if
+
+ deallocate(f_cursor)
+
+ f_cursor => f
+ end do
+
+ end subroutine mpas_deallocate_field3d_real!}}}
+
+ subroutine mpas_deallocate_field0d_char(f)!{{{
+ type (field0dChar), pointer :: f
+ type (field0dChar), pointer :: f_cursor
+
+ f_cursor => f
+
+ do while(associated(f_cursor))
+ if(associated(f % next)) then
+ f => f % next
+ else
+ nullify(f)
+ end if
+
+ if(associated(f_cursor % ioinfo)) then
+ deallocate(f_cursor % ioinfo)
+ end if
+
+ deallocate(f_cursor)
+ f_cursor => f
+ end do
+
+ end subroutine mpas_deallocate_field0d_char!}}}
+
+ subroutine mpas_deallocate_field1d_char(f)!{{{
+ type (field1dChar), pointer :: f
+ type (field1dChar), pointer :: f_cursor
+
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(associated(f % next)) then
+ f => f % next
+ else
+ nullify(f)
+ end if
+
+ if(associated(f_cursor % ioinfo)) then
+ deallocate(f_cursor % ioinfo)
+ end if
+
+ if(associated(f_cursor % array)) then
+ deallocate(f_cursor % array)
+ end if
+
+ deallocate(f_cursor)
+
+ f_cursor => f
+ end do
+
+ end subroutine mpas_deallocate_field1d_char!}}}
+
+ subroutine mpas_deallocate_block(b)!{{{
implicit none
@@ -472,7 +730,7 @@
#include "block_deallocs.inc"
- end subroutine mpas_deallocate_block
+ end subroutine mpas_deallocate_block!}}}
#include "group_dealloc_routines.inc"
Modified: branches/dcmip/src/framework/mpas_hash.F
===================================================================
--- branches/dcmip/src/framework/mpas_hash.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/framework/mpas_hash.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -106,7 +106,7 @@
mpas_hash_search = .false.
hashval = mod(key, TABLESIZE) + 1
-
+
cursor => h%table(hashval)%p
do while(associated(cursor))
if (cursor%key == key) then
Modified: branches/dcmip/src/framework/mpas_io_input.F
===================================================================
--- branches/dcmip/src/framework/mpas_io_input.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/framework/mpas_io_input.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -3,6 +3,7 @@
use mpas_grid_types
use mpas_dmpar
use mpas_block_decomp
+ use mpas_block_creator
use mpas_sort
use mpas_configure
use mpas_timekeeping
@@ -26,26 +27,20 @@
end type io_input_object
-
- type (exchange_list), pointer :: sendCellList, recvCellList
- type (exchange_list), pointer :: sendEdgeList, recvEdgeList
- type (exchange_list), pointer :: sendVertexList, recvVertexList
- type (exchange_list), pointer :: sendVertLevelList, recvVertLevelList
-
integer :: readCellStart, readCellEnd, nReadCells
integer :: readEdgeStart, readEdgeEnd, nReadEdges
integer :: readVertexStart, readVertexEnd, nReadVertices
- integer :: readVertLevelStart, readVertLevelEnd, nReadVertLevels
-
contains
+ subroutine mpas_input_state_for_domain(domain)!{{{
- subroutine mpas_input_state_for_domain(domain)
-
implicit none
type (domain_type), pointer :: domain
+
+ type (block_type), pointer :: block_ptr
+ type (block_type), pointer :: readingBlock
integer :: i, j, k
type (io_input_object) :: input_obj
@@ -58,111 +53,80 @@
integer, dimension(:), pointer :: readIndices
type (MPAS_IO_Handle_type) :: inputHandle
- type (field1dInteger) :: indexToCellIDField
- type (field1dInteger) :: indexToEdgeIDField
- type (field1dInteger) :: indexToVertexIDField
- type (field1dInteger) :: nEdgesOnCellField
- type (field2dInteger) :: cellsOnCellField
- type (field2dInteger) :: edgesOnCellField
- type (field2dInteger) :: verticesOnCellField
- type (field2dInteger) :: cellsOnEdgeField
- type (field2dInteger) :: cellsOnVertexField
+ type (field1dInteger), pointer :: indexToCellIDField
+ type (field1dInteger), pointer :: indexToEdgeIDField
+ type (field1dInteger), pointer :: indexToVertexIDField
+ type (field1dInteger), pointer :: nEdgesOnCellField
+ type (field2dInteger), pointer :: cellsOnCellField
+ type (field2dInteger), pointer :: edgesOnCellField
+ type (field2dInteger), pointer :: verticesOnCellField
+ type (field2dInteger), pointer :: cellsOnEdgeField
+ type (field2dInteger), pointer :: cellsOnVertexField
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- type (field1dReal) :: xCellField, yCellField, zCellField
- type (field1dReal) :: xEdgeField, yEdgeField, zEdgeField
- type (field1dReal) :: xVertexField, yVertexField, zVertexField
-#endif
-#endif
+ type (field1dReal), pointer :: xCellField, yCellField, zCellField
+ type (field1dReal), pointer :: xEdgeField, yEdgeField, zEdgeField
+ type (field1dReal), pointer :: xVertexField, yVertexField, zVertexField
type (field1DChar) :: xtime
-
- integer, dimension(:), pointer :: indexToCellID_0Halo
- integer, dimension(:), pointer :: nEdgesOnCell_0Halo
- integer, dimension(:,:), pointer :: cellsOnCell_0Halo
- integer, dimension(:), pointer :: nEdgesOnCell_2Halo
+ type (field1dInteger), pointer :: nCellsSolveField
+ type (field1dInteger), pointer :: nVerticesSolveField
+ type (field1dInteger), pointer :: nEdgesSolveField
- integer, dimension(:,:), pointer :: edgesOnCell_2Halo
- integer, dimension(:,:), pointer :: verticesOnCell_2Halo
+ type (field1DInteger), pointer :: indexToCellID_Block
+ type (field1DInteger), pointer :: nEdgesOnCell_Block
+ type (field2DInteger), pointer :: cellsOnCell_Block
+ type (field2DInteger), pointer :: verticesOnCell_Block
+ type (field2DInteger), pointer :: edgesOnCell_Block
- integer, dimension(:,:), pointer :: cellsOnEdge_2Halo
- integer, dimension(:,:), pointer :: cellsOnVertex_2Halo
+ type (field1DInteger), pointer :: indexToVertexID_Block
+ type (field2DInteger), pointer :: cellsOnVertex_Block
- integer, dimension(:,:), pointer :: cellIDSorted
- integer, dimension(:,:), pointer :: edgeIDSorted
- integer, dimension(:,:), pointer :: vertexIDSorted
+ type (field1DInteger), pointer :: indexToEdgeID_Block
+ type (field2DInteger), pointer :: cellsOnEdge_Block
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- real (kind=RKIND), dimension(:), pointer :: xCell, yCell, zCell
- real (kind=RKIND), dimension(:), pointer :: xEdge, yEdge, zEdge
- real (kind=RKIND), dimension(:), pointer :: xVertex, yVertex, zVertex
-#endif
-#endif
+ type (field1DReal), pointer :: xCell, yCell, zCell
+ type (field1DReal), pointer :: xEdge, yEdge, zEdge
+ type (field1DReal), pointer :: xVertex, yVertex, zVertex
- integer, dimension(:), pointer :: local_cell_list, local_edge_list, local_vertex_list
+ integer, dimension(:), pointer :: local_cell_list
integer, dimension(:), pointer :: block_id, block_start, block_count
- integer, dimension(:), pointer :: local_vertlevel_list, needed_vertlevel_list
- integer :: nlocal_edges, nlocal_vertices
- type (exchange_list), pointer :: send1Halo, recv1Halo
- type (exchange_list), pointer :: send2Halo, recv2Halo
type (graph) :: partial_global_graph_info
- type (graph) :: block_graph_0Halo, block_graph_1Halo, block_graph_2Halo
- integer :: ghostEdgeStart, ghostVertexStart
type (MPAS_Time_type) :: startTime
- type (MPAS_Time_type) :: sliceTime
- type (MPAS_TimeInterval_type) :: timeDiff
- type (MPAS_TimeInterval_type) :: minTimeDiff
character(len=StrKIND) :: timeStamp
character(len=StrKIND) :: filename
- integer, parameter :: nHalos = 2
- integer, dimension(nHalos+1) :: nCellsCumulative ! own cells, halo 1 cells, halo 2 cells
- integer, dimension(nHalos+2) :: nEdgesCumulative ! own edges, own cell's edges, halo 1 edges, halo 2 edges
- integer, dimension(nHalos+2) :: nVerticesCumulative ! own vertices, own cell's vertices, halo 1 vertices, halo 2 vertices
+ integer :: nHalos
- integer, dimension(nHalos) :: nCellsHalo ! halo 1 cells, halo 2 cells
- integer, dimension(nHalos+1) :: nEdgesHalo ! own cell's edges, halo 1 edges, halo 2 edges
- integer, dimension(nHalos+1) :: nVerticesHalo ! own cell's vertices, halo 1 vertices, halo 2 vertices
+ nHalos = config_num_halos
- integer, dimension(:), pointer :: tempIDs
- integer :: ntempIDs, offset
-
- integer :: nHalo, nOwnCells, nOwnEdges, nOwnVertices, cellCount, edgeCount, vertexCount, iEdge, iVertex
- type (hashtable) :: edgeHash, vertexHash
-
-
if (config_do_restart) then
+ ! this get followed by set is to ensure that the time is in standard format
+ call mpas_set_time(curr_time=startTime, dateTimeString=config_start_time)
+ call mpas_get_time(curr_time=startTime, dateTimeString=timeStamp)
- ! this get followed by set is to ensure that the time is in standard format
- call mpas_set_time(curr_time=startTime, dateTimeString=config_start_time)
- call mpas_get_time(curr_time=startTime, dateTimeString=timeStamp)
+ call mpas_insert_string_suffix(trim(config_restart_name), timeStamp, filename)
- call mpas_insert_string_suffix(trim(config_restart_name), timeStamp, filename)
-
- input_obj % filename = trim(filename)
- input_obj % stream = STREAM_RESTART
+ input_obj % filename = trim(filename)
+ input_obj % stream = STREAM_RESTART
else
- input_obj % filename = trim(config_input_name)
- input_obj % stream = STREAM_INPUT
+ input_obj % filename = trim(config_input_name)
+ input_obj % stream = STREAM_INPUT
end if
inputHandle = MPAS_io_open(trim(input_obj % filename), MPAS_IO_READ, MPAS_IO_PNETCDF, ierr)
if (ierr /= MPAS_IO_NOERR) then
- write(0,*) ' '
- if (input_obj % stream == STREAM_RESTART) then
- write(0,*) 'Error opening restart file ''', trim(input_obj % filename), ''''
- else if (input_obj % stream == STREAM_INPUT) then
- write(0,*) 'Error opening input file ''', trim(input_obj % filename), ''''
- else if (input_obj % stream == STREAM_SFC) then
- write(0,*) 'Error opening sfc file ''', trim(input_obj % filename), ''''
- end if
- write(0,*) ' '
- call mpas_dmpar_abort(domain % dminfo)
+ write(0,*) ' '
+ if (input_obj % stream == STREAM_RESTART) then
+ write(0,*) 'Error opening restart file ''', trim(input_obj % filename), ''''
+ else if (input_obj % stream == STREAM_INPUT) then
+ write(0,*) 'Error opening input file ''', trim(input_obj % filename), ''''
+ else if (input_obj % stream == STREAM_SFC) then
+ write(0,*) 'Error opening sfc file ''', trim(input_obj % filename), ''''
+ end if
+ write(0,*) ' '
+ call mpas_dmpar_abort(domain % dminfo)
end if
-
!
! Read global number of cells/edges/vertices
@@ -182,256 +146,23 @@
call mpas_dmpar_get_index_range(domain % dminfo, 1, nVertices, readVertexStart, readVertexEnd)
nReadVertices = readVertexEnd - readVertexStart + 1
- readVertLevelStart = 1
- readVertLevelEnd = nVertLevels
- nReadVertLevels = nVertLevels
-
-
+ allocate(readingBlock)
+ readingBlock % domain => domain
+ readingBlock % blockID = domain % dminfo % my_proc_id
+ readingBlock % localBlockID = 0
+
!
! Allocate and read fields that we will need in order to ultimately work out
! which cells/edges/vertices are owned by each block, and which are ghost
!
- ! Global cell indices
- allocate(indexToCellIDField % ioinfo)
- indexToCellIDField % ioinfo % fieldName = 'indexToCellID'
- indexToCellIDField % ioinfo % start(1) = readCellStart
- indexToCellIDField % ioinfo % count(1) = nReadCells
- allocate(indexToCellIDField % array(nReadCells))
- allocate(readIndices(nReadCells))
- do i=1,nReadCells
- readIndices(i) = i + readCellStart - 1
- end do
- call MPAS_io_inq_var(inputHandle, 'indexToCellID', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'indexToCellID', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'indexToCellID', indexToCellIDField % array, ierr)
-
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- ! Cell x-coordinates (in 3d Cartesian space)
- allocate(xCellField % ioinfo)
- xCellField % ioinfo % fieldName = 'xCell'
- xCellField % ioinfo % start(1) = readCellStart
- xCellField % ioinfo % count(1) = nReadCells
- allocate(xCellField % array(nReadCells))
- call MPAS_io_inq_var(inputHandle, 'xCell', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'xCell', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'xCell', xCellField % array, ierr)
+ call mpas_io_setup_cell_block_fields(inputHandle, nreadCells, readCellStart, readingBlock, maxEdges, indexTocellIDField, xCellField, &
+ yCellField, zCellField, nEdgesOnCellField, cellsOnCellField, edgesOnCellField, verticesOnCellField)
- ! Cell y-coordinates (in 3d Cartesian space)
- allocate(yCellField % ioinfo)
- yCellField % ioinfo % fieldName = 'yCell'
- yCellField % ioinfo % start(1) = readCellStart
- yCellField % ioinfo % count(1) = nReadCells
- allocate(yCellField % array(nReadCells))
- call MPAS_io_inq_var(inputHandle, 'yCell', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'yCell', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'yCell', yCellField % array, ierr)
+ call mpas_io_setup_edge_block_fields(inputHandle, nReadEdges, readEdgeStart, readingBlock, indexToEdgeIDField, xEdgeField, yEdgeField, zEdgeField, cellsOnEdgeField)
- ! Cell z-coordinates (in 3d Cartesian space)
- allocate(zCellField % ioinfo)
- zCellField % ioinfo % fieldName = 'zCell'
- zCellField % ioinfo % start(1) = readCellStart
- zCellField % ioinfo % count(1) = nReadCells
- allocate(zCellField % array(nReadCells))
- call MPAS_io_inq_var(inputHandle, 'zCell', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'zCell', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'zCell', zCellField % array, ierr)
-#endif
-#endif
- deallocate(readIndices)
-
-
- ! Global edge indices
- allocate(indexToEdgeIDField % ioinfo)
- indexToEdgeIDField % ioinfo % fieldName = 'indexToEdgeID'
- indexToEdgeIDField % ioinfo % start(1) = readEdgeStart
- indexToEdgeIDField % ioinfo % count(1) = nReadEdges
- allocate(indexToEdgeIDField % array(nReadEdges))
- allocate(indexToEdgeIDField % array(nReadEdges))
- allocate(readIndices(nReadEdges))
- do i=1,nReadEdges
- readIndices(i) = i + readEdgeStart - 1
- end do
- call MPAS_io_inq_var(inputHandle, 'indexToEdgeID', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'indexToEdgeID', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'indexToEdgeID', indexToEdgeIDField % array, ierr)
-
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- ! Edge x-coordinates (in 3d Cartesian space)
- allocate(xEdgeField % ioinfo)
- xEdgeField % ioinfo % fieldName = 'xEdge'
- xEdgeField % ioinfo % start(1) = readEdgeStart
- xEdgeField % ioinfo % count(1) = nReadEdges
- allocate(xEdgeField % array(nReadEdges))
- call MPAS_io_inq_var(inputHandle, 'xEdge', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'xEdge', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'xEdge', xEdgeField % array, ierr)
-
- ! Edge y-coordinates (in 3d Cartesian space)
- allocate(yEdgeField % ioinfo)
- yEdgeField % ioinfo % fieldName = 'yEdge'
- yEdgeField % ioinfo % start(1) = readEdgeStart
- yEdgeField % ioinfo % count(1) = nReadEdges
- allocate(yEdgeField % array(nReadEdges))
- call MPAS_io_inq_var(inputHandle, 'yEdge', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'yEdge', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'yEdge', yEdgeField % array, ierr)
-
- ! Edge z-coordinates (in 3d Cartesian space)
- allocate(zEdgeField % ioinfo)
- zEdgeField % ioinfo % fieldName = 'zEdge'
- zEdgeField % ioinfo % start(1) = readEdgeStart
- zEdgeField % ioinfo % count(1) = nReadEdges
- allocate(zEdgeField % array(nReadEdges))
- call MPAS_io_inq_var(inputHandle, 'zEdge', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'zEdge', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'zEdge', zEdgeField % array, ierr)
-#endif
-#endif
- deallocate(readIndices)
-
-
- ! Global vertex indices
- allocate(indexToVertexIDField % ioinfo)
- indexToVertexIDField % ioinfo % fieldName = 'indexToVertexID'
- indexToVertexIDField % ioinfo % start(1) = readVertexStart
- indexToVertexIDField % ioinfo % count(1) = nReadVertices
- allocate(indexToVertexIDField % array(nReadVertices))
- allocate(readIndices(nReadVertices))
- do i=1,nReadVertices
- readIndices(i) = i + readVertexStart - 1
- end do
- call MPAS_io_inq_var(inputHandle, 'indexToVertexID', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'indexToVertexID', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'indexToVertexID', indexToVertexIDField % array, ierr)
-
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- ! Vertex x-coordinates (in 3d Cartesian space)
- allocate(xVertexField % ioinfo)
- xVertexField % ioinfo % fieldName = 'xVertex'
- xVertexField % ioinfo % start(1) = readVertexStart
- xVertexField % ioinfo % count(1) = nReadVertices
- allocate(xVertexField % array(nReadVertices))
- call MPAS_io_inq_var(inputHandle, 'xVertex', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'xVertex', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'xVertex', xVertexField % array, ierr)
-
- ! Vertex y-coordinates (in 3d Cartesian space)
- allocate(yVertexField % ioinfo)
- yVertexField % ioinfo % fieldName = 'yVertex'
- yVertexField % ioinfo % start(1) = readVertexStart
- yVertexField % ioinfo % count(1) = nReadVertices
- allocate(yVertexField % array(nReadVertices))
- call MPAS_io_inq_var(inputHandle, 'yVertex', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'yVertex', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'yVertex', yVertexField % array, ierr)
-
- ! Vertex z-coordinates (in 3d Cartesian space)
- allocate(zVertexField % ioinfo)
- zVertexField % ioinfo % fieldName = 'zVertex'
- zVertexField % ioinfo % start(1) = readVertexStart
- zVertexField % ioinfo % count(1) = nReadVertices
- allocate(zVertexField % array(nReadVertices))
- call MPAS_io_inq_var(inputHandle, 'zVertex', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'zVertex', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'zVertex', zVertexField % array, ierr)
-#endif
-#endif
- deallocate(readIndices)
-
- ! Number of cell/edges/vertices adjacent to each cell
- allocate(nEdgesOnCellField % ioinfo)
- nEdgesOnCellField % ioinfo % fieldName = 'nEdgesOnCell'
- nEdgesOnCellField % ioinfo % start(1) = readCellStart
- nEdgesOnCellField % ioinfo % count(1) = nReadCells
- allocate(nEdgesOnCellField % array(nReadCells))
- allocate(readIndices(nReadCells))
- do i=1,nReadCells
- readIndices(i) = i + readCellStart - 1
- end do
- call MPAS_io_inq_var(inputHandle, 'nEdgesOnCell', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'nEdgesOnCell', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'nEdgesOnCell', nEdgesOnCellField % array, ierr)
-
- ! Global indices of cells adjacent to each cell
- allocate(cellsOnCellField % ioinfo)
- cellsOnCellField % ioinfo % fieldName = 'cellsOnCell'
- cellsOnCellField % ioinfo % start(1) = 1
- cellsOnCellField % ioinfo % start(2) = readCellStart
- cellsOnCellField % ioinfo % count(1) = maxEdges
- cellsOnCellField % ioinfo % count(2) = nReadCells
- allocate(cellsOnCellField % array(maxEdges,nReadCells))
- call MPAS_io_inq_var(inputHandle, 'cellsOnCell', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'cellsOnCell', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'cellsOnCell', cellsOnCellField % array, ierr)
-
- ! Global indices of edges adjacent to each cell
- allocate(edgesOnCellField % ioinfo)
- edgesOnCellField % ioinfo % fieldName = 'edgesOnCell'
- edgesOnCellField % ioinfo % start(1) = 1
- edgesOnCellField % ioinfo % start(2) = readCellStart
- edgesOnCellField % ioinfo % count(1) = maxEdges
- edgesOnCellField % ioinfo % count(2) = nReadCells
- allocate(edgesOnCellField % array(maxEdges,nReadCells))
- call MPAS_io_inq_var(inputHandle, 'edgesOnCell', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'edgesOnCell', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'edgesOnCell', edgesOnCellField % array, ierr)
-
- ! Global indices of vertices adjacent to each cell
- allocate(verticesOnCellField % ioinfo)
- verticesOnCellField % ioinfo % fieldName = 'verticesOnCell'
- verticesOnCellField % ioinfo % start(1) = 1
- verticesOnCellField % ioinfo % start(2) = readCellStart
- verticesOnCellField % ioinfo % count(1) = maxEdges
- verticesOnCellField % ioinfo % count(2) = nReadCells
- allocate(verticesOnCellField % array(maxEdges,nReadCells))
- call MPAS_io_inq_var(inputHandle, 'verticesOnCell', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'verticesOnCell', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'verticesOnCell', verticesOnCellField % array, ierr)
- deallocate(readIndices)
-
- ! Global indices of cells adjacent to each edge
- ! used for determining which edges are owned by a block, where
- ! iEdge is owned iff cellsOnEdge(1,iEdge) is an owned cell
- allocate(cellsOnEdgeField % ioinfo)
- cellsOnEdgeField % ioinfo % fieldName = 'cellsOnEdge'
- cellsOnEdgeField % ioinfo % start(1) = 1
- cellsOnEdgeField % ioinfo % start(2) = readEdgeStart
- cellsOnEdgeField % ioinfo % count(1) = 2
- cellsOnEdgeField % ioinfo % count(2) = nReadEdges
- allocate(cellsOnEdgeField % array(2,nReadEdges))
- allocate(readIndices(nReadEdges))
- do i=1,nReadEdges
- readIndices(i) = i + readEdgeStart - 1
- end do
- call MPAS_io_inq_var(inputHandle, 'cellsOnEdge', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'cellsOnEdge', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'cellsOnEdge', cellsOnEdgeField % array, ierr)
- deallocate(readIndices)
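! [Editor's note] Illustrative sketch only (not part of this diff) of the ownership rule stated
! in the comment above: an edge iEdge is owned by the block that owns the cell cellsOnEdge(1,iEdge).
! Here ownedCellHash is a hypothetical hash table holding this block's owned global cell IDs.
!
!   do iEdge = 1, nReadEdges
!      if (mpas_hash_search(ownedCellHash, cellsOnEdgeField % array(1,iEdge))) then
!         ! ... iEdge belongs to this block's owned edge list ...
!      end if
!   end do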
-
- ! Global indices of cells adjacent to each vertex
- ! used for determining which vertices are owned by a block, where
- ! iVtx is owned iff cellsOnVertex(1,iVtx) is an owned cell
- allocate(cellsOnVertexField % ioinfo)
- cellsOnVertexField % ioinfo % fieldName = 'cellsOnVertex'
- cellsOnVertexField % ioinfo % start(1) = 1
- cellsOnVertexField % ioinfo % start(2) = readVertexStart
- cellsOnVertexField % ioinfo % count(1) = vertexDegree
- cellsOnVertexField % ioinfo % count(2) = nReadVertices
- allocate(cellsOnVertexField % array(vertexDegree,nReadVertices))
- allocate(readIndices(nReadVertices))
- do i=1,nReadVertices
- readIndices(i) = i + readVertexStart - 1
- end do
- call MPAS_io_inq_var(inputHandle, 'cellsOnVertex', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'cellsOnVertex', readIndices, ierr=ierr)
- call mpas_io_get_var(inputHandle, 'cellsOnVertex', cellsOnVertexField % array, ierr)
- deallocate(readIndices)
-
-
+ call mpas_io_setup_vertex_block_fields(inputHandle, nReadVertices, readVertexStart, readingBlock, vertexDegree, indexToVertexIDField, &
+ xVertexField, yVertexField, zVertexField, cellsOnVertexField)
!
! Set up a graph derived data type describing the connectivity for the cells
! that were read by this process
@@ -450,7 +181,6 @@
partial_global_graph_info % vertexID(:) = indexToCellIDField % array(:)
partial_global_graph_info % nAdjacent(:) = nEdgesOnCellField % array(:)
partial_global_graph_info % adjacencyList(:,:) = cellsOnCellField % array(:,:)
-
! TODO: Ensure (by renaming or exchanging) that initial cell range on each proc is contiguous
! This situation may occur when reading a restart file with cells/edges/vertices written
@@ -463,508 +193,78 @@
deallocate(partial_global_graph_info % vertexID)
deallocate(partial_global_graph_info % nAdjacent)
deallocate(partial_global_graph_info % adjacencyList)
-
-
- allocate(indexToCellID_0Halo(size(local_cell_list)))
- allocate(nEdgesOnCell_0Halo(size(local_cell_list)))
- allocate(cellsOnCell_0Halo(maxEdges, size(local_cell_list)))
-
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- allocate(xCell(size(local_cell_list)))
- allocate(yCell(size(local_cell_list)))
- allocate(zCell(size(local_cell_list)))
-#endif
-#endif
-
- !
- ! Now that each process has a list of cells that it owns, exchange cell connectivity
- ! information between the processes that read info for a cell and those that own that cell
- !
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(indexToCellIDField % array), size(local_cell_list), &
- indexToCellIDField % array, local_cell_list, &
- sendCellList, recvCellList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, indexToCellIDField % array, indexToCellID_0Halo, &
- size(indexToCellIDField % array), size(local_cell_list), &
- sendCellList, recvCellList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, nEdgesOnCellField % array, nEdgesOnCell_0Halo, &
- size(indexToCellIDField % array), size(local_cell_list), &
- sendCellList, recvCellList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, cellsOnCellField % array, cellsOnCell_0Halo, &
- size(cellsOnCellField % array, 1), size(indexToCellIDField % array), size(local_cell_list), &
- sendCellList, recvCellList)
-
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- call mpas_dmpar_alltoall_field(domain % dminfo, xCellField % array, xCell, &
- size(xCellField % array), size(local_cell_list), &
- sendCellList, recvCellList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, yCellField % array, yCell, &
- size(yCellField % array), size(local_cell_list), &
- sendCellList, recvCellList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, zCellField % array, zCell, &
- size(zCellField % array), size(local_cell_list), &
- sendCellList, recvCellList)
-#endif
-#endif
+ call mpas_block_creator_setup_blocks_and_0halo_cells(domain, indexToCellID_Block, local_cell_list, block_id, block_start, block_count)
+ call mpas_block_creator_build_0halo_cell_fields(indexToCellIDField, nEdgesOnCellField, cellsOnCellField, verticesOnCellField, edgesOnCellField, indexToCellID_Block, nEdgesOnCell_Block, cellsOnCell_Block, verticesOnCell_Block, edgesOnCell_Block)
- deallocate(sendCellList % list)
- deallocate(sendCellList)
- deallocate(recvCellList % list)
- deallocate(recvCellList)
+ call mpas_block_creator_build_0_and_1halo_edge_fields(indexToEdgeIDField, cellsOnEdgeField, indexToCellID_Block, nEdgesOnCell_Block, edgesOnCell_Block, indexToEdgeID_Block, cellsOnEdge_Block, nEdgesSolveField)
+ call mpas_block_creator_build_0_and_1halo_edge_fields(indexToVertexIDField, cellsOnVertexField, indexToCellID_Block, nEdgesOnCell_Block, verticesOnCell_Block, indexToVertexID_Block, cellsOnVertex_Block, nVerticesSolveField)
+ call mpas_block_creator_build_cell_halos(indexToCellID_Block, nEdgesOnCell_Block, cellsOnCell_Block, verticesOnCell_Block, edgesOnCell_Block, nCellsSolveField)
+ call mpas_block_creator_build_edge_halos(indexToCellID_Block, nEdgesOnCell_Block, nCellsSolveField, edgesOnCell_Block, indexToEdgeID_Block, cellsOnEdge_Block, nEdgesSolveField)
+ call mpas_block_creator_build_edge_halos(indexToCellID_Block, nEdgesOnCell_Block, nCellsSolveField, verticesOnCell_Block, indexToVertexID_Block, cellsOnVertex_Block, nVerticesSolveField)
- !
- ! Build a graph of cell connectivity based on cells owned by this process
- !
- block_graph_0Halo % nVerticesTotal = size(local_cell_list)
- block_graph_0Halo % nVertices = size(local_cell_list)
- block_graph_0Halo % maxDegree = maxEdges
- block_graph_0Halo % ghostStart = size(local_cell_list) + 1
- allocate(block_graph_0Halo % vertexID(size(local_cell_list)))
- allocate(block_graph_0Halo % nAdjacent(size(local_cell_list)))
- allocate(block_graph_0Halo % adjacencyList(maxEdges, size(local_cell_list)))
-
- block_graph_0Halo % vertexID(:) = indexToCellID_0Halo(:)
- block_graph_0Halo % nAdjacent(:) = nEdgesOnCell_0Halo(:)
- block_graph_0Halo % adjacencyList(:,:) = cellsOnCell_0Halo(:,:)
-
- ! Get back a graph describing the owned cells plus the cells in the 1-halo
- call mpas_block_decomp_add_halo(domain % dminfo, block_graph_0Halo, block_graph_1Halo)
-
-
- !
- ! Work out exchange lists for 1-halo and exchange cell information for 1-halo
- !
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- block_graph_0Halo % nVertices, block_graph_1Halo % nVerticesTotal, &
- block_graph_0Halo % vertexID, block_graph_1Halo % vertexID, &
- send1Halo, recv1Halo)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % vertexID, block_graph_1Halo % vertexID, &
- block_graph_0Halo % nVertices, block_graph_1Halo % nVerticesTotal, &
- send1Halo, recv1Halo)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % nAdjacent, block_graph_1Halo % nAdjacent, &
- block_graph_0Halo % nVertices, block_graph_1Halo % nVerticesTotal, &
- send1Halo, recv1Halo)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % adjacencyList, block_graph_1Halo % adjacencyList, &
- block_graph_0Halo % maxDegree, block_graph_0Halo % nVertices, block_graph_1Halo % nVerticesTotal, &
- send1Halo, recv1Halo)
-
-
- !
- ! Work out exchange lists for 2-halo and exchange cell information for 2-halo
- !
- block_graph_1Halo % nVertices = block_graph_1Halo % nVerticesTotal
- block_graph_1Halo % ghostStart = block_graph_1Halo % nVerticesTotal + 1
-
- ! Get back a graph describing the owned and 1-halo cells plus the cells in the 2-halo
- call mpas_block_decomp_add_halo(domain % dminfo, block_graph_1Halo, block_graph_2Halo)
-
- block_graph_2Halo % nVertices = block_graph_0Halo % nVertices
- block_graph_2Halo % ghostStart = block_graph_2Halo % nVertices + 1
- nOwnCells = block_graph_2Halo % nVertices
+ ! Allocate blocks, and copy indexTo arrays into blocks
+ call mpas_block_creator_finalize_block_init(domain % blocklist, &
+#include "dim_dummy_args.inc"
+ , nCellsSolveField, nEdgesSolveField, nVerticesSolveField, indexToCellID_Block, indexToEdgeID_Block, indexToVertexID_Block)
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- !! For now, only use Zoltan with MPI
- !! Zoltan initialization
- call mpas_zoltan_start()
- !! Zoltan hook for cells
- call mpas_zoltan_order_loc_hsfc_cells(block_graph_2Halo%nVertices,block_graph_2Halo%VertexID,3,xCell,yCell,zCell)
-#endif
-#endif
-
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- block_graph_0Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
- block_graph_0Halo % vertexID, block_graph_2Halo % vertexID, &
- send2Halo, recv2Halo)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % vertexID, block_graph_2Halo % vertexID, &
- block_graph_0Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
- send2Halo, recv2Halo)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % nAdjacent, block_graph_2Halo % nAdjacent, &
- block_graph_0Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
- send2Halo, recv2Halo)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % adjacencyList, block_graph_2Halo % adjacencyList, &
- block_graph_0Halo % maxDegree, block_graph_0Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
- send2Halo, recv2Halo)
-
-
-
- !
- ! Knowing which cells are in block and the 2-halo, we can exchange lists of which edges are
- ! on each cell and which vertices are on each cell from the processes that read these
- ! fields for each cell to the processes that own the cells
- !
- allocate(nEdgesOnCell_2Halo(block_graph_2Halo % nVerticesTotal))
- allocate(edgesOnCell_2Halo(maxEdges, block_graph_2Halo % nVerticesTotal))
- allocate(verticesOnCell_2Halo(maxEdges, block_graph_2Halo % nVerticesTotal))
-
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(indexToCellIDField % array), block_graph_2Halo % nVerticesTotal, &
- indexToCellIDField % array, block_graph_2Halo % vertexID, &
- sendCellList, recvCellList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, nEdgesOnCellField % array, nEdgesOnCell_2Halo, &
- size(indexToCellIDField % array), size(local_cell_list), &
- sendCellList, recvCellList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, edgesOnCellField % array, edgesOnCell_2Halo, &
- maxEdges, nReadCells, block_graph_2Halo % nVerticesTotal, &
- sendCellList, recvCellList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, verticesOnCellField % array, verticesOnCell_2Halo, &
- maxEdges, nReadCells, block_graph_2Halo % nVerticesTotal, &
- sendCellList, recvCellList)
-
-
- !
- ! Get a list of which edges and vertices are adjacent to cells (including halo cells) in block
- !
- call mpas_block_decomp_all_edges_in_block(maxEdges, block_graph_2Halo % nVerticesTotal, block_graph_2Halo % nAdjacent, &
- edgesOnCell_2Halo, nlocal_edges, local_edge_list)
- call mpas_block_decomp_all_edges_in_block(maxEdges, block_graph_2Halo % nVerticesTotal, block_graph_2Halo % nAdjacent, &
- verticesOnCell_2Halo, nlocal_vertices, local_vertex_list)
-
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(indexToEdgeIDField % array), nlocal_edges, &
- indexToEdgeIDField % array, local_edge_list, &
- sendEdgeList, recvEdgeList)
-
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(indexToVertexIDField % array), nlocal_vertices, &
- indexToVertexIDField % array, local_vertex_list, &
- sendVertexList, recvVertexList)
-
-
-
- !
- ! Work out which edges and vertices are owned by this process, and which are ghost
- !
- allocate(cellsOnEdge_2Halo(2,nlocal_edges))
- allocate(cellsOnVertex_2Halo(vertexDegree,nlocal_vertices))
-
- call mpas_dmpar_alltoall_field(domain % dminfo, cellsOnEdgeField % array, cellsOnEdge_2Halo, &
- 2, size(cellsOnEdgeField % array, 2), nlocal_edges, &
- sendEdgeList, recvEdgeList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, cellsOnVertexField % array, cellsOnVertex_2Halo, &
- vertexDegree, size(cellsOnVertexField % array, 2), nlocal_vertices, &
- sendVertexList, recvVertexList)
-
-
- call mpas_block_decomp_partitioned_edge_list(nOwnCells, &
- block_graph_2Halo % vertexID(1:nOwnCells), &
- 2, nlocal_edges, cellsOnEdge_2Halo, local_edge_list, ghostEdgeStart)
-
- call mpas_block_decomp_partitioned_edge_list(nOwnCells, &
- block_graph_2Halo % vertexID(1:nOwnCells), &
- vertexDegree, nlocal_vertices, cellsOnVertex_2Halo, local_vertex_list, ghostVertexStart)
-
- !------- set owned and halo cell indices -------!
-
- nCellsCumulative(1) = nOwnCells
- nCellsCumulative(2) = block_graph_1Halo % nVerticesTotal
- nCellsCumulative(3) = block_graph_2Halo % nVerticesTotal
-
- !------- determine the perimeter and owned edges of own cells and halos -------!
-
- nOwnEdges = ghostEdgeStart-1
- nOwnVertices = ghostVertexStart-1
-
- ! skip the own edges found at the beginning of local_edge_list
- call mpas_hash_init(edgeHash)
- do i=1,nOwnEdges
- call mpas_hash_insert(edgeHash, local_edge_list(i))
- end do
-
- ! skip the own vertices found at the beginning of local_vertex_list
- call mpas_hash_init(vertexHash)
- do i=1,nOwnVertices
- call mpas_hash_insert(vertexHash, local_vertex_list(i))
- end do
-
- cellCount = 1 !tracks the index of the local cell array
- edgeCount = nOwnEdges !tracks where to insert the next local edge
- vertexCount = nOwnVertices !tracks where to insert the next local vertex
-
- nEdgesCumulative(1) = nOwnEdges
- nVerticesCumulative(1) = nOwnVertices
-
- !Order the local_edge_list and local_vertex_list accordingly and set the bounds of each perimeter ----
- do i = 1, nHalos + 1 ! for the own cells and each halo...
- do j = cellCount, nCellsCumulative(i)
-
- ! the number of edges on a cell is the same as the number of vertices, and therefore
- ! nEdgesOnCell_2Halo(j) will be the correct upper bound for both edges and vertices on a cell
- do k = 1, nEdgesOnCell_2Halo(j)
- iEdge = edgesOnCell_2Halo(k,j)
- if (.not. mpas_hash_search(edgeHash, iEdge)) then
- edgeCount = edgeCount + 1
- local_edge_list(edgeCount) = iEdge
- call mpas_hash_insert(edgeHash, iEdge)
- end if
-
- iVertex = verticesOnCell_2Halo(k,j)
- if (.not. mpas_hash_search(vertexHash, iVertex)) then
- vertexCount = vertexCount + 1
- local_vertex_list(vertexCount) = iVertex
- call mpas_hash_insert(vertexHash, iVertex)
- end if
- end do
-
- end do
-
- cellCount = nCellsCumulative(i) + 1
- nEdgesCumulative(i+1) = edgeCount
- nVerticesCumulative(i+1) = vertexCount
- end do
-
- do i = 1, nHalos
- nCellsHalo(i) = nCellsCumulative(i+1) - nCellsCumulative(i)
- end do
-
- do i = 1, nHalos + 1
- nEdgesHalo(i) = nEdgesCumulative(i+1) - nEdgesCumulative(i)
- end do
-
- do i = 1, nHalos + 1
- nVerticesHalo(i) = nVerticesCumulative(i+1) - nVerticesCumulative(i)
- end do
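! [Editor's note] Worked example (illustrative only, values hypothetical): with
! nCellsCumulative = (/ 100, 140, 190 /), the loops above give nCellsHalo(1) = 140 - 100 = 40
! halo-1 cells and nCellsHalo(2) = 190 - 140 = 50 halo-2 cells; the per-layer edge and vertex
! counts are obtained from their cumulative arrays in the same way.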
-
- call mpas_hash_destroy(edgeHash)
- call mpas_hash_destroy(vertexHash)
-
-
- ! At this point, local_edge_list(1:nOwnEdges) contains all of the owned edges for this block
- ! and local_edge_list(ghostEdgeStart:nlocal_edges) contains all of the ghost edges
-
- ! At this point, local_vertex_list(1:ghostVertexStart-1) contains all of the owned vertices for this block
- ! and local_vertex_list(ghostVertexStart:nlocal_vertices) contains all of the ghost vertices
-
- ! Also, at this point, block_graph_2Halo % vertexID(1:block_graph_2Halo%nVertices) contains all of the owned
- ! cells for this block, and block_graph_2Halo % vertexID(block_graph_2Halo%nVertices+1:block_graph_2Halo%nVerticesTotal)
- ! contains all of the ghost cells
-
-
- deallocate(sendEdgeList % list)
- deallocate(sendEdgeList)
- deallocate(recvEdgeList % list)
- deallocate(recvEdgeList)
-
- deallocate(sendVertexList % list)
- deallocate(sendVertexList)
- deallocate(recvVertexList % list)
- deallocate(recvVertexList)
-
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- allocate(xEdge(nlocal_edges))
- allocate(yEdge(nlocal_edges))
- allocate(zEdge(nlocal_edges))
- allocate(xVertex(nlocal_vertices))
- allocate(yVertex(nlocal_vertices))
- allocate(zVertex(nlocal_vertices))
-#endif
-#endif
-
- !
- ! Knowing which edges/vertices are owned by this block and which are actually read
- ! from the input or restart file, we can build exchange lists to perform
- ! all-to-all field exchanges from process that reads a field to the processes that
- ! need them
- !
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(indexToEdgeIDField % array), nlocal_edges, &
- indexToEdgeIDField % array, local_edge_list, &
- sendEdgeList, recvEdgeList)
-
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(indexToVertexIDField % array), nlocal_vertices, &
- indexToVertexIDField % array, local_vertex_list, &
- sendVertexList, recvVertexList)
-
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- call mpas_dmpar_alltoall_field(domain % dminfo, xEdgeField % array, xEdge, &
- size(xEdgeField % array), nlocal_edges, &
- sendEdgeList, recvEdgeList)
- call mpas_dmpar_alltoall_field(domain % dminfo, yEdgeField % array, yEdge, &
- size(yEdgeField % array), nlocal_edges, &
- sendEdgeList, recvEdgeList)
- call mpas_dmpar_alltoall_field(domain % dminfo, zEdgeField % array, zEdge, &
- size(zEdgeField % array), nlocal_edges, &
- sendEdgeList, recvEdgeList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, xVertexField % array, xVertex, &
- size(xVertexField % array), nlocal_vertices, &
- sendVertexList, recvVertexList)
- call mpas_dmpar_alltoall_field(domain % dminfo, yVertexField % array, yVertex, &
- size(yVertexField % array), nlocal_vertices, &
- sendVertexList, recvVertexList)
- call mpas_dmpar_alltoall_field(domain % dminfo, zVertexField % array, zVertex, &
- size(zVertexField % array), nlocal_vertices, &
- sendVertexList, recvVertexList)
- !!!!!!!!!!!!!!!!!!
- !! Reorder edges
- !!!!!!!!!!!!!!!!!!
- call mpas_zoltan_order_loc_hsfc_edges(nOwnEdges,local_edge_list,3,xEdge,yEdge,zEdge)
- !!!!!!!!!!!!!!!!!!
-
- !!!!!!!!!!!!!!!!!!
- !! Reorder vertices
- !!!!!!!!!!!!!!!!!!
- call mpas_zoltan_order_loc_hsfc_verts(nOwnVertices,local_vertex_list,3,xVertex,yVertex,zVertex)
- !!!!!!!!!!!!!!!!!!
-
- deallocate(sendEdgeList % list)
- deallocate(sendEdgeList)
- deallocate(recvEdgeList % list)
- deallocate(recvEdgeList)
-
- deallocate(sendVertexList % list)
- deallocate(sendVertexList)
- deallocate(recvVertexList % list)
- deallocate(recvVertexList)
-
- !
- ! Knowing which edges/vertices are owned by this block and which are actually read
- ! from the input or restart file, we can build exchange lists to perform
- ! all-to-all field exchanges from process that reads a field to the processes that
- ! need them
- !
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(indexToEdgeIDField % array), nlocal_edges, &
- indexToEdgeIDField % array, local_edge_list, &
- sendEdgeList, recvEdgeList)
-
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(indexToVertexIDField % array), nlocal_vertices, &
- indexToVertexIDField % array, local_vertex_list, &
- sendVertexList, recvVertexList)
-
-#endif
-#endif
-
- !
- ! Build ownership and exchange lists for vertical levels
- ! Essentially, process 0 owns all vertical levels when reading and writing,
- ! and it distributes them or gathers them to/from all other processes
- !
- if (domain % dminfo % my_proc_id == 0) then
- allocate(local_vertlevel_list(nVertLevels))
- do i=1,nVertLevels
- local_vertlevel_list(i) = i
- end do
- else
- allocate(local_vertlevel_list(0))
- end if
- allocate(needed_vertlevel_list(nVertLevels))
- do i=1,nVertLevels
- needed_vertlevel_list(i) = i
- end do
-
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(local_vertlevel_list), size(needed_vertlevel_list), &
- local_vertlevel_list, needed_vertlevel_list, &
- sendVertLevelList, recvVertLevelList)
-
- deallocate(local_vertlevel_list)
- deallocate(needed_vertlevel_list)
-
-
- !
- ! Read and distribute all fields given ownership lists and exchange lists (maybe already in block?)
- !
- allocate(domain % blocklist)
-
- nCells = block_graph_2Halo % nVerticesTotal
- nEdges = nlocal_edges
- nVertices = nlocal_vertices
-
- call mpas_allocate_block(domain % blocklist, domain, domain%dminfo%my_proc_id, &
-#include "dim_dummy_args.inc"
- )
-
-!!!!!!!!!!MGD HERE WE NEED TO READ IN indexTo*ID fields !!!!!!!!!!!!!!!!!
- call MPAS_io_inq_var(inputHandle, 'indexToCellID', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'indexToCellID', local_cell_list(1:nOwnCells), ierr=ierr)
- call mpas_io_get_var(inputHandle, 'indexToCellID', domain % blocklist % mesh % indexToCellID % array, ierr)
-
- call MPAS_io_inq_var(inputHandle, 'indexToEdgeID', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'indexToEdgeID', local_edge_list(1:nOwnEdges), ierr=ierr)
- call mpas_io_get_var(inputHandle, 'indexToEdgeID', domain % blocklist % mesh % indexToEdgeID % array, ierr)
-
- call MPAS_io_inq_var(inputHandle, 'indexToVertexID', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'indexToVertexID', local_vertex_list(1:nOwnVertices), ierr=ierr)
- call mpas_io_get_var(inputHandle, 'indexToVertexID', domain % blocklist % mesh % indexToVertexID % array, ierr)
-
- domain % blocklist % mesh % nCellsSolve = nOwnCells
- domain % blocklist % mesh % nEdgesSolve = nOwnEdges
- domain % blocklist % mesh % nVerticesSolve = nOwnVertices
- domain % blocklist % mesh % nVertLevelsSolve = domain % blocklist % mesh % nVertLevels ! No vertical decomp yet...
-
call mpas_io_input_init(input_obj, domain % blocklist, domain % dminfo)
-
- !
- ! Read attributes
- !
call MPAS_readStreamAtt(input_obj % io_stream, 'sphere_radius', r_sphere_radius, ierr)
if (ierr /= MPAS_STREAM_NOERR) then
- write(0,*) 'Warning: Attribute sphere_radius not found in '//trim(input_obj % filename)
- write(0,*) ' Setting sphere_radius to 1.0'
- domain % blocklist % mesh % sphere_radius = 1.0
+ write(0,*) 'Warning: Attribute sphere_radius not found in '//trim(input_obj % filename)
+ write(0,*) ' Setting sphere_radius to 1.0'
+ domain % blocklist % mesh % sphere_radius = 1.0
else
- domain % blocklist % mesh % sphere_radius = r_sphere_radius
+ domain % blocklist % mesh % sphere_radius = r_sphere_radius
end if
call MPAS_readStreamAtt(input_obj % io_stream, 'on_a_sphere', c_on_a_sphere, ierr)
if (ierr /= MPAS_STREAM_NOERR) then
- write(0,*) 'Warning: Attribute on_a_sphere not found in '//trim(input_obj % filename)
- write(0,*) ' Setting on_a_sphere to ''YES'''
- domain % blocklist % mesh % on_a_sphere = .true.
+ write(0,*) 'Warning: Attribute on_a_sphere not found in '//trim(input_obj % filename)
+ write(0,*) ' Setting on_a_sphere to ''YES'''
+ domain % blocklist % mesh % on_a_sphere = .true.
else
- if (index(c_on_a_sphere, 'YES') /= 0) then
- domain % blocklist % mesh % on_a_sphere = .true.
- else
- domain % blocklist % mesh % on_a_sphere = .false.
- end if
+ if (index(c_on_a_sphere, 'YES') /= 0) then
+ domain % blocklist % mesh % on_a_sphere = .true.
+ else
+ domain % blocklist % mesh % on_a_sphere = .false.
+ end if
end if
+ block_ptr => domain % blocklist % next
+ do while (associated(block_ptr))
+ block_ptr % mesh % sphere_radius = domain % blocklist % mesh % sphere_radius
+ block_ptr % mesh % on_a_sphere = domain % blocklist % mesh % on_a_sphere
+
+ ! Link the sendList and recvList pointers in each field type to the appropriate lists
+ ! in parinfo, e.g., cellsToSend and cellsToRecv; in future, it can also be extended to
+ ! link blocks of fields to each other
+ call mpas_create_field_links(block_ptr)
+
+ block_ptr => block_ptr % next
+ end do
+
if (.not. config_do_restart) then
- input_obj % time = 1
+ input_obj % time = 1
else
- !
- ! If doing a restart, we need to decide which time slice to read from the
- ! restart file
- !
- input_obj % time = MPAS_seekStream(input_obj % io_stream, config_start_time, MPAS_STREAM_EXACT_TIME, timeStamp, ierr)
- if (ierr == MPAS_IO_ERR) then
- write(0,*) 'Error: restart file '//trim(filename)//' did not contain time '//trim(config_start_time)
- call mpas_dmpar_abort(domain % dminfo)
- end if
-write(0,*) 'MGD DEBUGGING time = ', input_obj % time
- write(0,*) 'Restarting model from time ', trim(timeStamp)
-
+ !
+ ! If doing a restart, we need to decide which time slice to read from the
+ ! restart file
+ !
+ input_obj % time = MPAS_seekStream(input_obj % io_stream, config_start_time, MPAS_STREAM_EXACT_TIME, timeStamp, ierr)
+ if (ierr == MPAS_IO_ERR) then
+ write(0,*) 'Error: restart file '//trim(filename)//' did not contain time '//trim(config_start_time)
+ call mpas_dmpar_abort(domain % dminfo)
+ end if
+!write(0,*) 'MGD DEBUGGING time = ', input_obj % time
+ write(0,*) 'Restarting model from time ', timeStamp
end if
-
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! Do the actual work of reading all fields in from the input or restart file
! For each field:
@@ -973,7 +273,7 @@
! process
! 2) All processes then send the global indices that were read to the
! processes that own those indices based on
- ! {send,recv}{Cell,Edge,Vertex,VertLevel}List
+ ! {send,recv}{Cell,Edge,Vertex}List
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
call mpas_read_and_distribute_fields(input_obj)
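! [Editor's note] Minimal sketch (not part of this diff) of the two-step pattern described in the
! comment block above, using the mpas_dmpar_get_owner_list / mpas_dmpar_alltoall_field calls whose
! signatures appear elsewhere in this file; readIDs, neededIDs, readVals, and ownedVals are
! hypothetical arrays standing in for any 1-d field.
!
!   call mpas_dmpar_get_owner_list(domain % dminfo, &
!                                  size(readIDs), size(neededIDs), &
!                                  readIDs, neededIDs, &
!                                  sendList, recvList)
!
!   call mpas_dmpar_alltoall_field(domain % dminfo, readVals, ownedVals, &
!                                  size(readIDs), size(neededIDs), &
!                                  sendList, recvList)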
@@ -981,292 +281,208 @@
call MPAS_io_close(inputHandle, ierr)
-
!
- ! Work out halo exchange lists for cells, edges, and vertices
- ! NB: The next pointer in each element of, e.g., cellsToSend, acts as the head pointer of
- ! the list, since Fortran does not allow arrays of pointers
- !
-
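! [Editor's note] Hedged sketch (not part of this diff) of the Fortran idiom noted above: because
! Fortran has no arrays of pointers, each array element is a derived type whose 'next' component
! serves as the head of a linked list (one list per halo layer). Type and variable names here are
! hypothetical.
!
!   type sketch_node
!      integer :: procID
!      type (sketch_node), pointer :: next => null()
!   end type sketch_node
!
!   type sketch_list_head
!      type (sketch_node), pointer :: next => null()   ! acts as the head of one list
!   end type sketch_list_head
!
!   type (sketch_list_head), dimension(:), pointer :: cellsToSendSketch   ! one head per halo layer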
- !--------- Create Cell Exchange Lists ---------!
-
- ! pass in neededList of ownedCells and halo layer 1 cells
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- nOwnCells, nCellsCumulative(2), &
- block_graph_2Halo % vertexID(1:nOwnCells), block_graph_2Halo % vertexID(1 : nCellsCumulative(2)), &
- domain % blocklist % parinfo % cellsToSend(1) % next, domain % blocklist % parinfo % cellsToRecv(1) % next)
-
- ! pass in neededList of ownedCells and halo layer 2 cells; offset of number of halo 1 cells is required
- offset = nCellsHalo(1)
- nTempIDs = nOwnCells + nCellsHalo(2)
- allocate(tempIDs(nTempIDs))
- tempIDs(1:nOwnCells) = block_graph_2Halo % vertexID(1:nOwnCells)
- tempIDs(nOwnCells+1:nTempIDs) = block_graph_2Halo % vertexID(nCellsCumulative(2)+1 : nCellsCumulative(3))
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- nOwnCells, nTempIDs, &
- block_graph_2Halo % vertexID(1:nOwnCells), tempIDs, &
- domain % blocklist % parinfo % cellsToSend(2) % next, domain % blocklist % parinfo % cellsToRecv(2) % next, &
- offset)
- deallocate(tempIDs)
-
-
- !--------- Create Edge Exchange Lists ---------!
-
- ! pass in neededList of ownedEdges and ownedCell perimeter edges
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- nOwnEdges, nEdgesCumulative(2), &
- local_edge_list(1:nOwnEdges), local_edge_list(1 : nEdgesCumulative(2)), &
- domain % blocklist % parinfo % edgesToSend(1) % next, domain % blocklist % parinfo % edgesToRecv(1) % next)
-
- ! pass in neededList of owned edges and yet-to-be-included edges from halo 1 cells; offset of number of ownedCell perimeter edges is required
- offset = nEdgesHalo(1)
- nTempIDs = nOwnEdges + nEdgesHalo(2)
- allocate(tempIDs(nTempIDs))
- tempIDs(1:nOwnEdges) = local_edge_list(1:nOwnEdges)
- tempIDs(nOwnEdges+1:nTempIDs) = local_edge_list(nEdgesCumulative(2)+1 : nEdgesCumulative(3))
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- nOwnEdges, nTempIDs, &
- local_edge_list(1:nOwnEdges), tempIDs, &
- domain % blocklist % parinfo % edgesToSend(2) % next, domain % blocklist % parinfo % edgesToRecv(2) % next, &
- offset)
- deallocate(tempIDs)
-
- ! pass in neededList of owned edges and yet-to-be-included edges from halo 2 cells; offset of number of ownedCell perimeter edges and halo 1 edges is required
- offset = nEdgesHalo(1) + nEdgesHalo(2)
- nTempIDs = nOwnEdges + nEdgesHalo(3)
- allocate(tempIDs(nTempIDs))
- tempIDs(1:nOwnEdges) = local_edge_list(1:nOwnEdges)
- tempIDs(nOwnEdges+1:nTempIDs) = local_edge_list(nEdgesCumulative(3)+1 : nEdgesCumulative(4))
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- nOwnEdges, nTempIDs, &
- local_edge_list(1:nOwnEdges), tempIDs, &
- domain % blocklist % parinfo % edgesToSend(3) % next, domain % blocklist % parinfo % edgesToRecv(3) % next, &
- offset)
- deallocate(tempIDs)
-
-
- !--------- Create Vertex Exchange Lists ---------!
-
-
- ! pass in neededList of ownedVertices and ownedCell perimeter vertices
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- nOwnVertices, nVerticesCumulative(2), &
- local_vertex_list(1:nOwnVertices), local_vertex_list(1 : nVerticesCumulative(2)), &
- domain % blocklist % parinfo % verticesToSend(1) % next, domain % blocklist % parinfo % verticesToRecv(1) % next)
-
- ! pass in neededList of owned vertices and yet-to-be-included vertices from halo 1 cells; offset of number of ownedCell perimeter vertices is required
- offset = nVerticesHalo(1)
- nTempIDs = nOwnVertices + nVerticesHalo(2)
- allocate(tempIDs(nTempIDs))
- tempIDs(1:nOwnVertices) = local_vertex_list(1:nOwnVertices)
- tempIDs(nOwnVertices+1:nTempIDs) = local_vertex_list(nVerticesCumulative(2)+1 : nVerticesCumulative(3))
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- nOwnVertices, nTempIDs, &
- local_vertex_list(1:nOwnVertices), tempIDs, &
- domain % blocklist % parinfo % verticesToSend(2) % next, domain % blocklist % parinfo % verticesToRecv(2) % next, &
- offset)
- deallocate(tempIDs)
-
- ! pass in neededList of owned vertices and yet-to-be-included vertices from halo 2 cells; offset of number of ownedCell perimeter vertices and halo 1 vertices is required
- offset = nVerticesHalo(1) + nVerticesHalo(2)
- nTempIDs = nOwnVertices + nVerticesHalo(3)
- allocate(tempIDs(nTempIDs))
- tempIDs(1:nOwnVertices) = local_vertex_list(1:nOwnVertices)
- tempIDs(nOwnVertices+1:nTempIDs) = local_vertex_list(nVerticesCumulative(3)+1 : nVerticesCumulative(4))
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- nOwnVertices, nTempIDs, &
- local_vertex_list(1:nOwnVertices), tempIDs, &
- domain % blocklist % parinfo % verticesToSend(3) % next, domain % blocklist % parinfo % verticesToRecv(3) % next, &
- offset)
- deallocate(tempIDs)
-
-
- domain % blocklist % mesh % nCellsSolve = nOwnCells
- domain % blocklist % mesh % nEdgesSolve = nOwnEdges
- domain % blocklist % mesh % nVerticesSolve = ghostVertexStart-1
- domain % blocklist % mesh % nVertLevelsSolve = domain % blocklist % mesh % nVertLevels ! No vertical decomp yet...
-
- ! Link the sendList and recvList pointers in each field type to the appropriate lists
- ! in parinfo, e.g., cellsToSend and cellsToRecv; in future, it can also be extended to
- ! link blocks of fields to each other
- call mpas_create_field_links(domain % blocklist)
-
-
- !
! Exchange halos for all of the fields that were read from the input file
!
call mpas_exch_input_field_halos(domain, input_obj)
-
- !
- ! Rename vertices in cellsOnCell, edgesOnCell, etc. to local indices
- !
- allocate(cellIDSorted(2,domain % blocklist % mesh % nCells))
- allocate(edgeIDSorted(2,domain % blocklist % mesh % nEdges))
- allocate(vertexIDSorted(2,domain % blocklist % mesh % nVertices))
+ call mpas_block_creator_reindex_block_fields(domain % blocklist)
- do i=1,domain % blocklist % mesh % nCells
- cellIDSorted(1,i) = domain % blocklist % mesh % indexToCellID % array(i)
- cellIDSorted(2,i) = i
- end do
- call quicksort(block_graph_2Halo % nVerticesTotal, cellIDSorted)
+ call mpas_dmpar_destroy_mulithalo_exchange_list(indexToCellIDField % sendList)
+ call mpas_dmpar_destroy_mulithalo_exchange_list(indexToCellIDField % recvList)
+ call mpas_dmpar_destroy_mulithalo_exchange_list(indexToCellIDField % copyList)
- do i=1,domain % blocklist % mesh % nEdges
- edgeIDSorted(1,i) = domain % blocklist % mesh % indexToEdgeID % array(i)
- edgeIDSorted(2,i) = i
- end do
- call quicksort(nlocal_edges, edgeIDSorted)
+ call mpas_dmpar_destroy_mulithalo_exchange_list(indexToEdgeIDField % sendList)
+ call mpas_dmpar_destroy_mulithalo_exchange_list(indexToEdgeIDField % recvList)
+ call mpas_dmpar_destroy_mulithalo_exchange_list(indexToEdgeIDField % copyList)
- do i=1,domain % blocklist % mesh % nVertices
- vertexIDSorted(1,i) = domain % blocklist % mesh % indexToVertexID % array(i)
- vertexIDSorted(2,i) = i
- end do
- call quicksort(nlocal_vertices, vertexIDSorted)
+ call mpas_dmpar_destroy_mulithalo_exchange_list(indexToVertexIDField % sendList)
+ call mpas_dmpar_destroy_mulithalo_exchange_list(indexToVertexIDField % recvList)
+ call mpas_dmpar_destroy_mulithalo_exchange_list(indexToVertexIDField % copyList)
+ call mpas_deallocate_field(indexToCellIDField)
+ call mpas_deallocate_field(indexToEdgeIDField)
+ call mpas_deallocate_field(indexToVertexIDField)
+ call mpas_deallocate_field(cellsOnCellField)
- do i=1,domain % blocklist % mesh % nCells
- do j=1,domain % blocklist % mesh % nEdgesOnCell % array(i)
+ call mpas_deallocate_field(edgesOnCellField)
+ call mpas_deallocate_field(verticesOnCellField)
+ call mpas_deallocate_field(cellsOnEdgeField)
+ call mpas_deallocate_field(cellsOnVertexField)
- k = mpas_binary_search(cellIDSorted, 2, 1, domain % blocklist % mesh % nCells, &
- domain % blocklist % mesh % cellsOnCell % array(j,i))
- if (k <= domain % blocklist % mesh % nCells) then
- domain % blocklist % mesh % cellsOnCell % array(j,i) = cellIDSorted(2,k)
- else
- domain % blocklist % mesh % cellsOnCell % array(j,i) = domain % blocklist % mesh % nCells + 1
- end if
+ call mpas_deallocate_field(indexToCellID_Block)
+ call mpas_deallocate_field(nEdgesOnCell_Block)
+ call mpas_deallocate_field(cellsOnCell_Block)
+ call mpas_deallocate_field(verticesOnCell_Block)
+ call mpas_deallocate_field(edgesOnCell_Block)
+ call mpas_deallocate_field(indexToVertexID_Block)
+ call mpas_deallocate_field(cellsOnVertex_Block)
+ call mpas_deallocate_field(indexToEdgeID_Block)
+ call mpas_deallocate_field(cellsOnEdge_Block)
- k = mpas_binary_search(edgeIDSorted, 2, 1, domain % blocklist % mesh % nEdges, &
- domain % blocklist % mesh % edgesOnCell % array(j,i))
- if (k <= domain % blocklist % mesh % nEdges) then
- domain % blocklist % mesh % edgesOnCell % array(j,i) = edgeIDSorted(2,k)
- else
- domain % blocklist % mesh % edgesOnCell % array(j,i) = domain % blocklist % mesh % nEdges + 1
- end if
+ call mpas_deallocate_field(nCellsSolveField)
+ call mpas_deallocate_field(nVerticesSolveField)
+ call mpas_deallocate_field(nEdgesSolveField)
- k = mpas_binary_search(vertexIDSorted, 2, 1, domain % blocklist % mesh % nVertices, &
- domain % blocklist % mesh % verticesOnCell % array(j,i))
- if (k <= domain % blocklist % mesh % nVertices) then
- domain % blocklist % mesh % verticesOnCell % array(j,i) = vertexIDSorted(2,k)
- else
- domain % blocklist % mesh % verticesOnCell % array(j,i) = domain % blocklist % mesh % nVertices + 1
- end if
+#ifdef HAVE_ZOLTAN
+ call mpas_deallocate_field(xCellField)
+ call mpas_deallocate_field(yCellField)
+ call mpas_deallocate_field(zCellField)
+ call mpas_deallocate_field(xVertexField)
+ call mpas_deallocate_field(yVertexField)
+ call mpas_deallocate_field(zVertexField)
+ call mpas_deallocate_field(xEdgeField)
+ call mpas_deallocate_field(yEdgeField)
+ call mpas_deallocate_field(zEdgeField)
- end do
- end do
+ call mpas_deallocate_field(xCell)
+ call mpas_deallocate_field(yCell)
+ call mpas_deallocate_field(zCell)
+ call mpas_deallocate_field(xVertex)
+ call mpas_deallocate_field(yVertex)
+ call mpas_deallocate_field(zVertex)
+ call mpas_deallocate_field(xEdge)
+ call mpas_deallocate_field(yEdge)
+ call mpas_deallocate_field(zEdge)
+#endif
- do i=1,domain % blocklist % mesh % nEdges
- do j=1,2
+ deallocate(local_cell_list)
+ deallocate(block_id)
+ deallocate(block_start)
+ deallocate(block_count)
+ deallocate(readingBlock)
- k = mpas_binary_search(cellIDSorted, 2, 1, domain % blocklist % mesh % nCells, &
- domain % blocklist % mesh % cellsOnEdge % array(j,i))
- if (k <= domain % blocklist % mesh % nCells) then
- domain % blocklist % mesh % cellsOnEdge % array(j,i) = cellIDSorted(2,k)
- else
- domain % blocklist % mesh % cellsOnEdge % array(j,i) = domain % blocklist % mesh % nCells + 1
- end if
+!#ifdef HAVE_ZOLTAN
+!#ifdef _MPI
+! allocate(xCell(size(local_cell_list)))
+! allocate(yCell(size(local_cell_list)))
+! allocate(zCell(size(local_cell_list)))
+! call mpas_dmpar_alltoall_field(domain % dminfo, xCellField % array, xCell, &
+! size(xCellField % array), size(local_cell_list), &
+! sendCellList, recvCellList)
+!
+! call mpas_dmpar_alltoall_field(domain % dminfo, yCellField % array, yCell, &
+! size(yCellField % array), size(local_cell_list), &
+! sendCellList, recvCellList)
+!
+! call mpas_dmpar_alltoall_field(domain % dminfo, zCellField % array, zCell, &
+! size(zCellField % array), size(local_cell_list), &
+! sendCellList, recvCellList)
+!#endif
+!#endif
+
+!#ifdef HAVE_ZOLTAN
+!#ifdef _MPI
+! !! For now, only use Zoltan with MPI
+! !! Zoltan initialization
+! call mpas_zoltan_start()
+!
+! !! Zoltan hook for cells
+! call mpas_zoltan_order_loc_hsfc_cells(block_graph_2Halo%nVertices,block_graph_2Halo%VertexID,3,xCell,yCell,zCell)
+!#endif
+!#endif
+!
+!
+!#ifdef HAVE_ZOLTAN
+!#ifdef _MPI
+! allocate(xEdge(nlocal_edges))
+! allocate(yEdge(nlocal_edges))
+! allocate(zEdge(nlocal_edges))
+! allocate(xVertex(nlocal_vertices))
+! allocate(yVertex(nlocal_vertices))
+! allocate(zVertex(nlocal_vertices))
+!#endif
+!#endif
+!
+!#ifdef HAVE_ZOLTAN
+!#ifdef _MPI
+! call mpas_dmpar_alltoall_field(domain % dminfo, xEdgeField % array, xEdge, &
+! size(xEdgeField % array), nlocal_edges, &
+! sendEdgeList, recvEdgeList)
+! call mpas_dmpar_alltoall_field(domain % dminfo, yEdgeField % array, yEdge, &
+! size(yEdgeField % array), nlocal_edges, &
+! sendEdgeList, recvEdgeList)
+! call mpas_dmpar_alltoall_field(domain % dminfo, zEdgeField % array, zEdge, &
+! size(zEdgeField % array), nlocal_edges, &
+! sendEdgeList, recvEdgeList)
+!
+! call mpas_dmpar_alltoall_field(domain % dminfo, xVertexField % array, xVertex, &
+! size(xVertexField % array), nlocal_vertices, &
+! sendVertexList, recvVertexList)
+! call mpas_dmpar_alltoall_field(domain % dminfo, yVertexField % array, yVertex, &
+! size(yVertexField % array), nlocal_vertices, &
+! sendVertexList, recvVertexList)
+! call mpas_dmpar_alltoall_field(domain % dminfo, zVertexField % array, zVertex, &
+! size(zVertexField % array), nlocal_vertices, &
+! sendVertexList, recvVertexList)
+! !!!!!!!!!!!!!!!!!!
+! !! Reorder edges
+! !!!!!!!!!!!!!!!!!!
+! call mpas_zoltan_order_loc_hsfc_edges(nOwnEdges,local_edge_list,3,xEdge,yEdge,zEdge)
+! !!!!!!!!!!!!!!!!!!
+!
+! !!!!!!!!!!!!!!!!!!
+! !! Reorder vertices
+! !!!!!!!!!!!!!!!!!!
+! call mpas_zoltan_order_loc_hsfc_verts(nOwnVertices,local_vertex_list,3,xVertex,yVertex,zVertex)
+! !!!!!!!!!!!!!!!!!!
+!
+! deallocate(sendEdgeList % list)
+! deallocate(sendEdgeList)
+! deallocate(recvEdgeList % list)
+! deallocate(recvEdgeList)
+!
+! deallocate(sendVertexList % list)
+! deallocate(sendVertexList)
+! deallocate(recvVertexList % list)
+! deallocate(recvVertexList)
+!
+! !
+! ! Knowing which edges/vertices are owned by this block and which are actually read
+! ! from the input or restart file, we can build exchange lists to perform
+! ! all-to-all field exchanges from process that reads a field to the processes that
+! ! need them
+! !
+! call mpas_dmpar_get_owner_list(domain % dminfo, &
+! size(indexToEdgeIDField % array), nlocal_edges, &
+! indexToEdgeIDField % array, local_edge_list, &
+! sendEdgeList, recvEdgeList)
+!
+! call mpas_dmpar_get_owner_list(domain % dminfo, &
+! size(indexToVertexIDField % array), nlocal_vertices, &
+! indexToVertexIDField % array, local_vertex_list, &
+! sendVertexList, recvVertexList)
+!
+!#endif
+!#endif
+!
- k = mpas_binary_search(vertexIDSorted, 2, 1, domain % blocklist % mesh % nVertices, &
- domain % blocklist % mesh % verticesOnEdge % array(j,i))
- if (k <= domain % blocklist % mesh % nVertices) then
- domain % blocklist % mesh % verticesOnEdge % array(j,i) = vertexIDSorted(2,k)
- else
- domain % blocklist % mesh % verticesOnEdge % array(j,i) = domain % blocklist % mesh % nVertices + 1
- end if
- end do
+! !
+! ! Deallocate fields, graphs, and other memory
+! !
+!#ifdef HAVE_ZOLTAN
+!#ifdef _MPI
+! deallocate(xCellField % ioinfo)
+! deallocate(xCellField % array)
+! deallocate(yCellField % ioinfo)
+! deallocate(yCellField % array)
+! deallocate(zCellField % ioinfo)
+! deallocate(zCellField % array)
+!#endif
+!#endif
- do j=1,domain % blocklist % mesh % nEdgesOnEdge % array(i)
+!#ifdef HAVE_ZOLTAN
+!#ifdef _MPI
+! deallocate(xCell)
+! deallocate(yCell)
+! deallocate(zCell)
+!#endif
+!#endif
+ end subroutine mpas_input_state_for_domain!}}}
- k = mpas_binary_search(edgeIDSorted, 2, 1, domain % blocklist % mesh % nEdges, &
- domain % blocklist % mesh % edgesOnEdge % array(j,i))
- if (k <= domain % blocklist % mesh % nEdges) then
- domain % blocklist % mesh % edgesOnEdge % array(j,i) = edgeIDSorted(2,k)
- else
- domain % blocklist % mesh % edgesOnEdge % array(j,i) = domain % blocklist % mesh % nEdges + 1
- end if
-
- end do
- end do
-
- do i=1,domain % blocklist % mesh % nVertices
- do j=1,vertexDegree
-
- k = mpas_binary_search(cellIDSorted, 2, 1, domain % blocklist % mesh % nCells, &
- domain % blocklist % mesh % cellsOnVertex % array(j,i))
- if (k <= domain % blocklist % mesh % nCells) then
- domain % blocklist % mesh % cellsOnVertex % array(j,i) = cellIDSorted(2,k)
- else
- domain % blocklist % mesh % cellsOnVertex % array(j,i) = domain % blocklist % mesh % nCells + 1
- end if
-
- k = mpas_binary_search(edgeIDSorted, 2, 1, domain % blocklist % mesh % nEdges, &
- domain % blocklist % mesh % edgesOnVertex % array(j,i))
- if (k <= domain % blocklist % mesh % nEdges) then
- domain % blocklist % mesh % edgesOnVertex % array(j,i) = edgeIDSorted(2,k)
- else
- domain % blocklist % mesh % edgesOnVertex % array(j,i) = domain % blocklist % mesh % nEdges + 1
- end if
-
- end do
- end do
-
- deallocate(cellIDSorted)
- deallocate(edgeIDSorted)
- deallocate(vertexIDSorted)
-
-
- !
- ! Deallocate fields, graphs, and other memory
- !
- deallocate(indexToCellIDField % ioinfo)
- deallocate(indexToCellIDField % array)
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- deallocate(xCellField % ioinfo)
- deallocate(xCellField % array)
- deallocate(yCellField % ioinfo)
- deallocate(yCellField % array)
- deallocate(zCellField % ioinfo)
- deallocate(zCellField % array)
-#endif
-#endif
- deallocate(indexToEdgeIDField % ioinfo)
- deallocate(indexToEdgeIDField % array)
- deallocate(indexToVertexIDField % ioinfo)
- deallocate(indexToVertexIDField % array)
- deallocate(cellsOnCellField % ioinfo)
- deallocate(cellsOnCellField % array)
- deallocate(edgesOnCellField % ioinfo)
- deallocate(edgesOnCellField % array)
- deallocate(verticesOnCellField % ioinfo)
- deallocate(verticesOnCellField % array)
- deallocate(cellsOnEdgeField % ioinfo)
- deallocate(cellsOnEdgeField % array)
- deallocate(cellsOnVertexField % ioinfo)
- deallocate(cellsOnVertexField % array)
- deallocate(cellsOnCell_0Halo)
- deallocate(nEdgesOnCell_0Halo)
- deallocate(indexToCellID_0Halo)
- deallocate(cellsOnEdge_2Halo)
- deallocate(cellsOnVertex_2Halo)
- deallocate(nEdgesOnCell_2Halo)
- deallocate(edgesOnCell_2Halo)
- deallocate(verticesOnCell_2Halo)
- deallocate(block_graph_0Halo % vertexID)
- deallocate(block_graph_0Halo % nAdjacent)
- deallocate(block_graph_0Halo % adjacencyList)
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- deallocate(xCell)
- deallocate(yCell)
- deallocate(zCell)
-#endif
-#endif
- end subroutine mpas_input_state_for_domain
-
-
!CR:TODO: an identical subroutine is found in module_io_output - merge
- subroutine mpas_insert_string_suffix(stream, suffix, filename)
+ subroutine mpas_insert_string_suffix(stream, suffix, filename)!{{{
implicit none
@@ -1289,10 +505,9 @@
if (filename(i:i) == ':') filename(i:i) = '.'
end do
- end subroutine mpas_insert_string_suffix
+ end subroutine mpas_insert_string_suffix!}}}
-
- subroutine mpas_read_and_distribute_fields(input_obj)
+ subroutine mpas_read_and_distribute_fields(input_obj)!{{{
implicit none
@@ -1304,11 +519,9 @@
call MPAS_readStream(input_obj % io_stream, 1, ierr)
- end subroutine mpas_read_and_distribute_fields
+ end subroutine mpas_read_and_distribute_fields!}}}
-
-
- subroutine mpas_io_input_init(input_obj, blocklist, dminfo)
+ subroutine mpas_io_input_init(input_obj, blocklist, dminfo)!{{{
implicit none
@@ -1334,10 +547,9 @@
#include "add_input_fields.inc"
- end subroutine mpas_io_input_init
+ end subroutine mpas_io_input_init!}}}
-
- subroutine mpas_io_input_get_dimension(input_obj, dimname, dimsize)
+ subroutine mpas_io_input_get_dimension(input_obj, dimname, dimsize)!{{{
implicit none
@@ -1347,10 +559,9 @@
!include "get_dimension_by_name.inc"
- end subroutine mpas_io_input_get_dimension
+ end subroutine mpas_io_input_get_dimension!}}}
-
- subroutine mpas_io_input_get_att_real(input_obj, attname, attvalue)
+ subroutine mpas_io_input_get_att_real(input_obj, attname, attvalue)!{{{
implicit none
@@ -1360,10 +571,9 @@
integer :: nferr
- end subroutine mpas_io_input_get_att_real
+ end subroutine mpas_io_input_get_att_real!}}}
-
- subroutine mpas_io_input_get_att_text(input_obj, attname, attvalue)
+ subroutine mpas_io_input_get_att_text(input_obj, attname, attvalue)!{{{
implicit none
@@ -1373,11 +583,10 @@
integer :: nferr
- end subroutine mpas_io_input_get_att_text
+ end subroutine mpas_io_input_get_att_text!}}}
+ subroutine mpas_exch_input_field_halos(domain, input_obj)!{{{
- subroutine mpas_exch_input_field_halos(domain, input_obj)
-
implicit none
type (domain_type), intent(inout) :: domain
@@ -1385,10 +594,11 @@
#include "exchange_input_field_halos.inc"
- end subroutine mpas_exch_input_field_halos
+#include "non_decomp_copy_input_fields.inc"
+ end subroutine mpas_exch_input_field_halos!}}}
- subroutine mpas_io_input_finalize(input_obj, dminfo)
+ subroutine mpas_io_input_finalize(input_obj, dminfo)!{{{
implicit none
@@ -1399,6 +609,435 @@
call MPAS_closeStream(input_obj % io_stream, nferr)
- end subroutine mpas_io_input_finalize
+ end subroutine mpas_io_input_finalize!}}}
+
+ subroutine mpas_io_setup_cell_block_fields(inputHandle, nReadCells, readCellStart, readingBlock, maxEdges, indexToCellID, xCell, yCell, zCell, nEdgesOnCell, cellsOnCell, edgesOnCell, verticesOnCell)!{{{
+ type (MPAS_IO_Handle_type) :: inputHandle
+ integer, intent(in) :: nReadCells
+ integer, intent(in) :: readCellStart
+ integer, intent(in) :: maxEdges
+ type (block_type), pointer :: readingBlock
+ type (field1dInteger), pointer :: indexToCellID
+ type (field1dReal), pointer :: xCell
+ type (field1dReal), pointer :: yCell
+ type (field1dReal), pointer :: zCell
+ type (field1dInteger), pointer :: nEdgesOnCell
+ type (field2dInteger), pointer :: cellsOnCell
+ type (field2dInteger), pointer :: edgesOnCell
+ type (field2dInteger), pointer :: verticesOnCell
+
+ integer :: i, nHalos
+ integer, dimension(:), pointer :: readIndices
+
+ nHalos = config_num_halos
+
+ !
+ ! Allocate and read fields that we will need in order to ultimately work out
+ ! which cells/edges/vertices are owned by each block, and which are ghost
+ !
+
+ ! Global cell indices
+ allocate(indexToCellID)
+ allocate(indexToCellID % ioinfo)
+ indexToCellID % ioinfo % fieldName = 'indexToCellID'
+ indexToCellID % ioinfo % start(1) = readCellStart
+ indexToCellID % ioinfo % count(1) = nReadCells
+ allocate(indexToCellID % array(nReadCells))
+ allocate(readIndices(nReadCells))
+ do i=1,nReadCells
+ readIndices(i) = i + readCellStart - 1
+ end do
+ call MPAS_io_inq_var(inputHandle, 'indexToCellID', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'indexToCellID', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'indexToCellID', indexToCellID % array, ierr)
+ indexToCellID % dimSizes(1) = nReadCells
+ indexToCellID % block => readingBlock
+ call mpas_dmpar_init_mulithalo_exchange_list(indexToCellID % sendList, nHalos)
+ call mpas_dmpar_init_mulithalo_exchange_list(indexToCellID % recvList, nHalos)
+ call mpas_dmpar_init_mulithalo_exchange_list(indexToCellID % copyList, nHalos)
+ nullify(indexToCellID % next)
+
+#ifdef HAVE_ZOLTAN
+#ifdef _MPI
+ ! Cell x-coordinates (in 3d Cartesian space)
+ allocate(xCell)
+ allocate(xCell % ioinfo)
+ xCell % ioinfo % fieldName = 'xCell'
+ xCell % ioinfo % start(1) = readCellStart
+ xCell % ioinfo % count(1) = nReadCells
+ allocate(xCell % array(nReadCells))
+ call MPAS_io_inq_var(inputHandle, 'xCell', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'xCell', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'xCell', xCell % array, ierr)
+ xCell % dimSizes(1) = nReadCells
+ xCell % block => readingBlock
+ xCell % sendList => indexToCellID % sendList
+ xCell % recvList => indexToCellID % recvList
+ xCell % copyList => indexToCellID % copyList
+ nullify(xCell % next)
+
+ ! Cell y-coordinates (in 3d Cartesian space)
+ allocate(yCell)
+ allocate(yCell % ioinfo)
+ yCell % ioinfo % fieldName = 'yCell'
+ yCell % ioinfo % start(1) = readCellStart
+ yCell % ioinfo % count(1) = nReadCells
+ allocate(yCell % array(nReadCells))
+ call MPAS_io_inq_var(inputHandle, 'yCell', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'yCell', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'yCell', yCell % array, ierr)
+ yCell % sendList => indexToCellID % sendList
+ yCell % recvList => indexToCellID % recvList
+ yCell % copyList => indexToCellID % copyList
+ yCell % dimSizes(1) = nReadCells
+ yCell % block => readingBlock
+ nullify(yCell % next)
+
+ ! Cell z-coordinates (in 3d Cartesian space)
+ allocate(zCell)
+ allocate(zCell % ioinfo)
+ zCell % ioinfo % fieldName = 'zCell'
+ zCell % ioinfo % start(1) = readCellStart
+ zCell % ioinfo % count(1) = nReadCells
+ allocate(zCell % array(nReadCells))
+ call MPAS_io_inq_var(inputHandle, 'zCell', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'zCell', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'zCell', zCell % array, ierr)
+ zCell % dimSizes(1) = nReadCells
+ zCell % block => readingBlock
+ zCell % sendList => indexToCellID % sendList
+ zCell % recvList => indexToCellID % recvList
+ zCell % copyList => indexToCellID % copyList
+ nullify(zCell % next)
+#endif
+#endif
+
+ ! Number of cell/edges/vertices adjacent to each cell
+ allocate(nEdgesOnCell)
+ allocate(nEdgesOnCell % ioinfo)
+ nEdgesOnCell % ioinfo % fieldName = 'nEdgesOnCell'
+ nEdgesOnCell % ioinfo % start(1) = readCellStart
+ nEdgesOnCell % ioinfo % count(1) = nReadCells
+ allocate(nEdgesOnCell % array(nReadCells))
+ call MPAS_io_inq_var(inputHandle, 'nEdgesOnCell', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'nEdgesOnCell', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'nEdgesOnCell', nEdgesOnCell % array, ierr)
+ nEdgesOnCell % dimSizes(1) = nReadCells
+ nEdgesOnCell % block => readingBlock
+ nEdgesOnCell % sendList => indexToCellID % sendList
+ nEdgesOnCell % recvList => indexToCellID % recvList
+ nEdgesOnCell % copyList => indexToCellID % copyList
+ nullify(nEdgesOnCell % next)
+
+ ! Global indices of cells adjacent to each cell
+ allocate(cellsOnCell)
+ allocate(cellsOnCell % ioinfo)
+ cellsOnCell % ioinfo % fieldName = 'cellsOnCell'
+ cellsOnCell % ioinfo % start(1) = 1
+ cellsOnCell % ioinfo % start(2) = readCellStart
+ cellsOnCell % ioinfo % count(1) = maxEdges
+ cellsOnCell % ioinfo % count(2) = nReadCells
+ allocate(cellsOnCell % array(maxEdges,nReadCells))
+ call MPAS_io_inq_var(inputHandle, 'cellsOnCell', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'cellsOnCell', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'cellsOnCell', cellsOnCell % array, ierr)
+ cellsOnCell % dimSizes(1) = maxEdges
+ cellsOnCell % dimSizes(2) = nReadCells
+ cellsOnCell % block => readingBlock
+ cellsOnCell % sendList => indexToCellID % sendList
+ cellsOnCell % recvList => indexToCellID % recvList
+ cellsOnCell % copyList => indexToCellID % copyList
+ nullify(cellsOnCell % next)
+
+ ! Global indices of edges adjacent to each cell
+ allocate(edgesOnCell)
+ allocate(edgesOnCell % ioinfo)
+ edgesOnCell % ioinfo % fieldName = 'edgesOnCell'
+ edgesOnCell % ioinfo % start(1) = 1
+ edgesOnCell % ioinfo % start(2) = readCellStart
+ edgesOnCell % ioinfo % count(1) = maxEdges
+ edgesOnCell % ioinfo % count(2) = nReadCells
+ allocate(edgesOnCell % array(maxEdges,nReadCells))
+ call MPAS_io_inq_var(inputHandle, 'edgesOnCell', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'edgesOnCell', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'edgesOnCell', edgesOnCell % array, ierr)
+ edgesOnCell % dimSizes(1) = maxEdges
+ edgesOnCell % dimSizes(2) = nReadCells
+ edgesOnCell % block => readingBlock
+ edgesOnCell % sendList => indexToCellID % sendList
+ edgesOnCell % recvList => indexToCellID % recvList
+ edgesOnCell % copyList => indexToCellID % copyList
+ nullify(edgesOnCell % next)
+
+ ! Global indices of vertices adjacent to each cell
+ allocate(verticesOnCell)
+ allocate(verticesOnCell % ioinfo)
+ verticesOnCell % ioinfo % fieldName = 'verticesOnCell'
+ verticesOnCell % ioinfo % start(1) = 1
+ verticesOnCell % ioinfo % start(2) = readCellStart
+ verticesOnCell % ioinfo % count(1) = maxEdges
+ verticesOnCell % ioinfo % count(2) = nReadCells
+ allocate(verticesOnCell % array(maxEdges,nReadCells))
+ call MPAS_io_inq_var(inputHandle, 'verticesOnCell', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'verticesOnCell', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'verticesOnCell', verticesOnCell % array, ierr)
+ verticesOnCell % dimSizes(1) = maxEdges
+ verticesOnCell % dimSizes(2) = nReadCells
+ verticesOnCell % block => readingBlock
+ verticesOnCell % sendList => indexToCellID % sendList
+ verticesOnCell % recvList => indexToCellID % recvList
+ verticesOnCell % copyList => indexToCellID % copyList
+ nullify(verticesOnCell % next)
+
+ deallocate(readIndices)
+
+ end subroutine mpas_io_setup_cell_block_fields!}}}
+
+ subroutine mpas_io_setup_edge_block_fields(inputHandle, nReadEdges, readEdgeStart, readingBlock, indexToEdgeID, xEdge, yEdge, zEdge, cellsOnEdge)!{{{
+ type (MPAS_IO_Handle_type) :: inputHandle
+ integer, intent(in) :: nReadEdges
+ integer, intent(in) :: readEdgeStart
+ type (block_type), pointer :: readingBlock
+ type (field1dInteger), pointer :: indexToEdgeID
+ type (field1dReal), pointer :: xEdge
+ type (field1dReal), pointer :: yEdge
+ type (field1dReal), pointer :: zEdge
+ type (field2dInteger), pointer :: cellsOnEdge
+
+ integer :: i, nHalos
+ integer, dimension(:), pointer :: readIndices
+
+ nHalos = config_num_halos
+
+ !
+ ! Allocate and read fields that we will need in order to ultimately work out
+ ! which cells/edges/vertices are owned by each block, and which are ghost
+ !
+
+ allocate(readIndices(nReadEdges))
+
+ ! Global edge indices
+ allocate(indexToEdgeID)
+ allocate(indexToEdgeID % ioinfo)
+ indexToEdgeID % ioinfo % fieldName = 'indexToEdgeID'
+ indexToEdgeID % ioinfo % start(1) = readEdgeStart
+ indexToEdgeID % ioinfo % count(1) = nReadEdges
+ allocate(indexToEdgeID % array(nReadEdges))
+ do i=1,nReadEdges
+ readIndices(i) = i + readEdgeStart - 1
+ end do
+ call MPAS_io_inq_var(inputHandle, 'indexToEdgeID', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'indexToEdgeID', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'indexToEdgeID', indexToEdgeID % array, ierr)
+ indexToEdgeID % dimSizes(1) = nReadEdges
+ indexToEdgeID % block => readingBlock
+ call mpas_dmpar_init_mulithalo_exchange_list(indexToEdgeID % sendList, nHalos+1)
+ call mpas_dmpar_init_mulithalo_exchange_list(indexToEdgeID % recvList, nHalos+1)
+ call mpas_dmpar_init_mulithalo_exchange_list(indexToEdgeID % copyList, nHalos+1)
+ nullify(indexToEdgeID % next)
+
+#ifdef HAVE_ZOLTAN
+#ifdef _MPI
+ ! Edge x-coordinates (in 3d Cartesian space)
+ allocate(xEdge)
+ allocate(xEdge % ioinfo)
+ xEdge % ioinfo % fieldName = 'xEdge'
+ xEdge % ioinfo % start(1) = readEdgeStart
+ xEdge % ioinfo % count(1) = nReadEdges
+ allocate(xEdge % array(nReadEdges))
+ call MPAS_io_inq_var(inputHandle, 'xEdge', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'xEdge', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'xEdge', xEdge % array, ierr)
+ xEdge % dimSizes(1) = nReadEdges
+ xEdge % block => readingBlock
+ xEdge % sendList => indexToEdgeID % sendList
+ xEdge % recvList => indexToEdgeID % recvList
+ xEdge % copyList => indexToEdgeID % copyList
+ nullify(xEdge % next)
+
+ ! Edge y-coordinates (in 3d Cartesian space)
+ allocate(yEdge)
+ allocate(yEdge % ioinfo)
+ yEdge % ioinfo % fieldName = 'yEdge'
+ yEdge % ioinfo % start(1) = readEdgeStart
+ yEdge % ioinfo % count(1) = nReadEdges
+ allocate(yEdge % array(nReadEdges))
+ call MPAS_io_inq_var(inputHandle, 'yEdge', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'yEdge', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'yEdge', yEdge % array, ierr)
+ yEdge % dimSizes(1) = nReadEdges
+ yEdge % block => readingBlock
+ yEdge % sendList => indexToEdgeID % sendList
+ yEdge % recvList => indexToEdgeID % recvList
+ yEdge % copyList => indexToEdgeID % copyList
+ nullify(yEdge % next)
+
+ ! Edge z-coordinates (in 3d Cartesian space)
+ allocate(zEdge)
+ allocate(zEdge % ioinfo)
+ zEdge % ioinfo % fieldName = 'zEdge'
+ zEdge % ioinfo % start(1) = readEdgeStart
+ zEdge % ioinfo % count(1) = nReadEdges
+ allocate(zEdge % array(nReadEdges))
+ call MPAS_io_inq_var(inputHandle, 'zEdge', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'zEdge', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'zEdge', zEdge % array, ierr)
+ zEdge % dimSizes(1) = nReadEdges
+ zEdge % block => readingBlock
+ zEdge % sendList => indexToEdgeID % sendList
+ zEdge % recvList => indexToEdgeID % recvList
+ zEdge % copyList => indexToEdgeID % copyList
+ nullify(zEdge % next)
+#endif
+#endif
+
+
+ ! Global indices of cells adjacent to each edge
+ ! used for determining which edges are owned by a block, where
+ ! iEdge is owned iff cellsOnEdge(1,iEdge) is an owned cell
+ allocate(cellsOnEdge)
+ allocate(cellsOnEdge % ioinfo)
+ cellsOnEdge % ioinfo % fieldName = 'cellsOnEdge'
+ cellsOnEdge % ioinfo % start(1) = 1
+ cellsOnEdge % ioinfo % start(2) = readEdgeStart
+ cellsOnEdge % ioinfo % count(1) = 2
+ cellsOnEdge % ioinfo % count(2) = nReadEdges
+ allocate(cellsOnEdge % array(2,nReadEdges))
+ call MPAS_io_inq_var(inputHandle, 'cellsOnEdge', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'cellsOnEdge', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'cellsOnEdge', cellsOnEdge % array, ierr)
+ cellsOnEdge % dimSizes(1) = 2
+ cellsOnEdge % dimSizes(2) = nReadEdges
+ cellsOnEdge % block => readingBlock
+ cellsOnEdge % sendList => indexToEdgeID % sendList
+ cellsOnEdge % recvList => indexToEdgeID % recvList
+ cellsOnEdge % copyList => indexToEdgeID % copyList
+ nullify(cellsOnEdge % next)
+
+ deallocate(readIndices)
+
+ end subroutine mpas_io_setup_edge_block_fields!}}}
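On the ownership rule stated in the comment above, a minimal sketch (the logical arrays cellIsOwned and edgeIsOwned, indexed by global cell ID and local read order respectively, are hypothetical and not part of this commit):

   ! An edge read by this task is owned iff its first adjacent cell is an owned cell.
   do i = 1, nReadEdges
      edgeIsOwned(i) = cellIsOwned(cellsOnEdge % array(1, i))
   end do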
+
+ subroutine mpas_io_setup_vertex_block_fields(inputHandle, nReadVertices, readVertexStart, readingBlock, vertexDegree, indexToVertexID, xVertex, yVertex, zVertex, cellsOnVertex)!{{{
+ type (MPAS_IO_Handle_type) :: inputHandle
+ integer, intent(in) :: nReadVertices
+ integer, intent(in) :: readVertexStart
+ integer, intent(in) :: vertexDegree
+ type (block_type), pointer :: readingBlock
+ type (field1dInteger), pointer :: indexToVertexID
+ type (field1dReal), pointer :: xVertex
+ type (field1dReal), pointer :: yVertex
+ type (field1dReal), pointer :: zVertex
+ type (field2dInteger), pointer :: cellsOnVertex
+
+ integer :: i, nHalos
+ integer, dimension(:), pointer :: readIndices
+
+ nHalos = config_num_halos
+
+ ! Global vertex indices
+ allocate(indexToVertexID)
+ allocate(indexToVertexID % ioinfo)
+ indexToVertexID % ioinfo % fieldName = 'indexToVertexID'
+ indexToVertexID % ioinfo % start(1) = readVertexStart
+ indexToVertexID % ioinfo % count(1) = nReadVertices
+ allocate(indexToVertexID % array(nReadVertices))
+ allocate(readIndices(nReadVertices))
+ do i=1,nReadVertices
+ readIndices(i) = i + readVertexStart - 1
+ end do
+ call MPAS_io_inq_var(inputHandle, 'indexToVertexID', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'indexToVertexID', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'indexToVertexID', indexToVertexID % array, ierr)
+ indexToVertexID % dimSizes(1) = nReadVertices
+ indexToVertexID % block => readingBlock
+ call mpas_dmpar_init_mulithalo_exchange_list(indexToVertexID % sendList, nHalos+1)
+ call mpas_dmpar_init_mulithalo_exchange_list(indexToVertexID % recvList, nHalos+1)
+ call mpas_dmpar_init_mulithalo_exchange_list(indexToVertexID % copyList, nHalos+1)
+ nullify(indexToVertexID % next)
+
+#ifdef HAVE_ZOLTAN
+#ifdef _MPI
+ ! Vertex x-coordinates (in 3d Cartesian space)
+ allocate(xVertex)
+ allocate(xVertex % ioinfo)
+ xVertex % ioinfo % fieldName = 'xVertex'
+ xVertex % ioinfo % start(1) = readVertexStart
+ xVertex % ioinfo % count(1) = nReadVertices
+ allocate(xVertex % array(nReadVertices))
+ call MPAS_io_inq_var(inputHandle, 'xVertex', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'xVertex', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'xVertex', xVertex % array, ierr)
+ xVertex % dimSizes(1) = nReadVertices
+ xVertex % block => readingBlock
+ xVertex % sendList => indexToVertexID % sendList
+ xVertex % recvList => indexToVertexID % recvList
+ xVertex % copyList => indexToVertexID % copyList
+ nullify(xVertex % next)
+
+ ! Vertex y-coordinates (in 3d Cartesian space)
+ allocate(yVertex)
+ allocate(yVertex % ioinfo)
+ yVertex % ioinfo % fieldName = 'yVertex'
+ yVertex % ioinfo % start(1) = readVertexStart
+ yVertex % ioinfo % count(1) = nReadVertices
+ allocate(yVertex % array(nReadVertices))
+ call MPAS_io_inq_var(inputHandle, 'yVertex', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'yVertex', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'yVertex', yVertex % array, ierr)
+ yVertex % dimSizes(1) = nReadVertices
+ yVertex % block => readingBlock
+ yVertex % sendList => indexToVertexID % sendList
+ yVertex % recvList => indexToVertexID % recvList
+ yVertex % copyList => indexToVertexID % copyList
+ nullify(yVertex % next)
+
+ ! Vertex z-coordinates (in 3d Cartesian space)
+ allocate(zVertex)
+ allocate(zVertex % ioinfo)
+ zVertex % ioinfo % fieldName = 'zVertex'
+ zVertex % ioinfo % start(1) = readVertexStart
+ zVertex % ioinfo % count(1) = nReadVertices
+ allocate(zVertex % array(nReadVertices))
+ call MPAS_io_inq_var(inputHandle, 'zVertex', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'zVertex', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'zVertex', zVertex % array, ierr)
+ zVertex % dimSizes(1) = nReadVertices
+ zVertex % block => readingBlock
+ zVertex % sendList => indexToVertexID % sendList
+ zVertex % recvList => indexToVertexID % recvList
+ zVertex % copyList => indexToVertexID % copyList
+ nullify(zVertex % next)
+#endif
+#endif
+
+
+ ! Global indices of cells adjacent to each vertex
+ ! used for determining which vertices are owned by a block, where
+ ! iVtx is owned iff cellsOnVertex(1,iVtx) is an owned cell
+ allocate(cellsOnVertex)
+ allocate(cellsOnVertex % ioinfo)
+ cellsOnVertex % ioinfo % fieldName = 'cellsOnVertex'
+ cellsOnVertex % ioinfo % start(1) = 1
+ cellsOnVertex % ioinfo % start(2) = readVertexStart
+ cellsOnVertex % ioinfo % count(1) = vertexDegree
+ cellsOnVertex % ioinfo % count(2) = nReadVertices
+ allocate(cellsOnVertex % array(vertexDegree,nReadVertices))
+ call MPAS_io_inq_var(inputHandle, 'cellsOnVertex', ierr=ierr)
+ call MPAS_io_set_var_indices(inputHandle, 'cellsOnVertex', readIndices, ierr=ierr)
+ call mpas_io_get_var(inputHandle, 'cellsOnVertex', cellsOnVertex % array, ierr)
+ cellsOnVertex % dimSizes(1) = vertexDegree
+ cellsOnVertex % dimSizes(2) = nReadVertices
+ cellsOnVertex % block => readingBlock
+ cellsOnVertex % sendList => indexToVertexID % sendList
+ cellsOnVertex % recvList => indexToVertexID % recvList
+ cellsOnVertex % copyList => indexToVertexID % copyList
+ nullify(cellsOnVertex % next)
+
+ deallocate(readIndices)
+
+ end subroutine mpas_io_setup_vertex_block_fields!}}}
+
end module mpas_io_input
Modified: branches/dcmip/src/framework/mpas_io_output.F
===================================================================
--- branches/dcmip/src/framework/mpas_io_output.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/framework/mpas_io_output.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -22,8 +22,7 @@
contains
-
- subroutine mpas_output_state_init(output_obj, domain, stream, outputSuffix)
+ subroutine mpas_output_state_init(output_obj, domain, stream, outputSuffix)!{{{
implicit none
@@ -66,11 +65,10 @@
block_ptr % mesh &
)
- end subroutine mpas_output_state_init
+ end subroutine mpas_output_state_init!}}}
+ subroutine mpas_insert_string_suffix(stream, suffix, filename)!{{{
- subroutine mpas_insert_string_suffix(stream, suffix, filename)
-
implicit none
character (len=*), intent(in) :: stream
@@ -92,10 +90,9 @@
if (filename(i:i) == ':') filename(i:i) = '.'
end do
- end subroutine mpas_insert_string_suffix
+ end subroutine mpas_insert_string_suffix!}}}
-
- subroutine mpas_output_state_for_domain(output_obj, domain, itime)
+ subroutine mpas_output_state_for_domain(output_obj, domain, itime)!{{{
implicit none
@@ -103,127 +100,223 @@
type (domain_type), intent(inout) :: domain
integer, intent(in) :: itime
+ type(block_type), pointer :: block_ptr
+
+ integer :: nCells, nEdges, nVertices, vertexDegree
+ integer :: maxEdges, maxEdges2, nEdgesSolve, nCellsSolve, nVerticesSolve
integer :: ierr
integer :: i, j
- integer, dimension(:,:), pointer :: cellsOnCell, edgesOnCell, verticesOnCell, &
- cellsOnEdge, verticesOnEdge, edgesOnEdge, cellsOnVertex, edgesOnVertex
- integer, dimension(:,:), pointer :: cellsOnCell_save, edgesOnCell_save, verticesOnCell_save, &
- cellsOnEdge_save, verticesOnEdge_save, edgesOnEdge_save, &
- cellsOnVertex_save, edgesOnVertex_save
- type (field1dInteger) :: int1d
- type (field2dInteger) :: int2d
- type (field0dReal) :: real0d
- type (field1dReal) :: real1d
- type (field2dReal) :: real2d
- type (field3dReal) :: real3d
- type (field0dChar) :: char0d
- type (field1dChar) :: char1d
+ type (field2dInteger), pointer :: cellsOnCell_save, edgesOnCell_save, verticesOnCell_save, &
+ cellsOnEdge_save, verticesOnEdge_save, edgesOnEdge_save, &
+ cellsOnVertex_save, edgesOnVertex_save
+ type (field2dInteger), pointer :: cellsOnCell_ptr, edgesOnCell_ptr, verticesOnCell_ptr, &
+ cellsOnEdge_ptr, verticesOnEdge_ptr, edgesOnEdge_ptr, &
+ cellsOnVertex_ptr, edgesOnVertex_ptr
+
output_obj % time = itime
- allocate(cellsOnCell(domain % blocklist % mesh % maxEdges, domain % blocklist % mesh % nCellsSolve))
- allocate(edgesOnCell(domain % blocklist % mesh % maxEdges, domain % blocklist % mesh % nCellsSolve))
- allocate(verticesOnCell(domain % blocklist % mesh % maxEdges, domain % blocklist % mesh % nCellsSolve))
- allocate(cellsOnEdge(2, domain % blocklist % mesh % nEdgesSolve))
- allocate(verticesOnEdge(2, domain % blocklist % mesh % nEdgesSolve))
- allocate(edgesOnEdge(2 * domain % blocklist % mesh % maxEdges, domain % blocklist % mesh % nEdgesSolve))
- allocate(cellsOnVertex(domain % blocklist % mesh % vertexDegree, domain % blocklist % mesh % nVerticesSolve))
- allocate(edgesOnVertex(domain % blocklist % mesh % vertexDegree, domain % blocklist % mesh % nVerticesSolve))
-
-
!
! Convert connectivity information from local to global indices
+ ! Needs to be done block by block
!
- do i=1,domain % blocklist % mesh % nCellsSolve
- do j=1,domain % blocklist % mesh % nEdgesOnCell % array(i)
- cellsOnCell(j,i) = domain % blocklist % mesh % indexToCellID % array( &
- domain % blocklist % mesh % cellsOnCell % array(j,i))
- edgesOnCell(j,i) = domain % blocklist % mesh % indexToEdgeID % array( &
- domain % blocklist % mesh % edgesOnCell % array(j,i))
- verticesOnCell(j,i) = domain % blocklist % mesh % indexToVertexID % array( &
- domain % blocklist % mesh % verticesOnCell % array(j,i))
- end do
- do j=domain % blocklist % mesh % nEdgesOnCell % array(i)+1,domain % blocklist % mesh % maxEdges
- cellsOnCell(j,i) = domain % blocklist % mesh % indexToCellID % array( &
- domain % blocklist % mesh % nEdgesOnCell % array(i))
- edgesOnCell(j,i) = domain % blocklist % mesh % indexToEdgeID % array( &
- domain % blocklist % mesh % nEdgesOnCell % array(i))
- verticesOnCell(j,i) = domain % blocklist % mesh % indexToVertexID % array( &
- domain % blocklist % mesh % nEdgesOnCell % array(i))
- end do
- end do
- do i=1,domain % blocklist % mesh % nEdgesSolve
- cellsOnEdge(1,i) = domain % blocklist % mesh % indexToCellID % array(domain % blocklist % mesh % cellsOnEdge % array(1,i))
- cellsOnEdge(2,i) = domain % blocklist % mesh % indexToCellID % array(domain % blocklist % mesh % cellsOnEdge % array(2,i))
- verticesOnEdge(1,i) = domain % blocklist % mesh % indexToVertexID % array( &
- domain % blocklist % mesh % verticesOnEdge % array(1,i))
- verticesOnEdge(2,i) = domain % blocklist % mesh % indexToVertexID % array( &
- domain % blocklist % mesh % verticesOnEdge % array(2,i))
- do j=1,domain % blocklist % mesh % nEdgesOnEdge % array(i)
- edgesOnEdge(j,i) = domain % blocklist % mesh % indexToEdgeID % array( &
- domain % blocklist % mesh % edgesOnEdge % array(j,i))
- end do
- do j=domain % blocklist % mesh % nEdgesOnEdge % array(i)+1,2*domain % blocklist % mesh % maxEdges
- if(domain % blocklist % mesh % nEdgesOnEdge % array(i) .eq. 0) then
- edgesOnEdge(j,i) = domain % blocklist % mesh % nEdgesSolve + 1
- else
- edgesOnEdge(j,i) = domain % blocklist % mesh % indexToEdgeID % array( &
- domain % blocklist % mesh % nEdgesOnEdge % array(i))
- endif
- end do
- end do
- do i=1,domain % blocklist % mesh % nVerticesSolve
- do j=1,domain % blocklist % mesh % vertexDegree
- cellsOnVertex(j,i) = domain % blocklist % mesh % indexToCellID % array( &
- domain % blocklist % mesh % cellsOnVertex % array(j,i))
- edgesOnVertex(j,i) = domain % blocklist % mesh % indexToEdgeID % array( &
- domain % blocklist % mesh % edgesOnVertex % array(j,i))
- end do
- end do
+ ! Also, back up the local indices so they can be copied back into the blocks after output is complete.
+ !
+ allocate(cellsOnCell_save)
+ allocate(edgesOnCell_save)
+ allocate(verticesOnCell_save)
+ allocate(cellsOnEdge_save)
+ allocate(verticesOnEdge_save)
+ allocate(edgesOnEdge_save)
+ allocate(cellsOnVertex_save)
+ allocate(edgesOnVertex_save)
- cellsOnCell_save => domain % blocklist % mesh % cellsOnCell % array
- edgesOnCell_save => domain % blocklist % mesh % edgesOnCell % array
- verticesOnCell_save => domain % blocklist % mesh % verticesOnCell % array
- cellsOnEdge_save => domain % blocklist % mesh % cellsOnEdge % array
- verticesOnEdge_save => domain % blocklist % mesh % verticesOnEdge % array
- edgesOnEdge_save => domain % blocklist % mesh % edgesOnEdge % array
- cellsOnVertex_save => domain % blocklist % mesh % cellsOnVertex % array
- edgesOnVertex_save => domain % blocklist % mesh % edgesOnVertex % array
+ cellsOnCell_ptr => cellsOnCell_save
+ edgesOnCell_ptr => edgesOnCell_save
+ verticesOnCell_ptr => verticesOnCell_save
+ cellsOnEdge_ptr => cellsOnEdge_save
+ verticesOnEdge_ptr => verticesOnEdge_save
+ edgesOnEdge_ptr => edgesOnEdge_save
+ cellsOnVertex_ptr => cellsOnVertex_save
+ edgesOnVertex_ptr => edgesOnVertex_save
- domain % blocklist % mesh % cellsOnCell % array => cellsOnCell
- domain % blocklist % mesh % edgesOnCell % array => edgesOnCell
- domain % blocklist % mesh % verticesOnCell % array => verticesOnCell
- domain % blocklist % mesh % cellsOnEdge % array => cellsOnEdge
- domain % blocklist % mesh % verticesOnEdge % array => verticesOnEdge
- domain % blocklist % mesh % edgesOnEdge % array => edgesOnEdge
- domain % blocklist % mesh % cellsOnVertex % array => cellsOnVertex
- domain % blocklist % mesh % edgesOnVertex % array => edgesOnVertex
+ block_ptr => domain % blocklist
+ do while(associated(block_ptr))
+ maxEdges = block_ptr % mesh % maxEdges
+ maxEdges2 = block_ptr % mesh % maxEdges2
+ vertexDegree = block_ptr % mesh % vertexDegree
+ nCells = block_ptr % mesh % nCells
+ nEdges = block_ptr % mesh % nEdges
+ nVertices = block_ptr % mesh % nVertices
+ nCellsSolve = block_ptr % mesh % nCellsSolve
+ nEdgesSolve = block_ptr % mesh % nEdgesSolve
+ nVerticesSolve = block_ptr % mesh % nVerticesSolve
+ nullify(cellsOnCell_ptr % ioinfo)
+ cellsOnCell_ptr % array => block_ptr % mesh % cellsOnCell % array
+ allocate(block_ptr % mesh % cellsOnCell % array(maxEdges, nCells+1))
+
+ nullify(edgesOnCell_ptr % ioinfo)
+ edgesOnCell_ptr % array => block_ptr % mesh % edgesOnCell % array
+ allocate(block_ptr % mesh % edgesOnCell % array(maxEdges, nCells+1))
+
+ nullify(verticesOnCell_ptr % ioinfo)
+ verticesOnCell_ptr % array => block_ptr % mesh % verticesOnCell % array
+ allocate(block_ptr % mesh % verticesOnCell % array(maxEdges, nCells+1))
+
+ nullify(cellsOnEdge_ptr % ioinfo)
+ cellsOnEdge_ptr % array => block_ptr % mesh % cellsOnEdge % array
+ allocate(block_ptr % mesh % cellsOnEdge % array(2, nEdges+1))
+
+ nullify(verticesOnEdge_ptr % ioinfo)
+ verticesOnEdge_ptr % array => block_ptr % mesh % verticesOnEdge % array
+ allocate(block_ptr % mesh % verticesOnEdge % array(2, nEdges+1))
+
+ nullify(edgesOnEdge_ptr % ioinfo)
+ edgesOnEdge_ptr % array => block_ptr % mesh % edgesOnEdge % array
+ allocate(block_ptr % mesh % edgesOnEdge % array(maxEdges2, nEdges+1))
+
+ nullify(cellsOnVertex_ptr % ioinfo)
+ cellsOnVertex_ptr % array => block_ptr % mesh % cellsOnVertex % array
+ allocate(block_ptr % mesh % cellsOnVertex % array(vertexDegree, nVertices+1))
+
+ nullify(edgesOnVertex_ptr % ioinfo)
+ edgesOnVertex_ptr % array => block_ptr % mesh % edgesOnVertex % array
+ allocate(block_ptr % mesh % edgesOnVertex % array(vertexDegree, nVertices+1))
+
+ do i = 1, nCellsSolve
+ do j = 1, block_ptr % mesh % nEdgesOnCell % array(i)
+ block_ptr % mesh % cellsOnCell % array(j, i) = block_ptr % mesh % indexToCellID % array(cellsOnCell_ptr % array(j, i))
+ block_ptr % mesh % edgesOnCell % array(j, i) = block_ptr % mesh % indexToEdgeID % array(edgesOnCell_ptr % array(j, i))
+ block_ptr % mesh % verticesOnCell % array(j, i) = block_ptr % mesh % indexToVertexID % array(verticesOnCell_ptr % array(j, i))
+ end do
+
+ block_ptr % mesh % cellsOnCell % array(block_ptr % mesh % nEdgesOnCell % array(i) + 1:maxEdges, i) = nCells+1
+ block_ptr % mesh % edgesOnCell % array(block_ptr % mesh % nEdgesOnCell % array(i) + 1:maxEdges, i) = nEdges+1
+ block_ptr % mesh % verticesOnCell % array(block_ptr % mesh % nEdgesOnCell % array(i) + 1:maxEdges, i) = nVertices+1
+ end do
+
+ do i = 1, nEdgesSolve
+ block_ptr % mesh % cellsOnEdge % array(1, i) = block_ptr % mesh % indexToCellID % array(cellsOnEdge_ptr % array(1, i))
+ block_ptr % mesh % cellsOnEdge % array(2, i) = block_ptr % mesh % indexToCellID % array(cellsOnEdge_ptr % array(2, i))
+
+ block_ptr % mesh % verticesOnEdge % array(1, i) = block_ptr % mesh % indexToVertexID % array(verticesOnEdge_ptr % array(1,i))
+ block_ptr % mesh % verticesOnEdge % array(2, i) = block_ptr % mesh % indexToVertexID % array(verticesOnEdge_ptr % array(2,i))
+
+ do j = 1, block_ptr % mesh % nEdgesOnEdge % array(i)
+ block_ptr % mesh % edgesOnEdge % array(j, i) = block_ptr % mesh % indexToEdgeID % array(edgesOnEdge_ptr % array(j, i))
+ end do
+
+ block_ptr % mesh % edgesOnEdge % array(block_ptr % mesh % nEdgesOnEdge % array(i)+1:maxEdges2, i) = nEdges+1
+ end do
+
+ do i = 1, nVerticesSolve
+ do j = 1, vertexDegree
+ block_ptr % mesh % cellsOnVertex % array(j, i) = block_ptr % mesh % indexToCellID % array(cellsOnVertex_ptr % array(j, i))
+ block_ptr % mesh % edgesOnVertex % array(j, i) = block_ptr % mesh % indexToEdgeID % array(edgesOnVertex_ptr % array(j, i))
+ end do
+ end do
+
+ block_ptr => block_ptr % next
+ if(associated(block_ptr)) then
+ allocate(cellsOnCell_ptr % next)
+ allocate(edgesOnCell_ptr % next)
+ allocate(verticesOnCell_ptr % next)
+ allocate(cellsOnEdge_ptr % next)
+ allocate(verticesOnEdge_ptr % next)
+ allocate(edgesOnEdge_ptr % next)
+ allocate(cellsOnVertex_ptr % next)
+ allocate(edgesOnVertex_ptr % next)
+
+ cellsOnCell_ptr => cellsOnCell_ptr % next
+ edgesOnCell_ptr => edgesOnCell_ptr % next
+ verticesOnCell_ptr => verticesOnCell_ptr % next
+ cellsOnEdge_ptr => cellsOnEdge_ptr % next
+ verticesOnEdge_ptr => verticesOnEdge_ptr % next
+ edgesOnEdge_ptr => edgesOnEdge_ptr % next
+ cellsOnVertex_ptr => cellsOnVertex_ptr % next
+ edgesOnVertex_ptr => edgesOnVertex_ptr % next
+ end if
+
+ nullify(cellsOnCell_ptr % next)
+ nullify(edgesOnCell_ptr % next)
+ nullify(verticesOnCell_ptr % next)
+ nullify(cellsOnEdge_ptr % next)
+ nullify(verticesOnEdge_ptr % next)
+ nullify(edgesOnEdge_ptr % next)
+ nullify(cellsOnVertex_ptr % next)
+ nullify(edgesOnVertex_ptr % next)
+ end do
+
+ ! Write output file
call MPAS_writeStream(output_obj % io_stream, output_obj % time, ierr)
- domain % blocklist % mesh % cellsOnCell % array => cellsOnCell_save
- domain % blocklist % mesh % edgesOnCell % array => edgesOnCell_save
- domain % blocklist % mesh % verticesOnCell % array => verticesOnCell_save
- domain % blocklist % mesh % cellsOnEdge % array => cellsOnEdge_save
- domain % blocklist % mesh % verticesOnEdge % array => verticesOnEdge_save
- domain % blocklist % mesh % edgesOnEdge % array => edgesOnEdge_save
- domain % blocklist % mesh % cellsOnVertex % array => cellsOnVertex_save
- domain % blocklist % mesh % edgesOnVertex % array => edgesOnVertex_save
+ ! Convert indices back to local indices, and deallocate all temporary arrays.
+ cellsOnCell_ptr => cellsOnCell_save
+ edgesOnCell_ptr => edgesOnCell_save
+ verticesOnCell_ptr => verticesOnCell_save
+ cellsOnEdge_ptr => cellsOnEdge_save
+ verticesOnEdge_ptr => verticesOnEdge_save
+ edgesOnEdge_ptr => edgesOnEdge_save
+ cellsOnVertex_ptr => cellsOnVertex_save
+ edgesOnVertex_ptr => edgesOnVertex_save
- deallocate(cellsOnCell)
- deallocate(edgesOnCell)
- deallocate(verticesOnCell)
- deallocate(cellsOnEdge)
- deallocate(verticesOnEdge)
- deallocate(edgesOnEdge)
- deallocate(cellsOnVertex)
- deallocate(edgesOnVertex)
+ block_ptr => domain % blocklist
+ do while(associated(block_ptr))
- end subroutine mpas_output_state_for_domain
+ deallocate(block_ptr % mesh % cellsOnCell % array)
+ deallocate(block_ptr % mesh % edgesOnCell % array)
+ deallocate(block_ptr % mesh % verticesOnCell % array)
+ deallocate(block_ptr % mesh % cellsOnEdge % array)
+ deallocate(block_ptr % mesh % verticesOnEdge % array)
+ deallocate(block_ptr % mesh % edgesOnEdge % array)
+ deallocate(block_ptr % mesh % cellsOnVertex % array)
+ deallocate(block_ptr % mesh % edgesOnVertex % array)
+ block_ptr % mesh % cellsOnCell % array => cellsOnCell_ptr % array
+ block_ptr % mesh % edgesOnCell % array => edgesOnCell_ptr % array
+ block_ptr % mesh % verticesOnCell % array => verticesOnCell_ptr % array
+ block_ptr % mesh % cellsOnEdge % array => cellsOnEdge_ptr % array
+ block_ptr % mesh % verticesOnEdge % array => verticesOnEdge_ptr % array
+ block_ptr % mesh % edgesOnEdge % array => edgesOnEdge_ptr % array
+ block_ptr % mesh % cellsOnVertex % array => cellsOnVertex_ptr % array
+ block_ptr % mesh % edgesOnVertex % array => edgesOnVertex_ptr % array
- subroutine mpas_output_state_finalize(output_obj, dminfo)
+ nullify(cellsOnCell_ptr % array)
+ nullify(edgesOnCell_ptr % array)
+ nullify(verticesOnCell_ptr % array)
+ nullify(cellsOnEdge_ptr % array)
+ nullify(verticesOnEdge_ptr % array)
+ nullify(edgesOnEdge_ptr % array)
+ nullify(cellsOnVertex_ptr % array)
+ nullify(edgesOnVertex_ptr % array)
+ block_ptr => block_ptr % next
+ cellsOnCell_ptr => cellsOnCell_ptr % next
+ edgesOnCell_ptr => edgesOnCell_ptr % next
+ verticesOnCell_ptr => verticesOnCell_ptr % next
+ cellsOnEdge_ptr => cellsOnEdge_ptr % next
+ verticesOnEdge_ptr => verticesOnEdge_ptr % next
+ edgesOnEdge_ptr => edgesOnEdge_ptr % next
+ cellsOnVertex_ptr => cellsOnVertex_ptr % next
+ edgesOnVertex_ptr => edgesOnVertex_ptr % next
+ end do
+
+ call mpas_deallocate_field(cellsOnCell_save)
+ call mpas_deallocate_field(edgesOnCell_save)
+ call mpas_deallocate_field(verticesOnCell_save)
+ call mpas_deallocate_field(cellsOnEdge_save)
+ call mpas_deallocate_field(verticesOnEdge_save)
+ call mpas_deallocate_field(edgesOnEdge_save)
+ call mpas_deallocate_field(cellsOnVertex_save)
+ call mpas_deallocate_field(edgesOnVertex_save)
+
+
+
+ end subroutine mpas_output_state_for_domain!}}}
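For reference, the save/convert/restore dance applied above to every connectivity field reduces, for a single field on one block, to a sketch like this (the pointer name localConn is illustrative only):

   integer, dimension(:,:), pointer :: localConn

   localConn => block_ptr % mesh % cellsOnCell % array                ! keep local indices
   allocate(block_ptr % mesh % cellsOnCell % array(maxEdges, nCells+1))
   do i = 1, nCellsSolve
      do j = 1, block_ptr % mesh % nEdgesOnCell % array(i)
         block_ptr % mesh % cellsOnCell % array(j,i) = &
            block_ptr % mesh % indexToCellID % array(localConn(j,i))  ! local -> global
      end do
   end do
   call MPAS_writeStream(output_obj % io_stream, output_obj % time, ierr)
   deallocate(block_ptr % mesh % cellsOnCell % array)
   block_ptr % mesh % cellsOnCell % array => localConn                ! restore local indices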
+
+ subroutine mpas_output_state_finalize(output_obj, dminfo)!{{{
+
implicit none
type (io_output_object), intent(inout) :: output_obj
@@ -231,10 +324,9 @@
call mpas_io_output_finalize(output_obj, dminfo)
- end subroutine mpas_output_state_finalize
+ end subroutine mpas_output_state_finalize!}}}
-
- subroutine mpas_io_output_init( domain, output_obj, &
+ subroutine mpas_io_output_init( domain, output_obj, &!{{{
dminfo, &
mesh &
)
@@ -262,10 +354,9 @@
#include "add_output_atts.inc"
- end subroutine mpas_io_output_init
+ end subroutine mpas_io_output_init!}}}
-
- subroutine mpas_io_output_finalize(output_obj, dminfo)
+ subroutine mpas_io_output_finalize(output_obj, dminfo)!{{{
implicit none
@@ -276,6 +367,6 @@
call MPAS_closeStream(output_obj % io_stream, nferr)
- end subroutine mpas_io_output_finalize
+ end subroutine mpas_io_output_finalize!}}}
end module mpas_io_output
Modified: branches/dcmip/src/framework/mpas_io_streams.F
===================================================================
--- branches/dcmip/src/framework/mpas_io_streams.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/framework/mpas_io_streams.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -1406,16 +1406,17 @@
! Distribute field to multiple blocks
field_1dint_ptr => field_cursor % int1dField
i = 1
- if (trim(field_1dint_ptr % dimNames(1)) == 'nCells') then
- ownedSize = field_1dint_ptr % block % mesh % nCellsSolve
- else if (trim(field_1dint_ptr % dimNames(1)) == 'nEdges') then
- ownedSize = field_1dint_ptr % block % mesh % nEdgesSolve
- else if (trim(field_1dint_ptr % dimNames(1)) == 'nVertices') then
- ownedSize = field_1dint_ptr % block % mesh % nVerticesSolve
- else
- ownedSize = field_1dint_ptr % dimSizes(1)
- end if
do while (associated(field_1dint_ptr))
+ if (trim(field_1dint_ptr % dimNames(1)) == 'nCells') then
+ ownedSize = field_1dint_ptr % block % mesh % nCellsSolve
+ else if (trim(field_1dint_ptr % dimNames(1)) == 'nEdges') then
+ ownedSize = field_1dint_ptr % block % mesh % nEdgesSolve
+ else if (trim(field_1dint_ptr % dimNames(1)) == 'nVertices') then
+ ownedSize = field_1dint_ptr % block % mesh % nVerticesSolve
+ else
+ ownedSize = field_1dint_ptr % dimSizes(1)
+ end if
+
if (field_cursor % int1dField % isSuperArray) then
field_1dint_ptr % array(j) = int0d_temp
else
@@ -1483,16 +1484,17 @@
! Distribute field to multiple blocks
field_2dint_ptr => field_cursor % int2dField
i = 1
- if (trim(field_2dint_ptr % dimNames(2)) == 'nCells') then
- ownedSize = field_2dint_ptr % block % mesh % nCellsSolve
- else if (trim(field_2dint_ptr % dimNames(2)) == 'nEdges') then
- ownedSize = field_2dint_ptr % block % mesh % nEdgesSolve
- else if (trim(field_2dint_ptr % dimNames(2)) == 'nVertices') then
- ownedSize = field_2dint_ptr % block % mesh % nVerticesSolve
- else
- ownedSize = field_2dint_ptr % dimSizes(2)
- end if
do while (associated(field_2dint_ptr))
+ if (trim(field_2dint_ptr % dimNames(2)) == 'nCells') then
+ ownedSize = field_2dint_ptr % block % mesh % nCellsSolve
+ else if (trim(field_2dint_ptr % dimNames(2)) == 'nEdges') then
+ ownedSize = field_2dint_ptr % block % mesh % nEdgesSolve
+ else if (trim(field_2dint_ptr % dimNames(2)) == 'nVertices') then
+ ownedSize = field_2dint_ptr % block % mesh % nVerticesSolve
+ else
+ ownedSize = field_2dint_ptr % dimSizes(2)
+ end if
+
if (field_cursor % int2dField % isSuperArray) then
field_2dint_ptr % array(j,1:ownedSize) = int1d_temp(i:i+ownedSize-1)
else
@@ -1564,16 +1566,17 @@
! Distribute field to multiple blocks
field_3dint_ptr => field_cursor % int3dField
i = 1
- if (trim(field_3dint_ptr % dimNames(3)) == 'nCells') then
- ownedSize = field_3dint_ptr % block % mesh % nCellsSolve
- else if (trim(field_3dint_ptr % dimNames(3)) == 'nEdges') then
- ownedSize = field_3dint_ptr % block % mesh % nEdgesSolve
- else if (trim(field_3dint_ptr % dimNames(3)) == 'nVertices') then
- ownedSize = field_3dint_ptr % block % mesh % nVerticesSolve
- else
- ownedSize = field_3dint_ptr % dimSizes(3)
- end if
do while (associated(field_3dint_ptr))
+ if (trim(field_3dint_ptr % dimNames(3)) == 'nCells') then
+ ownedSize = field_3dint_ptr % block % mesh % nCellsSolve
+ else if (trim(field_3dint_ptr % dimNames(3)) == 'nEdges') then
+ ownedSize = field_3dint_ptr % block % mesh % nEdgesSolve
+ else if (trim(field_3dint_ptr % dimNames(3)) == 'nVertices') then
+ ownedSize = field_3dint_ptr % block % mesh % nVerticesSolve
+ else
+ ownedSize = field_3dint_ptr % dimSizes(3)
+ end if
+
if (field_cursor % int3dField % isSuperArray) then
field_3dint_ptr % array(j,:,1:ownedSize) = int2d_temp(:,i:i+ownedSize-1)
else
@@ -1663,16 +1666,18 @@
! Distribute field to multiple blocks
field_1dreal_ptr => field_cursor % real1dField
i = 1
- if (trim(field_1dreal_ptr % dimNames(1)) == 'nCells') then
- ownedSize = field_1dreal_ptr % block % mesh % nCellsSolve
- else if (trim(field_1dreal_ptr % dimNames(1)) == 'nEdges') then
- ownedSize = field_1dreal_ptr % block % mesh % nEdgesSolve
- else if (trim(field_1dreal_ptr % dimNames(1)) == 'nVertices') then
- ownedSize = field_1dreal_ptr % block % mesh % nVerticesSolve
- else
- ownedSize = field_1dreal_ptr % dimSizes(1)
- end if
+
do while (associated(field_1dreal_ptr))
+ if (trim(field_1dreal_ptr % dimNames(1)) == 'nCells') then
+ ownedSize = field_1dreal_ptr % block % mesh % nCellsSolve
+ else if (trim(field_1dreal_ptr % dimNames(1)) == 'nEdges') then
+ ownedSize = field_1dreal_ptr % block % mesh % nEdgesSolve
+ else if (trim(field_1dreal_ptr % dimNames(1)) == 'nVertices') then
+ ownedSize = field_1dreal_ptr % block % mesh % nVerticesSolve
+ else
+ ownedSize = field_1dreal_ptr % dimSizes(1)
+ end if
+
if (field_cursor % real1dField % isSuperArray) then
field_1dreal_ptr % array(j) = real0d_temp
else
@@ -1740,16 +1745,17 @@
! Distribute field to multiple blocks
field_2dreal_ptr => field_cursor % real2dField
i = 1
- if (trim(field_2dreal_ptr % dimNames(2)) == 'nCells') then
- ownedSize = field_2dreal_ptr % block % mesh % nCellsSolve
- else if (trim(field_2dreal_ptr % dimNames(2)) == 'nEdges') then
- ownedSize = field_2dreal_ptr % block % mesh % nEdgesSolve
- else if (trim(field_2dreal_ptr % dimNames(2)) == 'nVertices') then
- ownedSize = field_2dreal_ptr % block % mesh % nVerticesSolve
- else
- ownedSize = field_2dreal_ptr % dimSizes(2)
- end if
do while (associated(field_2dreal_ptr))
+ if (trim(field_2dreal_ptr % dimNames(2)) == 'nCells') then
+ ownedSize = field_2dreal_ptr % block % mesh % nCellsSolve
+ else if (trim(field_2dreal_ptr % dimNames(2)) == 'nEdges') then
+ ownedSize = field_2dreal_ptr % block % mesh % nEdgesSolve
+ else if (trim(field_2dreal_ptr % dimNames(2)) == 'nVertices') then
+ ownedSize = field_2dreal_ptr % block % mesh % nVerticesSolve
+ else
+ ownedSize = field_2dreal_ptr % dimSizes(2)
+ end if
+
if (field_cursor % real2dField % isSuperArray) then
field_2dreal_ptr % array(j,1:ownedSize) = real1d_temp(i:i+ownedSize-1)
else
@@ -1824,16 +1830,17 @@
! Distribute field to multiple blocks
field_3dreal_ptr => field_cursor % real3dField
i = 1
- if (trim(field_3dreal_ptr % dimNames(3)) == 'nCells') then
- ownedSize = field_3dreal_ptr % block % mesh % nCellsSolve
- else if (trim(field_3dreal_ptr % dimNames(3)) == 'nEdges') then
- ownedSize = field_3dreal_ptr % block % mesh % nEdgesSolve
- else if (trim(field_3dreal_ptr % dimNames(3)) == 'nVertices') then
- ownedSize = field_3dreal_ptr % block % mesh % nVerticesSolve
- else
- ownedSize = field_3dreal_ptr % dimSizes(3)
- end if
do while (associated(field_3dreal_ptr))
+ if (trim(field_3dreal_ptr % dimNames(3)) == 'nCells') then
+ ownedSize = field_3dreal_ptr % block % mesh % nCellsSolve
+ else if (trim(field_3dreal_ptr % dimNames(3)) == 'nEdges') then
+ ownedSize = field_3dreal_ptr % block % mesh % nEdgesSolve
+ else if (trim(field_3dreal_ptr % dimNames(3)) == 'nVertices') then
+ ownedSize = field_3dreal_ptr % block % mesh % nVerticesSolve
+ else
+ ownedSize = field_3dreal_ptr % dimSizes(3)
+ end if
+
if (field_cursor % real3dField % isSuperArray) then
!write(0,*) 'DEBUGGING : copying the temporary array'
field_3dreal_ptr % array(j,:,1:ownedSize) = real2d_temp(:,i:i+ownedSize-1)
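The rearrangement above matters once a task holds more than one block: ownedSize is a per-block quantity, so it has to be recomputed inside the block traversal, roughly as in this sketch (the copy statement is illustrative, not the exact code):

   field_2dreal_ptr => field_cursor % real2dField
   i = 1
   do while (associated(field_2dreal_ptr))
      ownedSize = field_2dreal_ptr % block % mesh % nCellsSolve       ! can differ per block
      field_2dreal_ptr % array(:, 1:ownedSize) = real2d_temp(:, i:i+ownedSize-1)
      i = i + ownedSize
      field_2dreal_ptr => field_2dreal_ptr % next
   end do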
Modified: branches/dcmip/src/framework/mpas_sort.F
===================================================================
--- branches/dcmip/src/framework/mpas_sort.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/framework/mpas_sort.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -2,16 +2,16 @@
use mpas_kind_types
- interface quicksort
- module procedure mpas_quicksort_int
- module procedure mpas_quicksort_real
+ interface mpas_quicksort
+ module procedure mpas_quicksort_1dint
+ module procedure mpas_quicksort_1dreal
+ module procedure mpas_quicksort_2dint
+ module procedure mpas_quicksort_2dreal
end interface
-
contains
-
- recursive subroutine mpas_mergesort(array, d1, n1, n2)
+ recursive subroutine mpas_mergesort(array, d1, n1, n2)!{{{
implicit none
@@ -71,14 +71,137 @@
array(1:d1,n1:n2) = temp(1:d1,1:k-1)
- end subroutine mpas_mergesort
+ end subroutine mpas_mergesort!}}}
+ subroutine mpas_quicksort_1dint(nArray, array)!{{{
- subroutine mpas_quicksort_int(nArray, array)
+ implicit none
+ integer, intent(in) :: nArray
+ integer, dimension(nArray), intent(inout) :: array
+
+ integer :: i, j, top, l, r, pivot, s
+ integer :: pivot_value
+ integer, dimension(1) :: temp
+ integer, dimension(1000) :: lstack, rstack
+
+ if (nArray < 1) return
+
+ top = 1
+ lstack(top) = 1
+ rstack(top) = nArray
+
+ do while (top > 0)
+
+ l = lstack(top)
+ r = rstack(top)
+ top = top - 1
+
+ pivot = (l+r)/2
+
+ pivot_value = array(pivot)
+ temp(1) = array(pivot)
+ array(pivot) = array(r)
+ array(r) = temp(1)
+
+ s = l
+ do i=l,r-1
+ if (array(i) <= pivot_value) then
+ temp(1) = array(s)
+ array(s) = array(i)
+ array(i) = temp(1)
+ s = s + 1
+ end if
+ end do
+
+ temp(1) = array(s)
+ array(s) = array(r)
+ array(r) = temp(1)
+
+ if (s-1 > l) then
+ top = top + 1
+if (top > 1000) write(0,*) 'Error: Quicksort exhausted its stack.'
+ lstack(top) = l
+ rstack(top) = s-1
+ end if
+
+ if (r > s+1) then
+ top = top + 1
+if (top > 1000) write(0,*) 'Error: Quicksort exhausted its stack.'
+ lstack(top) = s+1
+ rstack(top) = r
+ end if
+ end do
+
+ end subroutine mpas_quicksort_1dint!}}}
+
+ subroutine mpas_quicksort_1dreal(nArray, array)!{{{
+
implicit none
integer, intent(in) :: nArray
+ real (kind=RKIND), dimension(nArray), intent(inout) :: array
+
+ integer :: i, j, top, l, r, pivot, s
+ real (kind=RKIND) :: pivot_value
+ real (kind=RKIND), dimension(1) :: temp
+ integer, dimension(1000) :: lstack, rstack
+
+ if (nArray < 1) return
+
+ top = 1
+ lstack(top) = 1
+ rstack(top) = nArray
+
+ do while (top > 0)
+
+ l = lstack(top)
+ r = rstack(top)
+ top = top - 1
+
+ pivot = (l+r)/2
+
+ pivot_value = array(pivot)
+ temp(1) = array(pivot)
+ array(pivot) = array(r)
+ array(r) = temp(1)
+
+ s = l
+ do i=l,r-1
+ if (array(i) <= pivot_value) then
+ temp(1) = array(s)
+ array(s) = array(i)
+ array(i) = temp(1)
+ s = s + 1
+ end if
+ end do
+
+ temp(1) = array(s)
+ array(s) = array(r)
+ array(r) = temp(1)
+
+ if (s-1 > l) then
+ top = top + 1
+if (top > 1000) write(0,*) 'Error: Quicksort exhausted its stack.'
+ lstack(top) = l
+ rstack(top) = s-1
+ end if
+
+ if (r > s+1) then
+ top = top + 1
+if (top > 1000) write(0,*) 'Error: Quicksort exhausted its stack.'
+ lstack(top) = s+1
+ rstack(top) = r
+ end if
+ end do
+
+ end subroutine mpas_quicksort_1dreal!}}}
+
+ subroutine mpas_quicksort_2dint(nArray, array)!{{{
+
+ implicit none
+
+ integer, intent(in) :: nArray
integer, dimension(2,nArray), intent(inout) :: array
integer :: i, j, top, l, r, pivot, s
@@ -134,11 +257,10 @@
end if
end do
- end subroutine mpas_quicksort_int
+ end subroutine mpas_quicksort_2dint!}}}
+ subroutine mpas_quicksort_2dreal(nArray, array)!{{{
- subroutine mpas_quicksort_real(nArray, array)
-
implicit none
integer, intent(in) :: nArray
@@ -197,11 +319,10 @@
end if
end do
- end subroutine mpas_quicksort_real
+ end subroutine mpas_quicksort_2dreal!}}}
+ integer function mpas_binary_search(array, d1, n1, n2, key)!{{{
- integer function mpas_binary_search(array, d1, n1, n2, key)
-
implicit none
integer, intent(in) :: d1, n1, n2, key
@@ -227,6 +348,6 @@
end if
end do
- end function mpas_binary_search
+ end function mpas_binary_search!}}}
end module mpas_sort
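With the renamed generic interface, callers select the specific routine through argument rank and type; a standalone usage sketch (not part of this commit):

   program sort_demo
      use mpas_kind_types
      use mpas_sort
      implicit none
      integer, dimension(5) :: keys = (/ 4, 1, 5, 2, 3 /)
      real (kind=RKIND), dimension(2,3) :: pairs

      pairs = reshape((/ 3.0_RKIND, 30.0_RKIND, 1.0_RKIND, 10.0_RKIND, 2.0_RKIND, 20.0_RKIND /), (/ 2, 3 /))
      call mpas_quicksort(5, keys)     ! resolves to mpas_quicksort_1dint
      call mpas_quicksort(3, pairs)    ! resolves to mpas_quicksort_2dreal
   end program sort_demo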
Modified: branches/dcmip/src/framework/mpas_timer.F
===================================================================
--- branches/dcmip/src/framework/mpas_timer.F        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/framework/mpas_timer.F        2012-08-29 00:09:20 UTC (rev 2133)
@@ -1,20 +1,15 @@
module mpas_timer
+ use mpas_kind_types
use mpas_grid_types
- use mpas_dmpar
implicit none
save
-! private
#ifdef _PAPI
include 'f90papi.h'
#endif
-!#ifdef _MPI
-! include 'mpif.h'
-!#endif
-
type timer_node
character (len=StrKIND) :: timer_name
logical :: running, printable
@@ -38,6 +33,10 @@
contains
subroutine mpas_timer_start(timer_name, clear_timer, timer_ptr)!{{{
+# ifdef _MPI
+ use mpi
+# endif
+
character (len=*), intent (in) :: timer_name !< Input: name of timer, stored as name of timer
logical, optional, intent(in) :: clear_timer !< Input: flag to clear timer
type (timer_node), optional, pointer :: timer_ptr !< Output: pointer to store timer in module
@@ -159,6 +158,10 @@
end subroutine mpas_timer_start!}}}
subroutine mpas_timer_stop(timer_name, timer_ptr)!{{{
+# ifdef _MPI
+ use mpi
+# endif
+
character (len=*), intent(in) :: timer_name !< Input: name of timer to stop
type (timer_node), pointer, optional :: timer_ptr !< Input: pointer to timer, for stopping
@@ -250,9 +253,7 @@
tname = ''
do i=0,timer_ptr%levels+2
tname = tname//' '
-! write(*,'(a,$)') ' '
end do
-! tname = tname//timer_ptr%timer_name
if(timer_ptr%total_time == 0.0d0) then
timer_ptr%min_time = 0.0d0
@@ -315,6 +316,8 @@
end subroutine mpas_timer_init!}}}
subroutine mpas_timer_sync()!{{{
+ use mpas_dmpar
+
type (timer_node), pointer :: current
real (kind=RKIND) :: all_total_time, all_max_time, all_min_time, all_ave_time
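The timer changes follow one pattern: the MPI module is brought in only inside the routines that need it, guarded by _MPI, instead of at module scope. A reduced sketch under that assumption (the routine name and the use of MPI_Wtime here are illustrative, not the module's exact code):

   subroutine demo_elapsed(elapsed)
# ifdef _MPI
      use mpi
# endif
      implicit none
      double precision, intent(out) :: elapsed
      double precision :: t1, t2

# ifdef _MPI
      t1 = MPI_Wtime()
      t2 = MPI_Wtime()
# else
      call cpu_time(t1)
      call cpu_time(t2)
# endif
      elapsed = t2 - t1
   end subroutine demo_elapsed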
Modified: branches/dcmip/src/registry/gen_inc.c
===================================================================
--- branches/dcmip/src/registry/gen_inc.c        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/registry/gen_inc.c        2012-08-29 00:09:20 UTC (rev 2133)
@@ -180,7 +180,7 @@
struct dimension * dim_ptr;
struct dimension_list * dimlist_ptr;
struct group_list * group_ptr;
- FILE * fd;
+ FILE * fd, *fd2;
char super_array[1024];
char array_class[1024];
char outer_dim[1024];
@@ -202,14 +202,18 @@
}
dim_ptr = dims;
while (dim_ptr) {
- if (dim_ptr->constant_value < 0 && !dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) fortprintf(fd, " integer :: %sSolve\n", dim_ptr->name_in_code);
- if (dim_ptr->constant_value < 0 && dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) fortprintf(fd, " integer :: %sSolve\n", dim_ptr->name_in_file);
+ if (dim_ptr->constant_value < 0 && !dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) {
+         fortprintf(fd, " integer :: %sSolve\n", dim_ptr->name_in_code);
+         fortprintf(fd, " integer, dimension(:), pointer :: %sArray\n", dim_ptr->name_in_code);
+         }
+ if (dim_ptr->constant_value < 0 && dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) {
+         fortprintf(fd, " integer :: %sSolve\n", dim_ptr->name_in_file);
+         }
dim_ptr = dim_ptr->next;
}
fclose(fd);
-
/*
* Generate dummy dimension argument list
*/
@@ -232,7 +236,6 @@
fclose(fd);
-
/*
* Generate dummy dimension argument declaration list
*/
@@ -255,8 +258,76 @@
fclose(fd);
+ /*
+ * Generate dummy dimension argument declaration list
+ */
+ fd = fopen("dim_dummy_decls_inout.inc", "w");
+ dim_ptr = dims;
+ if (dim_ptr && dim_ptr->constant_value < 0 && !dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) {
+ fortprintf(fd, " integer, intent(inout) :: %s", dim_ptr->name_in_code);
+ dim_ptr = dim_ptr->next;
+ }
+ else if (dim_ptr && dim_ptr->constant_value < 0 && dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) {
+ fortprintf(fd, " integer, intent(inout) :: %s", dim_ptr->name_in_file);
+ dim_ptr = dim_ptr->next;
+ }
+ while (dim_ptr) {
+ if (dim_ptr->constant_value < 0 && !dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) fortprintf(fd, ", %s", dim_ptr->name_in_code);
+ if (dim_ptr->constant_value < 0 && dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) fortprintf(fd, ", %s", dim_ptr->name_in_file);
+ dim_ptr = dim_ptr->next;
+ }
+ fortprintf(fd, "</font>
<font color="blue">");
+ fclose(fd);
+
/*
+ * Generate non-input dummy dimension argument declaration list
+ */
+ fd = fopen("dim_dummy_decls_noinput.inc", "w");
+ dim_ptr = dims;
+ if (dim_ptr && dim_ptr->constant_value < 0 && !dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) {
+ fortprintf(fd, " integer :: %s", dim_ptr->name_in_code);
+ dim_ptr = dim_ptr->next;
+ }
+ else if (dim_ptr && dim_ptr->constant_value < 0 && dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) {
+ fortprintf(fd, " integer :: %s", dim_ptr->name_in_file);
+ dim_ptr = dim_ptr->next;
+ }
+ while (dim_ptr) {
+ if (dim_ptr->constant_value < 0 && !dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) fortprintf(fd, ", %s", dim_ptr->name_in_code);
+ if (dim_ptr->constant_value < 0 && dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) fortprintf(fd, ", %s", dim_ptr->name_in_file);
+ dim_ptr = dim_ptr->next;
+ }
+ fortprintf(fd, "</font>
<font color="blue">");
+
+ fclose(fd);
+
+
+
+ /*
+ * Generate dummy dimension assignment instructions
+ */
+ fd = fopen("dim_dummy_assigns.inc", "w");
+ dim_ptr = dims;
+ if (dim_ptr && dim_ptr->constant_value < 0 && !dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) {
+ fortprintf(fd, " %s = block %% mesh %% %s</font>
<font color="blue">", dim_ptr->name_in_code, dim_ptr->name_in_code);
+ dim_ptr = dim_ptr->next;
+ }
+ else if (dim_ptr && dim_ptr->constant_value < 0 && dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) {
+ fortprintf(fd, " %s = block %% mesh %% %s</font>
<font color="blue">", dim_ptr->name_in_file, dim_ptr->name_in_file);
+ dim_ptr = dim_ptr->next;
+ }
+ while (dim_ptr) {
+ if (dim_ptr->constant_value < 0 && !dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) fortprintf(fd, " %s = block %% mesh %% %s\n", dim_ptr->name_in_code, dim_ptr->name_in_code);
+ if (dim_ptr->constant_value < 0 && dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) fortprintf(fd, " %s = block %% mesh %% %s\n", dim_ptr->name_in_file, dim_ptr->name_in_file);
+ dim_ptr = dim_ptr->next;
+ }
+ fortprintf(fd, "</font>
<font color="gray">");
+
+ fclose(fd);
+
+
+ /*
* Generate declarations of dimensions
*/
fd = fopen("dim_decls.inc", "w");
@@ -479,16 +550,71 @@
group_ptr = groups;
while (group_ptr) {
- if (group_ptr->vlist->var->ntime_levs > 1)
+ if (group_ptr->vlist->var->ntime_levs > 1) {
fortprintf(fd, " type (%s_multilevel_type), pointer :: %s\n", group_ptr->name, group_ptr->name);
- else
+ fortprintf(fd, " type (%s_type), pointer :: provis\n", group_ptr->name, group_ptr->name);
+         } else {
fortprintf(fd, " type (%s_type), pointer :: %s\n", group_ptr->name, group_ptr->name);
+         }
group_ptr = group_ptr->next;
}
fclose(fd);
+ /*
+ * Generate routines for allocating provisional types
+ */
+ fd = fopen("provis_alloc_routines.inc", "w");
+
+ group_ptr = groups;
+ while (group_ptr) {
+ if (group_ptr->vlist->var->ntime_levs > 1) {
+         fortprintf(fd, " subroutine mpas_setup_provis_%ss(b)!{{{\n", group_ptr->name);
+         fortprintf(fd, " type (block_type), pointer :: b\n");
+         fortprintf(fd, " type (block_type), pointer :: block\n\n");
+         fortprintf(fd, "#include \"dim_dummy_decls_noinput.inc\"\n\n");
+         fortprintf(fd, " block => b\n");
+         fortprintf(fd, " do while(associated(block))\n");
+         fortprintf(fd, "#include \"dim_dummy_assigns.inc\"\n\n");
+         fortprintf(fd, " allocate(block %% provis)\n");
+         fortprintf(fd, " call mpas_allocate_%s(block, block %% provis, &\n", group_ptr->name);
+         fortprintf(fd, "#include \"dim_dummy_args.inc\"\n");
+         fortprintf(fd, " )\n\n");
+         fortprintf(fd, " block => block %% next\n");
+         fortprintf(fd, " end do\n\n");
+         fortprintf(fd, " block => b\n");
+         fortprintf(fd, " do while(associated(block))\n");
+         fortprintf(fd, " if(associated(block %% prev) .and. associated(block %% next)) then\n");
+         fortprintf(fd, " call mpas_create_%s_links(block %% provis, prev = block %% prev %% provis, next = block %% next %% provis)\n", group_ptr->name);
+         fortprintf(fd, " else if(associated(block %% prev)) then\n");
+         fortprintf(fd, " call mpas_create_%s_links(block %% provis, prev = block %% prev %% provis)\n", group_ptr->name);
+         fortprintf(fd, " else if(associated(block %% next)) then\n");
+         fortprintf(fd, " call mpas_create_%s_links(block %% provis, next = block %% next %% provis)\n", group_ptr->name);
+         fortprintf(fd, " else\n");
+         fortprintf(fd, " call mpas_create_%s_links(block %% provis)\n", group_ptr->name);
+         fortprintf(fd, " end if\n");
+         fortprintf(fd, " block => block %% next\n");
+         fortprintf(fd, " end do\n");
+         fortprintf(fd, " end subroutine mpas_setup_provis_%ss!}}}\n\n", group_ptr->name);
+
+         fortprintf(fd, " subroutine mpas_deallocate_provis_%ss(b)!{{{\n", group_ptr->name);
+         fortprintf(fd, " type (block_type), pointer :: b\n");
+         fortprintf(fd, " type (block_type), pointer :: block\n\n");
+         fortprintf(fd, " block => b\n");
+         fortprintf(fd, " do while(associated(block))\n");
+         fortprintf(fd, " call mpas_deallocate_%s(block %% provis)\n", group_ptr->name);
+         fortprintf(fd, " deallocate(block %% provis)\n");
+         fortprintf(fd, " block => block %% next\n");
+         fortprintf(fd, " end do\n");
+         fortprintf(fd, " end subroutine mpas_deallocate_provis_%ss!}}}\n", group_ptr->name);
+         }
+ group_ptr = group_ptr->next;
+ }
+ fclose(fd);
+
+
+
/* To be included in allocate_block */
fd = fopen("block_allocs.inc", "w");
group_ptr = groups;
@@ -967,7 +1093,18 @@
/* subroutine to call link subroutine for every field type */
fortprintf(fd, " subroutine mpas_create_field_links(b)</font>
<font color="black"></font>
<font color="black">");
fortprintf(fd, " implicit none</font>
<font color="red">");
- fortprintf(fd, " type (block_type), pointer :: b</font>
<font color="black"></font>
<font color="blue">");
+ fortprintf(fd, " type (block_type), pointer :: b</font>
<font color="blue">");
+ fortprintf(fd, " type (block_type), pointer :: prev, next</font>
<font color="black"></font>
<font color="blue">");
+ fortprintf(fd, " if(associated(b %% prev)) then</font>
<font color="blue">");
+ fortprintf(fd, " prev => b %% prev</font>
<font color="blue">");
+ fortprintf(fd, " else</font>
<font color="blue">");
+ fortprintf(fd, " nullify(prev)</font>
<font color="blue">");
+ fortprintf(fd, " end if</font>
<font color="blue">");
+ fortprintf(fd, " if(associated(b %% next)) then</font>
<font color="blue">");
+ fortprintf(fd, " next => b %% next</font>
<font color="blue">");
+ fortprintf(fd, " else</font>
<font color="blue">");
+ fortprintf(fd, " nullify(next)</font>
<font color="blue">");
+ fortprintf(fd, " end if</font>
<font color="black"></font>
<font color="gray">");
group_ptr = groups;
while (group_ptr)
{
@@ -995,12 +1132,28 @@
{
for(i=1; i<=ntime_levs; i++)
{
- fortprintf(fd, " call mpas_create_%s_links(b %% %s %% time_levs(%i) %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name, i, group_ptr->name);
+                                fortprintf(fd, " if(associated(next) .and. associated(prev)) then</font>
<font color="blue">");        
+                                fortprintf(fd, " call mpas_create_%s_links(b %% %s %% time_levs(%i) %% %s, prev = prev %% %s %% time_levs(%i) %% %s, next = next %% %s %% time_levs(%i) %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name, i, group_ptr->name, i, group_ptr->name, group_ptr->name, i, group_ptr->name);
+                                fortprintf(fd, " else if(associated(next)) then</font>
<font color="blue">");        
+                                fortprintf(fd, " call mpas_create_%s_links(b %% %s %% time_levs(%i) %% %s, next = next %% %s %% time_levs(%i) %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name, i, group_ptr->name, group_ptr->name, i, group_ptr->name);
+                                fortprintf(fd, " else if(associated(prev)) then</font>
<font color="blue">");        
+                                fortprintf(fd, " call mpas_create_%s_links(b %% %s %% time_levs(%i) %% %s, prev = prev %% %s %% time_levs(%i) %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name, i, group_ptr->name, group_ptr->name, i, group_ptr->name);
+                                fortprintf(fd, " else</font>
<font color="blue">");
+                                fortprintf(fd, " call mpas_create_%s_links(b %% %s %% time_levs(%i) %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name, i, group_ptr->name);
+                                fortprintf(fd, " end if</font>
<font color="black"></font>
<font color="red">");
}        
}
else
{
- fortprintf(fd, " call mpas_create_%s_links(b %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name);
+                        fortprintf(fd, " if(associated(next) .and. associated(prev)) then</font>
<font color="blue">");        
+ fortprintf(fd, " call mpas_create_%s_links(b %% %s, prev = prev %% %s, next = next %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name, group_ptr->name, group_ptr->name);
+                        fortprintf(fd, " else if(associated(next)) then</font>
<font color="blue">");        
+ fortprintf(fd, " call mpas_create_%s_links(b %% %s, next = next %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name, group_ptr->name);
+                        fortprintf(fd, " else if(associated(prev)) then</font>
<font color="blue">");        
+ fortprintf(fd, " call mpas_create_%s_links(b %% %s, prev = prev %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name, group_ptr->name);
+                        fortprintf(fd, " else</font>
<font color="blue">");
+ fortprintf(fd, " call mpas_create_%s_links(b %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name);
+                        fortprintf(fd, " end if</font>
<font color="black"></font>
<font color="gray">");
}
}
else if (var_ptr->ndims > 0)
@@ -1012,12 +1165,28 @@
{
for(i=1; i<=ntime_levs; i++)
{
- fortprintf(fd, " call mpas_create_%s_links(b %% %s %% time_levs(%i) %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name, i, group_ptr->name);
+                                fortprintf(fd, " if(associated(next) .and. associated(prev)) then</font>
<font color="blue">");        
+                                fortprintf(fd, " call mpas_create_%s_links(b %% %s %% time_levs(%i) %% %s, prev = prev %% %s %% time_levs(%i) %% %s, next = next %% %s %% time_levs(%i) %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name, i, group_ptr->name, group_ptr->name, i, group_ptr->name, group_ptr->name, i, group_ptr->name);
+                                fortprintf(fd, " else if(associated(next)) then</font>
<font color="blue">");        
+                                fortprintf(fd, " call mpas_create_%s_links(b %% %s %% time_levs(%i) %% %s, next = next %% %s %% time_levs(%i) %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name, i, group_ptr->name, group_ptr->name, i, group_ptr->name);
+                                fortprintf(fd, " else if(associated(prev)) then</font>
<font color="blue">");        
+                                fortprintf(fd, " call mpas_create_%s_links(b %% %s %% time_levs(%i) %% %s, prev = prev %% %s %% time_levs(%i) %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name, i, group_ptr->name, group_ptr->name, i, group_ptr->name);
+                                fortprintf(fd, " else</font>
<font color="blue">");
+                                fortprintf(fd, " call mpas_create_%s_links(b %% %s %% time_levs(%i) %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name, i, group_ptr->name);
+                                fortprintf(fd, " end if</font>
<font color="black"></font>
<font color="red">");
}        
}
else
{
- fortprintf(fd, " call mpas_create_%s_links(b %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name);
+                         fortprintf(fd, " if(associated(next) .and. associated(prev)) then</font>
<font color="blue">");        
+                         fortprintf(fd, " call mpas_create_%s_links(b %% %s, prev = prev %% %s, next = next %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name, group_ptr->name, group_ptr->name);
+                         fortprintf(fd, " else if(associated(next)) then</font>
<font color="blue">");        
+                         fortprintf(fd, " call mpas_create_%s_links(b %% %s, next = next %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name, group_ptr->name);
+                         fortprintf(fd, " else if(associated(prev)) then</font>
<font color="blue">");        
+                         fortprintf(fd, " call mpas_create_%s_links(b %% %s, prev = prev %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name, group_ptr->name);
+                         fortprintf(fd, " else</font>
<font color="blue">");
+                         fortprintf(fd, " call mpas_create_%s_links(b %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name);
+                         fortprintf(fd, " end if</font>
<font color="black"></font>
<font color="gray">");
}
}
@@ -1029,9 +1198,10 @@
group_ptr = groups;
while (group_ptr) {
- fortprintf(fd, " subroutine mpas_create_%s_links(%s)</font>
<font color="black"></font>
<font color="blue">", group_ptr->name, group_ptr->name);
+ fortprintf(fd, " subroutine mpas_create_%s_links(%s, prev, next)</font>
<font color="black"></font>
<font color="black">", group_ptr->name, group_ptr->name);
fortprintf(fd, " implicit none</font>
<font color="red">");
- fortprintf(fd, " type (%s_type), pointer :: %s</font>
<font color="black"></font>
<font color="blue">", group_ptr->name, group_ptr->name);
+ fortprintf(fd, " type (%s_type), pointer :: %s</font>
<font color="blue">", group_ptr->name, group_ptr->name);
+         fortprintf(fd, " type (%s_type), pointer, optional :: prev, next</font>
<font color="gray">", group_ptr->name);
var_list_ptr = group_ptr->vlist;
while (var_list_ptr) {
@@ -1050,17 +1220,62 @@
fortprintf(fd, " %s %% %s %% sendList => %s %% %s %% block %% parinfo %% cellsToSend</font>
<font color="black">", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
fortprintf(fd, " %s %% %s %% recvList => %s %% %s %% block %% parinfo %% cellsToRecv</font>
<font color="black">", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
fortprintf(fd, " %s %% %s %% copyList => %s %% %s %% block %% parinfo %% cellsToCopy</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
+                                 fortprintf(fd, " if(present(prev)) then</font>
<font color="blue">");
+                                 fortprintf(fd, " %s %% %s %% prev => prev %% %s</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, var_ptr2->super_array);
+                                 fortprintf(fd, " else</font>
<font color="blue">");
+                                 fortprintf(fd, " nullify(%s %% %s %% prev)</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array);
+                                 fortprintf(fd, " end if</font>
<font color="blue">");
+                                 fortprintf(fd, " if(present(next)) then</font>
<font color="blue">");
+                                 fortprintf(fd, " %s %% %s %% next => next %% %s</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, var_ptr2->super_array);
+                                 fortprintf(fd, " else</font>
<font color="blue">");
+                                 fortprintf(fd, " nullify(%s %% %s %% next)</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array);
+                                 fortprintf(fd, " end if</font>
<font color="black"></font>
<font color="black">");
}
else if (strncmp("nEdges",outer_dim,1024) == 0) {
fortprintf(fd, " %s %% %s %% sendList => %s %% %s %% block %% parinfo %% edgesToSend</font>
<font color="black">", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
fortprintf(fd, " %s %% %s %% recvList => %s %% %s %% block %% parinfo %% edgesToRecv</font>
<font color="black">", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
fortprintf(fd, " %s %% %s %% copyList => %s %% %s %% block %% parinfo %% edgesToCopy</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
+                                 fortprintf(fd, " if(present(prev)) then</font>
<font color="blue">");
+                                 fortprintf(fd, " %s %% %s %% prev => prev %% %s</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, var_ptr2->super_array);
+                                 fortprintf(fd, " else</font>
<font color="blue">");
+                                 fortprintf(fd, " nullify(%s %% %s %% prev)</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array);
+                                 fortprintf(fd, " end if</font>
<font color="blue">");
+                                 fortprintf(fd, " if(present(next)) then</font>
<font color="blue">");
+                                 fortprintf(fd, " %s %% %s %% next => next %% %s</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, var_ptr2->super_array);
+                                 fortprintf(fd, " else</font>
<font color="blue">");
+                                 fortprintf(fd, " nullify(%s %% %s %% next)</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array);
+                                 fortprintf(fd, " end if</font>
<font color="black"></font>
<font color="black">");
}
else if (strncmp("nVertices",outer_dim,1024) == 0) {
fortprintf(fd, " %s %% %s %% sendList => %s %% %s %% block %% parinfo %% verticesToSend</font>
<font color="black">", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
fortprintf(fd, " %s %% %s %% recvList => %s %% %s %% block %% parinfo %% verticesToRecv</font>
<font color="black">", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
fortprintf(fd, " %s %% %s %% copyList => %s %% %s %% block %% parinfo %% verticesToCopy</font>
<font color="red">", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
- }
+                                 fortprintf(fd, " if(present(prev)) then</font>
<font color="blue">");
+                                 fortprintf(fd, " %s %% %s %% prev => prev %% %s</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, var_ptr2->super_array);
+                                 fortprintf(fd, " else</font>
<font color="blue">");
+                                 fortprintf(fd, " nullify(%s %% %s %% prev)</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array);
+                                 fortprintf(fd, " end if</font>
<font color="blue">");
+                                 fortprintf(fd, " if(present(next)) then</font>
<font color="blue">");
+                                 fortprintf(fd, " %s %% %s %% next => next %% %s</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, var_ptr2->super_array);
+                                 fortprintf(fd, " else</font>
<font color="blue">");
+                                 fortprintf(fd, " nullify(%s %% %s %% next)</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array);
+                                 fortprintf(fd, " end if</font>
<font color="black"></font>
<font color="blue">");
+ } else {
+                                 fortprintf(fd, " nullify(%s %% %s %% sendList)</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array);
+                                 fortprintf(fd, " nullify(%s %% %s %% recvList)</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array);
+                                 fortprintf(fd, " nullify(%s %% %s %% copyList)</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array);
+                                 fortprintf(fd, " if(present(prev)) then</font>
<font color="blue">");
+                                 fortprintf(fd, " %s %% %s %% prev => prev %% %s</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, var_ptr2->super_array);
+                                 fortprintf(fd, " else</font>
<font color="blue">");
+                                 fortprintf(fd, " nullify(%s %% %s %% prev)</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array);
+                                 fortprintf(fd, " end if</font>
<font color="blue">");
+                                 fortprintf(fd, " if(present(next)) then</font>
<font color="blue">");
+                                 fortprintf(fd, " %s %% %s %% next => next %% %s</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, var_ptr2->super_array);
+                                 fortprintf(fd, " else</font>
<font color="blue">");
+                                 fortprintf(fd, " nullify(%s %% %s %% next)</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array);
+                                 fortprintf(fd, " end if</font>
<font color="black"></font>
<font color="blue">");
+
+                         }
fortprintf(fd, "</font>
<font color="gray">");
}
else
@@ -1073,17 +1288,61 @@
fortprintf(fd, " %s %% %s %% sendList => %s %% %s %% block %% parinfo %% cellsToSend</font>
<font color="black">", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
fortprintf(fd, " %s %% %s %% recvList => %s %% %s %% block %% parinfo %% cellsToRecv</font>
<font color="black">", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
fortprintf(fd, " %s %% %s %% copyList => %s %% %s %% block %% parinfo %% cellsToCopy</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
+                                 fortprintf(fd, " if(present(prev)) then</font>
<font color="blue">");
+                                 fortprintf(fd, " %s %% %s %% prev => prev %% %s</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, var_ptr->name_in_code);
+                                 fortprintf(fd, " else</font>
<font color="blue">");
+                                 fortprintf(fd, " nullify(%s %% %s %% prev)</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code);
+                                 fortprintf(fd, " end if</font>
<font color="blue">");
+                                 fortprintf(fd, " if(present(next)) then</font>
<font color="blue">");
+                                 fortprintf(fd, " %s %% %s %% next => next %% %s</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, var_ptr->name_in_code);
+                                 fortprintf(fd, " else</font>
<font color="blue">");
+                                 fortprintf(fd, " nullify(%s %% %s %% next)</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code);
+                                 fortprintf(fd, " end if</font>
<font color="black"></font>
<font color="black">");
}
else if (strncmp("nEdges",outer_dim,1024) == 0) {
fortprintf(fd, " %s %% %s %% sendList => %s %% %s %% block %% parinfo %% edgesToSend</font>
<font color="black">", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
fortprintf(fd, " %s %% %s %% recvList => %s %% %s %% block %% parinfo %% edgesToRecv</font>
<font color="black">", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
fortprintf(fd, " %s %% %s %% copyList => %s %% %s %% block %% parinfo %% edgesToCopy</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
+                                 fortprintf(fd, " if(present(prev)) then</font>
<font color="blue">");
+                                 fortprintf(fd, " %s %% %s %% prev => prev %% %s</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, var_ptr->name_in_code);
+                                 fortprintf(fd, " else</font>
<font color="blue">");
+                                 fortprintf(fd, " nullify(%s %% %s %% prev)</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code);
+                                 fortprintf(fd, " end if</font>
<font color="blue">");
+                                 fortprintf(fd, " if(present(next)) then</font>
<font color="blue">");
+                                 fortprintf(fd, " %s %% %s %% next => next %% %s</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, var_ptr->name_in_code);
+                                 fortprintf(fd, " else</font>
<font color="blue">");
+                                 fortprintf(fd, " nullify(%s %% %s %% next)</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code);
+                                 fortprintf(fd, " end if</font>
<font color="black"></font>
<font color="black">");
}
else if (strncmp("nVertices",outer_dim,1024) == 0) {
fortprintf(fd, " %s %% %s %% sendList => %s %% %s %% block %% parinfo %% verticesToSend</font>
<font color="black">", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
fortprintf(fd, " %s %% %s %% recvList => %s %% %s %% block %% parinfo %% verticesToRecv</font>
<font color="black">", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
fortprintf(fd, " %s %% %s %% copyList => %s %% %s %% block %% parinfo %% verticesToCopy</font>
<font color="red">", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
- }
+                                 fortprintf(fd, " if(present(prev)) then</font>
<font color="blue">");
+                                 fortprintf(fd, " %s %% %s %% prev => prev %% %s</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, var_ptr->name_in_code);
+                                 fortprintf(fd, " else</font>
<font color="blue">");
+                                 fortprintf(fd, " nullify(%s %% %s %% prev)</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code);
+                                 fortprintf(fd, " end if</font>
<font color="blue">");
+                                 fortprintf(fd, " if(present(next)) then</font>
<font color="blue">");
+                                 fortprintf(fd, " %s %% %s %% next => next %% %s</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, var_ptr->name_in_code);
+                                 fortprintf(fd, " else</font>
<font color="blue">");
+                                 fortprintf(fd, " nullify(%s %% %s %% next)</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code);
+                                 fortprintf(fd, " end if</font>
<font color="black"></font>
<font color="blue">");
+ } else {
+ fortprintf(fd, " nullify(%s %% %s %% sendList)</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code);
+ fortprintf(fd, " nullify(%s %% %s %% recvList)</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code);
+ fortprintf(fd, " nullify(%s %% %s %% copyList)</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code);
+                                 fortprintf(fd, " if(present(prev)) then</font>
<font color="blue">");
+                                 fortprintf(fd, " %s %% %s %% prev => prev %% %s</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, var_ptr->name_in_code);
+                                 fortprintf(fd, " else</font>
<font color="blue">");
+                                 fortprintf(fd, " nullify(%s %% %s %% prev)</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code);
+                                 fortprintf(fd, " end if</font>
<font color="blue">");
+                                 fortprintf(fd, " if(present(next)) then</font>
<font color="blue">");
+                                 fortprintf(fd, " %s %% %s %% next => next %% %s</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, var_ptr->name_in_code);
+                                 fortprintf(fd, " else</font>
<font color="blue">");
+                                 fortprintf(fd, " nullify(%s %% %s %% next)</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code);
+                                 fortprintf(fd, " end if</font>
<font color="black"></font>
<font color="blue">");
+                         }
fortprintf(fd, "</font>
<font color="gray">");
         }
var_list_ptr = var_list_ptr->next;
@@ -1107,7 +1366,7 @@
struct dimension_list * dimlist_ptr, * lastdim;
struct group_list * group_ptr;
struct dtable * dictionary;
- FILE * fd;
+ FILE * fd, *fd2;
char vtype[5];
char fname[32];
char super_array[1024];
@@ -1857,6 +2116,7 @@
* MGD NEW CODE
*/
fd = fopen("exchange_input_field_halos.inc", "w");
+ fd2 = fopen("non_decomp_copy_input_fields.inc", "w");
group_ptr = groups;
while (group_ptr) {
@@ -1868,14 +2128,16 @@
i = 1;
while (dimlist_ptr) {
if (i == var_ptr->ndims) {
+
+ if (var_ptr->ntime_levs > 1) {
+ snprintf(struct_deref, 1024, "domain %% blocklist %% %s %% time_levs(1) %% %s", group_ptr->name, group_ptr->name);
+                                 } else {
+ snprintf(struct_deref, 1024, "domain %% blocklist %% %s", group_ptr->name);
+                                 }
+
if (!strncmp(dimlist_ptr->dim->name_in_file, "nCells", 1024) ||
!strncmp(dimlist_ptr->dim->name_in_file, "nEdges", 1024) ||
!strncmp(dimlist_ptr->dim->name_in_file, "nVertices", 1024)) {
-
- if (var_ptr->ntime_levs > 1)
- snprintf(struct_deref, 1024, "domain %% blocklist %% %s %% time_levs(1) %% %s", group_ptr->name, group_ptr->name);
- else
- snprintf(struct_deref, 1024, "domain %% blocklist %% %s", group_ptr->name);
if (strncmp(var_ptr->super_array, "-", 1024) != 0) {
fortprintf(fd, " if ((%s %% %s %% ioinfo %% input .and. input_obj %% stream == STREAM_INPUT) .or. &</font>
<font color="gray">", struct_deref, var_ptr->super_array);
@@ -1898,7 +2160,13 @@
fortprintf(fd, " end if</font>
<font color="black"></font>
<font color="red">");
- }
+ } else {
+ fortprintf(fd2, " if ((%s %% %s %% ioinfo %% input .and. input_obj %% stream == STREAM_INPUT) .or. &</font>
<font color="blue">", struct_deref, var_ptr->name_in_code);
+ fortprintf(fd2, " (%s %% %s %% ioinfo %% restart .and. input_obj %% stream == STREAM_RESTART) .or. &</font>
<font color="blue">", struct_deref, var_ptr->name_in_code);
+ fortprintf(fd2, " (%s %% %s %% ioinfo %% sfc .and. input_obj %% stream == STREAM_SFC)) then</font>
<font color="blue">", struct_deref, var_ptr->name_in_code);
+                                 fortprintf(fd2, " call mpas_dmpar_copy_field(%s %% %s)</font>
<font color="blue">", struct_deref, var_ptr->name_in_code);
+ fortprintf(fd2, " end if</font>
<font color="black"></font>
<font color="gray">");
+                         }
}
i++;
@@ -1911,6 +2179,7 @@
}
fclose(fd);
+ fclose(fd2);
#ifdef LEGACY_CODE
Modified: branches/dcmip/src/registry/registry_types.h
===================================================================
--- branches/dcmip/src/registry/registry_types.h        2012-08-28 21:17:36 UTC (rev 2132)
+++ branches/dcmip/src/registry/registry_types.h        2012-08-29 00:09:20 UTC (rev 2133)
@@ -71,6 +71,7 @@
int timedim;
int ntime_levs;
int iostreams;
+ int decomposed;
struct dimension_list * dimlist;
struct variable * next;
};
</font>
</pre>
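<p>For readers following the registry changes above, the sketch below is an illustrative, hand-written approximation of the Fortran that the updated generator would emit. The group name state and the field names theta and xtime are stand-ins for illustration only; the actual names, dimensions, and time levels come from each core's Registry file, and only the control-flow pattern is taken from the fortprintf calls in the diff.</p>
<pre>
! Illustrative only: approximate shape of the generated include code for a
! hypothetical variable group "state" with a cell-based field "theta" and a
! non-decomposed field "xtime".

! Inside the generated mpas_create_state_links subroutine, each field's
! inter-block links are set from the optional prev/next arguments:
   if(present(prev)) then
      state % theta % prev => prev % theta
   else
      nullify(state % theta % prev)
   end if
   if(present(next)) then
      state % theta % next => next % theta
   else
      nullify(state % theta % next)
   end if

! At the call site, the prev/next block pointers are forwarded only when the
! neighboring blocks exist:
   if(associated(next) .and. associated(prev)) then
      call mpas_create_state_links(b % state % time_levs(1) % state, &
                                   prev = prev % state % time_levs(1) % state, &
                                   next = next % state % time_levs(1) % state)
   else if(associated(next)) then
      call mpas_create_state_links(b % state % time_levs(1) % state, &
                                   next = next % state % time_levs(1) % state)
   else if(associated(prev)) then
      call mpas_create_state_links(b % state % time_levs(1) % state, &
                                   prev = prev % state % time_levs(1) % state)
   else
      call mpas_create_state_links(b % state % time_levs(1) % state)
   end if

! non_decomp_copy_input_fields.inc: fields not dimensioned by nCells, nEdges,
! or nVertices are copied to the other blocks rather than halo-exchanged:
   if ((domain % blocklist % state % time_levs(1) % state % xtime % ioinfo % input .and. input_obj % stream == STREAM_INPUT) .or. &
       (domain % blocklist % state % time_levs(1) % state % xtime % ioinfo % restart .and. input_obj % stream == STREAM_RESTART) .or. &
       (domain % blocklist % state % time_levs(1) % state % xtime % ioinfo % sfc .and. input_obj % stream == STREAM_SFC)) then
      call mpas_dmpar_copy_field(domain % blocklist % state % time_levs(1) % state % xtime)
   end if
</pre>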