<p><b>croesch@ucar.edu</b> 2012-03-12 13:55:20 -0600 (Mon, 12 Mar 2012)</p><p>BRANCH UPDATE<br>
<br>
Add multiple-layer halo exchange infrastructure to the branch, and update halo exchange calls throughout the code<br>
<br>
M src/core_hyd_atmos/mpas_atmh_time_integration.F<br>
M src/core_init_nhyd_atmos/mpas_init_atm_test_cases.F<br>
M src/core_sw/mpas_sw_time_integration.F<br>
M src/registry/gen_inc.c<br>
M src/core_atmos_physics/mpas_atmphys_todynamics.F<br>
M src/core_nhyd_atmos/mpas_atm_time_integration.F<br>
M src/core_nhyd_atmos/mpas_atm_mpas_core.F<br>
M src/core_ocean/mpas_ocn_time_integration_rk4.F<br>
M src/core_ocean/mpas_ocn_time_integration_split.F<br>
M src/framework/mpas_io_input.F<br>
A src/framework/mpas_dmpar_types.F<br>
M src/framework/mpas_dmpar.F<br>
M src/framework/Makefile<br>
M src/framework/mpas_grid_types.F<br>
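<br>
In brief: the rank-specific exchange routines (mpas_dmpar_exch_halo_field1d_real, _2d_real, _3d_real) are replaced by a single generic mpas_dmpar_exch_halo_field that operates on field derived types, with an optional argument naming the halo layer(s) to exchange. The fragments below are a sketch distilled from the diff that follows (not a single compilation unit; block, grid, and scale_in stand in for the caller's context):<br>
</p><pre>
! Old interface: raw array plus explicit dimensions and exchange lists
call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % diag % ru % array(:,:), &
                                       block % mesh % nVertLevels, block % mesh % nEdges, &
                                       block % parinfo % edgesToSend, block % parinfo % edgesToRecv)

! New generic interface: the field carries its block, dimensions, and exchange lists
call mpas_dmpar_exch_halo_field(block % diag % ru)

! Optional argument: exchange only the listed halo layer(s)
call mpas_dmpar_exch_halo_field(block % diag % rtheta_pp, (/ 2 /))

! Local arrays that are not registry fields are exchanged by wrapping them in a
! temporary field that points at the array and the block's exchange lists
type (field2DReal) :: tempField

tempField % block    => block
tempField % dims(1)  = grid % nVertLevels
tempField % dims(2)  = grid % nCells
tempField % sendList => block % parinfo % cellsToSend
tempField % recvList => block % parinfo % cellsToRecv
tempField % array    => scale_in
call mpas_dmpar_exch_halo_field(tempField, (/ 1 /))
</pre><p>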
</p><hr noshade><pre><font color="gray">Modified: branches/omp_blocks/halo/src/core_atmos_physics/mpas_atmphys_todynamics.F
===================================================================
--- branches/omp_blocks/halo/src/core_atmos_physics/mpas_atmphys_todynamics.F        2012-03-12 18:26:51 UTC (rev 1619)
+++ branches/omp_blocks/halo/src/core_atmos_physics/mpas_atmphys_todynamics.F        2012-03-12 19:55:20 UTC (rev 1620)
@@ -12,21 +12,17 @@
contains
!=============================================================================================
- subroutine physics_addtend(dminfo,cellsToSend,cellsToRecv,mesh,state,diag,tend, &
- tend_physics,mass,mass_edge)
+subroutine physics_addtend(mesh, state, diag, tend, tend_physics, mass, mass_edge)
!=============================================================================================
!input variables:
!----------------
- type(dm_info), intent(in):: dminfo
- type(mesh_type),intent(in):: mesh
- type(exchange_list), dimension(*), intent(inout):: cellsToSend,cellsToRecv
-
+type(mesh_type),intent(in):: mesh
type(state_type),intent(in):: state
type(diag_type),intent(in):: diag
- type(tend_physics_type),intent(in):: tend_physics
- real(kind=RKIND),dimension(:,:):: mass
- real(kind=RKIND),dimension(:,:):: mass_edge
+ type(tend_physics_type),intent(inout):: tend_physics
+ real(kind=RKIND),dimension(:,:),intent(in):: mass
+ real(kind=RKIND),dimension(:,:),intent(in):: mass_edge
!inout variables:
!----------------
@@ -34,6 +30,9 @@
!local variables:
!----------------
+
+ type(block_type),pointer :: block
+
integer:: i,iCell,k,n,nCells,nCellsSolve,nEdges,nEdgesSolve,nVertLevels
real(kind=RKIND),dimension(:,:),pointer:: theta_m,qv
@@ -56,6 +55,8 @@
!=============================================================================================
!write(0,*)
!write(0,*) '--- enter subroutine physics_add_tend:'
+
+ block => mesh % block
nCells = mesh % nCells
nEdges = mesh % nEdges
@@ -99,7 +100,7 @@
if(config_pbl_scheme .ne. 'off') then
allocate(rublten_Edge(nVertLevels,nEdges))
rublten_Edge(:,:) = 0.
- call tend_toEdges(dminfo,CellsToSend,CellsToRecv,mesh,rublten,rvblten,rublten_Edge)
+ call tend_toEdges(mesh,rublten,rvblten,rublten_Edge)
do i = 1, nEdgesSolve
do k = 1, nVertLevels
tend_u(k,i)=tend_u(k,i)+rublten_Edge(k,i)*mass_edge(k,i)
@@ -185,15 +186,12 @@
end subroutine physics_addtend
!=============================================================================================
- subroutine tend_toEdges(dminfo,cellsToSend,cellsToRecv,mesh,Ux_tend,Uy_tend,U_tend)
+ subroutine tend_toEdges(mesh,Ux_tend,Uy_tend,U_tend)
!=============================================================================================
!input arguments:
!----------------
- type(dm_info),intent(in):: dminfo
type(mesh_type),intent(in):: mesh
- type(exchange_list),dimension(*),intent(inout):: cellsToSend,cellsToRecv
-
real(kind=RKIND),intent(in),dimension(:,:):: Ux_tend,Uy_tend
!output arguments:
@@ -201,16 +199,21 @@
real(kind=RKIND),intent(out),dimension(:,:):: U_tend
!local variables:
+!-----------------
+ type(block_type),pointer :: block
+ type (field2DReal):: tempField
integer:: iCell,iEdge,k,j,nCells,nCellsSolve,nVertLevels
integer,dimension(:),pointer :: nEdgesOnCell
integer,dimension(:,:),pointer:: edgesOnCell
real(kind=RKIND),dimension(:,:),pointer:: east,north,edge_normal
- real(kind=RKIND),dimension(:,:),allocatable:: Ux_tend_halo,Uy_tend_halo
+ real(kind=RKIND),dimension(:,:),allocatable,target:: Ux_tend_halo,Uy_tend_halo
!---------------------------------------------------------------------------------------------
- nCells = mesh % nCells
+ block => mesh % block
+
+ nCells = mesh % nCells
nCellsSolve = mesh % nCellsSolve
nVertLevels = mesh % nVertLevels
@@ -232,11 +235,18 @@
enddo
enddo
- call mpas_dmpar_exch_halo_field2d_real( &
- dminfo,Ux_tend_halo,nVertLevels,nCells,cellsToSend,cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real( &
- dminfo,Uy_tend_halo,nVertLevels,nCells,cellsToSend,cellsToRecv)
+ tempField % block => block
+ tempField % dims(1) = nVertLevels
+ tempField % dims(2) = nCellsSolve
+ tempField % sendList => block % parinfo % cellsToSend
+ tempField % recvList => block % parinfo % cellsToRecv
+ tempField % array => Ux_tend_halo
+ call mpas_dmpar_exch_halo_field(tempField)
+
+ tempField % array => Uy_tend_halo
+ call mpas_dmpar_exch_halo_field(tempField)
+
U_tend(:,:) = 0.0
do iCell = 1, nCells
do j = 1, nEdgesOnCell(iCell)
Modified: branches/omp_blocks/halo/src/core_hyd_atmos/mpas_atmh_time_integration.F
===================================================================
--- branches/omp_blocks/halo/src/core_hyd_atmos/mpas_atmh_time_integration.F        2012-03-12 18:26:51 UTC (rev 1619)
+++ branches/omp_blocks/halo/src/core_hyd_atmos/mpas_atmh_time_integration.F        2012-03-12 19:55:20 UTC (rev 1620)
@@ -116,34 +116,16 @@
block => domain % blocklist
do while (associated(block))
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % mesh % qtot % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % mesh % cqu % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % h % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % pressure % array(:,:), &
- block % mesh % nVertLevels+1, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % geopotential % array(:,:), &
- block % mesh % nVertLevels+1, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % alpha % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % pv_edge % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
+ call mpas_dmpar_exch_halo_field(block % mesh % qtot)
+ call mpas_dmpar_exch_halo_field(block % mesh % cqu)
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % h)
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % pressure)
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % geopotential)
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % alpha)
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % pv_edge)
if (config_h_mom_eddy_visc4 > 0.0) then
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % divergence % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % vorticity % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nVertices, &
- block % parinfo % verticesToSend, block % parinfo % verticesToRecv)
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % divergence)
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % vorticity)
end if
block => block % next
end do
@@ -163,12 +145,8 @@
!
block => domain % blocklist
do while (associated(block))
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % u % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % theta % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
+ call mpas_dmpar_exch_halo_field(block % tend % u)
+ call mpas_dmpar_exch_halo_field(block % tend % theta)
block => block % next
end do
@@ -210,48 +188,20 @@
!
block => domain % blocklist
do while (associated(block))
-!! call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % u % array(:,:), &
-!! block % mesh % nVertLevels, block % mesh % nEdges, &
-!! block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % h_edge % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
-!! call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % mesh % uhAvg % array(:,:), &
-!! block % mesh % nVertLevels, block % mesh % nEdges, &
-!! block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
-!! call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % mesh % wwAvg % array(:,:), &
-!! block % mesh % nVertLevels+1, block % mesh % nCells, &
-!! block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % theta % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
-!! call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % h % array(:,:), &
-!! block % mesh % nVertLevels, block % mesh % nCells, &
-!! block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
-!! call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % h % array(:,:), &
-!! block % mesh % nVertLevels, block % mesh % nCells, &
-!! block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, block % mesh % dpsdt % array(:), &
- block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, block % state % time_levs(2) % state % surface_pressure % array(:), &
- block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % alpha % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % ww % array(:,:), &
- block % mesh % nVertLevels+1, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % pressure % array(:,:), &
- block % mesh % nVertLevels+1, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
-!! call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % mesh % pressure_old % array(:,:), &
-!! block % mesh % nVertLevels+1, block % mesh % nCells, &
-!! block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % geopotential % array(:,:), &
- block % mesh % nVertLevels+1, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
+!! call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % u)
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % h_edge)
+!! call mpas_dmpar_exch_halo_field(block % mesh % uhAvg)
+!! call mpas_dmpar_exch_halo_field(block % mesh % wwAvg)
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % theta)
+!! call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % h)
+!! call mpas_dmpar_exch_halo_field(block % tend % h)
+ call mpas_dmpar_exch_halo_field(block % mesh % dpsdt)
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % surface_pressure)
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % alpha)
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % ww)
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % pressure)
+!! call mpas_dmpar_exch_halo_field(block % mesh % pressure_old)
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % geopotential)
block => block % next
end do
@@ -274,22 +224,17 @@
block % state % time_levs(1) % state, block % state % time_levs(2) % state, &
block % mesh, rk_timestep(rk_step) )
else
- call atmh_advance_scalars_mono( block % tend, &
- block % state % time_levs(1) % state, block % state % time_levs(2) % state, &
- block % mesh, rk_timestep(rk_step), rk_step, 3, &
- domain % dminfo, block % parinfo % cellsToSend, block % parinfo % cellsToRecv )
+ call atmh_advance_scalars_mono(block % tend, &
+ block % state % time_levs(1) % state, block % state % time_levs(2) % state, &
+ block % mesh, rk_timestep(rk_step), rk_step, 3)
end if
block => block % next
end do
block => domain % blocklist
do while (associated(block))
- call mpas_dmpar_exch_halo_field3d_real(domain % dminfo, block % tend % scalars % array(:,:,:), &
- block % tend % num_scalars, block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field3d_real(domain % dminfo, block % state % time_levs(2) % state % scalars % array(:,:,:), &
- block % tend % num_scalars, block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
+ call mpas_dmpar_exch_halo_field(block % tend % scalars)
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % scalars)
block => block % next
end do
@@ -1461,7 +1406,7 @@
end subroutine atmh_advance_scalars
- subroutine atmh_advance_scalars_mono( tend, s_old, s_new, grid, dt, rk_step, rk_order, dminfo, cellsToSend, cellsToRecv)
+ subroutine atmh_advance_scalars_mono(tend, s_old, s_new, grid, dt, rk_step, rk_order)
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!
! Input: s - current model state
@@ -1476,11 +1421,11 @@
type (state_type), intent(in) :: s_old
type (state_type), intent(inout) :: s_new
type (mesh_type), intent(in) :: grid
- integer, intent(in) :: rk_step, rk_order
real (kind=RKIND), intent(in) :: dt
- type (dm_info), intent(in) :: dminfo
- type (exchange_list), dimension(*), intent(inout) :: cellsToSend, cellsToRecv
+ integer, intent(in) :: rk_step, rk_order
+ type (block_type), pointer :: block
+
integer :: i, iCell, iEdge, k, iScalar, cell_upwind, cell1, cell2, num_scalars
real (kind=RKIND) :: flux, scalar_edge, d2fdx2_cell1, d2fdx2_cell2
real (kind=RKIND) :: fdir, flux_upwind, h_flux_upwind, s_upwind
@@ -1491,9 +1436,11 @@
real (kind=RKIND), dimension(:), pointer :: dvEdge, dcEdge, areaCell
integer, dimension(:,:), pointer :: cellsOnEdge
+ type (field3DReal) :: tempField
+
real (kind=RKIND), dimension( s_old % num_scalars, grid % nEdges+1) :: h_flux
- real (kind=RKIND), dimension( s_old % num_scalars, grid % nCells+1, 2 ) :: v_flux, v_flux_upwind, s_update
- real (kind=RKIND), dimension( s_old % num_scalars, grid % nCells+1, 2 ) :: scale_out, scale_in
+ real (kind=RKIND), dimension(2, s_old % num_scalars, grid % nCells+1) :: v_flux, v_flux_upwind, s_update
+ real (kind=RKIND), dimension(2, s_old % num_scalars, grid % nCells+1), target :: scale_out, scale_in
real (kind=RKIND), dimension( s_old % num_scalars ) :: s_max, s_min, s_max_update, s_min_update
integer :: nVertLevels, km0, km1, ktmp, kcp1, kcm1
@@ -1502,6 +1449,8 @@
real (kind=RKIND), parameter :: eps=1.e-20
real (kind=RKIND) :: coef_3rd_order
+ block => grid % block
+
num_scalars = s_old % num_scalars
scalar_old => s_old % scalars % array
@@ -1531,8 +1480,8 @@
km1 = 1
km0 = 2
- v_flux(:,:,km1) = 0.
- v_flux_upwind(:,:,km1) = 0.
+ v_flux(km1,:,:) = 0.
+ v_flux_upwind(km1,:,:) = 0.
scale_out(:,:,:) = 1.
scale_in(:,:,:) = 1.
@@ -1552,20 +1501,20 @@
cell_upwind = k
if (wwAvg(k+1,iCell) >= 0) cell_upwind = k+1
do iScalar=1,num_scalars
- v_flux(iScalar,iCell,km0) = dt * wwAvg(k+1,iCell) * &
+ v_flux(km0,iScalar,iCell) = dt * wwAvg(k+1,iCell) * &
(fnm(k+1) * scalar_new(iScalar,k+1,iCell) + fnp(k+1) * scalar_new(iScalar,k,iCell))
- v_flux_upwind(iScalar,iCell,km0) = dt * wwAvg(k+1,iCell) * scalar_old(iScalar,cell_upwind,iCell)
- v_flux(iScalar,iCell,km0) = v_flux(iScalar,iCell,km0) - v_flux_upwind(iScalar,iCell,km0)
+ v_flux_upwind(km0,iScalar,iCell) = dt * wwAvg(k+1,iCell) * scalar_old(iScalar,cell_upwind,iCell)
+ v_flux(km0,iScalar,iCell) = v_flux(km0,iScalar,iCell) - v_flux_upwind(km0,iScalar,iCell)
! v_flux(iScalar,iCell,km0) = 0. ! use only upwind - for testing
- s_update(iScalar,iCell,km0) = scalar_old(iScalar,k,iCell) * h_old(k,iCell) &
- - rdnw(k) * (v_flux_upwind(iScalar,iCell,km0) - v_flux_upwind(iScalar,iCell,km1))
+ s_update(km0,iScalar,iCell) = scalar_old(iScalar,k,iCell) * h_old(k,iCell) &
+ - rdnw(k) * (v_flux_upwind(km0,iScalar,iCell) - v_flux_upwind(km1,iScalar,iCell))
end do
else
do iScalar=1,num_scalars
- v_flux(iScalar,iCell,km0) = 0.
- v_flux_upwind(iScalar,iCell,km0) = 0.
- s_update(iScalar,iCell,km0) = scalar_old(iScalar,k,iCell) * h_old(k,iCell) &
- - rdnw(k) * (v_flux_upwind(iScalar,iCell,km0) - v_flux_upwind(iScalar,iCell,km1))
+ v_flux(km0,iScalar,iCell) = 0.
+ v_flux_upwind(km0,iScalar,iCell) = 0.
+ s_update(km0,iScalar,iCell) = scalar_old(iScalar,k,iCell) * h_old(k,iCell) &
+ - rdnw(k) * (v_flux_upwind(km0,iScalar,iCell) - v_flux_upwind(km1,iScalar,iCell))
end do
end if
@@ -1586,8 +1535,8 @@
h_flux_upwind = dt * uhAvg(k,iEdge) * dvEdge(iEdge) * scalar_old(iScalar,k,cell_upwind)
h_flux(iScalar,iEdge) = h_flux(iScalar,iEdge) - h_flux_upwind
! h_flux(iScalar,iEdge) = 0. ! use only upwind - for testing
- s_update(iScalar,cell1,km0) = s_update(iScalar,cell1,km0) - h_flux_upwind / grid % areaCell % array(cell1)
- s_update(iScalar,cell2,km0) = s_update(iScalar,cell2,km0) + h_flux_upwind / grid % areaCell % array(cell2)
+ s_update(km0,iScalar,cell1) = s_update(km0,iScalar,cell1) - h_flux_upwind / grid % areaCell % array(cell1)
+ s_update(km0,iScalar,cell2) = s_update(km0,iScalar,cell2) + h_flux_upwind / grid % areaCell % array(cell2)
end do
end do
@@ -1627,8 +1576,8 @@
h_flux_upwind = dt * uhAvg(k,iEdge) * dvEdge(iEdge) * scalar_old(iScalar,k,cell_upwind)
h_flux(iScalar,iEdge) = h_flux(iScalar,iEdge) - h_flux_upwind
! h_flux(iScalar,iEdge) = 0. ! use only upwind - for testing
- s_update(iScalar,cell1,km0) = s_update(iScalar,cell1,km0) - h_flux_upwind / grid % areaCell % array(cell1)
- s_update(iScalar,cell2,km0) = s_update(iScalar,cell2,km0) + h_flux_upwind / grid % areaCell % array(cell2)
+ s_update(km0,iScalar,cell1) = s_update(km0,iScalar,cell1) - h_flux_upwind / grid % areaCell % array(cell1)
+ s_update(km0,iScalar,cell2) = s_update(km0,iScalar,cell2) + h_flux_upwind / grid % areaCell % array(cell2)
end do
end do
@@ -1647,14 +1596,14 @@
s_max(iScalar) = max(scalar_old(iScalar,k,iCell), scalar_old(iScalar,kcp1,iCell), scalar_old(iScalar,kcm1,iCell))
s_min(iScalar) = min(scalar_old(iScalar,k,iCell), scalar_old(iScalar,kcp1,iCell), scalar_old(iScalar,kcm1,iCell))
- s_max_update(iScalar) = s_update(iScalar,iCell,km0)
- s_min_update(iScalar) = s_update(iScalar,iCell,km0)
+ s_max_update(iScalar) = s_update(km0,iScalar,iCell)
+ s_min_update(iScalar) = s_update(km0,iScalar,iCell)
! add in vertical flux to get max and min estimate
s_max_update(iScalar) = s_max_update(iScalar) &
- - rdnw(k) * (max(0.0_RKIND,v_flux(iScalar,iCell,km0)) - min(0.0_RKIND,v_flux(iScalar,iCell,km1)))
+ - rdnw(k) * (max(0.0_RKIND,v_flux(km0,iScalar,iCell)) - min(0.0_RKIND,v_flux(km1,iScalar,iCell)))
s_min_update(iScalar) = s_min_update(iScalar) &
- - rdnw(k) * (min(0.0_RKIND,v_flux(iScalar,iCell,km0)) - max(0.0_RKIND,v_flux(iScalar,iCell,km1)))
+ - rdnw(k) * (min(0.0_RKIND,v_flux(km0,iScalar,iCell)) - max(0.0_RKIND,v_flux(km1,iScalar,iCell)))
end do
@@ -1681,33 +1630,33 @@
if( config_positive_definite ) s_min(:) = 0.
do iScalar=1,num_scalars
- scale_out (iScalar,iCell,km0) = 1.
- scale_in (iScalar,iCell,km0) = 1.
+ scale_out (km0,iScalar,iCell) = 1.
+ scale_in (km0,iScalar,iCell) = 1.
s_max_update (iScalar) = s_max_update (iScalar) / h_new (k,iCell)
s_min_update (iScalar) = s_min_update (iScalar) / h_new (k,iCell)
- s_upwind = s_update(iScalar,iCell,km0) / h_new(k,iCell)
+ s_upwind = s_update(km0,iScalar,iCell) / h_new(k,iCell)
if ( s_max_update(iScalar) > s_max(iScalar) .and. config_monotonic) &
- scale_in (iScalar,iCell,km0) = max(0.0_RKIND,(s_max(iScalar)-s_upwind)/(s_max_update(iScalar)-s_upwind+eps))
+ scale_in (km0,iScalar,iCell) = max(0.0_RKIND,(s_max(iScalar)-s_upwind)/(s_max_update(iScalar)-s_upwind+eps))
if ( s_min_update(iScalar) < s_min(iScalar) ) &
- scale_out (iScalar,iCell,km0) = max(0.0_RKIND,(s_upwind-s_min(iScalar))/(s_upwind-s_min_update(iScalar)+eps))
+ scale_out (km0,iScalar,iCell) = max(0.0_RKIND,(s_upwind-s_min(iScalar))/(s_upwind-s_min_update(iScalar)+eps))
end do
end do ! end loop over cells to compute scale factor
- call mpas_dmpar_exch_halo_field2d_real(dminfo, scale_out(:,:,1), &
- num_scalars, grid % nCells, &
- cellsToSend, cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(dminfo, scale_out(:,:,2), &
- num_scalars, grid % nCells, &
- cellsToSend, cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(dminfo, scale_in(:,:,1), &
- num_scalars, grid % nCells, &
- cellsToSend, cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(dminfo, scale_in(:,:,2), &
- num_scalars, grid % nCells, &
- cellsToSend, cellsToRecv)
+ tempField % block => block
+ tempField % dims(1) = 2
+ tempField % dims(2) = num_scalars
+ tempField % dims(3) = grid % nCells
+ tempField % sendList => block % parinfo % cellsToSend
+ tempField % recvList => block % parinfo % cellsToRecv
+ tempField % array => scale_in
+ call mpas_dmpar_exch_halo_field(tempField)
+
+ tempField % array => scale_out
+ call mpas_dmpar_exch_halo_field(tempField)
+
! rescale the horizontal fluxes
do iEdge = 1, grid % nEdges
@@ -1716,9 +1665,9 @@
do iScalar=1,num_scalars
flux = h_flux(iScalar,iEdge)
if (flux > 0) then
- flux = flux * min(scale_out(iScalar,cell1,km0), scale_in(iScalar,cell2,km0))
+ flux = flux * min(scale_out(km0,iScalar,cell1), scale_in(km0,iScalar,cell2))
else
- flux = flux * min(scale_in(iScalar,cell1,km0), scale_out(iScalar,cell2,km0))
+ flux = flux * min(scale_in(km0,iScalar,cell1), scale_out(km0,iScalar,cell2))
end if
h_flux(iScalar,iEdge) = flux
end do
@@ -1728,13 +1677,13 @@
do iCell=1,grid % nCells
do iScalar=1,num_scalars
- flux = v_flux(iScalar,iCell,km1)
+ flux = v_flux(km1,iScalar,iCell)
if (flux > 0) then
- flux = flux * min(scale_out(iScalar,iCell,km0), scale_in(iScalar,iCell,km1))
+ flux = flux * min(scale_out(km0,iScalar,iCell), scale_in(km1,iScalar,iCell))
else
- flux = flux * min(scale_in(iScalar,iCell,km0), scale_out(iScalar,iCell,km1))
+ flux = flux * min(scale_in(km0,iScalar,iCell), scale_out(km1,iScalar,iCell))
end if
- v_flux(iScalar,iCell,km1) = flux
+ v_flux(km1,iScalar,iCell) = flux
end do
end do
@@ -1748,8 +1697,8 @@
do iCell=1,grid % nCells
! add in upper vertical flux that was just renormalized
do iScalar=1,num_scalars
- s_update(iScalar,iCell,km0) = s_update(iScalar,iCell,km0) + rdnw(k) * v_flux(iScalar,iCell,km1)
- if (k > 1) s_update(iScalar,iCell,km1) = s_update(iScalar,iCell,km1) - rdnw(k-1)*v_flux(iScalar,iCell,km1)
+ s_update(km0,iScalar,iCell) = s_update(km0,iScalar,iCell) + rdnw(k) * v_flux(km1,iScalar,iCell)
+ if (k > 1) s_update(km1,iScalar,iCell) = s_update(km1,iScalar,iCell) - rdnw(k-1)*v_flux(km1,iScalar,iCell)
end do
end do
@@ -1757,9 +1706,9 @@
cell1 = cellsOnEdge(1,iEdge)
cell2 = cellsOnEdge(2,iEdge)
do iScalar=1,num_scalars
- s_update(iScalar,cell1,km0) = s_update(iScalar,cell1,km0) - &
+ s_update(km0,iScalar,cell1) = s_update(km0,iScalar,cell1) - &
h_flux(iScalar,iEdge) / grid % areaCell % array(cell1)
- s_update(iScalar,cell2,km0) = s_update(iScalar,cell2,km0) + &
+ s_update(km0,iScalar,cell2) = s_update(km0,iScalar,cell2) + &
h_flux(iScalar,iEdge) / grid % areaCell % array(cell2)
end do
end do
@@ -1768,13 +1717,13 @@
if (k > 1) then
do iCell=1,grid % nCells
do iScalar=1,num_scalars
- s_update(iScalar,iCell,km1) = s_update(iScalar,iCell,km1) / h_new(k-1,iCell)
+ s_update(km1,iScalar,iCell) = s_update(km1,iScalar,iCell) / h_new(k-1,iCell)
end do
end do
do iCell=1,grid % nCells
do iScalar=1,num_scalars
- scalar_new(iScalar,k-1,iCell) = s_update(iScalar,iCell,km1)
+ scalar_new(iScalar,k-1,iCell) = s_update(km1,iScalar,iCell)
end do
end do
end if
@@ -1787,7 +1736,7 @@
do iCell=1,grid % nCells
do iScalar=1,num_scalars
- scalar_new(iScalar,grid % nVertLevels,iCell) = s_update(iScalar,iCell,km1) / h_new(grid%nVertLevels,iCell)
+ scalar_new(iScalar,grid % nVertLevels,iCell) = s_update(km1,iScalar,iCell) / h_new(grid%nVertLevels,iCell)
end do
end do
Modified: branches/omp_blocks/halo/src/core_init_nhyd_atmos/mpas_init_atm_test_cases.F
===================================================================
--- branches/omp_blocks/halo/src/core_init_nhyd_atmos/mpas_init_atm_test_cases.F        2012-03-12 18:26:51 UTC (rev 1619)
+++ branches/omp_blocks/halo/src/core_init_nhyd_atmos/mpas_init_atm_test_cases.F        2012-03-12 19:55:20 UTC (rev 1620)
@@ -8,6 +8,7 @@
use mpas_atmphys_initialize_real
use mpas_RBF_interpolation
use mpas_vector_reconstruction
+ use mpas_timer
! Added only clause to keep xlf90 from getting confused from the overloaded abs intrinsic in mpas_timekeeping
use mpas_timekeeping !, only: MPAS_Time_type, MPAS_TimeInterval_type, MPAS_Clock_type, &
@@ -35,7 +36,6 @@
type (block_type), pointer :: block_ptr
-
!
! Do some quick checks to make sure compile options are compatible with the chosen test case
!
@@ -103,8 +103,9 @@
write(0,*) ' real-data GFS test case '
block_ptr => domain % blocklist
do while (associated(block_ptr))
- call init_atm_test_case_gfs(domain % dminfo, block_ptr % mesh, block_ptr % fg, block_ptr % state % time_levs(1) % state, &
- block_ptr % diag, config_test_case, block_ptr % parinfo)
+ call init_atm_test_case_gfs(block_ptr % mesh, block_ptr % fg, &
+ block_ptr % state % time_levs(1) % state, block_ptr % diag, &
+ config_test_case)
if (config_met_interp) call physics_initialize_real(block_ptr % mesh, block_ptr % fg)
block_ptr => block_ptr % next
end do
@@ -126,6 +127,7 @@
end if
+
block_ptr => domain % blocklist
do while (associated(block_ptr))
do i=2,nTimeLevs
@@ -144,7 +146,6 @@
end do
endif
-
end subroutine init_atm_setup_test_case
!----------------------------------------------------------------------------------------------------------
@@ -2153,7 +2154,7 @@
end subroutine init_atm_test_case_mtn_wave
- subroutine init_atm_test_case_gfs(dminfo, grid, fg, state, diag, test_case, parinfo)
+ subroutine init_atm_test_case_gfs(grid, fg, state, diag, test_case)
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! Real-data test case using GFS data
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -2165,13 +2166,15 @@
implicit none
- type (dm_info), intent(in) :: dminfo
type (mesh_type), intent(inout) :: grid
type (fg_type), intent(inout) :: fg
type (state_type), intent(inout) :: state
type (diag_type), intent(inout) :: diag
integer, intent(in) :: test_case
+
+ type (block_type), pointer :: block
type (parallel_info), pointer :: parinfo
+ type (dm_info), pointer :: dminfo
real (kind=RKIND), parameter :: u0 = 35.0
real (kind=RKIND), parameter :: alpha_grid = 0. ! no grid rotation
@@ -2215,6 +2218,8 @@
real (kind=RKIND), dimension(:,:), pointer :: v
real (kind=RKIND), dimension(:,:), pointer :: sorted_arr
+ type (field1DReal):: tempField
+
real(kind=RKIND), dimension(:), pointer :: hs, hs1
real(kind=RKIND) :: hm, zh, dzmin, dzmina, dzmina_global, dzminf, sm
integer :: nsmterrain, kz, sfc_k
@@ -2273,6 +2278,11 @@
real (kind=RKIND) :: dlat
real (kind=RKIND) :: z_edge, z_edge3, d2fdx2_cell1, d2fdx2_cell2
+
+ block => grid % block
+ parinfo => block % parinfo
+ dminfo => block % domain % dminfo
+
weightsOnEdge => grid % weightsOnEdge % array
nEdgesOnEdge => grid % nEdgesOnEdge % array
nEdgesOnCell => grid % nEdgesOnCell % array
@@ -2376,7 +2386,7 @@
allocate(rarray(nx,ny,nzz))
allocate(nhs(grid % nCells))
nhs(:) = 0
- grid % ter % array(:) = 0.0
+ ter(:) = 0.0
do jTileStart=1,20401,ny-6
! do jTileStart=1,961,ny-6
@@ -2409,7 +2419,7 @@
grid % nCells, grid % maxEdges, grid % nEdgesOnCell % array, grid % cellsOnCell % array, &
grid % latCell % array, grid % lonCell % array)
- grid % ter % array(iPoint) = grid % ter % array(iPoint) + rarray(i,j,1)
+ ter(iPoint) = ter(iPoint) + rarray(i,j,1)
nhs(iPoint) = nhs(iPoint) + 1
end do
@@ -2419,7 +2429,7 @@
end do
do iCell=1, grid % nCells
- grid % ter % array(iCell) = grid % ter % array(iCell) / real(nhs(iCell))
+ ter(iCell) = ter(iCell) / real(nhs(iCell))
end do
deallocate(rarray)
@@ -2980,7 +2990,7 @@
nInterpPoints = grid % nCells
latPoints => grid % latCell % array
lonPoints => grid % lonCell % array
- destField1d => grid % ter % array
+ destField1d => ter
ndims = 1
end if
@@ -3046,11 +3056,9 @@
ter(iCell) = hs(iCell) - 0.25*ter(iCell)
end do
- call mpas_dmpar_exch_halo_field1d_real(dminfo, ter(:), &
- grid % nCells, &
- parinfo % cellsToSend, parinfo % cellsToRecv)
+ ! note that the variable ter used throughout this section is a pointer to grid % ter % array; here we are passing ter's parent field
+ call mpas_dmpar_exch_halo_field(grid % ter)
-
end do
do iCell=1,grid % nCells
@@ -3164,7 +3172,7 @@
sm = .05*min(0.5_RKIND*zw(k)/hm,1.0_RKIND)
do i=1,50
- do iCell=1,grid %nCells
+ do iCell=1,grid % nCells
hs1(iCell) = 0.
do j = 1,nEdgesOnCell(iCell)
@@ -3184,10 +3192,16 @@
end do
- call mpas_dmpar_exch_halo_field1d_real(dminfo, hs(:), &
- grid % nCells, &
- parinfo % cellsToSend, parinfo % cellsToRecv)
+ tempField % block => block
+ tempField % dims(1) = grid % nCells
+ tempField % sendList => parinfo % cellsToSend
+ tempField % recvList => parinfo % cellsToRecv
+ tempField % array => hs
+ call mpas_timer_start("EXCHANGE_1D_REAL")
+ call mpas_dmpar_exch_halo_field(tempField)
+ call mpas_timer_stop("EXCHANGE_1D_REAL")
+
! dzmina = minval(hs(:)-hx(k-1,:))
dzmina = minval(zw(k)+ah(k)*hs(1:grid%nCellsSolve)-zw(k-1)-ah(k-1)*hx(k-1,1:grid%nCellsSolve))
call mpas_dmpar_min_real(dminfo, dzmina, dzmina_global)
Modified: branches/omp_blocks/halo/src/core_nhyd_atmos/mpas_atm_mpas_core.F
===================================================================
--- branches/omp_blocks/halo/src/core_nhyd_atmos/mpas_atm_mpas_core.F        2012-03-12 18:26:51 UTC (rev 1619)
+++ branches/omp_blocks/halo/src/core_nhyd_atmos/mpas_atm_mpas_core.F        2012-03-12 19:55:20 UTC (rev 1620)
@@ -190,25 +190,19 @@
type (mesh_type), intent(inout) :: mesh
real (kind=RKIND), intent(in) :: dt
- call mpas_dmpar_exch_halo_field2d_real(dminfo, block % state % time_levs(1) % state % u % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(1) % state % u)
if (.not. config_do_restart .or. (config_do_restart .and. config_do_DAcycling)) then
call atm_init_coupled_diagnostics( block % state % time_levs(1) % state, block % diag, mesh)
end if
call atm_compute_solve_diagnostics(dt, block % state % time_levs(1) % state, block % diag, mesh)
- call mpas_dmpar_exch_halo_field2d_real(dminfo, block % diag % pv_edge % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
- call mpas_dmpar_exch_halo_field2d_real(dminfo, block % diag % ru % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
- call mpas_dmpar_exch_halo_field2d_real(dminfo, block % diag % rw % array, &
- block % mesh % nVertLevels+1, block % mesh % nCells, &
- block % parinfo % CellsToSend, block % parinfo % CellsToRecv)
+ call mpas_dmpar_exch_halo_field(block % diag % pv_edge)
+ call mpas_dmpar_exch_halo_field(block % diag % ru)
+
+ call mpas_dmpar_exch_halo_field(block % diag % rw)
+
call mpas_rbf_interp_initialize(mesh)
call mpas_init_reconstruct(mesh)
call mpas_reconstruct(mesh, block % state % time_levs(1) % state % u % array, &
Modified: branches/omp_blocks/halo/src/core_nhyd_atmos/mpas_atm_time_integration.F
===================================================================
--- branches/omp_blocks/halo/src/core_nhyd_atmos/mpas_atm_time_integration.F        2012-03-12 18:26:51 UTC (rev 1619)
+++ branches/omp_blocks/halo/src/core_nhyd_atmos/mpas_atm_time_integration.F        2012-03-12 19:55:20 UTC (rev 1620)
@@ -128,21 +128,17 @@
! WCS-parallel: first three and rtheta_p arise from scalar transport and microphysics update (OK). Others come from where?
! theta_m
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(1) % state % theta_m % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(1) % state % theta_m)
+
! scalars
- call mpas_dmpar_exch_halo_field3d_real(domain % dminfo, block % state % time_levs(1) % state % scalars % array(:,:,:), &
- block % state % time_levs(1) % state % num_scalars, block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(1) % state % scalars)
+
! pressure_p
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % diag % pressure_p % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
+ call mpas_dmpar_exch_halo_field(block % diag % pressure_p)
+
! rtheta_p
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % diag % rtheta_p % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
+ call mpas_dmpar_exch_halo_field(block % diag % rtheta_p)
+
block => block % next
end do
@@ -185,9 +181,12 @@
if (debug) write(0,*) ' add physics tendencies '
block => domain % blocklist
do while (associated(block))
- call physics_addtend( domain % dminfo , block % parinfo % cellsToSend, block % parinfo % cellsToRecv, &
- block % mesh , block % state % time_levs(1) % state, block % diag, block % tend, &
- block % tend_physics , block % state % time_levs(2) % state % rho_zz % array(:,:), &
+ call physics_addtend( block % mesh, &
+ block % state % time_levs(1) % state, &
+ block % diag, &
+ block % tend, &
+ block % tend_physics, &
+ block % state % time_levs(2) % state % rho_zz % array(:,:), &
block % diag % rho_edge % array(:,:) )
block => block % next
end do
@@ -202,16 +201,14 @@
block => domain % blocklist
do while (associated(block))
! tend_u
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % u % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
+ call mpas_dmpar_exch_halo_field(block % tend % u, (/ 1 /))
block => block % next
end do
block => domain % blocklist
do while (associated(block))
call atm_set_smlstep_pert_variables( block % state % time_levs(1) % state, block % state % time_levs(2) % state, &
- block % tend, block % diag, block % mesh )
+ block % tend, block % diag, block % mesh )
call atm_compute_vert_imp_coefs( block % state % time_levs(2) % state, block % mesh, block % diag, rk_sub_timestep(rk_step) )
block => block % next
end do
@@ -223,7 +220,7 @@
block => domain % blocklist
do while (associated(block))
call atm_advance_acoustic_step( block % state % time_levs(2) % state, block % diag, block % tend, &
- block % mesh, rk_sub_timestep(rk_step) )
+ block % mesh, rk_sub_timestep(rk_step) )
block => block % next
end do
@@ -236,9 +233,7 @@
block => domain % blocklist
do while (associated(block))
! rtheta_pp
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % diag % rtheta_pp % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
+ call mpas_dmpar_exch_halo_field(block % diag % rtheta_pp, (/ 1 /))
block => block % next
end do
@@ -258,27 +253,28 @@
! MGD seems necessary
! rw_p
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % diag % rw_p % array(:,:), &
- block % mesh % nVertLevels+1, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
+ !CR: SMALLER STENCIL?: call mpas_dmpar_exch_halo_field(block % diag % rw_p, (/ 1 /))
+ call mpas_dmpar_exch_halo_field(block % diag % rw_p)
+
! MGD seems necessary
! ru_p
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % diag % ru_p % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
+ !CR: SMALLER STENCIL?: call mpas_dmpar_exch_halo_field(block % diag % ru_p, (/ 2 /))
+ call mpas_dmpar_exch_halo_field(block % diag % ru_p)
+
! rho_pp
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % diag % rho_pp % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
+ call mpas_dmpar_exch_halo_field(block % diag % rho_pp)
+
+ ! the second layer of halo cells must be exchanged before calling atm_recover_large_step_variables
+ call mpas_dmpar_exch_halo_field(block % diag % rtheta_pp, (/ 2 /))
+
block => block % next
end do
block => domain % blocklist
do while (associated(block))
- call atm_recover_large_step_variables( block % state % time_levs(2) % state, &
+ call atm_recover_large_step_variables( block % state % time_levs(2) % state, &
block % diag, block % tend, block % mesh, &
rk_timestep(rk_step), number_sub_steps(rk_step), rk_step )
-
block => block % next
end do
@@ -287,9 +283,9 @@
block => domain % blocklist
do while (associated(block))
! u
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % u % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
+ !CR: SMALLER STENCIL?: call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % u, (/ 3 /))
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % u)
+
block => block % next
end do
@@ -305,16 +301,16 @@
! so we keep the advance_scalars routine as well
!
if (rk_step < 3 .or. (.not. config_monotonic .and. .not. config_positive_definite)) then
- call atm_advance_scalars( block % tend, &
+ call atm_advance_scalars( block % tend, &
block % state % time_levs(1) % state, block % state % time_levs(2) % state, &
block % diag, &
block % mesh, rk_timestep(rk_step) )
else
- call atm_advance_scalars_mono( block % tend, &
- block % state % time_levs(1) % state, block % state % time_levs(2) % state, &
- block % diag, &
- block % mesh, rk_timestep(rk_step), rk_step, 3, &
- domain % dminfo, block % parinfo % cellsToSend, block % parinfo % cellsToRecv )
+ block % domain = domain
+ call atm_advance_scalars_mono( block % tend, &
+ block % state % time_levs(1) % state, block % state % time_levs(2) % state, &
+ block % diag, block % mesh, &
+ rk_timestep(rk_step), rk_step, 3 )
end if
block => block % next
end do
@@ -359,24 +355,21 @@
!MGD seems necessary
! w
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % w % array(:,:), &
- block % mesh % nVertLevels+1, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % w)
+
! pv_edge
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % diag % pv_edge % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
+ call mpas_dmpar_exch_halo_field(block % diag % pv_edge)
+
! rho_edge
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % diag % rho_edge % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
+ call mpas_dmpar_exch_halo_field(block % diag % rho_edge)
! **** this will always be needed - perhaps we can cover this with compute_solve_diagnostics
! scalars
- call mpas_dmpar_exch_halo_field3d_real(domain % dminfo, block % state % time_levs(2) % state % scalars % array(:,:,:), &
- block % state % time_levs(2) % state % num_scalars, block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
+ if(rk_step < 3) then
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % scalars)
+ end if
+
block => block % next
end do
@@ -1421,7 +1414,7 @@
!---------------------------
- subroutine atm_advance_scalars_mono( tend, s_old, s_new, diag, grid, dt, rk_step, rk_order, dminfo, cellsToSend, cellsToRecv)
+ subroutine atm_advance_scalars_mono(tend, s_old, s_new, diag, grid, dt, rk_step, rk_order)
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!
! Input: s - current model state
@@ -1429,23 +1422,20 @@
!
implicit none
- type (tend_type), intent(in) :: tend
- type (state_type), intent(in) :: s_old
- type (state_type), intent(inout) :: s_new
- type (diag_type), intent(in) :: diag
- type (mesh_type), intent(in) :: grid
- real (kind=RKIND) :: dt
+ type (tend_type),intent(in) :: tend
+ type (state_type),intent(inout) :: s_old
+ type (state_type),intent(inout) :: s_new
+ type (diag_type),intent(in) :: diag
+ type (mesh_type),intent(in) :: grid
+ real (kind=RKIND),intent(in) :: dt
+ integer, intent(in) :: rk_step, rk_order
- integer, intent(in) :: rk_step, rk_order
- type (dm_info), intent(in) :: dminfo
- type (exchange_list), dimension(*), intent(inout) :: cellsToSend, cellsToRecv
-
-
+ type (block_type), pointer :: block
integer :: i, iCell, iEdge, k, iScalar, cell1, cell2
real (kind=RKIND) :: flux, scalar_edge, d2fdx2_cell1, d2fdx2_cell2, scalar_weight
real (kind=RKIND) :: scalar_weight_cell1, scalar_weight_cell2
- real (kind=RKIND), dimension(:,:,:), pointer :: scalar_old_in, scalar_new_in, scalar_tend
+ real (kind=RKIND), dimension(:,:,:), pointer :: scalar_tend
real (kind=RKIND), dimension(:,:,:), pointer :: deriv_two
real (kind=RKIND), dimension(:,:), pointer :: uhAvg, h_old, h_new, wwAvg, rho_edge, rho_zz, zgrid, kdiff
real (kind=RKIND), dimension(:), pointer :: dvEdge, dcEdge, areaCell, qv_init
@@ -1455,9 +1445,11 @@
integer, dimension(:), pointer :: nAdvCellsForEdge
real (kind=RKIND), dimension(:,:), pointer :: adv_coefs, adv_coefs_3rd
+ type (field2DReal) :: tempField
+
real (kind=RKIND), dimension( grid % nVertLevels, grid % nCells ) :: scalar_old, scalar_new
real (kind=RKIND), dimension( grid % nVertLevels, grid % nCells ) :: s_max, s_min, s_update
- real (kind=RKIND), dimension( grid % nVertLevels, grid % nCells ) :: scale_in, scale_out
+ real (kind=RKIND), dimension( grid % nVertLevels, grid % nCells ), target :: scale_in, scale_out
real (kind=RKIND), dimension( grid % nVertLevels, grid % nEdges ) :: flux_arr
real (kind=RKIND), dimension( grid % nVertLevels + 1, grid % nCells ) :: wdtn
@@ -1484,10 +1476,10 @@
flux4(q_im2, q_im1, q_i, q_ip1, ua) + &
coef3*abs(ua)*((q_ip1 - q_im2)-3.*(q_i-q_im1))/12.0
+ block => grid % block
+
coef_3rd_order = config_coef_3rd_order
- scalar_old_in => s_old % scalars % array
- scalar_new_in => s_new % scalars % array
kdiff => diag % kdiff % array
deriv_two => grid % deriv_two % array
uhAvg => diag % ruAvg % array
@@ -1532,7 +1524,7 @@
do iCell = 1,grid%nCellsSolve
do k = 1, grid%nVertLevels
do iScalar = 1,s_old%num_scalars
- scalar_old_in(iScalar,k,iCell) = scalar_old_in(iScalar,k,iCell)+dt*scalar_tend(iScalar,k,iCell) / h_old(k,iCell)
+ s_old % scalars % array(iScalar,k,iCell) = s_old % scalars % array(iScalar,k,iCell)+dt*scalar_tend(iScalar,k,iCell) / h_old(k,iCell)
scalar_tend(iScalar,k,iCell) = 0.
end do
end do
@@ -1540,12 +1532,7 @@
! halo exchange
- call mpas_dmpar_exch_halo_field3d_real( dminfo, &
- scalar_old_in(:,:,:), &
- s_old % num_scalars, &
- grid % nVertLevels, &
- grid % nCells, &
- cellsToSend, cellsToRecv )
+ call mpas_dmpar_exch_halo_field(s_old % scalars)
!
! Runge Kutta integration, so we compute fluxes from scalar_new values, update starts from scalar_old
@@ -1560,8 +1547,8 @@
do iCell = 1, grid%nCells
do k=1, grid%nVertLevels
- scalar_old(k,iCell) = scalar_old_in(iScalar,k,iCell)
- scalar_new(k,iCell) = scalar_new_in(iScalar,k,iCell)
+ scalar_old(k,iCell) = s_old % scalars % array(iScalar,k,iCell)
+ scalar_new(k,iCell) = s_new % scalars % array(iScalar,k,iCell)
end do
end do
@@ -1721,17 +1708,23 @@
!
! communicate scale factors here
!
- call mpas_dmpar_exch_halo_field2d_real( dminfo, &
- scale_in(:,:), &
- grid % nVertLevels, &
- grid % nCells, &
- cellsToSend, cellsToRecv )
- call mpas_dmpar_exch_halo_field2d_real( dminfo, &
- scale_out(:,:), &
- grid % nVertLevels, &
- grid % nCells, &
- cellsToSend, cellsToRecv )
!
+! WCS_halo_opt_2 - communicate only first halo row in these next two exchanges
+!
+
+ tempField % block => block
+ tempField % dims(1) = grid % nVertLevels
+ tempField % dims(2) = grid % nCells
+ tempField % sendList => block % parinfo % cellsToSend
+ tempField % recvList => block % parinfo % cellsToRecv
+
+ tempField % array => scale_in
+ call mpas_dmpar_exch_halo_field(tempField, (/ 1 /))
+
+ tempField % array => scale_out
+ call mpas_dmpar_exch_halo_field(tempField, (/ 1 /))
+
+!
! rescale the fluxes
!
do iEdge = 1, grid % nEdges
@@ -1801,7 +1794,7 @@
do iCell = 1, grid%nCells
do k=1, grid%nVertLevels
- scalar_new_in(iScalar,k,iCell) = max(0.0_RKIND,scalar_new(k,iCell))
+ s_new % scalars % array(iScalar,k,iCell) = max(0.0_RKIND,scalar_new(k,iCell))
end do
end do
Modified: branches/omp_blocks/halo/src/core_ocean/mpas_ocn_time_integration_rk4.F
===================================================================
--- branches/omp_blocks/halo/src/core_ocean/mpas_ocn_time_integration_rk4.F        2012-03-12 18:26:51 UTC (rev 1619)
+++ branches/omp_blocks/halo/src/core_ocean/mpas_ocn_time_integration_rk4.F        2012-03-12 19:55:20 UTC (rev 1620)
@@ -79,6 +79,7 @@
integer :: iCell, k, i, err
type (block_type), pointer :: block
type (state_type) :: provis
+ type (state_type), pointer :: provis_ptr
integer :: rk_step, iEdge, cell1, cell2
@@ -100,6 +101,9 @@
block % mesh % nCells, block % mesh % nEdges, block % mesh % maxEdges, block % mesh % maxEdges2, &
block % mesh % nVertices, block % mesh % vertexDegree, block % mesh % nVertLevels )
+ provis_ptr => provis
+ call mpas_create_state_links(provis_ptr)
+
!
! Initialize time_levs(2) with state at current time
! Initialize first RK state
@@ -144,17 +148,11 @@
call mpas_timer_start("RK4-diagnostic halo update")
block => domain % blocklist
do while (associated(block))
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, provis % pv_edge % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
+ call mpas_dmpar_exch_halo_field(provis % pv_edge)
if (config_h_mom_eddy_visc4 > 0.0) then
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, provis % divergence % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, provis % vorticity % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nVertices, &
- block % parinfo % verticesToSend, block % parinfo % verticesToRecv)
+ call mpas_dmpar_exch_halo_field(provis % divergence)
+ call mpas_dmpar_exch_halo_field(provis % vorticity)
end if
block => block % next
@@ -192,15 +190,9 @@
call mpas_timer_start("RK4-pronostic halo update")
block => domain % blocklist
do while (associated(block))
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % u % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % h % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field3d_real(domain % dminfo, block % tend % tracers % array(:,:,:), &
- block % tend % num_tracers, block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
+ call mpas_dmpar_exch_halo_field(block % tend % u)
+ call mpas_dmpar_exch_halo_field(block % tend % h)
+ call mpas_dmpar_exch_halo_field(block % tend % tracers)
block => block % next
end do
call mpas_timer_stop("RK4-pronostic halo update")
Modified: branches/omp_blocks/halo/src/core_ocean/mpas_ocn_time_integration_split.F
===================================================================
--- branches/omp_blocks/halo/src/core_ocean/mpas_ocn_time_integration_split.F        2012-03-12 18:26:51 UTC (rev 1619)
+++ branches/omp_blocks/halo/src/core_ocean/mpas_ocn_time_integration_split.F        2012-03-12 19:55:20 UTC (rev 1620)
@@ -166,20 +166,11 @@
block => domain % blocklist
do while (associated(block))
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, &
- block % state % time_levs(2) % state % pv_edge % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % pv_edge)
if (config_h_mom_eddy_visc4 > 0.0) then
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, &
- block % state % time_levs(2) % state % divergence % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, &
- block % state % time_levs(2) % state % vorticity % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nVertices, &
- block % parinfo % verticesToSend, block % parinfo % verticesToRecv)
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % divergence)
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % vorticity)
end if
block => block % next
@@ -274,11 +265,7 @@
call mpas_timer_start("se halo ubcl", .false., timer_halo_ubcl)
block => domain % blocklist
do while (associated(block))
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, &
- block % state % time_levs(2) % state % uBcl % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
-
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % uBcl)
block => block % next
end do
call mpas_timer_stop("se halo ubcl", timer_halo_ubcl)
@@ -394,11 +381,7 @@
call mpas_timer_start("se halo ubtr", .false., timer_halo_ubtr)
block => domain % blocklist
do while (associated(block))
-
- call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, &
- block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(:), &
- block % mesh % nEdges, block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
-
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle)
block => block % next
end do ! block
call mpas_timer_stop("se halo ubtr", timer_halo_ubtr)
@@ -461,12 +444,8 @@
call mpas_timer_start("se halo ssh", .false., timer_halo_ssh)
block => domain % blocklist
do while (associated(block))
-
- call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, &
- block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle % array(:), &
- block % mesh % nCells, block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
-
- block => block % next
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle)
+ block => block % next
end do ! block
call mpas_timer_stop("se halo ssh", timer_halo_ssh)
@@ -512,10 +491,7 @@
call mpas_timer_start("se halo ubtr", .false., timer_halo_ubtr)
block => domain % blocklist
do while (associated(block))
- call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, &
- block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(:), &
- block % mesh % nEdges, block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
-
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle)
block => block % next
end do ! block
call mpas_timer_stop("se halo ubtr", timer_halo_ubtr)
@@ -572,10 +548,7 @@
call mpas_timer_start("se halo ssh", .false., timer_halo_ssh)
block => domain % blocklist
do while (associated(block))
- call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, &
- block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle % array(:), &
- block % mesh % nCells, block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
-
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle)
block => block % next
end do ! block
call mpas_timer_stop("se halo ssh", timer_halo_ssh)
@@ -631,10 +604,7 @@
call mpas_timer_start("se halo F", .false., timer_halo_f)
block => domain % blocklist
do while (associated(block))
- call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, &
- block % state % time_levs(1) % state % FBtr % array(:), &
- block % mesh % nEdges, block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
-
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(1) % state % FBtr)
block => block % next
end do ! block
call mpas_timer_stop("se halo F", timer_halo_f)
@@ -730,10 +700,7 @@
call mpas_timer_start("se halo h", .false., timer_halo_h)
block => domain % blocklist
do while (associated(block))
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % h % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
-
+ call mpas_dmpar_exch_halo_field(block % tend % h)
block => block % next
end do
call mpas_timer_stop("se halo h", timer_halo_h)
@@ -852,10 +819,7 @@
call mpas_timer_start("se halo tracers", .false., timer_halo_tracers)
block => domain % blocklist
do while (associated(block))
-
- call mpas_dmpar_exch_halo_field3d_real(domain % dminfo, block % state % time_levs(2) % state % tracers % array(:,:,:), &
- block % tend % num_tracers, block % mesh % nVertLevels, block % mesh % nCells, block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
-
+ call mpas_dmpar_exch_halo_field(block % state % time_levs(2) % state % tracers)
block => block % next
end do
call mpas_timer_stop("se halo tracers", timer_halo_tracers)
Modified: branches/omp_blocks/halo/src/core_sw/mpas_sw_time_integration.F
===================================================================
--- branches/omp_blocks/halo/src/core_sw/mpas_sw_time_integration.F        2012-03-12 18:26:51 UTC (rev 1619)
+++ branches/omp_blocks/halo/src/core_sw/mpas_sw_time_integration.F        2012-03-12 19:55:20 UTC (rev 1620)
@@ -63,7 +63,8 @@
integer :: iCell, k
type (block_type), pointer :: block
- type (state_type) :: provis
+ type (state_type), target :: provis
+ type (state_type), pointer :: provis_ptr
integer :: rk_step
@@ -74,6 +75,9 @@
block % mesh % nCells, block % mesh % nEdges, block % mesh % maxEdges, block % mesh % maxEdges2, &
block % mesh % nVertices, block % mesh % vertexDegree, block % mesh % nVertLevels, &
block % mesh % nTracers)
+
+ provis_ptr => provis
+ call mpas_create_state_links(provis_ptr)
!
! Initialize time_levs(2) with state at current time
@@ -118,17 +122,12 @@
block => domain % blocklist
do while (associated(block))
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, provis % pv_edge % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
+
+ call mpas_dmpar_exch_halo_field(provis % pv_edge)
if (config_h_mom_eddy_visc4 > 0.0) then
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, provis % divergence % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, provis % vorticity % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nVertices, &
- block % parinfo % verticesToSend, block % parinfo % verticesToRecv)
+ call mpas_dmpar_exch_halo_field(provis % divergence)
+ call mpas_dmpar_exch_halo_field(provis % vorticity)
end if
block => block % next
@@ -148,15 +147,9 @@
block => domain % blocklist
do while (associated(block))
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % u % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % h % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field3d_real(domain % dminfo, block % tend % tracers % array(:,:,:), &
- block % mesh % nTracers, block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
+ call mpas_dmpar_exch_halo_field(block % tend % u)
+ call mpas_dmpar_exch_halo_field(block % tend % h)
+ call mpas_dmpar_exch_halo_field(block % tend % tracers)
block => block % next
end do
Modified: branches/omp_blocks/halo/src/framework/Makefile
===================================================================
--- branches/omp_blocks/halo/src/framework/Makefile        2012-03-12 18:26:51 UTC (rev 1619)
+++ branches/omp_blocks/halo/src/framework/Makefile        2012-03-12 19:55:20 UTC (rev 1620)
@@ -10,6 +10,7 @@
mpas_timekeeping.o \
mpas_configure.o \
mpas_constants.o \
+ mpas_dmpar_types.o \
mpas_grid_types.o \
mpas_hash.o \
mpas_sort.o \
@@ -31,10 +32,12 @@
mpas_constants.o: mpas_kind_types.o
-mpas_grid_types.o: mpas_dmpar.o
+mpas_dmpar_types.o : mpas_kind_types.o
-mpas_dmpar.o: mpas_sort.o streams.o mpas_kind_types.o
+mpas_grid_types.o: mpas_kind_types.o mpas_dmpar_types.o
+mpas_dmpar.o: mpas_sort.o streams.o mpas_kind_types.o mpas_grid_types.o
+
mpas_sort.o: mpas_kind_types.o
mpas_timekeeping.o: mpas_kind_types.o
Modified: branches/omp_blocks/halo/src/framework/mpas_dmpar.F
===================================================================
--- branches/omp_blocks/halo/src/framework/mpas_dmpar.F        2012-03-12 18:26:51 UTC (rev 1619)
+++ branches/omp_blocks/halo/src/framework/mpas_dmpar.F        2012-03-12 19:55:20 UTC (rev 1620)
@@ -1,6 +1,7 @@
module mpas_dmpar
- use mpas_kind_types
+ use mpas_dmpar_types
+ use mpas_grid_types
use mpas_sort
#ifdef _MPI
@@ -18,24 +19,6 @@
integer, parameter :: BUFSIZE = 6000
- type dm_info
- integer :: nprocs, my_proc_id, comm, info
- logical :: using_external_comm
- end type dm_info
-
-
- type exchange_list
- integer :: procID
- integer :: blockID
- integer :: nlist
- integer, dimension(:), pointer :: list
- type (exchange_list), pointer :: next
- real (kind=RKIND), dimension(:), pointer :: rbuffer
- integer, dimension(:), pointer :: ibuffer
- integer :: reqID
- end type exchange_list
-
-
interface mpas_dmpar_alltoall_field
module procedure mpas_dmpar_alltoall_field1d_integer
module procedure mpas_dmpar_alltoall_field2d_integer
@@ -44,7 +27,30 @@
module procedure mpas_dmpar_alltoall_field3d_real
end interface
+ private :: mpas_dmpar_alltoall_field1d_integer
+ private :: mpas_dmpar_alltoall_field2d_integer
+ private :: mpas_dmpar_alltoall_field1d_real
+ private :: mpas_dmpar_alltoall_field2d_real
+ private :: mpas_dmpar_alltoall_field3d_real
+
+ interface mpas_dmpar_exch_halo_field
+ module procedure mpas_dmpar_exch_halo_field1d_integer
+ module procedure mpas_dmpar_exch_halo_field2d_integer
+ module procedure mpas_dmpar_exch_halo_field3d_integer
+ module procedure mpas_dmpar_exch_halo_field1d_real
+ module procedure mpas_dmpar_exch_halo_field2d_real
+ module procedure mpas_dmpar_exch_halo_field3d_real
+ end interface
+
+ private :: mpas_dmpar_exch_halo_field1d_integer
+ private :: mpas_dmpar_exch_halo_field2d_integer
+ private :: mpas_dmpar_exch_halo_field3d_integer
+ private :: mpas_dmpar_exch_halo_field1d_real
+ private :: mpas_dmpar_exch_halo_field2d_real
+ private :: mpas_dmpar_exch_halo_field3d_real
+
+
contains
@@ -566,7 +572,7 @@
subroutine mpas_dmpar_get_owner_list(dminfo, &
nOwnedList, nNeededList, &
ownedList, neededList, &
- sendList, recvList)
+ sendList, recvList, inOffset)
implicit none
@@ -576,9 +582,10 @@
integer, dimension(nNeededList), intent(in) :: neededList
type (exchange_list), pointer :: sendList
type (exchange_list), pointer :: recvList
+ integer, optional :: inOffset
integer :: i, j, k, kk
- integer :: totalSize, nMesgRecv, nMesgSend, recvNeighbor, sendNeighbor, currentProc
+ integer :: totalSize, nMesgRecv, nMesgSend, recvNeighbor, sendNeighbor, currentProc, offset
integer :: numToSend, numToRecv
integer, dimension(nOwnedList) :: recipientList
integer, dimension(2,nOwnedList) :: ownedListSorted
@@ -586,6 +593,7 @@
type (exchange_list), pointer :: sendListPtr, recvListPtr
integer :: mpi_ierr, mpi_rreq, mpi_sreq
+
#ifdef _MPI
allocate(sendList)
allocate(recvList)
@@ -594,6 +602,11 @@
sendListPtr => sendList
recvListPtr => recvList
+ offset = 0
+ if(present(inOffset)) then
+ offset = inOffset
+ end if
+
do i=1,nOwnedList
ownedListSorted(1,i) = ownedList(i)
ownedListSorted(2,i) = i
@@ -677,7 +690,7 @@
kk = 1
do j=1,nNeededList
if (ownerListIn(j) == -i) then
- recvListPtr % list(kk) = j
+ recvListPtr % list(kk) = j + offset
kk = kk + 1
end if
end do
@@ -1472,22 +1485,28 @@
end subroutine mpas_unpack_recv_buf3d_integer
- subroutine mpas_dmpar_exch_halo_field1d_integer(dminfo, array, dim1, sendList, recvList)
+ subroutine mpas_dmpar_exch_halo_field1d_integer(field, haloIndices)
implicit none
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1
- integer, dimension(*), intent(inout) :: array
- type (exchange_list), dimension(*), intent(inout), target :: sendList, recvList
+ type (field1DInteger), intent(inout) :: field
+ integer, dimension(:), intent(in), optional :: haloIndices
+ type (dm_info) :: dminfo
+ type (exchange_list), pointer :: sendList, recvList
type (exchange_list), pointer :: sendListPtr, recvListPtr
integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
integer :: mpi_ierr
+ integer, dimension(size(field % dims)) :: dims
#ifdef _MPI
- recvListPtr => recvList(1) % next
+ dminfo = field % block % domain % dminfo
+ dims = field % dims
+
+ call AggregateExchangeLists(dminfo % my_proc_id, haloIndices, field % sendList, field % recvList, sendList, recvList)
+
+ recvListPtr => recvList
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
allocate(recvListPtr % ibuffer(recvListPtr % nlist))
@@ -1497,28 +1516,28 @@
recvListPtr => recvListPtr % next
end do
- sendListPtr => sendList(1) % next
+ sendListPtr => sendList
do while (associated(sendListPtr))
if (sendListPtr % procID /= dminfo % my_proc_id) then
allocate(sendListPtr % ibuffer(sendListPtr % nlist))
- call mpas_pack_send_buf1d_integer(dim1, array, sendListPtr, 1, sendListPtr % nlist, sendListPtr % ibuffer, nPacked, lastPackedIdx)
+ call mpas_pack_send_buf1d_integer(dims(1), field % array, sendListPtr, 1, sendListPtr % nlist, sendListPtr % ibuffer, nPacked, lastPackedIdx)
call MPI_Isend(sendListPtr % ibuffer, sendListPtr % nlist, MPI_INTEGERKIND, &
sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
end if
sendListPtr => sendListPtr % next
end do
- recvListPtr => recvList(1) % next
+ recvListPtr => recvList
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- call mpas_unpack_recv_buf1d_integer(dim1, array, recvListPtr, 1, recvListPtr % nlist, recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
+ call mpas_unpack_recv_buf1d_integer(dims(1), field % array, recvListPtr, 1, recvListPtr % nlist, recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
deallocate(recvListPtr % ibuffer)
end if
recvListPtr => recvListPtr % next
end do
- sendListPtr => sendList(1) % next
+ sendListPtr => sendList
do while (associated(sendListPtr))
if (sendListPtr % procID /= dminfo % my_proc_id) then
call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
@@ -1532,26 +1551,32 @@
end subroutine mpas_dmpar_exch_halo_field1d_integer
- subroutine mpas_dmpar_exch_halo_field2d_integer(dminfo, array, dim1, dim2, sendList, recvList)
+ subroutine mpas_dmpar_exch_halo_field2d_integer(field, haloIndices)
implicit none
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1, dim2
- integer, dimension(dim1,*), intent(inout) :: array
- type (exchange_list), dimension(*), intent(inout), target :: sendList, recvList
+ type (field2DInteger), intent(inout) :: field
+ integer, dimension(:), intent(in), optional :: haloIndices
+ type (dm_info) :: dminfo
+ type (exchange_list), pointer :: sendList, recvList
type (exchange_list), pointer :: sendListPtr, recvListPtr
integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
integer :: mpi_ierr
integer :: d2
+ integer, dimension(size(field % dims)) :: dims
#ifdef _MPI
- recvListPtr => recvList(1) % next
+ dminfo = field % block % domain % dminfo
+ dims = field % dims
+
+ call AggregateExchangeLists(dminfo % my_proc_id, haloIndices, field % sendList, field % recvList, sendList, recvList)
+
+ recvListPtr => recvList
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * recvListPtr % nlist
+ d2 = dims(1) * recvListPtr % nlist
allocate(recvListPtr % ibuffer(d2))
call MPI_Irecv(recvListPtr % ibuffer, d2, MPI_INTEGERKIND, &
recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
@@ -1559,30 +1584,30 @@
recvListPtr => recvListPtr % next
end do
- sendListPtr => sendList(1) % next
+ sendListPtr => sendList
do while (associated(sendListPtr))
if (sendListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * sendListPtr % nlist
+ d2 = dims(1) * sendListPtr % nlist
allocate(sendListPtr % ibuffer(d2))
- call mpas_pack_send_buf2d_integer(1, dim1, dim2, array, sendListPtr, 1, d2, sendListPtr % ibuffer, nPacked, lastPackedIdx)
+ call mpas_pack_send_buf2d_integer(1, dims(1), dims(2), field % array, sendListPtr, 1, d2, sendListPtr % ibuffer, nPacked, lastPackedIdx)
call MPI_Isend(sendListPtr % ibuffer, d2, MPI_INTEGERKIND, &
sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
end if
sendListPtr => sendListPtr % next
end do
- recvListPtr => recvList(1) % next
+ recvListPtr => recvList
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d2 = dim1 * recvListPtr % nlist
- call mpas_unpack_recv_buf2d_integer(1, dim1, dim2, array, recvListPtr, 1, d2, recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
+ d2 = dims(1) * recvListPtr % nlist
+ call mpas_unpack_recv_buf2d_integer(1, dims(1), dims(2), field % array, recvListPtr, 1, d2, recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
deallocate(recvListPtr % ibuffer)
end if
recvListPtr => recvListPtr % next
end do
- sendListPtr => sendList(1) % next
+ sendListPtr => sendList
do while (associated(sendListPtr))
if (sendListPtr % procID /= dminfo % my_proc_id) then
call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
@@ -1596,26 +1621,32 @@
end subroutine mpas_dmpar_exch_halo_field2d_integer
- subroutine mpas_dmpar_exch_halo_field3d_integer(dminfo, array, dim1, dim2, dim3, sendList, recvList)
+ subroutine mpas_dmpar_exch_halo_field3d_integer(field, haloIndices)
implicit none
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1, dim2, dim3
- integer, dimension(dim1,dim2,*), intent(inout) :: array
- type (exchange_list), dimension(*), intent(inout), target :: sendList, recvList
+ type (field3DInteger), intent(inout) :: field
+ integer, dimension(:), intent(in), optional :: haloIndices
+ type (dm_info) :: dminfo
+ type (exchange_list), pointer :: sendList, recvList
type (exchange_list), pointer :: sendListPtr, recvListPtr
integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
integer :: mpi_ierr
integer :: d3
+ integer, dimension(size(field % dims)) :: dims
#ifdef _MPI
- recvListPtr => recvList(1) % next
+ dminfo = field % block % domain % dminfo
+ dims = field % dims
+
+ call AggregateExchangeLists(dminfo % my_proc_id, haloIndices, field % sendList, field % recvList, sendList, recvList)
+
+ recvListPtr => recvList
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
- d3 = dim1 * dim2 * recvListPtr % nlist
+ d3 = dims(1) * dims(2) * recvListPtr % nlist
allocate(recvListPtr % ibuffer(d3))
call MPI_Irecv(recvListPtr % ibuffer, d3, MPI_INTEGERKIND, &
recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
@@ -1623,12 +1654,12 @@
recvListPtr => recvListPtr % next
end do
- sendListPtr => sendList(1) % next
+ sendListPtr => sendList
do while (associated(sendListPtr))
if (sendListPtr % procID /= dminfo % my_proc_id) then
- d3 = dim1 * dim2 * sendListPtr % nlist
+ d3 = dims(1) * dims(2) * sendListPtr % nlist
allocate(sendListPtr % ibuffer(d3))
- call mpas_pack_send_buf3d_integer(1, dim1, 1, dim2, dim3, array, sendListPtr, 1, d3, &
+ call mpas_pack_send_buf3d_integer(1, dims(1), 1, dims(2), dims(3), field % array, sendListPtr, 1, d3, &
sendListPtr % ibuffer, nPacked, lastPackedIdx)
call MPI_Isend(sendListPtr % ibuffer, d3, MPI_INTEGERKIND, &
sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
@@ -1636,19 +1667,19 @@
sendListPtr => sendListPtr % next
end do
- recvListPtr => recvList(1) % next
+ recvListPtr => recvList
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d3 = dim1 * dim2 * recvListPtr % nlist
- call mpas_unpack_recv_buf3d_integer(1, dim1, 1, dim2, dim3, array, recvListPtr, 1, d3, &
+ d3 = dims(1) * dims(2) * recvListPtr % nlist
+ call mpas_unpack_recv_buf3d_integer(1, dims(1), 1, dims(2), dims(3), field % array, recvListPtr, 1, d3, &
recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
deallocate(recvListPtr % ibuffer)
end if
recvListPtr => recvListPtr % next
end do
- sendListPtr => sendList(1) % next
+ sendListPtr => sendList
do while (associated(sendListPtr))
if (sendListPtr % procID /= dminfo % my_proc_id) then
call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
@@ -1752,22 +1783,28 @@
end subroutine mpas_unpack_recv_buf3d_real
- subroutine mpas_dmpar_exch_halo_field1d_real(dminfo, array, dim1, sendList, recvList)
+ subroutine mpas_dmpar_exch_halo_field1d_real(field, haloIndices)
implicit none
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1
- real (kind=RKIND), dimension(*), intent(inout) :: array
- type (exchange_list), dimension(*), intent(inout), target :: sendList, recvList
+ type (field1DReal), intent(inout) :: field
+ integer, dimension(:), intent(in), optional :: haloIndices
+ type (dm_info) :: dminfo
+ type (exchange_list), pointer :: sendList, recvList
type (exchange_list), pointer :: sendListPtr, recvListPtr
integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
integer :: mpi_ierr
+ integer, dimension(size(field % dims)) :: dims
#ifdef _MPI
- recvListPtr => recvList(1) % next
+ dminfo = field % block % domain % dminfo
+ dims = field % dims
+
+ call AggregateExchangeLists(dminfo % my_proc_id, haloIndices, field % sendList, field % recvList, sendList, recvList)
+
+ recvListPtr => recvList
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
allocate(recvListPtr % rbuffer(recvListPtr % nlist))
@@ -1776,29 +1813,29 @@
end if
recvListPtr => recvListPtr % next
end do
-
- sendListPtr => sendList(1) % next
+
+ sendListPtr => sendList
do while (associated(sendListPtr))
if (sendListPtr % procID /= dminfo % my_proc_id) then
allocate(sendListPtr % rbuffer(sendListPtr % nlist))
- call mpas_pack_send_buf1d_real(dim1, array, sendListPtr, 1, sendListPtr % nlist, sendListPtr % rbuffer, nPacked, lastPackedIdx)
+ call mpas_pack_send_buf1d_real(dims(1), field % array, sendListPtr, 1, sendListPtr % nlist, sendListPtr % rbuffer, nPacked, lastPackedIdx)
call MPI_Isend(sendListPtr % rbuffer, sendListPtr % nlist, MPI_REALKIND, &
sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
end if
sendListPtr => sendListPtr % next
end do
- recvListPtr => recvList(1) % next
+ recvListPtr => recvList
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- call mpas_unpack_recv_buf1d_real(dim1, array, recvListPtr, 1, recvListPtr % nlist, recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
+ call mpas_unpack_recv_buf1d_real(dims(1), field % array, recvListPtr, 1, recvListPtr % nlist, recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
deallocate(recvListPtr % rbuffer)
end if
recvListPtr => recvListPtr % next
end do
- sendListPtr => sendList(1) % next
+ sendListPtr => sendList
do while (associated(sendListPtr))
if (sendListPtr % procID /= dminfo % my_proc_id) then
call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
@@ -1812,57 +1849,64 @@
end subroutine mpas_dmpar_exch_halo_field1d_real
- subroutine mpas_dmpar_exch_halo_field2d_real(dminfo, array, dim1, dim2, sendList, recvList)
+ subroutine mpas_dmpar_exch_halo_field2d_real(field, haloIndices)
implicit none
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1, dim2
- real (kind=RKIND), dimension(dim1,*), intent(inout) :: array
- type (exchange_list), dimension(*), intent(inout), target :: sendList, recvList
+ type (field2DReal), intent(inout) :: field
+ integer, dimension(:), intent(in), optional :: haloIndices
+ type (dm_info) :: dminfo
+ type (exchange_list), pointer :: sendList, recvList
type (exchange_list), pointer :: sendListPtr, recvListPtr
integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
integer :: mpi_ierr
integer :: d2
+ integer, dimension(size(field % dims)) :: dims
+
#ifdef _MPI
- recvListPtr => recvList(1) % next
+ dminfo = field % block % domain % dminfo
+ dims = field % dims
+
+ call AggregateExchangeLists(dminfo % my_proc_id, haloIndices, field % sendList, field % recvList, sendList, recvList)
+
+ recvListPtr => recvList
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * recvListPtr % nlist
+ d2 = dims(1) * recvListPtr % nlist
allocate(recvListPtr % rbuffer(d2))
call MPI_Irecv(recvListPtr % rbuffer, d2, MPI_REALKIND, &
recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
end if
recvListPtr => recvListPtr % next
end do
-
- sendListPtr => sendList(1) % next
+
+ sendListPtr => sendList
do while (associated(sendListPtr))
if (sendListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * sendListPtr % nlist
+ d2 = dims(1) * sendListPtr % nlist
allocate(sendListPtr % rbuffer(d2))
- call mpas_pack_send_buf2d_real(1, dim1, dim2, array, sendListPtr, 1, d2, sendListPtr % rbuffer, nPacked, lastPackedIdx)
+ call mpas_pack_send_buf2d_real(1, dims(1), dims(2), field % array, sendListPtr, 1, d2, sendListPtr % rbuffer, nPacked, lastPackedIdx)
call MPI_Isend(sendListPtr % rbuffer, d2, MPI_REALKIND, &
sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
end if
sendListPtr => sendListPtr % next
end do
- recvListPtr => recvList(1) % next
+ recvListPtr => recvList
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d2 = dim1 * recvListPtr % nlist
- call mpas_unpack_recv_buf2d_real(1, dim1, dim2, array, recvListPtr, 1, d2, recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
+ d2 = dims(1) * recvListPtr % nlist
+ call mpas_unpack_recv_buf2d_real(1, dims(1), dims(2), field % array, recvListPtr, 1, d2, recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
deallocate(recvListPtr % rbuffer)
end if
recvListPtr => recvListPtr % next
end do
-
- sendListPtr => sendList(1) % next
+
+ sendListPtr => sendList
do while (associated(sendListPtr))
if (sendListPtr % procID /= dminfo % my_proc_id) then
call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
@@ -1876,26 +1920,32 @@
end subroutine mpas_dmpar_exch_halo_field2d_real
- subroutine mpas_dmpar_exch_halo_field3d_real(dminfo, array, dim1, dim2, dim3, sendList, recvList)
+ subroutine mpas_dmpar_exch_halo_field3d_real(field, haloIndices)
implicit none
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1, dim2, dim3
- real (kind=RKIND), dimension(dim1,dim2,*), intent(inout) :: array
- type (exchange_list), dimension(*), intent(inout), target :: sendList, recvList
+ type (field3DReal), intent(inout) :: field
+ integer, dimension(:), intent(in), optional :: haloIndices
+ type (dm_info) :: dminfo
+ type (exchange_list), pointer :: sendList, recvList
type (exchange_list), pointer :: sendListPtr, recvListPtr
integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
integer :: mpi_ierr
integer :: d3
+ integer, dimension(size(field % dims)) :: dims
#ifdef _MPI
- recvListPtr => recvList(1) % next
+ dminfo = field % block % domain % dminfo
+ dims = field % dims
+
+ call AggregateExchangeLists(dminfo % my_proc_id, haloIndices, field % sendList, field % recvList, sendList, recvList)
+
+ recvListPtr => recvList
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
- d3 = dim1 * dim2 * recvListPtr % nlist
+ d3 = dims(1) * dims(2) * recvListPtr % nlist
allocate(recvListPtr % rbuffer(d3))
call MPI_Irecv(recvListPtr % rbuffer, d3, MPI_REALKIND, &
recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
@@ -1903,12 +1953,12 @@
recvListPtr => recvListPtr % next
end do
- sendListPtr => sendList(1) % next
+ sendListPtr => sendList
do while (associated(sendListPtr))
if (sendListPtr % procID /= dminfo % my_proc_id) then
- d3 = dim1 * dim2 * sendListPtr % nlist
+ d3 = dims(1) * dims(2) * sendListPtr % nlist
allocate(sendListPtr % rbuffer(d3))
- call mpas_pack_send_buf3d_real(1, dim1, 1, dim2, dim3, array, sendListPtr, 1, d3, &
+ call mpas_pack_send_buf3d_real(1, dims(1), 1, dims(2), dims(3), field % array, sendListPtr, 1, d3, &
sendListPtr % rbuffer, nPacked, lastPackedIdx)
call MPI_Isend(sendListPtr % rbuffer, d3, MPI_REALKIND, &
sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
@@ -1916,19 +1966,19 @@
sendListPtr => sendListPtr % next
end do
- recvListPtr => recvList(1) % next
+ recvListPtr => recvList
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d3 = dim1 * dim2 * recvListPtr % nlist
- call mpas_unpack_recv_buf3d_real(1, dim1, 1, dim2, dim3, array, recvListPtr, 1, d3, &
+ d3 = dims(1) * dims(2) * recvListPtr % nlist
+ call mpas_unpack_recv_buf3d_real(1, dims(1), 1, dims(2), dims(3), field % array, recvListPtr, 1, d3, &
recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
deallocate(recvListPtr % rbuffer)
end if
recvListPtr => recvListPtr % next
end do
- sendListPtr => sendList(1) % next
+ sendListPtr => sendList
do while (associated(sendListPtr))
if (sendListPtr % procID /= dminfo % my_proc_id) then
call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
@@ -1942,4 +1992,161 @@
end subroutine mpas_dmpar_exch_halo_field3d_real
+ subroutine AggregateExchangeLists(myProcID, haloIndicesIn, sendListArray, recvListArray, aggregateSendList, aggregateRecvList)
+
+ implicit none
+
+ !--- in variables ---!
+ integer, intent(in) :: myProcID
+ integer, dimension(:), intent(in), target, optional :: haloIndicesIn
+ type (exchange_list), dimension(:), pointer :: sendListArray, recvListArray
+
+ !--- out variables ---!
+ type (exchange_list), pointer :: aggregateSendList, aggregateRecvList
+
+ !--- local variables ---!
+ integer :: i, j
+ integer, dimension(:), pointer :: haloIndices
+ type (exchange_list), pointer :: inListPtr, aggListPtr
+ logical :: blockAdded
+ logical :: listInitialized
+
+ if (present(haloIndicesIn)) then
+ haloIndices => haloIndicesIn
+ else
+ allocate(haloIndices(size(sendListArray)))
+ do i=1, size(haloIndices)
+ haloIndices(i) = i
+ end do
+ end if
+
+ nullify(aggregateSendList)
+ nullify(aggregateRecvList)
+
+ do i=1, size(haloIndices)
+
+ inListPtr => sendListArray(haloIndices(i)) % next
+ do while(associated(inListPtr))
+
+ blockAdded = .false.
+ aggListPtr => aggregateSendList
+
+ do while(associated(aggListPtr))
+ if(inListPtr % blockID == aggListPtr % blockID) then
+ if(inListPtr % procID .ne. myProcID) then
+ call MergeIntegerArrays(aggListPtr % list, aggListPtr % nlist, inListPtr % list)
+ end if
+ blockAdded = .true.
+ exit
+ end if
+ aggListPtr => aggListPtr % next
+ end do
+
+ if(.not. blockAdded) then
+
+ if (.not. associated(aggregateSendList)) then
+ allocate(aggregateSendList)
+ nullify(aggregateSendList % next)
+ aggListPtr => aggregateSendList
+ else
+ aggListPtr => aggregateSendList
+ do while(associated(aggListPtr % next))
+ aggListPtr => aggListPtr % next
+ end do
+ allocate(aggListPtr % next)
+ aggListPtr => aggListPtr % next
+ end if
+
+ nullify(aggListPtr % next)
+ aggListPtr % procID = inListPtr % procID
+ aggListPtr % blockID = inListPtr % blockID
+ aggListPtr % nlist = inListPtr % nlist
+ allocate(aggListPtr % list(inListPtr % nlist))
+ aggListPtr % list = inListPtr % list
+ aggListPtr % reqID = inListPtr % reqID
+
+ end if
+
+ inListPtr => inListPtr % next
+ end do
+
+
+ inListPtr => recvListArray(haloIndices(i)) % next
+ do while(associated(inListPtr))
+
+ blockAdded = .false.
+ aggListPtr => aggregateRecvList
+ do while(associated(aggListPtr))
+ if(inListPtr % blockID == aggListPtr % blockID) then
+ if(inListPtr % procID .ne. myProcID) then
+ call MergeIntegerArrays(aggListPtr % list, aggListPtr % nlist, inListPtr % list)
+ end if
+ blockAdded = .true.
+ exit
+ end if
+ aggListPtr => aggListPtr % next
+ end do
+
+ if(.not. blockAdded) then
+
+ if (.not. associated(aggregateRecvList)) then
+ allocate(aggregateRecvList)
+ nullify(aggregateRecvList % next)
+ aggListPtr => aggregateRecvList
+ else
+ aggListPtr => aggregateRecvList
+ do while(associated(aggListPtr % next))
+ aggListPtr => aggListPtr % next
+ end do
+
+ allocate(aggListPtr % next)
+ aggListPtr => aggListPtr % next
+ nullify(aggListPtr % next)
+ end if
+
+ aggListPtr % procID = inListPtr % procID
+ aggListPtr % blockID = inListPtr % blockID
+ aggListPtr % nlist = inListPtr % nlist
+ allocate(aggListPtr % list(inListPtr % nlist))
+ aggListPtr % list = inListPtr % list
+ aggListPtr % reqID = inListPtr % reqID
+
+ end if
+
+ inListPtr => inListPtr % next
+ end do
+
+ end do
+
+ end subroutine AggregateExchangeLists
+
+
+ subroutine MergeIntegerArrays(mergeArray, nMergeArray, dataToAppend)
+
+ implicit none
+
+ !--- inout variables ---!
+ integer, dimension(:), pointer :: mergeArray
+ integer, intent(inout) :: nMergeArray
+
+ !--- in variables ---!
+ integer, dimension(:), pointer :: dataToAppend
+
+ !--- local variables ---!
+ integer :: nDataToAppend, newSize
+ integer, dimension(nMergeArray) :: mergeArrayCopy
+
+
+ nDataToAppend = size(dataToAppend)
+ newSize = nMergeArray + nDataToAppend
+ mergeArrayCopy = mergeArray
+ deallocate(mergeArray)
+ allocate(mergeArray(newSize))
+ mergeArray(1:nMergeArray) = mergeArrayCopy
+ mergeArray(nMergeArray+1:newSize) = dataToAppend
+ nMergeArray = newSize
+
+ end subroutine MergeIntegerArrays
+
+
end module mpas_dmpar
Added: branches/omp_blocks/halo/src/framework/mpas_dmpar_types.F
===================================================================
--- branches/omp_blocks/halo/src/framework/mpas_dmpar_types.F         (rev 0)
+++ branches/omp_blocks/halo/src/framework/mpas_dmpar_types.F        2012-03-12 19:55:20 UTC (rev 1620)
@@ -0,0 +1,25 @@
+module mpas_dmpar_types
+
+ use mpas_kind_types
+
+ type dm_info
+ integer :: nprocs, my_proc_id, comm, info
+ logical :: using_external_comm
+ end type dm_info
+
+
+ type exchange_list
+ integer :: procID
+ integer :: blockID
+ integer :: nlist
+ integer, dimension(:), pointer :: list
+ type (exchange_list), pointer :: next
+ real (kind=RKIND), dimension(:), pointer :: rbuffer
+ integer, dimension(:), pointer :: ibuffer
+ integer :: reqID
+
+ end type exchange_list
+
+ contains
+
+end module mpas_dmpar_types
Modified: branches/omp_blocks/halo/src/framework/mpas_grid_types.F
===================================================================
--- branches/omp_blocks/halo/src/framework/mpas_grid_types.F        2012-03-12 18:26:51 UTC (rev 1619)
+++ branches/omp_blocks/halo/src/framework/mpas_grid_types.F        2012-03-12 19:55:20 UTC (rev 1620)
@@ -1,6 +1,7 @@
module mpas_grid_types
- use mpas_dmpar
+ use mpas_kind_types
+ use mpas_dmpar_types
integer, parameter :: nTimeLevs = 2
@@ -75,6 +76,20 @@
! Derived type for storing fields
+ type field3DInteger
+ type (block_type), pointer :: block
+ integer, dimension(:,:,:), pointer :: array
+ type (io_info), pointer :: ioinfo
+ integer, dimension(3) :: dims
+ logical :: timeDimension
+ type (field3DInteger), pointer :: prev, next
+ type (exchange_list), dimension(:), pointer :: sendList
+ type (exchange_list), dimension(:), pointer :: recvList
+ type (exchange_list), dimension(:), pointer :: copyList
+ end type field3DInteger
+
+
+ ! Derived type for storing fields
type field2DInteger
type (block_type), pointer :: block
integer, dimension(:,:), pointer :: array
@@ -132,6 +147,8 @@
! Derived type for storing grid meta-data
type mesh_type
+ type (block_type), pointer :: block
+
#include "field_dimensions.inc"
logical :: on_a_sphere
@@ -215,7 +232,7 @@
#include "dim_dummy_decls.inc"
- integer, parameter :: nHaloLayers = 1 ! Currently, the only halo layer actually encompasses both halo layers
+ integer, parameter :: nHaloLayers = 2
integer :: i
@@ -230,13 +247,13 @@
allocate(b % parinfo % cellsToRecv(nHaloLayers))
allocate(b % parinfo % cellsToCopy(nHaloLayers))
- allocate(b % parinfo % edgesToSend(nHaloLayers))
- allocate(b % parinfo % edgesToRecv(nHaloLayers))
- allocate(b % parinfo % edgesToCopy(nHaloLayers))
+ allocate(b % parinfo % edgesToSend(nHaloLayers + 1)) ! first index is owned-cell edges
+ allocate(b % parinfo % edgesToRecv(nHaloLayers + 1)) ! first index is owned-cell edges
+ allocate(b % parinfo % edgesToCopy(nHaloLayers + 1)) ! first index is owned-cell edges
- allocate(b % parinfo % verticesToSend(nHaloLayers))
- allocate(b % parinfo % verticesToRecv(nHaloLayers))
- allocate(b % parinfo % verticesToCopy(nHaloLayers))
+ allocate(b % parinfo % verticesToSend(nHaloLayers + 1)) ! first index is owned-cell vertices
+ allocate(b % parinfo % verticesToRecv(nHaloLayers + 1)) ! first index is owned-cell vertices
+ allocate(b % parinfo % verticesToCopy(nHaloLayers + 1)) ! first index is owned-cell vertices
b % domain => dom
@@ -306,14 +323,7 @@
#include "group_shift_level_routines.inc"
- subroutine mpas_create_field_links(b)
-
- implicit none
-
- type (block_type), pointer :: b
-
#include "field_links.inc"
- end subroutine mpas_create_field_links
end module mpas_grid_types
Modified: branches/omp_blocks/halo/src/framework/mpas_io_input.F
===================================================================
--- branches/omp_blocks/halo/src/framework/mpas_io_input.F        2012-03-12 18:26:51 UTC (rev 1619)
+++ branches/omp_blocks/halo/src/framework/mpas_io_input.F        2012-03-12 19:55:20 UTC (rev 1620)
@@ -93,12 +93,15 @@
type (field1DChar) :: xtime
- integer, dimension(:), pointer :: indexToCellID_0Halo
- integer, dimension(:), pointer :: nEdgesOnCell_0Halo
+ integer, dimension(:), pointer :: indexToCellID_0Halo
+ integer, dimension(:), pointer :: nEdgesOnCell_0Halo
integer, dimension(:,:), pointer :: cellsOnCell_0Halo
-
+
+ integer, dimension(:), pointer :: nEdgesOnCell_2Halo
+
integer, dimension(:,:), pointer :: edgesOnCell_2Halo
integer, dimension(:,:), pointer :: verticesOnCell_2Halo
+
integer, dimension(:,:), pointer :: cellsOnEdge_2Halo
integer, dimension(:,:), pointer :: cellsOnVertex_2Halo
@@ -131,6 +134,22 @@
character(len=32) :: timeStamp
character(len=1024) :: filename
+ integer, parameter :: nHalos = 2
+ integer, dimension(nHalos+1) :: nCellsCumulative ! own cells, halo 1 cells, halo 2 cells
+ integer, dimension(nHalos+2) :: nEdgesCumulative ! own edges, own cell's edges, halo 1 edges, halo 2 edges
+ integer, dimension(nHalos+2) :: nVerticesCumulative ! own vertices, own cell's vertices, halo 1 vertices, halo 2 vertices
+
+ integer, dimension(nHalos) :: nCellsHalo ! halo 1 cells, halo 2 cells
+ integer, dimension(nHalos+1) :: nEdgesHalo ! own cell's edges, halo 1 edges, halo 2 edges
+ integer, dimension(nHalos+1) :: nVerticesHalo ! own cell's vertices, halo 1 vertices, halo 2 vertices
+
+ integer, dimension(:), pointer :: tempIDs
+ integer :: ntempIDs, offset
+
+ integer :: nHalo, nOwnCells, nOwnEdges, nOwnVertices, cellCount, edgeCount, vertexCount, iEdge, iVertex
+ type (hashtable) :: edgeHash, vertexHash
+
+
if (config_do_restart) then
! this get followed by set is to ensure that the time is in standard format
@@ -158,10 +177,10 @@
! from the input file
!
call mpas_dmpar_get_index_range(domain % dminfo, 1, nCells, readCellStart, readCellEnd)
- nReadCells = readCellEnd - readCellStart + 1
+ nReadCells = readCellEnd - readCellStart + 1
call mpas_dmpar_get_index_range(domain % dminfo, 1, nEdges, readEdgeStart, readEdgeEnd)
- nReadEdges = readEdgeEnd - readEdgeStart + 1
+ nReadEdges = readEdgeEnd - readEdgeStart + 1
call mpas_dmpar_get_index_range(domain % dminfo, 1, nVertices, readVertexStart, readVertexEnd)
nReadVertices = readVertexEnd - readVertexStart + 1
@@ -490,6 +509,8 @@
block_graph_2Halo % nVertices = block_graph_0Halo % nVertices
block_graph_2Halo % ghostStart = block_graph_2Halo % nVertices + 1
+ nOwnCells = block_graph_2Halo % nVertices
+
#ifdef HAVE_ZOLTAN
#ifdef _MPI
!! For now, only use Zoltan with MPI
@@ -525,6 +546,7 @@
! on each cell and which vertices are on each cell from the processes that read these
! fields for each cell to the processes that own the cells
!
+ allocate(nEdgesOnCell_2Halo(block_graph_2Halo % nVerticesTotal))
allocate(edgesOnCell_2Halo(maxEdges, block_graph_2Halo % nVerticesTotal))
allocate(verticesOnCell_2Halo(maxEdges, block_graph_2Halo % nVerticesTotal))
@@ -533,6 +555,10 @@
indexToCellIDField % array, block_graph_2Halo % vertexID, &
sendCellList, recvCellList)
+ call mpas_dmpar_alltoall_field(domain % dminfo, nEdgesOnCellField % array, nEdgesOnCell_2Halo, &
+ size(indexToCellIDField % array), size(local_cell_list), &
+ sendCellList, recvCellList)
+
call mpas_dmpar_alltoall_field(domain % dminfo, edgesOnCellField % array, edgesOnCell_2Halo, &
maxEdges, nReadCells, block_graph_2Halo % nVerticesTotal, &
sendCellList, recvCellList)
@@ -577,15 +603,90 @@
sendVertexList, recvVertexList)
- call mpas_block_decomp_partitioned_edge_list(block_graph_2Halo % nVertices, &
- block_graph_2Halo % vertexID(1:block_graph_2Halo % nVertices), &
+ call mpas_block_decomp_partitioned_edge_list(nOwnCells, &
+ block_graph_2Halo % vertexID(1:nOwnCells), &
2, nlocal_edges, cellsOnEdge_2Halo, local_edge_list, ghostEdgeStart)
- call mpas_block_decomp_partitioned_edge_list(block_graph_2Halo % nVertices, &
- block_graph_2Halo % vertexID(1:block_graph_2Halo % nVertices), &
+
+ call mpas_block_decomp_partitioned_edge_list(nOwnCells, &
+ block_graph_2Halo % vertexID(1:nOwnCells), &
vertexDegree, nlocal_vertices, cellsOnVertex_2Halo, local_vertex_list, ghostVertexStart)
+ !------- set owned and halo cell indices -------!
+
+ nCellsCumulative(1) = nOwnCells
+ nCellsCumulative(2) = block_graph_1Halo % nVerticesTotal
+ nCellsCumulative(3) = block_graph_2Halo % nVerticesTotal
- ! At this point, local_edge_list(1;ghostEdgeStart-1) contains all of the owned edges for this block
+ !------- determine the perimeter and owned edges of own cells and halos -------!
+
+ nOwnEdges = ghostEdgeStart-1
+ nOwnVertices = ghostVertexStart-1
+
+ ! skip the own edges found at the beginning of local_edge_list
+ call mpas_hash_init(edgeHash)
+ do i=1,nOwnEdges
+ call mpas_hash_insert(edgeHash, local_edge_list(i))
+ end do
+
+ ! skip the own vertices found at the beginning of local_vertex_list
+ call mpas_hash_init(vertexHash)
+ do i=1,nOwnVertices
+ call mpas_hash_insert(vertexHash, local_vertex_list(i))
+ end do
+
+ cellCount = 1 !tracks the index of the local cell array
+ edgeCount = nOwnEdges !tracks where to insert the next local edge
+ vertexCount = nOwnVertices !tracks where to insert the next local vertex
+
+ nEdgesCumulative(1) = nOwnEdges
+ nVerticesCumulative(1) = nOwnVertices
+
+ ! Order the local_edge_list and local_vertex_list accordingly and set the bounds of each perimeter
+ do i = 1, nHalos + 1 ! for the own cells and each halo...
+ do j = cellCount, nCellsCumulative(i)
+
+ ! the number of edges on a cell is the same as the number of vertices, and therefore
+ ! nEdgesOnCell_2Halo(j) is the correct upper bound for both edges and vertices on the cell
+ do k = 1, nEdgesOnCell_2Halo(j)
+ iEdge = edgesOnCell_2Halo(k,j)
+ if (.not. mpas_hash_search(edgeHash, iEdge)) then
+ edgeCount = edgeCount + 1
+ local_edge_list(edgeCount) = iEdge
+ call mpas_hash_insert(edgeHash, iEdge)
+ end if
+
+ iVertex = verticesOnCell_2Halo(k,j)
+ if (.not. mpas_hash_search(vertexHash, iVertex)) then
+ vertexCount = vertexCount + 1
+ local_vertex_list(vertexCount) = iVertex
+ call mpas_hash_insert(vertexHash, iVertex)
+ end if
+ end do
+
+ end do
+
+ cellCount = nCellsCumulative(i) + 1
+ nEdgesCumulative(i+1) = edgeCount
+ nVerticesCumulative(i+1) = vertexCount
+ end do
+
+ do i = 1, nHalos
+ nCellsHalo(i) = nCellsCumulative(i+1) - nCellsCumulative(i)
+ end do
+
+ do i = 1, nHalos + 1
+ nEdgesHalo(i) = nEdgesCumulative(i+1) - nEdgesCumulative(i)
+ end do
+
+ do i = 1, nHalos + 1
+ nVerticesHalo(i) = nVerticesCumulative(i+1) - nVerticesCumulative(i)
+ end do
+
+ call mpas_hash_destroy(edgeHash)
+ call mpas_hash_destroy(vertexHash)
+
+
+ ! At this point, local_edge_list(1:nOwnEdges) contains all of the owned edges for this block
! and local_edge_list(ghostEdgeStart:nlocal_edges) contains all of the ghost edges
! At this point, local_vertex_list(1;ghostVertexStart-1) contains all of the owned vertices for this block
@@ -657,13 +758,13 @@
!!!!!!!!!!!!!!!!!!
!! Reorder edges
!!!!!!!!!!!!!!!!!!
- call mpas_zoltan_order_loc_hsfc_edges(ghostEdgeStart-1,local_edge_list,3,xEdge,yEdge,zEdge)
+ call mpas_zoltan_order_loc_hsfc_edges(nOwnEdges,local_edge_list,3,xEdge,yEdge,zEdge)
!!!!!!!!!!!!!!!!!!
!!!!!!!!!!!!!!!!!!
!! Reorder vertices
!!!!!!!!!!!!!!!!!!
- call mpas_zoltan_order_loc_hsfc_verts(ghostVertexStart-1,local_vertex_list,3,xVertex,yVertex,zVertex)
+ call mpas_zoltan_order_loc_hsfc_verts(nOwnVertices,local_vertex_list,3,xVertex,yVertex,zVertex)
!!!!!!!!!!!!!!!!!!
deallocate(sendEdgeList % list)
@@ -951,23 +1052,102 @@
! NB: The next pointer in each element of, e.g., cellsToSend, acts as the head pointer of
! the list, since Fortran does not allow arrays of pointers
!
+
+ !--------- Create Cell Exchange Lists ---------!
+
+ ! pass in neededList of ownedCells and halo layer 1 cells
call mpas_dmpar_get_owner_list(domain % dminfo, &
- block_graph_2Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
- block_graph_2Halo % vertexID(1:block_graph_2Halo % nVertices), block_graph_2Halo % vertexID, &
+ nOwnCells, nCellsCumulative(2), &
+ block_graph_2Halo % vertexID(1:nOwnCells), block_graph_2Halo % vertexID(1 : nCellsCumulative(2)), &
domain % blocklist % parinfo % cellsToSend(1) % next, domain % blocklist % parinfo % cellsToRecv(1) % next)
+ ! pass in neededList of ownedCells and halo layer 2 cells; an offset equal to the number of halo 1 cells is required
+ offset = nCellsHalo(1)
+ nTempIDs = nOwnCells + nCellsHalo(2)
+ allocate(tempIDs(nTempIDs))
+ tempIDs(1:nOwnCells) = block_graph_2Halo % vertexID(1:nOwnCells)
+ tempIDs(nOwnCells+1:nTempIDs) = block_graph_2Halo % vertexID(nCellsCumulative(2)+1 : nCellsCumulative(3))
call mpas_dmpar_get_owner_list(domain % dminfo, &
- ghostEdgeStart-1, nlocal_edges, &
- local_edge_list(1:ghostEdgeStart-1), local_edge_list, &
+ nOwnCells, nTempIDs, &
+ block_graph_2Halo % vertexID(1:nOwnCells), tempIDs, &
+ domain % blocklist % parinfo % cellsToSend(2) % next, domain % blocklist % parinfo % cellsToRecv(2) % next, &
+ offset)
+ deallocate(tempIDs)
+
+
+ !--------- Create Edge Exchange Lists ---------!
+
+ ! pass in neededList of ownedEdges and ownedCell perimeter edges
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ nOwnEdges, nEdgesCumulative(2), &
+ local_edge_list(1:nOwnEdges), local_edge_list(1 : nEdgesCumulative(2)), &
domain % blocklist % parinfo % edgesToSend(1) % next, domain % blocklist % parinfo % edgesToRecv(1) % next)
+ ! pass in neededList of owned edges and yet-to-be-included edges from halo 1 cells; an offset equal to the number of ownedCell perimeter edges is required
+ offset = nEdgesHalo(1)
+ nTempIDs = nOwnEdges + nEdgesHalo(2)
+ allocate(tempIDs(nTempIDs))
+ tempIDs(1:nOwnEdges) = local_edge_list(1:nOwnEdges)
+ tempIDs(nOwnEdges+1:nTempIDs) = local_edge_list(nEdgesCumulative(2)+1 : nEdgesCumulative(3))
call mpas_dmpar_get_owner_list(domain % dminfo, &
- ghostVertexStart-1, nlocal_vertices, &
- local_vertex_list(1:ghostVertexStart-1), local_vertex_list, &
+ nOwnEdges, nTempIDs, &
+ local_edge_list(1:nOwnEdges), tempIDs, &
+ domain % blocklist % parinfo % edgesToSend(2) % next, domain % blocklist % parinfo % edgesToRecv(2) % next, &
+ offset)
+ deallocate(tempIDs)
+
+ ! pass in neededList of owned edges and yet-to-be-included edges from halo 2 cells; an offset equal to the number of ownedCell perimeter edges plus halo 1 edges is required
+ offset = nEdgesHalo(1) + nEdgesHalo(2)
+ nTempIDs = nOwnEdges + nEdgesHalo(3)
+ allocate(tempIDs(nTempIDs))
+ tempIDs(1:nOwnEdges) = local_edge_list(1:nOwnEdges)
+ tempIDs(nOwnEdges+1:nTempIDs) = local_edge_list(nEdgesCumulative(3)+1 : nEdgesCumulative(4))
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ nOwnEdges, nTempIDs, &
+ local_edge_list(1:nOwnEdges), tempIDs, &
+ domain % blocklist % parinfo % edgesToSend(3) % next, domain % blocklist % parinfo % edgesToRecv(3) % next, &
+ offset)
+ deallocate(tempIDs)
+
+
+ !--------- Create Vertex Exchange Lists ---------!
+
+
+ ! pass in neededList of ownedVertices and ownedCell perimeter vertices
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ nOwnVertices, nVerticesCumulative(2), &
+ local_vertex_list(1:nOwnVertices), local_vertex_list(1 : nVerticesCumulative(2)), &
domain % blocklist % parinfo % verticesToSend(1) % next, domain % blocklist % parinfo % verticesToRecv(1) % next)
- domain % blocklist % mesh % nCellsSolve = block_graph_2Halo % nVertices
- domain % blocklist % mesh % nEdgesSolve = ghostEdgeStart-1
+ ! pass in neededList of owned vertices and yet-to-be-included vertices from halo 1 cells; an offset equal to the number of ownedCell perimeter vertices is required
+ offset = nVerticesHalo(1)
+ nTempIDs = nOwnVertices + nVerticesHalo(2)
+ allocate(tempIDs(nTempIDs))
+ tempIDs(1:nOwnVertices) = local_vertex_list(1:nOwnVertices)
+ tempIDs(nOwnVertices+1:nTempIDs) = local_vertex_list(nVerticesCumulative(2)+1 : nVerticesCumulative(3))
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ nOwnVertices, nTempIDs, &
+ local_vertex_list(1:nOwnVertices), tempIDs, &
+ domain % blocklist % parinfo % verticesToSend(2) % next, domain % blocklist % parinfo % verticesToRecv(2) % next, &
+ offset)
+ deallocate(tempIDs)
+
+ ! pass in neededList of owned vertices and yet-to-be-included vertices from halo 2 cells; an offset equal to the number of ownedCell perimeter vertices plus halo 1 vertices is required
+ offset = nVerticesHalo(1) + nVerticesHalo(2)
+ nTempIDs = nOwnVertices + nVerticesHalo(3)
+ allocate(tempIDs(nTempIDs))
+ tempIDs(1:nOwnVertices) = local_vertex_list(1:nOwnVertices)
+ tempIDs(nOwnVertices+1:nTempIDs) = local_vertex_list(nVerticesCumulative(3)+1 : nVerticesCumulative(4))
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ nOwnVertices, nTempIDs, &
+ local_vertex_list(1:nOwnVertices), tempIDs, &
+ domain % blocklist % parinfo % verticesToSend(3) % next, domain % blocklist % parinfo % verticesToRecv(3) % next, &
+ offset)
+ deallocate(tempIDs)
+
+
+ domain % blocklist % mesh % nCellsSolve = nOwnCells
+ domain % blocklist % mesh % nEdgesSolve = nOwnEdges
domain % blocklist % mesh % nVerticesSolve = ghostVertexStart-1
domain % blocklist % mesh % nVertLevelsSolve = domain % blocklist % mesh % nVertLevels ! No vertical decomp yet...
@@ -1011,6 +1191,7 @@
deallocate(indexToCellID_0Halo)
deallocate(cellsOnEdge_2Halo)
deallocate(cellsOnVertex_2Halo)
+ deallocate(nEdgesOnCell_2Halo)
deallocate(edgesOnCell_2Halo)
deallocate(verticesOnCell_2Halo)
deallocate(block_graph_0Halo % vertexID)
Modified: branches/omp_blocks/halo/src/registry/gen_inc.c
===================================================================
--- branches/omp_blocks/halo/src/registry/gen_inc.c        2012-03-12 18:26:51 UTC (rev 1619)
+++ branches/omp_blocks/halo/src/registry/gen_inc.c        2012-03-12 19:55:20 UTC (rev 1620)
@@ -373,6 +373,8 @@
if (strncmp(group_ptr->name, "mesh", 1024)) {
fortprintf(fd, " type %s_type</font>
<font color="blue">", group_ptr->name);
+ fortprintf(fd, " type (block_type), pointer :: block</font>
<font color="gray">");
+
var_list_ptr = group_ptr->vlist;
memcpy(super_array, var_list_ptr->var->super_array, 1024);
i = 1;
@@ -544,6 +546,8 @@
fortprintf(fd, "#include \"dim_dummy_decls.inc\"</font>
<font color="black">");
fortprintf(fd, "</font>
<font color="blue">");
+ fortprintf(fd, " %s %% block => b</font>
<font color="gray">", group_ptr->name);
+
if (!strncmp(group_ptr->name, "mesh", 1024)) {
dim_ptr = dims;
while (dim_ptr) {
@@ -551,6 +555,7 @@
if (dim_ptr->constant_value < 0 && dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) fortprintf(fd, " %s %% %s = %s\n", group_ptr->name, dim_ptr->name_in_file, dim_ptr->name_in_file);
dim_ptr = dim_ptr->next;
}
+
fortprintf(fd, "</font>
<font color="gray">");
}
@@ -854,9 +859,76 @@
/* Definitions of deallocate subroutines */
fd = fopen("field_links.inc", "w");
+
+ /* subroutine to call link subroutine for every field type */
+ fortprintf(fd, " subroutine mpas_create_field_links(b)</font>
<font color="black"></font>
<font color="blue">");
+ fortprintf(fd, " implicit none</font>
<font color="blue">");
+ fortprintf(fd, " type (block_type), pointer :: b</font>
<font color="black"></font>
<font color="blue">");
group_ptr = groups;
+ while (group_ptr)
+ {
+ var_list_ptr = group_ptr->vlist;
+ var_list_ptr = var_list_ptr->next;
+ var_ptr = var_list_ptr->var;
+
+
+ int ntime_levs = 1;
+
+ if (strncmp(var_ptr->super_array, "-", 1024) != 0)
+ {
+ memcpy(super_array, var_ptr->super_array, 1024);
+ memcpy(array_class, var_ptr->array_class, 1024);
+ while (var_list_ptr && strncmp(super_array, var_list_ptr->var->super_array, 1024) == 0)
+ {
+ var_list_ptr2 = var_list_ptr;
+ var_list_ptr = var_list_ptr->next;
+ }
+ var_ptr2 = var_list_ptr2->var;
+ get_outer_dim(var_ptr2, outer_dim);
+ ntime_levs = var_ptr2->ntime_levs;
+
+ if(ntime_levs > 1)
+ {
+ for(i=1; i<=ntime_levs; i++)
+ {
+ fortprintf(fd, " call mpas_create_%s_links(b %% %s %% time_levs(%i) %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name, i, group_ptr->name);
+ }        
+ }
+ else
+ {
+ fortprintf(fd, " call mpas_create_%s_links(b %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name);
+ }
+ }
+ else if (var_ptr->ndims > 0)
+ {
+ get_outer_dim(var_ptr, outer_dim);
+ ntime_levs = var_ptr->ntime_levs;
+
+ if(ntime_levs > 1)
+ {
+ for(i=1; i<=ntime_levs; i++)
+ {
+ fortprintf(fd, " call mpas_create_%s_links(b %% %s %% time_levs(%i) %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name, i, group_ptr->name);
+ }        
+ }
+ else
+ {
+ fortprintf(fd, " call mpas_create_%s_links(b %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name);
+ }
+ }
+
+ group_ptr = group_ptr->next;
+ }
+ fortprintf(fd, "</font>
<font color="black"> end subroutine mpas_create_field_links</font>
<font color="black"></font>
<font color="black"></font>
<font color="red">");
+
+ /* subroutines for linking specific field type */
+ group_ptr = groups;
+
while (group_ptr) {
- fortprintf(fd, " ! Create links for fields in %s</font>
<font color="blue">", group_ptr->name);
+ fortprintf(fd, " subroutine mpas_create_%s_links(%s)</font>
<font color="black"></font>
<font color="blue">", group_ptr->name, group_ptr->name);
+ fortprintf(fd, " implicit none</font>
<font color="blue">");
+ fortprintf(fd, " type (%s_type), pointer :: %s</font>
<font color="black"></font>
<font color="gray">", group_ptr->name, group_ptr->name);
+
var_list_ptr = group_ptr->vlist;
while (var_list_ptr) {
var_ptr = var_list_ptr->var;
@@ -869,94 +941,53 @@
}
var_ptr2 = var_list_ptr2->var;
get_outer_dim(var_ptr2, outer_dim);
- if (var_ptr2->ntime_levs > 1) {
+
if (strncmp("nCells",outer_dim,1024) == 0) {
- for(i=1; i<=var_ptr2->ntime_levs; i++) {
- fortprintf(fd, " b %% %s %% time_levs(%i) %% %s %% %s %% sendList => b %% parinfo %% cellsToSend</font>
<font color="red">", group_ptr->name, i, group_ptr->name, var_ptr2->super_array);
- fortprintf(fd, " b %% %s %% time_levs(%i) %% %s %% %s %% recvList => b %% parinfo %% cellsToRecv</font>
<font color="red">", group_ptr->name, i, group_ptr->name, var_ptr2->super_array);
- fortprintf(fd, " b %% %s %% time_levs(%i) %% %s %% %s %% copyList => b %% parinfo %% cellsToCopy</font>
<font color="red">", group_ptr->name, i, group_ptr->name, var_ptr2->super_array);
- }
+ fortprintf(fd, " %s %% %s %% sendList => %s %% %s %% block %% parinfo %% cellsToSend</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
+ fortprintf(fd, " %s %% %s %% recvList => %s %% %s %% block %% parinfo %% cellsToRecv</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
+ fortprintf(fd, " %s %% %s %% copyList => %s %% %s %% block %% parinfo %% cellsToCopy</font>
<font color="red">", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
}
else if (strncmp("nEdges",outer_dim,1024) == 0) {
- for(i=1; i<=var_ptr2->ntime_levs; i++) {
- fortprintf(fd, " b %% %s %% time_levs(%i) %% %s %% %s %% sendList => b %% parinfo %% edgesToSend</font>
<font color="red">", group_ptr->name, i, group_ptr->name, var_ptr2->super_array);
- fortprintf(fd, " b %% %s %% time_levs(%i) %% %s %% %s %% recvList => b %% parinfo %% edgesToRecv</font>
<font color="red">", group_ptr->name, i, group_ptr->name, var_ptr2->super_array);
- fortprintf(fd, " b %% %s %% time_levs(%i) %% %s %% %s %% copyList => b %% parinfo %% edgesToCopy</font>
<font color="red">", group_ptr->name, i, group_ptr->name, var_ptr2->super_array);
- }
+ fortprintf(fd, " %s %% %s %% sendList => %s %% %s %% block %% parinfo %% edgesToSend</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
+ fortprintf(fd, " %s %% %s %% recvList => %s %% %s %% block %% parinfo %% edgesToRecv</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
+ fortprintf(fd, " %s %% %s %% copyList => %s %% %s %% block %% parinfo %% edgesToCopy</font>
<font color="red">", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
}
else if (strncmp("nVertices",outer_dim,1024) == 0) {
- for(i=1; i<=var_ptr2->ntime_levs; i++) {
- fortprintf(fd, " b %% %s %% time_levs(%i) %% %s %% %s %% sendList => b %% parinfo %% verticesToSend</font>
<font color="red">", group_ptr->name, i, group_ptr->name, var_ptr2->super_array);
- fortprintf(fd, " b %% %s %% time_levs(%i) %% %s %% %s %% recvList => b %% parinfo %% verticesToRecv</font>
<font color="red">", group_ptr->name, i, group_ptr->name, var_ptr2->super_array);
- fortprintf(fd, " b %% %s %% time_levs(%i) %% %s %% %s %% copyList => b %% parinfo %% verticesToCopy</font>
<font color="red">", group_ptr->name, i, group_ptr->name, var_ptr2->super_array);
- }
+ fortprintf(fd, " %s %% %s %% sendList => %s %% %s %% block %% parinfo %% verticesToSend</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
+ fortprintf(fd, " %s %% %s %% recvList => %s %% %s %% block %% parinfo %% verticesToRecv</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
+ fortprintf(fd, " %s %% %s %% copyList => %s %% %s %% block %% parinfo %% verticesToCopy</font>
<font color="red">", group_ptr->name, var_ptr2->super_array, group_ptr->name, var_ptr2->super_array);
}
- }
- else {
+ fortprintf(fd, "</font>
<font color="red">");
+ }
+ else
+ {
+         if (var_ptr->ndims > 0)
+         {
+ get_outer_dim(var_ptr, outer_dim);
+
if (strncmp("nCells",outer_dim,1024) == 0) {
- fortprintf(fd, " b %% %s %% %s %% sendList => b %% parinfo %% cellsToSend</font>
<font color="red">", group_ptr->name, var_ptr2->super_array);
- fortprintf(fd, " b %% %s %% %s %% recvList => b %% parinfo %% cellsToRecv</font>
<font color="red">", group_ptr->name, var_ptr2->super_array);
- fortprintf(fd, " b %% %s %% %s %% copyList => b %% parinfo %% cellsToCopy</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array);
+ fortprintf(fd, " %s %% %s %% sendList => %s %% %s %% block %% parinfo %% cellsToSend</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
+ fortprintf(fd, " %s %% %s %% recvList => %s %% %s %% block %% parinfo %% cellsToRecv</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
+ fortprintf(fd, " %s %% %s %% copyList => %s %% %s %% block %% parinfo %% cellsToCopy</font>
<font color="red">", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
}
else if (strncmp("nEdges",outer_dim,1024) == 0) {
- fortprintf(fd, " b %% %s %% %s %% sendList => b %% parinfo %% edgesToSend</font>
<font color="red">", group_ptr->name, var_ptr2->super_array);
- fortprintf(fd, " b %% %s %% %s %% recvList => b %% parinfo %% edgesToRecv</font>
<font color="red">", group_ptr->name, var_ptr2->super_array);
- fortprintf(fd, " b %% %s %% %s %% copyList => b %% parinfo %% edgesToCopy</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array);
+ fortprintf(fd, " %s %% %s %% sendList => %s %% %s %% block %% parinfo %% edgesToSend</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
+ fortprintf(fd, " %s %% %s %% recvList => %s %% %s %% block %% parinfo %% edgesToRecv</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
+ fortprintf(fd, " %s %% %s %% copyList => %s %% %s %% block %% parinfo %% edgesToCopy</font>
<font color="red">", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
}
else if (strncmp("nVertices",outer_dim,1024) == 0) {
- fortprintf(fd, " b %% %s %% %s %% sendList => b %% parinfo %% verticesToSend</font>
<font color="red">", group_ptr->name, var_ptr2->super_array);
- fortprintf(fd, " b %% %s %% %s %% recvList => b %% parinfo %% verticesToRecv</font>
<font color="red">", group_ptr->name, var_ptr2->super_array);
- fortprintf(fd, " b %% %s %% %s %% copyList => b %% parinfo %% verticesToCopy</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array);
+ fortprintf(fd, " %s %% %s %% sendList => %s %% %s %% block %% parinfo %% verticesToSend</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
+ fortprintf(fd, " %s %% %s %% recvList => %s %% %s %% block %% parinfo %% verticesToRecv</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
+ fortprintf(fd, " %s %% %s %% copyList => %s %% %s %% block %% parinfo %% verticesToCopy</font>
<font color="red">", group_ptr->name, var_ptr->name_in_code, group_ptr->name, var_ptr->name_in_code);
}
- }
- fortprintf(fd, "\n");
- }
- else {
- if (var_ptr->ndims > 0) {
- get_outer_dim(var_ptr, outer_dim);
- if (var_ptr->ntime_levs > 1) {
- for(i=1; i<=var_ptr->ntime_levs; i++) {
- if (strncmp("nCells",outer_dim,1024) == 0) {
- fortprintf(fd, " b %% %s %% time_levs(%i) %% %s %% %s %% sendList => b %% parinfo %% cellsToSend\n", group_ptr->name, i, group_ptr->name, var_ptr->name_in_code);
- fortprintf(fd, " b %% %s %% time_levs(%i) %% %s %% %s %% recvList => b %% parinfo %% cellsToRecv\n", group_ptr->name, i, group_ptr->name, var_ptr->name_in_code);
- fortprintf(fd, " b %% %s %% time_levs(%i) %% %s %% %s %% copyList => b %% parinfo %% cellsToCopy\n", group_ptr->name, i, group_ptr->name, var_ptr->name_in_code);
- }
- else if (strncmp("nEdges",outer_dim,1024) == 0) {
- fortprintf(fd, " b %% %s %% time_levs(%i) %% %s %% %s %% sendList => b %% parinfo %% edgesToSend\n", group_ptr->name, i, group_ptr->name, var_ptr->name_in_code);
- fortprintf(fd, " b %% %s %% time_levs(%i) %% %s %% %s %% recvList => b %% parinfo %% edgesToRecv\n", group_ptr->name, i, group_ptr->name, var_ptr->name_in_code);
- fortprintf(fd, " b %% %s %% time_levs(%i) %% %s %% %s %% copyList => b %% parinfo %% edgesToCopy\n", group_ptr->name, i, group_ptr->name, var_ptr->name_in_code);
- }
- else if (strncmp("nVertices",outer_dim,1024) == 0) {
- fortprintf(fd, " b %% %s %% time_levs(%i) %% %s %% %s %% sendList => b %% parinfo %% verticesToSend\n", group_ptr->name, i, group_ptr->name, var_ptr->name_in_code);
- fortprintf(fd, " b %% %s %% time_levs(%i) %% %s %% %s %% recvList => b %% parinfo %% verticesToRecv\n", group_ptr->name, i, group_ptr->name, var_ptr->name_in_code);
- fortprintf(fd, " b %% %s %% time_levs(%i) %% %s %% %s %% copyList => b %% parinfo %% verticesToCopy\n", group_ptr->name, i, group_ptr->name, var_ptr->name_in_code);
- }
- }
- }
- else {
- if (strncmp("nCells",outer_dim,1024) == 0) {
- fortprintf(fd, " b %% %s %% %s %% sendList => b %% parinfo %% cellsToSend\n", group_ptr->name, var_ptr->name_in_code);
- fortprintf(fd, " b %% %s %% %s %% recvList => b %% parinfo %% cellsToRecv\n", group_ptr->name, var_ptr->name_in_code);
- fortprintf(fd, " b %% %s %% %s %% copyList => b %% parinfo %% cellsToCopy\n", group_ptr->name, var_ptr->name_in_code);
- }
- else if (strncmp("nEdges",outer_dim,1024) == 0) {
- fortprintf(fd, " b %% %s %% %s %% sendList => b %% parinfo %% edgesToSend\n", group_ptr->name, var_ptr->name_in_code);
- fortprintf(fd, " b %% %s %% %s %% recvList => b %% parinfo %% edgesToRecv\n", group_ptr->name, var_ptr->name_in_code);
- fortprintf(fd, " b %% %s %% %s %% copyList => b %% parinfo %% edgesToCopy\n", group_ptr->name, var_ptr->name_in_code);
- }
- else if (strncmp("nVertices",outer_dim,1024) == 0) {
- fortprintf(fd, " b %% %s %% %s %% sendList => b %% parinfo %% verticesToSend\n", group_ptr->name, var_ptr->name_in_code);
- fortprintf(fd, " b %% %s %% %s %% recvList => b %% parinfo %% verticesToRecv\n", group_ptr->name, var_ptr->name_in_code);
- fortprintf(fd, " b %% %s %% %s %% copyList => b %% parinfo %% verticesToCopy\n", group_ptr->name, var_ptr->name_in_code);
- }
- }
  fortprintf(fd, "\n");
- }
+         }
  var_list_ptr = var_list_ptr->next;
- }
+         }
  }
+
+ fortprintf(fd, " end subroutine mpas_create_%s_links\n\n\n", group_ptr->name);
- fortprintf(fd, "\n");
group_ptr = group_ptr->next;
}
fclose(fd);
</font>
</pre>
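<p>The net effect of the gen_inc.c hunk above is that each generated mpas_create_&lt;group&gt;_links subroutine now points a field's halo exchange lists at the parinfo of the field's own block, reached through the field's block back-pointer, instead of at the hard-coded block variable b. As a rough sketch, assuming a variable group named "state" with a field "theta" whose outermost dimension is nCells (both names are illustrative, not taken from this commit), the new format strings would emit Fortran along these lines:</p>
<pre>
! Hypothetical generated output; each "%%" in the C format strings
! above prints as a single "%" in the Fortran source.
 state % theta % sendList => state % theta % block % parinfo % cellsToSend
 state % theta % recvList => state % theta % block % parinfo % cellsToRecv
 state % theta % copyList => state % theta % block % parinfo % cellsToCopy
</pre>
<p>Routing through the field's block pointer means an exchange routine can recover the send/recv/copy lists from a field alone, which is what the multiple-layered halo infrastructure added elsewhere in this commit appears to rely on.</p>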