<p><b>duda</b> 2011-04-19 18:04:37 -0600 (Tue, 19 Apr 2011)</p><p>BRANCH COMMIT<br>
<br>
Remove unnecessary halo exchanges from the solver, and also<br>
remove some if-tests from loops.<br>
<br>
<br>
M src/core_nhyd_atmos/module_mpas_core.F<br>
M src/core_nhyd_atmos/module_time_integration.F<br>
</p><hr noshade><pre><font color="gray">Modified: branches/atmos_physics/src/core_nhyd_atmos/module_mpas_core.F
===================================================================
--- branches/atmos_physics/src/core_nhyd_atmos/module_mpas_core.F        2011-04-18 21:39:04 UTC (rev 794)
+++ branches/atmos_physics/src/core_nhyd_atmos/module_mpas_core.F        2011-04-20 00:04:37 UTC (rev 795)
@@ -31,7 +31,7 @@
dt = config_dt
block => domain % blocklist
do while (associated(block))
- call mpas_init_block(domain % dminfo, block, block % mesh, dt)
+ call mpas_init_block(domain % dminfo, block, block % mesh, dt)
block => block % next
end do
@@ -57,16 +57,30 @@
implicit none
- type (dm_info), intent(in):: dminfo
+ type (dm_info), intent(in) :: dminfo
type (block_type), intent(inout) :: block
type (mesh_type), intent(inout) :: mesh
real (kind=RKIND), intent(in) :: dt
+ call dmpar_exch_halo_field2dReal(dminfo, block % state % time_levs(1) % state % u % array(:,:), &
+ block % mesh % nVertLevels, block % mesh % nEdges, &
+ block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
if (.not. config_do_restart) then
call init_coupled_diagnostics( block % state % time_levs(1) % state, block % diag, mesh)
call compute_solve_diagnostics(dt, block % state % time_levs(1) % state, block % diag, mesh)
end if
+
+ call dmpar_exch_halo_field2dReal(dminfo, block % diag % pv_edge % array(:,:), &
+ block % mesh % nVertLevels, block % mesh % nEdges, &
+ block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
+ call dmpar_exch_halo_field2dReal(dminfo, block % diag % ru % array(:,:), &
+ block % mesh % nVertLevels, block % mesh % nEdges, &
+ block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
+ call dmpar_exch_halo_field2dReal(dminfo, block % diag % rw % array, &
+ block % mesh % nVertLevels+1, block % mesh % nCells, &
+ block % parinfo % CellsToSend, block % parinfo % CellsToRecv)
+
call rbfInterp_initialize(mesh)
call init_reconstruct(mesh)
call reconstruct(block % state % time_levs(1) % state, block % diag, mesh)
Modified: branches/atmos_physics/src/core_nhyd_atmos/module_time_integration.F
===================================================================
--- branches/atmos_physics/src/core_nhyd_atmos/module_time_integration.F        2011-04-18 21:39:04 UTC (rev 794)
+++ branches/atmos_physics/src/core_nhyd_atmos/module_time_integration.F        2011-04-20 00:04:37 UTC (rev 795)
@@ -107,8 +107,16 @@
if(debug) write(0,*) ' copy step in rk solver '
+! WCS-parallel: it appears we have chosen to update all edges of the nCellsSolve owned cells (to cut down on communications in the acoustic steps).<br>
+! Do our communication patterns and loops (specifically in compute_dyn_tend) reflect this? Or do they assume we are only updating<br>
+! the so-called owned edges?<br>
+
+
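!
! A minimal sketch of the two loop conventions in question (nEdgesSolve is
! assumed here for illustration; the update itself is schematic):
!
!   do iEdge = 1, nEdgesSolve          ! owned edges only; halo edges must
!      u(k,iEdge) = u(k,iEdge) + ...   ! then be filled by an exchange
!   end do
!
!   do iEdge = 1, nEdges               ! all edges, including halo edges of
!      u(k,iEdge) = u(k,iEdge) + ...   ! owned cells; fewer exchanges needed
!   end do
!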
block => domain % blocklist
do while (associated(block))
+
+! WCS-parallel: first three and rtheta_p arise from scalar transport and microphysics update (OK). Others come from where?
+
! theta
call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(1) % state % theta % array(:,:), &
block % mesh % nVertLevels, block % mesh % nCells, &
@@ -121,37 +129,14 @@
call dmpar_exch_halo_field2dReal(domain % dminfo, block % diag % pressure_p % array(:,:), &
block % mesh % nVertLevels, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
-! vorticity
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % diag % vorticity % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nVertices, &
- block % parinfo % verticesToSend, block % parinfo % verticesToRecv)
-! divergence
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % diag % divergence % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
-! pv_edge
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % diag % pv_edge % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
! rtheta_p
call dmpar_exch_halo_field2dReal(domain % dminfo, block % diag % rtheta_p % array(:,:), &
block % mesh % nVertLevels, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
-! ru
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % diag % ru % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
-! rw
- call dmpar_exch_halo_field2dReal(domain % dminfo, &
- block % diag % rw % array, &
- block % mesh % nVertLevels+1, block % mesh % nCells, &
- block % parinfo % CellsToSend, block % parinfo % CellsToRecv)
-
!surface_pressure
call dmpar_exch_halo_field1dReal(domain % dminfo, block % diag % surface_pressure % array(:), &
block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
-
block => block % next
end do
@@ -169,7 +154,7 @@
do rk_step = 1, 3 ! Runge-Kutta loop
- if(debug) write(0,*) ' rk substep ', rk_step
+ if(debug) write(0,*) ' rk substep ', rk_step
block => domain % blocklist
do while (associated(block))
@@ -180,20 +165,7 @@
block => block % next
end do
- block => domain % blocklist
- do while (associated(block))
- call dmpar_exch_halo_field2dReal(domain % dminfo, &
- block % diag % cqu % array, &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % EdgesToSend, block % parinfo % EdgesToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, &
- block % diag % cqw % array, &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % CellsToSend, block % parinfo % CellsToRecv)
- block => block % next
- end do
-
if (debug) write(0,*) ' compute_dyn_tend '
block => domain % blocklist
do while (associated(block))
@@ -220,181 +192,182 @@
! we will need to communicate the momentum tendencies here - we want tendencies for all edges of owned cells
! because we are solving for all edges of owned cells
!***********************************
+
block => domain % blocklist
do while (associated(block))
+! tend_u
call dmpar_exch_halo_field2dReal(domain % dminfo, block % tend % u % array(:,:), &
block % mesh % nVertLevels, block % mesh % nEdges, &
block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % tend % theta % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % tend % rho % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % tend % w % array(:,:), &
- block % mesh % nVertLevels+1, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
block => block % next
end do
- block => domain % blocklist
- do while (associated(block))
- call set_smlstep_pert_variables( block % state % time_levs(1) % state, block % state % time_levs(2) % state, &
- block % tend, block % diag, block % mesh )
- call compute_vert_imp_coefs( block % state % time_levs(2) % state, block % mesh, block % diag, rk_sub_timestep(rk_step) )
+ block => domain % blocklist
+ do while (associated(block))
+ call set_smlstep_pert_variables( block % state % time_levs(1) % state, block % state % time_levs(2) % state, &
+ block % tend, block % diag, block % mesh )
+ call compute_vert_imp_coefs( block % state % time_levs(2) % state, block % mesh, block % diag, rk_sub_timestep(rk_step) )
block => block % next
- end do
+ end do
- do small_step = 1, number_sub_steps(rk_step)
+ do small_step = 1, number_sub_steps(rk_step)
- if(debug) write(0,*) ' acoustic step ',small_step
+ if(debug) write(0,*) ' acoustic step ',small_step
- block => domain % blocklist
- do while (associated(block))
- call advance_acoustic_step( block % state % time_levs(2) % state, block % diag, block % tend, &
- block % mesh, rk_sub_timestep(rk_step) )
- block => block % next
- end do
+ block => domain % blocklist
+ do while (associated(block))
+ call advance_acoustic_step( block % state % time_levs(2) % state, block % diag, block % tend, &
+ block % mesh, rk_sub_timestep(rk_step) )
+ block => block % next
+ end do
- if(debug) write(0,*) ' acoustic step complete '
+ if(debug) write(0,*) ' acoustic step complete '
- ! will need communications here for rtheta_pp
+ ! will need communications here for rtheta_pp
+
+! WCS-parallel: is this a candidate for a smaller stencil? we need only communicate cells that share edges with owned cells.
+
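!
! A minimal sketch of the smaller-stencil idea: the same exchange as below,
! but driven by one-layer lists (cellsToSendOneHalo/cellsToRecvOneHalo are
! hypothetical, not existing parinfo members; they would cover only cells
! sharing an edge with an owned cell):
!
!   call dmpar_exch_halo_field2dReal(domain % dminfo, block % diag % rtheta_pp % array(:,:), &
!                                    block % mesh % nVertLevels, block % mesh % nCells, &
!                                    block % parinfo % cellsToSendOneHalo, block % parinfo % cellsToRecvOneHalo)
!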
block => domain % blocklist
do while (associated(block))
+! rtheta_pp
call dmpar_exch_halo_field2dReal(domain % dminfo, block % diag % rtheta_pp % array(:,:), &
block % mesh % nVertLevels, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % diag % rw_p % array(:,:), &
- block % mesh % nVertLevels+1, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % diag % ru_p % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
block => block % next
end do
- end do ! end of small stimestep loop
+ end do ! end of small timestep loop
- ! will need communications here for rho_pp
+ ! will need communications here for rho_pp
block => domain % blocklist
do while (associated(block))
+
+! WCS-parallel: is communication of rw_p and rho_pp because of limiter (pd or mono scheme?),
+! or is it needed for the large-step variable recovery (to get decoupled variables)?
+! seems like only rho_pp needed...
+!
+! or, do we need ru and u in the halo for diagnostics that are computed later in compute_solve_diagnostics?
+!
+! rho_pp might be a candidate for a smaller stencil (same stencil as rtheta_pp above).
+
+! MGD seems necessary
+! rw_p
+ call dmpar_exch_halo_field2dReal(domain % dminfo, block % diag % rw_p % array(:,:), &
+ block % mesh % nVertLevels+1, block % mesh % nCells, &
+ block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
+! MGD seems necessary
+! ru_p
+ call dmpar_exch_halo_field2dReal(domain % dminfo, block % diag % ru_p % array(:,:), &
+ block % mesh % nVertLevels, block % mesh % nEdges, &
+ block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
+! rho_pp
call dmpar_exch_halo_field2dReal(domain % dminfo, block % diag % rho_pp % array(:,:), &
block % mesh % nVertLevels, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
block => block % next
end do
- block => domain % blocklist
- do while (associated(block))
- call recover_large_step_variables( block % state % time_levs(2) % state, &
- block % diag, block % tend, block % mesh, &
- rk_timestep(rk_step), number_sub_steps(rk_step), rk_step )
+ block => domain % blocklist
+ do while (associated(block))
+ call recover_large_step_variables( block % state % time_levs(2) % state, &
+ block % diag, block % tend, block % mesh, &
+ rk_timestep(rk_step), number_sub_steps(rk_step), rk_step )
- block => block % next
- end do
+ block => block % next
+ end do
! ************ advection of moist variables here...
+ block => domain % blocklist
+ do while (associated(block))
+! u
+ call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % u % array(:,:), &
+ block % mesh % nVertLevels, block % mesh % nEdges, &
+ block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
+ block => block % next
+ end do
- if (config_scalar_advection) then
- block => domain % blocklist
- do while (associated(block))
- !
- ! Note: The advance_scalars_mono routine can be used without limiting, and thus, encompasses
- ! the functionality of the advance_scalars routine; however, it is noticeably slower,
- ! so we keep the advance_scalars routine as well
- !
- if (rk_step < 3 .or. (.not. config_monotonic .and. .not. config_positive_definite)) then
- call advance_scalars( block % tend, &
- block % state % time_levs(1) % state, block % state % time_levs(2) % state, &
- block % diag, &
- block % mesh, rk_timestep(rk_step) )
- else
- call advance_scalars_mono( block % tend, &
- block % state % time_levs(1) % state, block % state % time_levs(2) % state, &
- block % diag, &
- block % mesh, rk_timestep(rk_step), rk_step, 3, &
- domain % dminfo, block % parinfo % cellsToSend, block % parinfo % cellsToRecv )
- end if
- block => block % next
- end do
+ if (config_scalar_advection) then
+
block => domain % blocklist
do while (associated(block))
+ !
+ ! Note: The advance_scalars_mono routine can be used without limiting, and thus, encompasses
+ ! the functionality of the advance_scalars routine; however, it is noticeably slower,
+ ! so we keep the advance_scalars routine as well
+ !
+ if (rk_step < 3 .or. (.not. config_monotonic .and. .not. config_positive_definite)) then
+ call advance_scalars( block % tend, &
+ block % state % time_levs(1) % state, block % state % time_levs(2) % state, &
+ block % diag, &
+ block % mesh, rk_timestep(rk_step) )
+ else
+ call advance_scalars_mono( block % tend, &
+ block % state % time_levs(1) % state, block % state % time_levs(2) % state, &
+ block % diag, &
+ block % mesh, rk_timestep(rk_step), rk_step, 3, &
+ domain % dminfo, block % parinfo % cellsToSend, block % parinfo % cellsToRecv )
+ end if
+ block => block % next
+ end do
+
! For now, we do scalar halo updates later on...
+! block => domain % blocklist
+! do while (associated(block))
! call dmpar_exch_halo_field3dReal(domain % dminfo, block % tend % scalars % array(:,:,:), &
! block % tend % num_scalars, block % mesh % nVertLevels, block % mesh % nCells, &
! block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
! call dmpar_exch_halo_field3dReal(domain % dminfo, block % state % time_levs(2) % state % scalars % array(:,:,:), &
! block % state % time_levs(2) % state % num_scalars, block % mesh % nVertLevels, block % mesh % nCells, &
! block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- block => block % next
- end do
+! block => block % next
+! end do
- else
+ else
- write(0,*) ' no scalar advection '
+ write(0,*) ' no scalar advection '
- end if
+ end if
- block => domain % blocklist
- do while (associated(block))
- call compute_solve_diagnostics( dt, block % state % time_levs(2) % state, block % diag, block % mesh )
- block => block % next
- end do
+! WCS-parallel: seems like we already use u and w (and other state variables) as if they were already correctly set in the halo,
+! but they are not (at least to the outer edges - see communications below? or are those communications redundant?).
+! Perhaps we should communicate u, w, theta, rho, etc. after recover_large_step_variables,
+! cover it with the scalar communications, and then call compute_solve_diagnostics. I do not think we need to communicate the stuff we compute
+! in compute_solve_diagnostics if we compute it out in the halo (and I think we do - the halos should be large enough).
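!
! A minimal sketch of the ordering proposed above (hypothetical; it reuses
! exchange calls that already appear in this routine):
!
!   ! ...after recover_large_step_variables:
!   call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % u % array(:,:), &
!                                    block % mesh % nVertLevels, block % mesh % nEdges, &
!                                    block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
!   call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % w % array(:,:), &
!                                    block % mesh % nVertLevels+1, block % mesh % nCells, &
!                                    block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
!   ! ...theta and rho likewise, covered by the scalar exchanges, then:
!   call compute_solve_diagnostics( dt, block % state % time_levs(2) % state, block % diag, block % mesh )
!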
- if(debug) write(0,*) ' diagnostics complete '
+ block => domain % blocklist
+ do while (associated(block))
+ call compute_solve_diagnostics( dt, block % state % time_levs(2) % state, block % diag, block % mesh )
+ block => block % next
+ end do
+ if(debug) write(0,*) ' diagnostics complete '
+
! need communications here to fill out u, w, theta, p, and pp, scalars, etc
! so that they are available for next RK step or the first rk substep of the next timestep
block => domain % blocklist
do while (associated(block))
-! NB: if any of these cause differences, better to uncomment the version after qd_kessler?
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % u % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % theta % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % diag % pressure_p % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % rho % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % diag % rho_p % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
+!MGD seems necessary
+! w
call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % w % array(:,:), &
block % mesh % nVertLevels+1, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % diag % vorticity % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nVertices, &
- block % parinfo % verticesToSend, block % parinfo % verticesToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % diag % pv_vertex % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nVertices, &
- block % parinfo % verticesToSend, block % parinfo % verticesToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % diag % pv_cell % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % diag % divergence % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % diag % ke % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % diag % v % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
+! pv_edge
call dmpar_exch_halo_field2dReal(domain % dminfo, block % diag % pv_edge % array(:,:), &
block % mesh % nVertLevels, block % mesh % nEdges, &
block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
+! rho_edge
call dmpar_exch_halo_field2dReal(domain % dminfo, block % diag % rho_edge % array(:,:), &
block % mesh % nVertLevels, block % mesh % nEdges, &
block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
+
+! **** this will always be needed - perhaps we can cover this with compute_solve_diagnostics
+
+! scalars
call dmpar_exch_halo_field3dReal(domain % dminfo, block % state % time_levs(2) % state % scalars % array(:,:,:), &
block % state % time_levs(2) % state % num_scalars, block % mesh % nVertLevels, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
@@ -436,29 +409,6 @@
! end do
! end if
- block => domain % blocklist
- do while (associated(block))
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % diag % rtheta_p % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % diag % exner % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % theta % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % diag % pressure_p % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % tend % rt_diabatic_tend % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field3dReal(domain % dminfo, block % state % time_levs(2) % state % scalars % array(:,:,:), &
- block % state % time_levs(2) % state % num_scalars, block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- block => block % next
- end do
-
! if(debug) then
101 format(' min, max scalar',i4,2(1x,e17.10))
write(0,*)
@@ -1081,7 +1031,9 @@
! recover owned-cell values in block
! if( iCell <= nCellsSolve ) then ! If using this test, more halo exchanges will be needed
+! WCS-parallel: do we know about this? (OK)
+
if(debug) then
if( iCell == 479 ) then
write(0,*) ' k, rw, rw_save, rw_p '
@@ -1153,6 +1105,10 @@
! if( cell1 <= nCellsSolve .or. cell2 <= nCellsSolve ) then ! If using this test, more halo exchanges will be needed,
! though we can avoid division by zero, e.g., by rho(:,cell2)
do k = 1, nVertLevels
+
+
+! WCS-parallel: we could pick this up in the last acoustic step (ruAvg) because we solve for all edges of owned cells
+
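!
! A minimal sketch of the suggestion above (hypothetical): fold the averaging
! into the final small step of advance_acoustic_step, guarded by the step
! counter, so that this separate pass over edges could be dropped:
!
!   if (small_step == number_sub_steps(rk_step)) then
!      ruAvg(k,iEdge) = ru(k,iEdge) + (ruAvg(k,iEdge) / float(ns))
!   end if
!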
ruAvg(k,iEdge) = ru(k,iEdge) + (ruAvg(k,iEdge) / float(ns))
ru(k,iEdge) = ru(k,iEdge) + ru_p(k,iEdge)
@@ -1160,6 +1116,9 @@
u(k,iEdge) = 2.*ru(k,iEdge)/(rho(k,cell1)+rho(k,cell2))
enddo
+
+! WCS-parallel: we likely only need this for owned cells
+
!SHP-mtn
flux = cf1*ru(1,iEdge) + cf2*ru(2,iEdge) + cf3*ru(3,iEdge)
w(1,cell2) = w(1,cell2) - zb(1,2,iEdge)*flux/(cf1*rho(1,cell2)+cf2*rho(2,cell2)+cf3*rho(3,cell2))
@@ -1855,18 +1814,12 @@
end do ! end loop over cells to compute scale factor
- call dmpar_exch_halo_field2dReal(dminfo, scale_out(:,:,1), &
+ call dmpar_exch_halo_field2dReal(dminfo, scale_out(:,:,km0), &
s_old % num_scalars, grid % nCells, &
cellsToSend, cellsToRecv)
- call dmpar_exch_halo_field2dReal(dminfo, scale_out(:,:,2), &
+ call dmpar_exch_halo_field2dReal(dminfo, scale_in(:,:,km0), &
s_old % num_scalars, grid % nCells, &
cellsToSend, cellsToRecv)
- call dmpar_exch_halo_field2dReal(dminfo, scale_in(:,:,1), &
- s_old % num_scalars, grid % nCells, &
- cellsToSend, cellsToRecv)
- call dmpar_exch_halo_field2dReal(dminfo, scale_in(:,:,2), &
- s_old % num_scalars, grid % nCells, &
- cellsToSend, cellsToRecv)
! rescale the horizontal fluxes
@@ -3161,30 +3114,21 @@
do iEdge=1,nEdges
cell1 = cellsOnEdge(1,iEdge)
cell2 = cellsOnEdge(2,iEdge)
- if (cell1 <= grid%nCells .and. cell2 <= grid%nCells) then
- do k=1,nVertLevels
- h_edge(k,iEdge) = 0.5 * (h(k,cell1) + h(k,cell2))
- end do
- end if
+ do k=1,nVertLevels
+ h_edge(k,iEdge) = 0.5 * (h(k,cell1) + h(k,cell2))
+ end do
end do
-
!
! Compute circulation and relative vorticity at each vertex
!
circulation(:,:) = 0.0
do iEdge=1,nEdges
- if (verticesOnEdge(1,iEdge) <= grid%nVertices) then
- do k=1,nVertLevels
- circulation(k,verticesOnEdge(1,iEdge)) = circulation(k,verticesOnEdge(1,iEdge)) - dcEdge(iEdge) * u(k,iEdge)
- end do
- end if
- if (verticesOnEdge(2,iEdge) <= grid%nVertices) then
- do k=1,nVertLevels
- circulation(k,verticesOnEdge(2,iEdge)) = circulation(k,verticesOnEdge(2,iEdge)) + dcEdge(iEdge) * u(k,iEdge)
- end do
- end if
+ do k=1,nVertLevels
+ circulation(k,verticesOnEdge(1,iEdge)) = circulation(k,verticesOnEdge(1,iEdge)) - dcEdge(iEdge) * u(k,iEdge)
+ circulation(k,verticesOnEdge(2,iEdge)) = circulation(k,verticesOnEdge(2,iEdge)) + dcEdge(iEdge) * u(k,iEdge)
+ end do
end do
do iVertex=1,nVertices
do k=1,nVertLevels
@@ -3200,23 +3144,16 @@
do iEdge=1,nEdges
cell1 = cellsOnEdge(1,iEdge)
cell2 = cellsOnEdge(2,iEdge)
- if (cell1 <= grid%nCells) then
- do k=1,nVertLevels
- divergence(k,cell1) = divergence(k,cell1) + u(k,iEdge)*dvEdge(iEdge)
- end do
- end if
- if(cell2 <= grid%nCells) then
- do k=1,nVertLevels
- divergence(k,cell2) = divergence(k,cell2) - u(k,iEdge)*dvEdge(iEdge)
- end do
- end if
-
+ do k=1,nVertLevels
+ divergence(k,cell1) = divergence(k,cell1) + u(k,iEdge)*dvEdge(iEdge)
+ divergence(k,cell2) = divergence(k,cell2) - u(k,iEdge)*dvEdge(iEdge)
+ end do
end do
do iCell = 1,nCells
- r = 1.0 / areaCell(iCell)
- do k = 1,nVertLevels
- divergence(k,iCell) = divergence(k,iCell) * r
- end do
+ r = 1.0 / areaCell(iCell)
+ do k = 1,nVertLevels
+ divergence(k,iCell) = divergence(k,iCell) * r
+ end do
end do
@@ -3287,24 +3224,18 @@
do iEdge = 1,nEdges
do i=1,nEdgesOnEdge(iEdge)
eoe = edgesOnEdge(i,iEdge)
- if (eoe <= grid%nEdges) then
- do k = 1,nVertLevels
- v(k,iEdge) = v(k,iEdge) + weightsOnEdge(i,iEdge) * u(k, eoe)
- end do
- end if
+ do k = 1,nVertLevels
+ v(k,iEdge) = v(k,iEdge) + weightsOnEdge(i,iEdge) * u(k, eoe)
+ end do
end do
end do
- ! tdr
!
! Compute height at vertices, pv at vertices, and average pv to edge locations
! ( this computes pv_vertex at all vertices bounding real cells )
!
- VTX_LOOP: do iVertex = 1,nVertices
- do i=1,grid % vertexDegree
- if (cellsOnVertex(i,iVertex) <= 0) cycle VTX_LOOP
- end do
+ do iVertex = 1,nVertices
do k=1,nVertLevels
h_vertex = 0.0
do i=1,grid % vertexDegree
@@ -3314,41 +3245,34 @@
pv_vertex(k,iVertex) = (fVertex(iVertex) + vorticity(k,iVertex)) / h_vertex
end do
- end do VTX_LOOP
- ! tdr
+ end do
- ! tdr
!
! Compute gradient of PV in the tangent direction
! ( this computes gradPVt at all edges bounding real cells )
!
do iEdge = 1,nEdges
do k = 1,nVertLevels
- gradPVt(k,iEdge) = (pv_vertex(k,verticesOnEdge(2,iEdge)) - pv_vertex(k,verticesOnEdge(1,iEdge))) / &
- dvEdge(iEdge)
+ gradPVt(k,iEdge) = (pv_vertex(k,verticesOnEdge(2,iEdge)) - pv_vertex(k,verticesOnEdge(1,iEdge))) / &
+ dvEdge(iEdge)
end do
end do
- ! tdr
!
! Compute pv at the edges
! ( this computes pv_edge at all edges bounding real cells )
!
pv_edge(:,:) = 0.0
do iVertex = 1,nVertices
- do i=1,grid % vertexDegree
- iEdge = edgesOnVertex(i,iVertex)
- if(iEdge <= grid%nEdges) then
+ do i=1,grid % vertexDegree
+ iEdge = edgesOnVertex(i,iVertex)
do k=1,nVertLevels
- pv_edge(k,iEdge) = pv_edge(k,iEdge) + 0.5 * pv_vertex(k,iVertex)
+ pv_edge(k,iEdge) = pv_edge(k,iEdge) + 0.5 * pv_vertex(k,iVertex)
end do
- end if
- end do
+ end do
end do
- ! tdr
- ! tdr
!
! Modify PV edge with upstream bias.
!
@@ -3359,39 +3283,31 @@
end do
- ! tdr
!
! Compute pv at cell centers
! ( this computes pv_cell for all real cells )
!
pv_cell(:,:) = 0.0
do iVertex = 1, nVertices
- do i=1,grid % vertexDegree
- iCell = cellsOnVertex(i,iVertex)
- if( iCell <= grid % nCells) then
- do k = 1,nVertLevels
- pv_cell(k,iCell) = pv_cell(k,iCell) + kiteAreasOnVertex(i, iVertex) * pv_vertex(k, iVertex) / areaCell(iCell)
- end do
- end if
- end do
+ do i=1,grid % vertexDegree
+ iCell = cellsOnVertex(i,iVertex)
+ do k = 1,nVertLevels
+ pv_cell(k,iCell) = pv_cell(k,iCell) + kiteAreasOnVertex(i, iVertex) * pv_vertex(k, iVertex) / areaCell(iCell)
+ end do
+ end do
end do
- ! tdr
- ! tdr
!
! Compute gradient of PV in normal direction
! (tdr: 2009-10-02: this is not correct because the pv_cell in the halo is not correct)
!
gradPVn(:,:) = 0.0
do iEdge = 1,nEdges
- if( cellsOnEdge(1,iEdge) <= grid%nCells .and. cellsOnEdge(2,iEdge) <= grid%nCells) then
- do k = 1,nVertLevels
+ do k = 1,nVertLevels
gradPVn(k,iEdge) = (pv_cell(k,cellsOnEdge(2,iEdge)) - pv_cell(k,cellsOnEdge(1,iEdge))) / &
dcEdge(iEdge)
- end do
- end if
+ end do
end do
- ! tdr
! Modify PV edge with upstream bias.
!
</font>
</pre>
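<p>For reference, the if-test-removal pattern applied throughout the
compute_solve_diagnostics hunks above is sketched below. This is a minimal
illustration, assuming the MPAS convention that an invalid neighbor index
points at an extra garbage element (e.g. cell index nCells+1), so an
unguarded read or write there touches only values that are never used:</p>
<pre>
! Before: every edge guards against neighbor indices beyond the valid range.
do iEdge = 1, nEdges
   cell1 = cellsOnEdge(1,iEdge)
   cell2 = cellsOnEdge(2,iEdge)
   if (cell1 <= nCells .and. cell2 <= nCells) then
      do k = 1, nVertLevels
         h_edge(k,iEdge) = 0.5 * (h(k,cell1) + h(k,cell2))
      end do
   end if
end do

! After: the branch is gone; out-of-range neighbors land in the garbage
! element, and the inner loop is a cleaner candidate for vectorization.
do iEdge = 1, nEdges
   cell1 = cellsOnEdge(1,iEdge)
   cell2 = cellsOnEdge(2,iEdge)
   do k = 1, nVertLevels
      h_edge(k,iEdge) = 0.5 * (h(k,cell1) + h(k,cell2))
   end do
end do
</pre>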