<p><b>ringler@lanl.gov</b> 2012-02-06 14:41:29 -0700 (Mon, 06 Feb 2012)</p><p><br>
comments on mpas_ocn_time_integration_split.F as a part of code review.<br>
<br>
only one comment pertains to the ALE vertical coordinate.<br>
<br>
(Mark, grep for "TDR", address comments (or not!), remove my comments and then recommit)<br>
</p><hr noshade><pre><font color="gray">Modified: branches/ocean_projects/ale_split_exp/src/core_ocean/mpas_ocn_time_integration_split.F
===================================================================
--- branches/ocean_projects/ale_split_exp/src/core_ocean/mpas_ocn_time_integration_split.F        2012-02-06 19:39:30 UTC (rev 1468)
+++ branches/ocean_projects/ale_split_exp/src/core_ocean/mpas_ocn_time_integration_split.F        2012-02-06 21:41:29 UTC (rev 1469)
@@ -87,6 +87,7 @@
integer :: iCell, i,k,j, iEdge, cell1, cell2, split_explicit_step, split, &
eoe, oldBtrSubcycleTime, newBtrSubcycleTime, uPerpTime, BtrCorIter, &
n_bcl_iter(config_n_ts_iter), &
+!TDR: vertex1, vertex2, iVertex unused
vertex1, vertex2, iVertex
type (block_type), pointer :: block
real (kind=RKIND) :: uhSum, hSum, flux, sshEdge, &
@@ -97,6 +98,8 @@
real (kind=RKIND), dimension(:,:,:), pointer :: tracers
integer, dimension(:), pointer :: &
maxLevelCell, maxLevelEdgeTop
+!TDR: i think that A and C are unused. if they are used, the names should be
+!TDR: expanded in order to find via grep
real (kind=RKIND), dimension(:), allocatable:: A,C,uTemp
real (kind=RKIND), dimension(:,:), allocatable:: tracersTemp
@@ -138,6 +141,7 @@
block % state % time_levs(2) % state % h_edge % array(:,:) &
= block % state % time_levs(1) % state % h_edge % array(:,:)
+!TDR: either the following code or the comment "couple tracers to h" is incorrect below
do iCell=1,block % mesh % nCells ! couple tracers to h
! change to maxLevelCell % array(iCell) ?
do k=1,block % mesh % nVertLevels
@@ -239,6 +243,17 @@
/block % mesh % dcEdge % array(iEdge) )
enddo
+!TDR: it would be clean and less prone to error if the following action was computed via
+! uhSum = c0
+! hSum = c0
+! do k=1,block % mesh % maxLevelEdgeTop % array(iEdge)
+! uhSum = uhSum + block % state % time_levs(2) % state % h_edge % array(k,iEdge) * uTemp(k)
+! hSum = hSum + block % state % time_levs(2) % state % h_edge % array(k,iEdge)
+! enddo
+! block % state % time_levs(1) % state % GBtrForcing % array(iEdge) = split*uhSum/hSum/dt
+!TDR
+
+
uhSum = block % state % time_levs(2) % state % h_edge % array(1,iEdge) * uTemp(1)
hSum = block % state % time_levs(2) % state % h_edge % array(1,iEdge)
@@ -363,6 +378,11 @@
cell1 = block % mesh % cellsOnEdge % array(1,iEdge)
cell2 = block % mesh % cellsOnEdge % array(2,iEdge)
+!TDR: we say that we are computing -f*uPerp, but then we call it uPerp. this is confusing on several fronts
+!TDR: first, we are computing an acceleration (i.e. tendency to u) but give it a name that implies velocity
+!TDR: second, we call the total of -f*uPerp "uPerp" ...
+!TDR: I would replace all "uPerp" with "CoriolisAcceleration" or something a bit shorter but along those lines
+
! Compute -f*uPerp
uPerp = 0.0
do i = 1,block % mesh % nEdgesOnEdge % array(iEdge)
@@ -425,6 +445,12 @@
cell1 = block % mesh % cellsOnEdge % array(1,iEdge)
cell2 = block % mesh % cellsOnEdge % array(2,iEdge)
+
+!TDR: seems to me that we do not want to use hZlevel to compute total depth at edges.
+!TDR: seems like before we start this process we should compute the sum of h_edge(at time n) and subtract ssh(at time n), call this depthAtRestEdge
+!TDR: then here we sum depthAtRestEdge with sshEdge to compute flux. depthAtRest{Edge,Cell} could get moved into the "grid.nc" file at some point
+!TDR: this way we remove any reference to a field (like hZlevel) that is associated with a specific vertical grid
+
sshEdge = 0.5 * (block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(cell1) &
+ block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(cell2) )
hSum = sum(block % mesh % hZLevel % array (1:block % mesh % maxLevelEdgeTop % array(iEdge)))
@@ -493,6 +519,7 @@
sshCell2 = (1-config_btr_gam2_SSHWt1)*block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(cell2) &
+ config_btr_gam2_SSHWt1 *block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle % array(cell2)
+!TDR: I cannot find where u_diffusionBtr is computed
block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) &
= (block % state % time_levs(oldBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) &
+ dt/config_n_btr_subcycles *(uPerp - gravity *(sshCell2 - sshCell1) /block % mesh % dcEdge % array(iEdge) &
@@ -697,6 +724,16 @@
!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!TDR: it seems almost trivial to hold off on doing T, S and rho updates until the
+!TDR: dycore time step is complete. we might want to take this opportunity to clean-up
+!TDR: Stage3 in order to facilitate the testing of not doing tracer updates after this code is committed to trunk.
+!TDR: at this point, I am suggesting just pushing some of this code into subroutines.
+!TDR: see comments farther down
+
+!TDR: this is the first occurrence of computing wTop that I can find.
+!TDR: the call to ocn_wtop is not found at initialization and wTop is not written to restart
+!TDR: so upon restart, won't uBaroclinic be computed with wTop=0 on the first time step?
+
block => domain % blocklist
do while (associated(block))
call ocn_wtop(block % state % time_levs(1) % state,block % state % time_levs(2) % state, block % mesh)
@@ -732,6 +769,9 @@
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if (split_explicit_step < config_n_ts_iter) then
+!TDR: should we move this code into a subroutine called "compute_intermediate_value_at_midtime"
+!TDR: this could be within a contains statement in this routine
+
! Only need T & S for earlier iterations,
! then all the tracers needed the last time through.
do iCell=1,block % mesh % nCells
@@ -781,6 +821,9 @@
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
elseif (split_explicit_step == config_n_ts_iter) then
+!TDR: should we move this code into a subroutine called "compute_final_values_at_nplus1"?
+!TDR: this could be within a contains statement in this routine
+
do iCell=1,block % mesh % nCells
do k=1,block % mesh % maxLevelCell % array(iCell)
</font>
</pre>