<p><b>dwj07@fsu.edu</b> 2011-11-14 10:31:09 -0700 (Mon, 14 Nov 2011)</p><p><br>
        -- BRANCH COMMIT --<br>
<br>
        Adding synchronous IO to diagnose some issues seen in the ocean core on specific machines.<br>
        This is not really a long term solution, just a temporary setup to help diagnose.<br>
</p><hr noshade><pre><font color="gray">Modified: branches/ocean_projects/synchronous_io/namelist.input.sw
===================================================================
--- trunk/mpas/namelist.input.sw        2011-11-12 00:38:13 UTC (rev 1190)
+++ branches/ocean_projects/synchronous_io/namelist.input.sw        2011-11-14 17:31:09 UTC (rev 1192)
@@ -1,33 +1,94 @@
&sw_model
config_test_case = 5
- config_time_integration = 'RK4'
- config_dt = 172.8
+ config_time_integration = 'split_explicit'
+ config_rk_filter_btr_mode = .false.
+ config_dt = 100.0
config_start_time = '0000-01-01_00:00:00'
- config_run_duration = '15_00:00:00'
- config_stats_interval = 0
- config_h_ScaleWithMesh = .false.
- config_h_mom_eddy_visc2 = 0.0
- config_h_mom_eddy_visc4 = 0.0
- config_h_tracer_eddy_diff2 = 0.0
- config_h_tracer_eddy_diff4 = 0.0
- config_thickness_adv_order = 2
- config_tracer_adv_order = 2
- config_positive_definite = .false.
- config_monotonic = .false.
- config_wind_stress = .false.
- config_bottom_drag = .false.
+ config_run_duration = '0000_00:30:00'
+ config_stats_interval = 18
/
- config_stop_time = '0000-01-16_00:00:00'
-
&io
config_input_name = 'grid.nc'
config_output_name = 'output.nc'
config_restart_name = 'restart.nc'
- config_output_interval = '1_00:00:00'
- config_frames_per_outfile = 0
+ config_output_interval = '20_00:00:00'
+ config_frames_per_outfile = 1000000
/
-
&restart
- config_restart_interval = '15_00:00:00'
config_do_restart = .false.
+ config_restart_interval = '120_00:00:00'
/
+&grid
+ config_vert_grid_type = 'isopycnal'
+ config_rho0 = 1000
+/
+&split_explicit_ts
+ config_n_ts_iter = 2
+ config_n_bcl_iter_beg = 1
+ config_n_bcl_iter_mid = 2
+ config_n_bcl_iter_end = 2
+ config_n_btr_subcycles = 10
+ config_n_btr_cor_iter = 2
+ config_u_correction = .true.
+ config_filter_btr_mode = .false.
+ config_btr_mom_decay = .false.
+ config_btr_mom_decay_time = 3600.0
+ config_btr_mom_eddy_visc2 = 0.0
+ config_btr_subcycle_loop_factor = 2
+ config_SSH_from = 'avg_flux'
+ config_new_btr_variables_from = 'btr_avg'
+ config_btr_gam1_uWt1 = 0.5
+ config_btr_gam2_SSHWt1 = 1.0
+ config_btr_gam3_uWt2 = 1.0
+ config_btr_solve_SSH2 = .false.
+/
+&hmix
+ config_h_mom_eddy_visc2 = 1.0e5
+ config_h_mom_eddy_visc4 = 0.0
+ config_visc_vorticity_term = .true.
+ config_h_tracer_eddy_diff2 = 1.0e4
+ config_h_tracer_eddy_diff4 = 0.0
+ config_mom_decay = .false.
+ config_mom_decay_time = 3600.0
+/
+&vmix
+ config_vert_visc_type = 'rich'
+ config_vert_diff_type = 'rich'
+ config_implicit_vertical_mix = .true.
+ config_convective_visc = 1.0
+ config_convective_diff = 1.0
+ config_bottom_drag_coeff = 1.0e-3
+/
+&vmix_const
+ config_vert_visc = 2.5e-5
+ config_vert_diff = 2.5e-5
+/
+&vmix_rich
+ config_bkrd_vert_visc = 1.0e-4
+ config_bkrd_vert_diff = 1.0e-5
+ config_rich_mix = 0.005
+/
+&vmix_tanh
+ config_max_visc_tanh = 2.5e-1
+ config_min_visc_tanh = 1.0e-4
+ config_max_diff_tanh = 2.5e-2
+ config_min_diff_tanh = 1.0e-5
+ config_zMid_tanh = -100
+ config_zWidth_tanh = 100
+/
+&eos
+ config_eos_type = 'jm'
+/
+&advection
+ config_vert_tracer_adv = 'stencil'
+ config_vert_tracer_adv_order = 2
+ config_tracer_adv_order = 2
+ config_thickness_adv_order = 2
+ config_positive_definite = .false.
+ config_monotonic = .false.
+/
+&restore
+ config_restoreTS = .false.
+ config_restoreT_timescale = 90.0
+ config_restoreS_timescale = 90.0
+/
Modified: branches/ocean_projects/synchronous_io/src/framework/mpas_dmpar.F
===================================================================
--- trunk/mpas/src/framework/mpas_dmpar.F        2011-11-12 00:38:13 UTC (rev 1190)
+++ branches/ocean_projects/synchronous_io/src/framework/mpas_dmpar.F        2011-11-14 17:31:09 UTC (rev 1192)
@@ -636,12 +636,12 @@
end if
nMesgSend = nMesgRecv
- call MPI_Irecv(nMesgRecv, 1, MPI_INTEGERKIND, recvNeighbor, i, dminfo % comm, mpi_rreq, mpi_ierr)
- call MPI_Isend(nMesgSend, 1, MPI_INTEGERKIND, sendNeighbor, i, dminfo % comm, mpi_sreq, mpi_ierr)
+ call MPI_Srecv(nMesgRecv, 1, MPI_INTEGERKIND, recvNeighbor, i, dminfo % comm, mpi_rreq, mpi_ierr)
+ call MPI_Ssend(nMesgSend, 1, MPI_INTEGERKIND, sendNeighbor, i, dminfo % comm, mpi_sreq, mpi_ierr)
call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr)
call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr)
- call MPI_Irecv(ownerListIn, nMesgRecv, MPI_INTEGERKIND, recvNeighbor, i, dminfo % comm, mpi_rreq, mpi_ierr)
- call MPI_Isend(ownerListOut, nMesgSend, MPI_INTEGERKIND, sendNeighbor, i, dminfo % comm, mpi_sreq, mpi_ierr)
+ call MPI_Srecv(ownerListIn, nMesgRecv, MPI_INTEGERKIND, recvNeighbor, i, dminfo % comm, mpi_rreq, mpi_ierr)
+ call MPI_Ssend(ownerListOut, nMesgSend, MPI_INTEGERKIND, sendNeighbor, i, dminfo % comm, mpi_sreq, mpi_ierr)
call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr)
call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr)
end do
@@ -743,7 +743,7 @@
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
allocate(recvListPtr % ibuffer(recvListPtr % nlist))
- call MPI_Irecv(recvListPtr % ibuffer, recvListPtr % nlist, MPI_INTEGERKIND, &
+ call MPI_Srecv(recvListPtr % ibuffer, recvListPtr % nlist, MPI_INTEGERKIND, &
recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
end if
recvListPtr => recvListPtr % next
@@ -755,7 +755,7 @@
allocate(sendListPtr % ibuffer(sendListPtr % nlist))
call mpas_pack_send_buf1d_integer(nOwnedList, arrayIn, sendListPtr, 1, sendListPtr % nlist, &
sendListPtr % ibuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % ibuffer, sendListPtr % nlist, MPI_INTEGERKIND, &
+ call MPI_Ssend(sendListPtr % ibuffer, sendListPtr % nlist, MPI_INTEGERKIND, &
sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
end if
sendListPtr => sendListPtr % next
@@ -834,7 +834,7 @@
if (recvListPtr % procID /= dminfo % my_proc_id) then
d2 = dim1 * recvListPtr % nlist
allocate(recvListPtr % ibuffer(d2))
- call MPI_Irecv(recvListPtr % ibuffer, d2, MPI_INTEGERKIND, &
+ call MPI_Srecv(recvListPtr % ibuffer, d2, MPI_INTEGERKIND, &
recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
end if
recvListPtr => recvListPtr % next
@@ -847,7 +847,7 @@
allocate(sendListPtr % ibuffer(d2))
call mpas_pack_send_buf2d_integer(1, dim1, nOwnedList, arrayIn, sendListPtr, 1, d2, &
sendListPtr % ibuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % ibuffer, d2, MPI_INTEGERKIND, &
+ call MPI_Ssend(sendListPtr % ibuffer, d2, MPI_INTEGERKIND, &
sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
end if
sendListPtr => sendListPtr % next
@@ -926,7 +926,7 @@
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
allocate(recvListPtr % rbuffer(recvListPtr % nlist))
- call MPI_Irecv(recvListPtr % rbuffer, recvListPtr % nlist, MPI_REALKIND, &
+ call MPI_Srecv(recvListPtr % rbuffer, recvListPtr % nlist, MPI_REALKIND, &
recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
end if
recvListPtr => recvListPtr % next
@@ -938,7 +938,7 @@
allocate(sendListPtr % rbuffer(sendListPtr % nlist))
call mpas_pack_send_buf1d_real(nOwnedList, arrayIn, sendListPtr, 1, sendListPtr % nlist, &
sendListPtr % rbuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % rbuffer, sendListPtr % nlist, MPI_REALKIND, &
+ call MPI_Ssend(sendListPtr % rbuffer, sendListPtr % nlist, MPI_REALKIND, &
sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
end if
sendListPtr => sendListPtr % next
@@ -1017,7 +1017,7 @@
if (recvListPtr % procID /= dminfo % my_proc_id) then
d2 = dim1 * recvListPtr % nlist
allocate(recvListPtr % rbuffer(d2))
- call MPI_Irecv(recvListPtr % rbuffer, d2, MPI_REALKIND, &
+ call MPI_Srecv(recvListPtr % rbuffer, d2, MPI_REALKIND, &
recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
end if
recvListPtr => recvListPtr % next
@@ -1030,7 +1030,7 @@
allocate(sendListPtr % rbuffer(d2))
call mpas_pack_send_buf2d_real(1, dim1, nOwnedList, arrayIn, sendListPtr, 1, d2, &
sendListPtr % rbuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % rbuffer, d2, MPI_REALKIND, &
+ call MPI_Ssend(sendListPtr % rbuffer, d2, MPI_REALKIND, &
sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
end if
sendListPtr => sendListPtr % next
@@ -1110,7 +1110,7 @@
if (recvListPtr % procID /= dminfo % my_proc_id) then
d3 = dim1 * dim2 * recvListPtr % nlist
allocate(recvListPtr % rbuffer(d3))
- call MPI_Irecv(recvListPtr % rbuffer, d3, MPI_REALKIND, &
+ call MPI_Srecv(recvListPtr % rbuffer, d3, MPI_REALKIND, &
recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
end if
recvListPtr => recvListPtr % next
@@ -1123,7 +1123,7 @@
allocate(sendListPtr % rbuffer(d3))
call mpas_pack_send_buf3d_real(1, dim1, 1, dim2, nOwnedList, arrayIn, sendListPtr, 1, d3, &
sendListPtr % rbuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % rbuffer, d3, MPI_REALKIND, &
+ call MPI_Ssend(sendListPtr % rbuffer, d3, MPI_REALKIND, &
sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
end if
sendListPtr => sendListPtr % next
@@ -1474,7 +1474,7 @@
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
allocate(recvListPtr % ibuffer(recvListPtr % nlist))
- call MPI_Irecv(recvListPtr % ibuffer, recvListPtr % nlist, MPI_INTEGERKIND, &
+ call MPI_Srecv(recvListPtr % ibuffer, recvListPtr % nlist, MPI_INTEGERKIND, &
recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
end if
recvListPtr => recvListPtr % next
@@ -1485,7 +1485,7 @@
if (sendListPtr % procID /= dminfo % my_proc_id) then
allocate(sendListPtr % ibuffer(sendListPtr % nlist))
call mpas_pack_send_buf1d_integer(dim1, array, sendListPtr, 1, sendListPtr % nlist, sendListPtr % ibuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % ibuffer, sendListPtr % nlist, MPI_INTEGERKIND, &
+ call MPI_Ssend(sendListPtr % ibuffer, sendListPtr % nlist, MPI_INTEGERKIND, &
sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
end if
sendListPtr => sendListPtr % next
@@ -1536,7 +1536,7 @@
if (recvListPtr % procID /= dminfo % my_proc_id) then
d2 = dim1 * recvListPtr % nlist
allocate(recvListPtr % ibuffer(d2))
- call MPI_Irecv(recvListPtr % ibuffer, d2, MPI_INTEGERKIND, &
+ call MPI_Srecv(recvListPtr % ibuffer, d2, MPI_INTEGERKIND, &
recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
end if
recvListPtr => recvListPtr % next
@@ -1548,7 +1548,7 @@
d2 = dim1 * sendListPtr % nlist
allocate(sendListPtr % ibuffer(d2))
call mpas_pack_send_buf2d_integer(1, dim1, dim2, array, sendListPtr, 1, d2, sendListPtr % ibuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % ibuffer, d2, MPI_INTEGERKIND, &
+ call MPI_Ssend(sendListPtr % ibuffer, d2, MPI_INTEGERKIND, &
sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
end if
sendListPtr => sendListPtr % next
@@ -1600,7 +1600,7 @@
if (recvListPtr % procID /= dminfo % my_proc_id) then
d3 = dim1 * dim2 * recvListPtr % nlist
allocate(recvListPtr % ibuffer(d3))
- call MPI_Irecv(recvListPtr % ibuffer, d3, MPI_INTEGERKIND, &
+ call MPI_Srecv(recvListPtr % ibuffer, d3, MPI_INTEGERKIND, &
recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
end if
recvListPtr => recvListPtr % next
@@ -1613,7 +1613,7 @@
allocate(sendListPtr % ibuffer(d3))
call mpas_pack_send_buf3d_integer(1, dim1, 1, dim2, dim3, array, sendListPtr, 1, d3, &
sendListPtr % ibuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % ibuffer, d3, MPI_INTEGERKIND, &
+ call MPI_Ssend(sendListPtr % ibuffer, d3, MPI_INTEGERKIND, &
sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
end if
sendListPtr => sendListPtr % next
@@ -1754,7 +1754,7 @@
do while (associated(recvListPtr))
if (recvListPtr % procID /= dminfo % my_proc_id) then
allocate(recvListPtr % rbuffer(recvListPtr % nlist))
- call MPI_Irecv(recvListPtr % rbuffer, recvListPtr % nlist, MPI_REALKIND, &
+ call MPI_Srecv(recvListPtr % rbuffer, recvListPtr % nlist, MPI_REALKIND, &
recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
end if
recvListPtr => recvListPtr % next
@@ -1765,7 +1765,7 @@
if (sendListPtr % procID /= dminfo % my_proc_id) then
allocate(sendListPtr % rbuffer(sendListPtr % nlist))
call mpas_pack_send_buf1d_real(dim1, array, sendListPtr, 1, sendListPtr % nlist, sendListPtr % rbuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % rbuffer, sendListPtr % nlist, MPI_REALKIND, &
+ call MPI_Ssend(sendListPtr % rbuffer, sendListPtr % nlist, MPI_REALKIND, &
sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
end if
sendListPtr => sendListPtr % next
@@ -1816,7 +1816,7 @@
if (recvListPtr % procID /= dminfo % my_proc_id) then
d2 = dim1 * recvListPtr % nlist
allocate(recvListPtr % rbuffer(d2))
- call MPI_Irecv(recvListPtr % rbuffer, d2, MPI_REALKIND, &
+ call MPI_Srecv(recvListPtr % rbuffer, d2, MPI_REALKIND, &
recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
end if
recvListPtr => recvListPtr % next
@@ -1828,7 +1828,7 @@
d2 = dim1 * sendListPtr % nlist
allocate(sendListPtr % rbuffer(d2))
call mpas_pack_send_buf2d_real(1, dim1, dim2, array, sendListPtr, 1, d2, sendListPtr % rbuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % rbuffer, d2, MPI_REALKIND, &
+ call MPI_Ssend(sendListPtr % rbuffer, d2, MPI_REALKIND, &
sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
end if
sendListPtr => sendListPtr % next
@@ -1880,7 +1880,7 @@
if (recvListPtr % procID /= dminfo % my_proc_id) then
d3 = dim1 * dim2 * recvListPtr % nlist
allocate(recvListPtr % rbuffer(d3))
- call MPI_Irecv(recvListPtr % rbuffer, d3, MPI_REALKIND, &
+ call MPI_Srecv(recvListPtr % rbuffer, d3, MPI_REALKIND, &
recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
end if
recvListPtr => recvListPtr % next
@@ -1893,7 +1893,7 @@
allocate(sendListPtr % rbuffer(d3))
call mpas_pack_send_buf3d_real(1, dim1, 1, dim2, dim3, array, sendListPtr, 1, d3, &
sendListPtr % rbuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % rbuffer, d3, MPI_REALKIND, &
+ call MPI_Ssend(sendListPtr % rbuffer, d3, MPI_REALKIND, &
sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
end if
sendListPtr => sendListPtr % next
</font>
</pre>