<p><b>qchen3@fsu.edu</b> 2013-02-19 10:55:19 -0700 (Tue, 19 Feb 2013)</p><p><br>
BRANCH COMMIT<br>
<br>
Merging changes from the trunk (through r2482) into the gm_implement branch.<br>
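<br>
Among the merged trunk changes are the new &amp;eos_linear namelist group (config_eos_linear_alpha, _beta, _Tref, _Sref, _rhoref) and the new mpas_ocn_diagnostics module. Below is a minimal standalone sketch of how such parameters typically enter a linear equation of state; the program, the sample T and S values, and the exact functional form are illustrative assumptions, not the MPAS source.<br>
</p><pre>
program linear_eos_sketch
   implicit none
   integer, parameter :: RKIND = selected_real_kind(13)
   ! Parameter values copied from the eos_linear namelist defaults in this commit.
   real (kind=RKIND), parameter :: alpha  = 2.55e-1_RKIND   ! config_eos_linear_alpha
   real (kind=RKIND), parameter :: beta   = 7.64e-1_RKIND   ! config_eos_linear_beta
   real (kind=RKIND), parameter :: Tref   = 19.0_RKIND      ! config_eos_linear_Tref
   real (kind=RKIND), parameter :: Sref   = 35.0_RKIND      ! config_eos_linear_Sref
   real (kind=RKIND), parameter :: rhoref = 1025.022_RKIND  ! config_eos_linear_rhoref
   real (kind=RKIND) :: T, S, rho

   T = 10.0_RKIND   ! potential temperature (deg C), illustrative value
   S = 34.5_RKIND   ! salinity (PSU), illustrative value

   ! Density linearized about (Tref, Sref): decreases with T, increases with S.
   rho = rhoref - alpha*(T - Tref) + beta*(S - Sref)

   print *, 'linear EOS density (kg/m^3) = ', rho
end program linear_eos_sketch
</pre><p>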
</p><hr noshade><pre><font color="gray">Index: branches/ocean_projects/gm_implement
===================================================================
--- branches/ocean_projects/gm_implement        2013-02-15 18:04:59 UTC (rev 2482)
+++ branches/ocean_projects/gm_implement        2013-02-19 17:55:19 UTC (rev 2483)

Property changes on: branches/ocean_projects/gm_implement
___________________________________________________________________
Modified: svn:mergeinfo
## -4,10 +4,12 ##
 /branches/ocean_projects/ale_vert_coord:1225-1383
 /branches/ocean_projects/ale_vert_coord_new:1387-1428
 /branches/ocean_projects/cesm_coupling:2147-2344
+/branches/ocean_projects/diagnostics_revision:2439-2462
 /branches/ocean_projects/gmvar:1214-1514,1517-1738
 /branches/ocean_projects/imp_vert_mix_error:1847-1887
 /branches/ocean_projects/imp_vert_mix_mrp:754-986
 /branches/ocean_projects/leith_mrp:2182-2241
+/branches/ocean_projects/linear_eos:2435-2437
 /branches/ocean_projects/monotonic_advection:1499-1640
 /branches/ocean_projects/monthly_forcing:1810-1867
 /branches/ocean_projects/namelist_cleanup:2319-2414
## -26,4 +28,4 ##
 /branches/omp_blocks/multiple_blocks:1803-2084
 /branches/source_renaming:1082-1113
 /branches/time_manager:924-962
-/trunk/mpas:2424-2428
+/trunk/mpas:2424-2482
\ No newline at end of property
Modified: branches/ocean_projects/gm_implement/namelist.input
===================================================================
--- branches/ocean_projects/gm_implement/namelist.input        2013-02-15 18:04:59 UTC (rev 2482)
+++ branches/ocean_projects/gm_implement/namelist.input        2013-02-19 17:55:19 UTC (rev 2483)
@@ -1 +1 @@
-link namelist.input.sw
\ No newline at end of file
+link namelist.input.ocean
\ No newline at end of file

Modified: branches/ocean_projects/gm_implement/namelist.input.ocean
===================================================================
--- branches/ocean_projects/gm_implement/namelist.input.ocean        2013-02-15 18:04:59 UTC (rev 2482)
+++ branches/ocean_projects/gm_implement/namelist.input.ocean        2013-02-19 17:55:19 UTC (rev 2483)
@@ -2,30 +2,28 @@
         config_do_restart = .false.
         config_start_time = '0000-01-01_00:00:00'
         config_stop_time = 'none'
-        config_run_duration = '0_06:00:00'
+        config_run_duration = '0_01:00:00'
         config_calendar_type = '360day'
-        config_ncouple_per_day = 1
 /
 &amp;io
         config_input_name = 'grid.nc'
         config_output_name = 'output.nc'
         config_restart_name = 'restart.nc'
-        config_restart_interval = '0_03:00:00'
+        config_restart_interval = '0_06:00:00'
         config_output_interval = '0_06:00:00'
-        config_stats_interval = '00_00:00:01'
-        config_write_stats_on_startup = .false.
-        config_write_output_on_startup = .false.
-        config_frames_per_outfile = 0
-        config_pio_num_iotasks = 0 
+        config_stats_interval = '0_01:00:00'
+        config_write_stats_on_startup = .true.
+        config_write_output_on_startup = .true.
+        config_frames_per_outfile = 1000
+        config_pio_num_iotasks = 0
         config_pio_stride = 1
 /
 &amp;time_integration
-        config_dt = 3000.0
+        config_dt = 20.0
         config_time_integrator = 'split_explicit'
 /
 &amp;grid
         config_num_halos = 3
-        config_enforce_grid_on_restart = .false.
         config_vert_coord_movement = 'uniform_stretching'
         config_alter_ICs_for_pbcs = 'zlevel_pbcs_off'
         config_min_pbc_fraction = 0.10
@@ -38,23 +36,23 @@
         config_proc_decomp_file_prefix = 'graph.info.part.'
 /
 &amp;hmix
-        config_h_ScaleWithMesh = .true.
+        config_hmix_ScaleWithMesh = .false.
         config_visc_vorticity_term = .true.
         config_apvm_scale_factor = 0.0
 /
 &amp;hmix_del2
         config_use_mom_del2 = .false.
         config_use_tracer_del2 = .false.
-        config_h_mom_eddy_visc2 = 0.0
-        config_h_tracer_eddy_diff2 = 0.0
-        config_visc_vorticity_visc2_scale = 1.0
+        config_mom_del2 = 0.0
+        config_tracer_del2 = 0.0
+        config_vorticity_del2_scale = 1.0
 /
 &amp;hmix_del4
         config_use_mom_del4 = .true.
         config_use_tracer_del4 = .false.
-        config_h_mom_eddy_visc4 = 5.0e13
-        config_h_tracer_eddy_diff4 = 0.0
-        config_visc_vorticity_visc4_scale = 1.0
+        config_mom_del4 = 5.0e13
+        config_tracer_del4 = 0.0
+        config_vorticity_del4_scale = 1.0
 /
 &amp;hmix_Leith
         config_use_Leith_del2 = .false.
@@ -121,6 +119,13 @@
 &amp;eos
         config_eos_type = 'jm'
 /
+&amp;eos_linear
+        config_eos_linear_alpha = 2.55e-1
+        config_eos_linear_beta = 7.64e-1
+        config_eos_linear_Tref = 19.0
+        config_eos_linear_Sref = 35.0
+        config_eos_linear_rhoref = 1025.022
+/
 &amp;split_explicit_ts
         config_n_ts_iter = 2
         config_n_bcl_iter_beg = 1
@@ -136,12 +141,12 @@
         config_btr_solve_SSH2 = .false.
 /
 &amp;sw_model
-        config_test_case = 0
+        config_test_case = 0
 /
 &amp;debug
         config_check_zlevel_consistency = .false.
         config_filter_btr_mode = .false.
-        config_prescribe_velocity  = .false.
+        config_prescribe_velocity = .false.
         config_prescribe_thickness = .false.
         config_include_KE_vertex = .false.
         config_check_tracer_monotonicity = .false.

Index: branches/ocean_projects/gm_implement/src/core_ocean
===================================================================
--- branches/ocean_projects/gm_implement/src/core_ocean        2013-02-15 18:04:59 UTC (rev 2482)
+++ branches/ocean_projects/gm_implement/src/core_ocean        2013-02-19 17:55:19 UTC (rev 2483)

Property changes on: branches/ocean_projects/gm_implement/src/core_ocean
___________________________________________________________________
Modified: svn:mergeinfo
## -4,10 +4,12 ##
 /branches/ocean_projects/ale_vert_coord/src/core_ocean:1225-1383
 /branches/ocean_projects/ale_vert_coord_new/src/core_ocean:1387-1428
 /branches/ocean_projects/cesm_coupling/src/core_ocean:2147-2344
+/branches/ocean_projects/diagnostics_revision/src/core_ocean:2439-2462
 /branches/ocean_projects/gmvar/src/core_ocean:1214-1514,1517-1738
 /branches/ocean_projects/imp_vert_mix_error/src/core_ocean:1847-1887
 /branches/ocean_projects/imp_vert_mix_mrp/src/core_ocean:754-986
 /branches/ocean_projects/leith_mrp/src/core_ocean:2182-2241
+/branches/ocean_projects/linear_eos/src/core_ocean:2435-2437
 /branches/ocean_projects/monotonic_advection/src/core_ocean:1499-1640
 /branches/ocean_projects/monthly_forcing/src/core_ocean:1810-1867
 /branches/ocean_projects/namelist_cleanup/src/core_ocean:2319-2414
## -28,4 +30,4 ##
 /branches/omp_blocks/openmp_test/src/core_ocean_elements:2161-2201
 /branches/source_renaming/src/core_ocean:1082-1113
 /branches/time_manager/src/core_ocean:924-962
-/trunk/mpas/src/core_ocean:2424-2428
+/trunk/mpas/src/core_ocean:2424-2482
\ No newline at end of property
Modified: branches/ocean_projects/gm_implement/src/core_ocean/Makefile
===================================================================
--- branches/ocean_projects/gm_implement/src/core_ocean/Makefile        2013-02-15 18:04:59 UTC (rev 2482)
+++ branches/ocean_projects/gm_implement/src/core_ocean/Makefile        2013-02-19 17:55:19 UTC (rev 2483)
@@ -3,59 +3,61 @@
 OBJS = mpas_ocn_mpas_core.o \
        mpas_ocn_test_cases.o \
        mpas_ocn_advection.o \
-           mpas_ocn_thick_hadv.o \
-           mpas_ocn_thick_vadv.o \
-           mpas_ocn_gm.o \
-           mpas_ocn_vel_coriolis.o \
-           mpas_ocn_vel_vadv.o \
-           mpas_ocn_vel_hmix.o \
-           mpas_ocn_vel_hmix_del2.o \
-           mpas_ocn_vel_hmix_leith.o \
-           mpas_ocn_vel_hmix_del4.o \
-           mpas_ocn_vel_forcing.o \
-           mpas_ocn_vel_forcing_windstress.o \
-           mpas_ocn_vel_forcing_bottomdrag.o \
-           mpas_ocn_vel_forcing_rayleigh.o \
-           mpas_ocn_vel_pressure_grad.o \
-           mpas_ocn_tracer_vadv.o \
-           mpas_ocn_tracer_vadv_spline.o \
-           mpas_ocn_tracer_vadv_spline2.o \
-           mpas_ocn_tracer_vadv_spline3.o \
-           mpas_ocn_tracer_vadv_stencil.o \
-           mpas_ocn_tracer_vadv_stencil2.o \
-           mpas_ocn_tracer_vadv_stencil3.o \
-           mpas_ocn_tracer_vadv_stencil4.o \
-           mpas_ocn_tracer_hadv.o \
-           mpas_ocn_tracer_hadv2.o \
-           mpas_ocn_tracer_hadv3.o \
-           mpas_ocn_tracer_hadv4.o \
-           mpas_ocn_tracer_hmix.o \
-           mpas_ocn_tracer_hmix_del2.o \
-           mpas_ocn_tracer_hmix_del4.o \
-           mpas_ocn_vmix.o \
-           mpas_ocn_vmix_coefs_const.o \
-           mpas_ocn_vmix_coefs_rich.o \
-           mpas_ocn_vmix_coefs_tanh.o \
-           mpas_ocn_restoring.o \
-           mpas_ocn_tendency.o \
-           mpas_ocn_tracer_advection.o \
-           mpas_ocn_tracer_advection_std.o \
-           mpas_ocn_tracer_advection_std_hadv.o \
-           mpas_ocn_tracer_advection_std_vadv.o \
-           mpas_ocn_tracer_advection_std_vadv2.o \
-           mpas_ocn_tracer_advection_std_vadv3.o \
-           mpas_ocn_tracer_advection_std_vadv4.o \
-           mpas_ocn_tracer_advection_mono.o \
-           mpas_ocn_tracer_advection_helpers.o \
+       mpas_ocn_thick_hadv.o \
+       mpas_ocn_thick_vadv.o \
+       mpas_ocn_gm.o \
+       mpas_ocn_vel_coriolis.o \
+       mpas_ocn_vel_vadv.o \
+       mpas_ocn_vel_hmix.o \
+       mpas_ocn_vel_hmix_del2.o \
+       mpas_ocn_vel_hmix_leith.o \
+       mpas_ocn_vel_hmix_del4.o \
+       mpas_ocn_vel_forcing.o \
+       mpas_ocn_vel_forcing_windstress.o \
+       mpas_ocn_vel_forcing_bottomdrag.o \
+       mpas_ocn_vel_forcing_rayleigh.o \
+       mpas_ocn_vel_pressure_grad.o \
+       mpas_ocn_tracer_vadv.o \
+       mpas_ocn_tracer_vadv_spline.o \
+       mpas_ocn_tracer_vadv_spline2.o \
+       mpas_ocn_tracer_vadv_spline3.o \
+       mpas_ocn_tracer_vadv_stencil.o \
+       mpas_ocn_tracer_vadv_stencil2.o \
+       mpas_ocn_tracer_vadv_stencil3.o \
+       mpas_ocn_tracer_vadv_stencil4.o \
+       mpas_ocn_tracer_hadv.o \
+       mpas_ocn_tracer_hadv2.o \
+       mpas_ocn_tracer_hadv3.o \
+       mpas_ocn_tracer_hadv4.o \
+       mpas_ocn_tracer_hmix.o \
+       mpas_ocn_tracer_hmix_del2.o \
+       mpas_ocn_tracer_hmix_del4.o \
+       mpas_ocn_vmix.o \
+       mpas_ocn_vmix_coefs_const.o \
+       mpas_ocn_vmix_coefs_rich.o \
+       mpas_ocn_vmix_coefs_tanh.o \
+       mpas_ocn_restoring.o \
+       mpas_ocn_tendency.o \
+       mpas_ocn_diagnostics.o \
+       mpas_ocn_tracer_advection.o \
+       mpas_ocn_tracer_advection_std.o \
+       mpas_ocn_tracer_advection_std_hadv.o \
+       mpas_ocn_tracer_advection_std_vadv.o \
+       mpas_ocn_tracer_advection_std_vadv2.o \
+       mpas_ocn_tracer_advection_std_vadv3.o \
+       mpas_ocn_tracer_advection_std_vadv4.o \
+       mpas_ocn_tracer_advection_mono.o \
+       mpas_ocn_tracer_advection_helpers.o \
        mpas_ocn_time_integration.o \
        mpas_ocn_time_integration_rk4.o \
        mpas_ocn_time_integration_split.o \
-           mpas_ocn_equation_of_state.o \
-           mpas_ocn_equation_of_state_jm.o \
-           mpas_ocn_equation_of_state_linear.o \
+       mpas_ocn_equation_of_state.o \
+       mpas_ocn_equation_of_state_jm.o \
+       mpas_ocn_equation_of_state_linear.o \
+       mpas_ocn_diagnostics.o \
        mpas_ocn_global_diagnostics.o \
-           mpas_ocn_time_average.o \
-           mpas_ocn_monthly_forcing.o
+       mpas_ocn_time_average.o \
+       mpas_ocn_monthly_forcing.o
 
 all: core_hyd
 
@@ -68,12 +70,14 @@
 
 mpas_ocn_time_integration.o: mpas_ocn_time_integration_rk4.o mpas_ocn_time_integration_split.o
 
-mpas_ocn_time_integration_rk4.o:
+mpas_ocn_time_integration_rk4.o: mpas_ocn_tendency.o mpas_ocn_diagnostics.o
 
-mpas_ocn_time_integration_split.o:
+mpas_ocn_time_integration_split.o: mpas_ocn_tendency.o mpas_ocn_diagnostics.o
 
 mpas_ocn_tendency.o: mpas_ocn_time_average.o
 
+mpas_ocn_diagnostics.o: mpas_ocn_time_average.o
+
 mpas_ocn_global_diagnostics.o: 
 
 mpas_ocn_time_average.o:
@@ -172,61 +176,61 @@
 
 mpas_ocn_monthly_forcing.o:
 
-mpas_ocn_mpas_core.o: mpas_ocn_mpas_core.o \
-                                  mpas_ocn_test_cases.o \
-                                          mpas_ocn_advection.o \
-                                          mpas_ocn_thick_hadv.o \
+mpas_ocn_mpas_core.o: mpas_ocn_test_cases.o \
+                      mpas_ocn_advection.o \
+                      mpas_ocn_thick_hadv.o \
                       mpas_ocn_gm.o \
-                                          mpas_ocn_thick_vadv.o \
-                                          mpas_ocn_vel_coriolis.o \
-                                          mpas_ocn_vel_vadv.o \
-                                          mpas_ocn_vel_hmix.o \
-                                          mpas_ocn_vel_hmix_del2.o \
-                                          mpas_ocn_vel_hmix_leith.o \
-                                          mpas_ocn_vel_hmix_del4.o \
-                                          mpas_ocn_vel_forcing.o \
-                                          mpas_ocn_vel_forcing_windstress.o \
-                                          mpas_ocn_vel_forcing_bottomdrag.o \
-                                          mpas_ocn_vel_pressure_grad.o \
-                                          mpas_ocn_tracer_vadv.o \
-                                          mpas_ocn_tracer_vadv_spline.o \
-                                          mpas_ocn_tracer_vadv_spline2.o \
-                                          mpas_ocn_tracer_vadv_spline3.o \
-                                          mpas_ocn_tracer_vadv_stencil.o \
-                                          mpas_ocn_tracer_vadv_stencil2.o \
-                                          mpas_ocn_tracer_vadv_stencil3.o \
-                                          mpas_ocn_tracer_vadv_stencil4.o \
-                                          mpas_ocn_tracer_hadv.o \
-                                          mpas_ocn_tracer_hadv2.o \
-                                          mpas_ocn_tracer_hadv3.o \
-                                          mpas_ocn_tracer_hadv4.o \
-                                          mpas_ocn_tracer_hmix.o \
-                                          mpas_ocn_tracer_hmix_del2.o \
-                                          mpas_ocn_tracer_hmix_del4.o \
-                                          mpas_ocn_vmix.o \
-                                          mpas_ocn_vmix_coefs_const.o \
-                                          mpas_ocn_vmix_coefs_rich.o \
-                                          mpas_ocn_vmix_coefs_tanh.o \
-                                          mpas_ocn_restoring.o \
-                                          mpas_ocn_tracer_advection.o \
-                                          mpas_ocn_tracer_advection_std.o \
-                                          mpas_ocn_tracer_advection_std_hadv.o \
-                                          mpas_ocn_tracer_advection_std_vadv.o \
-                                          mpas_ocn_tracer_advection_std_vadv2.o \
-                                          mpas_ocn_tracer_advection_std_vadv3.o \
-                                          mpas_ocn_tracer_advection_std_vadv4.o \
-                                          mpas_ocn_tracer_advection_mono.o \
-                                          mpas_ocn_tracer_advection_helpers.o \
-                                          mpas_ocn_tendency.o \
-                                          mpas_ocn_time_integration.o \
-                                          mpas_ocn_time_integration_rk4.o \
-                                          mpas_ocn_time_integration_split.o \
-                                          mpas_ocn_equation_of_state.o \
-                                          mpas_ocn_equation_of_state_jm.o \
-                                          mpas_ocn_equation_of_state_linear.o \
-                                          mpas_ocn_global_diagnostics.o \
-                                          mpas_ocn_time_average.o \
-                                          mpas_ocn_monthly_forcing.o
+                      mpas_ocn_thick_vadv.o \
+                      mpas_ocn_vel_coriolis.o \
+                      mpas_ocn_vel_vadv.o \
+                      mpas_ocn_vel_hmix.o \
+                      mpas_ocn_vel_hmix_del2.o \
+                      mpas_ocn_vel_hmix_leith.o \
+                      mpas_ocn_vel_hmix_del4.o \
+                      mpas_ocn_vel_forcing.o \
+                      mpas_ocn_vel_forcing_windstress.o \
+                      mpas_ocn_vel_forcing_bottomdrag.o \
+                      mpas_ocn_vel_pressure_grad.o \
+                      mpas_ocn_tracer_vadv.o \
+                      mpas_ocn_tracer_vadv_spline.o \
+                      mpas_ocn_tracer_vadv_spline2.o \
+                      mpas_ocn_tracer_vadv_spline3.o \
+                      mpas_ocn_tracer_vadv_stencil.o \
+                      mpas_ocn_tracer_vadv_stencil2.o \
+                      mpas_ocn_tracer_vadv_stencil3.o \
+                      mpas_ocn_tracer_vadv_stencil4.o \
+                      mpas_ocn_tracer_hadv.o \
+                      mpas_ocn_tracer_hadv2.o \
+                      mpas_ocn_tracer_hadv3.o \
+                      mpas_ocn_tracer_hadv4.o \
+                      mpas_ocn_tracer_hmix.o \
+                      mpas_ocn_tracer_hmix_del2.o \
+                      mpas_ocn_tracer_hmix_del4.o \
+                      mpas_ocn_vmix.o \
+                      mpas_ocn_vmix_coefs_const.o \
+                      mpas_ocn_vmix_coefs_rich.o \
+                      mpas_ocn_vmix_coefs_tanh.o \
+                      mpas_ocn_restoring.o \
+                      mpas_ocn_tracer_advection.o \
+                      mpas_ocn_tracer_advection_std.o \
+                      mpas_ocn_tracer_advection_std_hadv.o \
+                      mpas_ocn_tracer_advection_std_vadv.o \
+                      mpas_ocn_tracer_advection_std_vadv2.o \
+                      mpas_ocn_tracer_advection_std_vadv3.o \
+                      mpas_ocn_tracer_advection_std_vadv4.o \
+                      mpas_ocn_tracer_advection_mono.o \
+                      mpas_ocn_tracer_advection_helpers.o \
+                      mpas_ocn_tendency.o \
+                      mpas_ocn_diagnostics.o \
+                      mpas_ocn_time_integration.o \
+                      mpas_ocn_time_integration_rk4.o \
+                      mpas_ocn_time_integration_split.o \
+                      mpas_ocn_equation_of_state.o \
+                      mpas_ocn_equation_of_state_jm.o \
+                      mpas_ocn_equation_of_state_linear.o \
+                      mpas_ocn_global_diagnostics.o \
+                      mpas_ocn_time_average.o \
+                      mpas_ocn_monthly_forcing.o
 
 clean:
         $(RM) *.o *.mod *.f90 libdycore.a

Modified: branches/ocean_projects/gm_implement/src/core_ocean/Registry
===================================================================
--- branches/ocean_projects/gm_implement/src/core_ocean/Registry        2013-02-15 18:04:59 UTC (rev 2482)
+++ branches/ocean_projects/gm_implement/src/core_ocean/Registry        2013-02-19 17:55:19 UTC (rev 2483)
@@ -4,57 +4,55 @@
 namelist logical   time_management config_do_restart          .false.
 namelist character time_management config_start_time          '0000-01-01_00:00:00'
 namelist character time_management config_stop_time           'none'
-namelist character time_management config_run_duration        'none'
+namelist character time_management config_run_duration        '0_06:00:00'
 namelist character time_management config_calendar_type       '360day'
-namelist integer   time_management config_ncouple_per_day       1
 
 namelist character io       config_input_name          'grid.nc'
 namelist character io       config_output_name         'output.nc'
 namelist character io       config_restart_name        'restart.nc'
-namelist character io       config_restart_interval    'none'
-namelist character io       config_output_interval     '24:00:00'
-namelist character io       config_stats_interval      '24:00:00'
-namelist logical   io       config_write_stats_on_startup   .false.
+namelist character io       config_restart_interval    '0_06:00:00'
+namelist character io       config_output_interval     '0_06:00:00'
+namelist character io       config_stats_interval      '0_01:00:00'
+namelist logical   io       config_write_stats_on_startup   .true.
 namelist logical   io       config_write_output_on_startup  .true.
-namelist integer   io       config_frames_per_outfile  0
+namelist integer   io       config_frames_per_outfile  1000
 namelist integer   io       config_pio_num_iotasks     0 
 namelist integer   io       config_pio_stride          1
 
-namelist real      time_integration config_dt                  172.8
-namelist character time_integration config_time_integrator    'RK4'
+namelist real      time_integration config_dt                  3000.0
+namelist character time_integration config_time_integrator    'split_explicit'
 
-namelist integer   grid config_num_halos           3
-namelist logical   grid config_enforce_grid_on_restart .false.
-namelist character grid config_vert_coord_movement      'isopycnal'
-namelist character grid config_alter_ICs_for_pbcs 'zlevel_pbcs_off'
-namelist real      grid config_min_pbc_fraction  0.10
-namelist logical   grid config_check_ssh_consistency .true.
+namelist integer   grid config_num_halos                3
+namelist character grid config_vert_coord_movement      'uniform_stretching'
+namelist character grid config_alter_ICs_for_pbcs       'zlevel_pbcs_off'
+namelist real      grid config_min_pbc_fraction         0.10
+namelist logical   grid config_check_ssh_consistency    .true.
 
 namelist character decomposition config_block_decomp_file_prefix  'graph.info.part.'
 namelist integer   decomposition config_number_of_blocks          0
 namelist logical   decomposition config_explicit_proc_decomp      .false.
 namelist character decomposition config_proc_decomp_file_prefix   'graph.info.part.'
 
-namelist logical   hmix          config_h_ScaleWithMesh     .false.
+namelist logical   hmix          config_hmix_ScaleWithMesh  .false.
 namelist logical   hmix          config_visc_vorticity_term .true.
 namelist real      hmix          config_apvm_scale_factor      0.0
 
-namelist logical   hmix_del2     config_use_mom_del2     .true.
-namelist logical   hmix_del2     config_use_tracer_del2     .true.
-namelist real      hmix_del2     config_h_mom_eddy_visc2     0.0
-namelist real      hmix_del2     config_h_tracer_eddy_diff2  0.0
-namelist real      hmix_del2     config_visc_vorticity_visc2_scale 1.0
+namelist logical   hmix_del2     config_use_mom_del2        .false.
+namelist logical   hmix_del2     config_use_tracer_del2     .false.
+namelist real      hmix_del2     config_mom_del2             0.0
+namelist real      hmix_del2     config_tracer_del2          0.0
+namelist real      hmix_del2     config_vorticity_del2_scale 1.0
 
-namelist logical   hmix_del4     config_use_mom_del4     .true.
-namelist logical   hmix_del4     config_use_tracer_del4     .true.
-namelist real      hmix_del4     config_h_mom_eddy_visc4     0.0
-namelist real      hmix_del4     config_h_tracer_eddy_diff4  0.0
-namelist real      hmix_del4     config_visc_vorticity_visc4_scale 1.0
+namelist logical   hmix_del4     config_use_mom_del4        .true.
+namelist logical   hmix_del4     config_use_tracer_del4     .false.
+namelist real      hmix_del4     config_mom_del4             5.0e13
+namelist real      hmix_del4     config_tracer_del4          0.0
+namelist real      hmix_del4     config_vorticity_del4_scale 1.0
 
-namelist logical   hmix_Leith    config_use_Leith_del2         .false.
-namelist real      hmix_Leith    config_Leith_parameter        0.0
-namelist real      hmix_Leith    config_Leith_dx               0.0
-namelist real      hmix_Leith    config_Leith_visc2_max      1000000.0
+namelist logical   hmix_Leith    config_use_Leith_del2      .false.
+namelist real      hmix_Leith    config_Leith_parameter      1.0
+namelist real      hmix_Leith    config_Leith_dx             15000.0
+namelist real      hmix_Leith    config_Leith_visc2_max      2.5e3
 
 namelist real      standard_GM     config_h_kappa              0.0
 namelist real      standard_GM     config_h_kappa_q            0.0
@@ -66,8 +64,8 @@
 namelist real      vmix     config_convective_visc      1.0
 namelist real      vmix     config_convective_diff      1.0
 
-namelist logical   vmix_const   config_use_const_visc   .true.
-namelist logical   vmix_const   config_use_const_diff   .true.
+namelist logical   vmix_const   config_use_const_visc   .false.
+namelist logical   vmix_const   config_use_const_diff   .false.
 namelist real      vmix_const   config_vert_visc        2.5e-4
 namelist real      vmix_const   config_vert_diff        2.5e-5
 
@@ -77,8 +75,8 @@
 namelist real      vmix_rich    config_bkrd_vert_diff   1.0e-5
 namelist real      vmix_rich    config_rich_mix         0.005
 
-namelist logical   vmix_tanh    config_use_tanh_visc    .true.
-namelist logical   vmix_tanh    config_use_tanh_diff    .true.
+namelist logical   vmix_tanh    config_use_tanh_visc    .false.
+namelist logical   vmix_tanh    config_use_tanh_diff    .false.
 namelist real      vmix_tanh    config_max_visc_tanh    2.5e-1
 namelist real      vmix_tanh    config_min_visc_tanh    1.0e-4
 namelist real      vmix_tanh    config_max_diff_tanh    2.5e-2
@@ -92,20 +90,26 @@
 namelist real      forcing   config_restoreS_timescale  90.0
 
 namelist character advection config_vert_tracer_adv     'stencil'
-namelist integer   advection config_vert_tracer_adv_order  4
-namelist integer   advection config_horiz_tracer_adv_order 2
+namelist integer   advection config_vert_tracer_adv_order  3
+namelist integer   advection config_horiz_tracer_adv_order 3
 namelist real      advection config_coef_3rd_order      0.25
-namelist logical   advection config_monotonic           .false.
+namelist logical   advection config_monotonic           .true.
 
 namelist real      bottom_drag config_bottom_drag_coeff    1.0e-3
 
 namelist character pressure_gradient     config_pressure_gradient_type       'pressure_and_zmid'
 namelist real      pressure_gradient     config_rho0                1014.65
 
-namelist character eos       config_eos_type            'linear'
+namelist character eos       config_eos_type            'jm'
 
+namelist real      eos_linear config_eos_linear_alpha    2.55e-1
+namelist real      eos_linear config_eos_linear_beta     7.64e-1
+namelist real      eos_linear config_eos_linear_Tref     19.0
+namelist real      eos_linear config_eos_linear_Sref     35.0
+namelist real      eos_linear config_eos_linear_rhoref   1025.022
+
 namelist integer   split_explicit_ts config_n_ts_iter     2
-namelist integer   split_explicit_ts config_n_bcl_iter_beg   2
+namelist integer   split_explicit_ts config_n_bcl_iter_beg   1
 namelist integer   split_explicit_ts config_n_bcl_iter_mid   2
 namelist integer   split_explicit_ts config_n_bcl_iter_end   2
 namelist integer   split_explicit_ts config_n_btr_subcycles  20
@@ -327,7 +331,9 @@
 var persistent real    MontPot ( nVertLevels nCells Time ) 2 - MontPot state - -
 var persistent real    pressure ( nVertLevels nCells Time ) 2 - pressure state - -
 var persistent real    wTop ( nVertLevelsP1 nCells Time ) 2 - wTop state - -
+var persistent real    vertVelocityTop ( nVertLevelsP1 nCells Time ) 2 - vertVelocityTop state - -
 var persistent real    rhoDisplaced ( nVertLevels nCells Time ) 2 - rhoDisplaced state - -
+var persistent real    BruntVaisalaFreqTop ( nVertLevels nCells Time ) 2 o BruntVaisalaFreqTop state - -
 var persistent real    viscosity ( nVertLevels nEdges Time ) 2 o viscosity state - -
 
 % Other diagnostic variables: neither read nor written to any files
@@ -360,6 +366,7 @@
 var persistent real    acc_uReconstructMeridionalVar ( nVertLevels nCells Time ) 2 o acc_uReconstructMeridionalVar state - -
 var persistent real    acc_u ( nVertLevels nEdges Time ) 2 o acc_u state - -
 var persistent real    acc_uVar ( nVertLevels nEdges Time ) 2 o acc_uVar state - -
+var persistent real    acc_vertVelocityTop ( nVertLevelsP1 nCells Time ) 2 o acc_vertVelocityTop state - -
 
 % Sign fields, for openmp and bit reproducibility without branching statements.
 var persistent integer edgeSignOnCell ( maxEdges nCells ) 0 - edgeSignOnCell mesh - -

Copied: branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_diagnostics.F (from rev 2482, trunk/mpas/src/core_ocean/mpas_ocn_diagnostics.F)
===================================================================
--- branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_diagnostics.F                                (rev 0)
+++ branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_diagnostics.F        2013-02-19 17:55:19 UTC (rev 2483)
@@ -0,0 +1,845 @@
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+!
+!  ocn_diagnostics
+!
+!&gt; \brief MPAS ocean diagnostics driver
+!&gt; \author Mark Petersen
+!&gt; \date   23 September 2011
+!&gt; \version SVN:$Id:$
+!&gt; \details
+!&gt;  This module contains the routines for computing
+!&gt;  diagnostic variables, and other quantities such as wTop.
+!
+!-----------------------------------------------------------------------
+
+module ocn_diagnostics
+
+   use mpas_grid_types
+   use mpas_configure
+   use mpas_constants
+   use mpas_timer
+
+   use ocn_gm
+   use ocn_equation_of_state
+
+   implicit none
+   private
+   save
+
+   type (timer_node), pointer :: diagEOSTimer
+
+   !--------------------------------------------------------------------
+   !
+   ! Public parameters
+   !
+   !--------------------------------------------------------------------
+
+   !--------------------------------------------------------------------
+   !
+   ! Public member functions
+   !
+   !--------------------------------------------------------------------
+
+   public :: ocn_diagnostic_solve, &amp;
+             ocn_wtop, &amp;
+             ocn_fuperp, &amp;
+             ocn_filter_btr_mode_u, &amp;
+             ocn_filter_btr_mode_tend_u, &amp;
+             ocn_diagnostics_init
+
+   !--------------------------------------------------------------------
+   !
+   ! Private module variables
+   !
+   !--------------------------------------------------------------------
+
+   integer :: ke_cell_flag, ke_vertex_flag
+   real (kind=RKIND) ::  coef_3rd_order, fCoef
+
+!***********************************************************************
+
+contains
+
+!***********************************************************************
+!
+!  routine ocn_diagnostic_solve
+!
+!&gt; \brief   Computes diagnostic variables
+!&gt; \author  Mark Petersen
+!&gt; \date    23 September 2011
+!&gt; \version SVN:$Id$
+!&gt; \details 
+!&gt;  This routine computes the diagnostic variables for the ocean
+!
+!-----------------------------------------------------------------------
+
+   subroutine ocn_diagnostic_solve(dt, s, grid)!{{{
+      implicit none
+
+      real (kind=RKIND), intent(in) :: dt !&lt; Input: Time step
+      type (state_type), intent(inout) :: s !&lt; Input/Output: State information
+      type (mesh_type), intent(in) :: grid !&lt; Input: Grid information
+
+
+      integer :: iEdge, iCell, iVertex, k, cell1, cell2, vertex1, vertex2, eoe, i, j
+      integer :: boundaryMask, velMask, nCells, nEdges, nVertices, nVertLevels, vertexDegree, err
+
+      integer, dimension(:), pointer :: nEdgesOnCell, nEdgesOnEdge, &amp;
+        maxLevelCell, maxLevelEdgeTop, maxLevelEdgeBot, &amp;
+        maxLevelVertexBot
+      integer, dimension(:,:), pointer :: cellsOnEdge, cellsOnVertex, &amp;
+        verticesOnEdge, edgesOnEdge, edgesOnVertex,boundaryCell, kiteIndexOnCell, &amp;
+        verticesOnCell, edgeSignOnVertex, edgeSignOnCell, edgesOnCell
+
+      real (kind=RKIND) :: d2fdx2_cell1, d2fdx2_cell2, coef_3rd_order, r_tmp, &amp;
+        invAreaCell1, invAreaCell2, invAreaTri1, invAreaTri2, invLength, h_vertex, coef
+
+      real (kind=RKIND), dimension(:), allocatable:: pTop, div_hu
+
+      real (kind=RKIND), dimension(:), pointer :: &amp;
+        bottomDepth, fVertex, dvEdge, dcEdge, areaCell, areaTriangle, ssh
+      real (kind=RKIND), dimension(:,:), pointer :: &amp;
+        weightsOnEdge, kiteAreasOnVertex, h_edge, h, u, v, pressure,&amp;
+        circulation, vorticity, ke, ke_edge, MontPot, wTop, zMid, &amp;
+        Vor_edge, Vor_vertex, Vor_cell, gradVor_n, gradVor_t, divergence, &amp;
+        rho, rhoDisplaced, temperature, salinity, kev, kevc, uBolusGM, uTransport, &amp;
+        vertVelocityTop, BruntVaisalaFreqTop
+      real (kind=RKIND), dimension(:,:,:), pointer :: tracers, deriv_two
+      character :: c1*6
+
+      h           =&gt; s % h % array
+      u           =&gt; s % u % array
+      uTransport  =&gt; s % uTransport % array
+      uBolusGM    =&gt; s % uBolusGM % array
+      v           =&gt; s % v % array
+      h_edge      =&gt; s % h_edge % array
+      circulation =&gt; s % circulation % array
+      vorticity   =&gt; s % vorticity % array
+      divergence  =&gt; s % divergence % array
+      ke          =&gt; s % ke % array
+      kev         =&gt; s % kev % array
+      kevc        =&gt; s % kevc % array
+      ke_edge     =&gt; s % ke_edge % array
+      Vor_edge    =&gt; s % Vor_edge % array
+      Vor_vertex  =&gt; s % Vor_vertex % array
+      Vor_cell    =&gt; s % Vor_cell % array
+      gradVor_n   =&gt; s % gradVor_n % array
+      gradVor_t   =&gt; s % gradVor_t % array
+      rho         =&gt; s % rho % array
+      rhoDisplaced=&gt; s % rhoDisplaced % array
+      MontPot     =&gt; s % MontPot % array
+      pressure    =&gt; s % pressure % array
+      zMid        =&gt; s % zMid % array
+      ssh         =&gt; s % ssh % array
+      tracers     =&gt; s % tracers % array
+      vertVelocityTop =&gt; s % vertVelocityTop % array
+      BruntVaisalaFreqTop =&gt; s % BruntVaisalaFreqTop % array
+
+      weightsOnEdge     =&gt; grid % weightsOnEdge % array
+      kiteAreasOnVertex =&gt; grid % kiteAreasOnVertex % array
+      cellsOnEdge       =&gt; grid % cellsOnEdge % array
+      cellsOnVertex     =&gt; grid % cellsOnVertex % array
+      verticesOnEdge    =&gt; grid % verticesOnEdge % array
+      nEdgesOnCell      =&gt; grid % nEdgesOnCell % array
+      nEdgesOnEdge      =&gt; grid % nEdgesOnEdge % array
+      edgesOnCell       =&gt; grid % edgesOnCell % array
+      edgesOnEdge       =&gt; grid % edgesOnEdge % array
+      edgesOnVertex     =&gt; grid % edgesOnVertex % array
+      dcEdge            =&gt; grid % dcEdge % array
+      dvEdge            =&gt; grid % dvEdge % array
+      areaCell          =&gt; grid % areaCell % array
+      areaTriangle      =&gt; grid % areaTriangle % array
+      bottomDepth       =&gt; grid % bottomDepth % array
+      fVertex           =&gt; grid % fVertex % array
+      deriv_two         =&gt; grid % deriv_two % array
+      maxLevelCell      =&gt; grid % maxLevelCell % array
+      maxLevelEdgeTop   =&gt; grid % maxLevelEdgeTop % array
+      maxLevelEdgeBot   =&gt; grid % maxLevelEdgeBot % array
+      maxLevelVertexBot =&gt; grid % maxLevelVertexBot % array
+      kiteIndexOnCell =&gt; grid % kiteIndexOnCell % array
+      verticesOnCell =&gt; grid % verticesOnCell % array
+                  
+      nCells      = grid % nCells
+      nEdges      = grid % nEdges
+      nVertices   = grid % nVertices
+      nVertLevels = grid % nVertLevels
+      vertexDegree = grid % vertexDegree
+
+      boundaryCell =&gt; grid % boundaryCell % array
+
+      edgeSignOnVertex =&gt; grid % edgeSignOnVertex % array
+      edgeSignOnCell =&gt; grid % edgeSignOnCell % array
+
+      !
+      ! Compute height on cell edges at velocity locations
+      !   Namelist options control the order of accuracy of the reconstructed h_edge value
+      !
+      ! mrp 101115 note: in order to include flux boundary conditions, we will need to 
+      ! assign h_edge for maxLevelEdgeTop:maxLevelEdgeBot in the following section
+
+      ! initialize h_edge to avoid divide by zero and NaN problems.
+      h_edge = -1.0e34
+      coef_3rd_order = config_coef_3rd_order
+
+      do iEdge=1,nEdges
+         cell1 = cellsOnEdge(1,iEdge)
+         cell2 = cellsOnEdge(2,iEdge)
+         do k=1,maxLevelEdgeTop(iEdge)
+            h_edge(k,iEdge) = 0.5 * (h(k,cell1) + h(k,cell2))
+         end do
+      end do
+
+      !
+      ! set the velocity and height at dummy address
+      !    used -1e34 so error clearly occurs if these values are used.
+      !
+      u(:,nEdges+1) = -1e34
+      h(:,nCells+1) = -1e34
+      tracers(s % index_temperature,:,nCells+1) = -1e34
+      tracers(s % index_salinity,:,nCells+1) = -1e34
+
+      circulation(:,:) = 0.0
+      vorticity(:,:) = 0.0
+      divergence(:,:) = 0.0
+      vertVelocityTop(:,:)=0.0
+      ke(:,:) = 0.0
+      v(:,:) = 0.0
+      do iVertex = 1, nVertices
+         invAreaTri1 = 1.0 / areaTriangle(iVertex)
+         do i = 1, vertexDegree
+            iEdge = edgesOnVertex(i, iVertex)
+            do k = 1, maxLevelVertexBot(iVertex)
+              r_tmp = dcEdge(iEdge) * u(k, iEdge)
+
+              circulation(k, iVertex) = circulation(k, iVertex) + edgeSignOnVertex(i, iVertex) * r_tmp 
+              vorticity(k, iVertex) = vorticity(k, iVertex) + edgeSignOnVertex(i, iVertex) * r_tmp * invAreaTri1
+            end do
+         end do
+      end do
+
+      allocate(div_hu(nVertLevels))
+      do iCell = 1, nCells
+         div_hu(:) = 0.0
+         invAreaCell1 = 1.0 / areaCell(iCell)
+         do i = 1, nEdgesOnCell(iCell)
+            iEdge = edgesOnCell(i, iCell)
+            do k = 1, maxLevelCell(iCell)
+               r_tmp = dvEdge(iEdge) * u(k, iEdge) * invAreaCell1
+
+               divergence(k, iCell) = divergence(k, iCell) - edgeSignOnCell(i, iCell) * r_tmp
+               div_hu(k)    = div_hu(k) - h_edge(k, iEdge) * edgeSignOnCell(i, iCell) * r_tmp 
+               ke(k, iCell) = ke(k, iCell) + 0.25 * r_tmp * dcEdge(iEdge) * u(k,iEdge)
+            end do
+         end do
+         ! Vertical velocity at bottom (maxLevelCell(iCell)+1) is zero, initialized above.
+         do k=maxLevelCell(iCell),1,-1
+            vertVelocityTop(k,iCell) = vertVelocityTop(k+1,iCell) - div_hu(k)
+         end do         
+      end do
+      deallocate(div_hu)
+
+      do iEdge=1,nEdges
+         ! Compute v (tangential) velocities
+         do i=1,nEdgesOnEdge(iEdge)
+            eoe = edgesOnEdge(i,iEdge)
+            ! mrp 101115 note: in order to include flux boundary conditions,
+            ! the following loop may need to change to maxLevelEdgeBot
+            do k = 1,maxLevelEdgeTop(iEdge) 
+               v(k,iEdge) = v(k,iEdge) + weightsOnEdge(i,iEdge) * u(k, eoe)
+            end do
+         end do
+
+      end do
+
+      !
+      ! Compute kinetic energy in each vertex
+      !
+      kev(:,:) = 0.0; kevc(:,:) = 0.0
+      do iVertex = 1, nVertices*ke_vertex_flag
+        do i = 1, vertexDegree
+          iEdge = edgesOnVertex(i, iVertex)
+          r_tmp = dcEdge(iEdge) * dvEdge(iEdge) * 0.25 / areaTriangle(iVertex)
+          do k = 1, nVertLevels
+            kev(k, iVertex) = kev(k, iVertex) + r_tmp * u(k, iEdge)**2
+          end do
+        end do
+      end do
+
+      do iCell = 1, nCells*ke_vertex_flag
+        invAreaCell1 = 1.0 / areaCell(iCell)
+        do i = 1, nEdgesOnCell(iCell)
+          j = kiteIndexOnCell(i, iCell)
+          iVertex = verticesOnCell(i, iCell)
+          do k = 1, nVertLevels
+            kevc(k, iCell) = kevc(k, iCell) + kiteAreasOnVertex(j, iVertex) * kev(k, iVertex) * invAreaCell1
+          end do
+        end do
+      end do
+
+      !
+      ! Compute kinetic energy in each cell by blending ke and kevc
+      !
+      do iCell=1,nCells*ke_vertex_flag
+         do k=1,nVertLevels
+            ke(k,iCell) = 5.0/8.0*ke(k,iCell) + 3.0/8.0*kevc(k,iCell)
+         end do
+      end do
+
+      !
+      ! Compute ke on cell edges at velocity locations for quadratic bottom drag. 
+      !
+      ! mrp 101025 efficiency note: we could get rid of ke_edge completely by 
+      ! using sqrt(u(k,iEdge)**2 + v(k,iEdge)**2) in its place elsewhere.
+      do iEdge=1,nEdges
+         cell1 = cellsOnEdge(1,iEdge)
+         cell2 = cellsOnEdge(2,iEdge)
+         do k=1,maxLevelEdgeTop(iEdge)
+            ke_edge(k,iEdge) = 0.5 * (ke(k,cell1) + ke(k,cell2))
+         end do
+      end do
+
+      !
+      ! Compute height at vertices, pv at vertices, and average pv to edge locations
+      !  ( this computes Vor_vertex at all vertices bounding real cells and distance-1 ghost cells )
+      !
+      do iVertex = 1,nVertices
+         invAreaTri1 = 1.0 / areaTriangle(iVertex)
+         do k=1,maxLevelVertexBot(iVertex)
+            h_vertex = 0.0
+            do i=1,vertexDegree
+               h_vertex = h_vertex + h(k,cellsOnVertex(i,iVertex)) * kiteAreasOnVertex(i,iVertex)
+            end do
+            h_vertex = h_vertex * invAreaTri1
+
+            Vor_vertex(k,iVertex) = (fCoef*fVertex(iVertex) + vorticity(k,iVertex)) / h_vertex
+         end do
+      end do
+
+      Vor_cell(:,:) = 0.0
+      Vor_edge(:,:) = 0.0
+      do iEdge = 1, nEdges
+        vertex1 = verticesOnEdge(1, iEdge)
+        vertex2 = verticesOnEdge(2, iEdge)
+        do k = 1, maxLevelEdgeBot(iEdge)
+          Vor_edge(k, iEdge) = 0.5 * (Vor_vertex(k, vertex1) + Vor_vertex(k, vertex2))
+        end do
+      end do
+
+      do iCell = 1, nCells
+        invAreaCell1 = 1.0 / areaCell(iCell)
+
+        do i = 1, nEdgesOnCell(iCell)
+          j = kiteIndexOnCell(i, iCell)
+          iVertex = verticesOnCell(i, iCell)
+          do k = 1, maxLevelCell(iCell)
+            Vor_cell(k, iCell) = Vor_cell(k, iCell) + kiteAreasOnVertex(j, iVertex) * Vor_vertex(k, iVertex) * invAreaCell1
+          end do
+        end do
+      end do
+
+      do iEdge = 1,nEdges
+         cell1 = cellsOnEdge(1, iEdge)
+         cell2 = cellsOnEdge(2, iEdge)
+         vertex1 = verticesOnedge(1, iEdge)
+         vertex2 = verticesOnedge(2, iEdge)
+
+         invLength = 1.0 / dcEdge(iEdge)
+         ! Compute gradient of PV in normal direction
+         !   ( this computes gradVor_n for all edges bounding real cells )
+         do k=1,maxLevelEdgeTop(iEdge)
+            gradVor_n(k,iEdge) = (Vor_cell(k,cell2) - Vor_cell(k,cell1)) * invLength
+         enddo
+
+         invLength = 1.0 / dvEdge(iEdge)
+         ! Compute gradient of PV in the tangent direction
+         !   ( this computes gradVor_t at all edges bounding real cells and distance-1 ghost cells )
+         do k = 1,maxLevelEdgeBot(iEdge)
+           gradVor_t(k,iEdge) = (Vor_vertex(k,vertex2) - Vor_vertex(k,vertex1)) * invLength
+         enddo
+
+      enddo
+
+      !
+      ! Modify PV edge with upstream bias.
+      !
+      do iEdge = 1,nEdges
+         do k = 1,maxLevelEdgeBot(iEdge)
+           Vor_edge(k,iEdge) = Vor_edge(k,iEdge) &amp;
+             - config_apvm_scale_factor * dt* (  u(k,iEdge) * gradVor_n(k,iEdge) &amp;
+                          + v(k,iEdge) * gradVor_t(k,iEdge) )
+         enddo
+      enddo
+
+      !
+      ! equation of state
+      !
+      ! For an isopycnal model, density should remain constant.
+      ! For zlevel, calculate in-situ density
+      if (config_vert_coord_movement.ne.'isopycnal') then
+         call mpas_timer_start(&quot;equation of state&quot;, .false., diagEOSTimer)
+
+         ! compute in-place density
+         call ocn_equation_of_state_rho(s, grid, 0, 'relative', err)
+
+         ! compute rhoDisplaced, the potential density referenced to the top layer
+         call ocn_equation_of_state_rho(s, grid, 1, 'relative', err)
+
+         call mpas_timer_stop(&quot;equation of state&quot;, diagEOSTimer)
+      endif
+
+      !
+      ! Pressure
+      ! This section must be after computing rho
+      !
+      ! dwj: 10/25/2011 - Need to explore isopycnal vs zlevel flags
+      if (config_pressure_gradient_type.eq.'MontgomeryPotential') then
+
+        ! For Isopycnal model.
+        ! Compute pressure at top of each layer, and then
+        ! Montgomery Potential.
+        allocate(pTop(nVertLevels))
+        do iCell=1,nCells
+
+           ! assume atmospheric pressure at the surface is zero for now.
+           pTop(1) = 0.0
+           ! For isopycnal mode, p is the Montgomery Potential.
+           ! At top layer it is g*SSH, where SSH may be off by a 
+           ! constant (ie, bottomDepth can be relative to top or bottom)
+           MontPot(1,iCell) = gravity &amp;
+              * (bottomDepth(iCell) + sum(h(1:nVertLevels,iCell)))
+
+           do k=2,nVertLevels
+              pTop(k) = pTop(k-1) + rho(k-1,iCell)*gravity* h(k-1,iCell)
+
+              ! from delta M = p delta / rho
+              MontPot(k,iCell) = MontPot(k-1,iCell) &amp;
+                 + pTop(k)*(1.0/rho(k,iCell) - 1.0/rho(k-1,iCell)) 
+           end do
+
+        end do
+        deallocate(pTop)
+
+      else
+
+        do iCell=1,nCells
+           ! pressure for generalized coordinates
+           ! assume atmospheric pressure at the surface is zero for now.
+           pressure(1,iCell) = rho(1,iCell)*gravity &amp;
+              * 0.5*h(1,iCell)
+
+           do k=2,maxLevelCell(iCell)
+              pressure(k,iCell) = pressure(k-1,iCell)  &amp;
+                + 0.5*gravity*(  rho(k-1,iCell)*h(k-1,iCell) &amp;
+                               + rho(k  ,iCell)*h(k  ,iCell))
+           end do
+
+           ! Compute zMid, the z-coordinate of the middle of the layer.
+           ! This is used for the rho g grad z momentum term.
+           ! Note the negative sign, since bottomDepth is positive
+           ! and z-coordinates are negative below the surface.
+           k = maxLevelCell(iCell)
+           zMid(k:nVertLevels,iCell) = -bottomDepth(iCell) + 0.5*h(k,iCell)
+
+           do k=maxLevelCell(iCell)-1, 1, -1
+              zMid(k,iCell) = zMid(k+1,iCell)  &amp;
+                + 0.5*(  h(k+1,iCell) &amp;
+                       + h(k  ,iCell))
+           end do
+
+        end do
+
+      endif
+
+      !
+      ! Brunt-Vaisala frequency
+      !
+      coef = -gravity/config_rho0
+      do iCell=1,nCells
+         BruntVaisalaFreqTop(1,iCell) = 0.0
+         do k=2,maxLevelCell(iCell)
+            BruntVaisalaFreqTop(k,iCell) = coef * (rhoDisplaced(k-1,iCell) - rhoDisplaced(k,iCell)) &amp; 
+              / (zMid(k-1,iCell) - zMid(k,iCell))
+          end do
+      end do
+
+      !
+      ! Sea Surface Height
+      !
+      do iCell=1,nCells
+         ! Start at the bottom where we know the depth, and go up.
+         ! The bottom depth for this cell is bottomDepth(iCell).
+         ! Note the negative sign, since bottomDepth is positive
+         ! and z-coordinates are negative below the surface.
+
+         ssh(iCell) = - bottomDepth(iCell) + sum(h(1:maxLevelCell(iCell),iCell))
+
+      end do
+
+      !
+      ! Apply the GM closure as a bolus velocity
+      !
+      if (config_h_kappa .GE. epsilon(0D0)) then
+         call ocn_gm_compute_uBolus(s,grid)
+      else
+         ! mrp efficiency note: if uBolusGM is guaranteed to be zero, this can be removed.
+         uBolusGM = 0.0
+      end if
+
+   end subroutine ocn_diagnostic_solve!}}}
+
+!***********************************************************************
+!
+!  routine ocn_wtop
+!
+!&gt; \brief   Computes vertical transport
+!&gt; \author  Mark Petersen
+!&gt; \date    23 September 2011
+!&gt; \version SVN:$Id$
+!&gt; \details 
+!&gt;  This routine computes the vertical transport through the top of each 
+!&gt;  cell.
+!
+!-----------------------------------------------------------------------
+   subroutine ocn_wtop(grid,h,h_edge,u,wTop, err)!{{{
+
+      !-----------------------------------------------------------------
+      !
+      ! input variables
+      !
+      !-----------------------------------------------------------------
+
+      type (mesh_type), intent(in) :: &amp;
+         grid          !&lt; Input: grid information
+
+      real (kind=RKIND), dimension(:,:), intent(in) :: &amp;
+         h    !&lt; Input: thickness
+
+      real (kind=RKIND), dimension(:,:), intent(in) :: &amp;
+         h_edge     !&lt; Input: h interpolated to an edge
+
+      real (kind=RKIND), dimension(:,:), intent(in) :: &amp;
+         u     !&lt; Input: transport
+
+      !-----------------------------------------------------------------
+      !
+      ! output variables
+      !
+      !-----------------------------------------------------------------
+
+      real (kind=RKIND), dimension(:,:), intent(out) :: &amp;
+         wTop     !&lt; Output: vertical transport at top of cell
+
+      integer, intent(out) :: err !&lt; Output: error flag
+
+      !-----------------------------------------------------------------
+      !
+      ! local variables
+      !
+      !-----------------------------------------------------------------
+
+      integer :: iEdge, iCell, iVertex, k, cell1, cell2, vertex1, vertex2, eoe, i, j, cov
+      real (kind=RKIND) :: flux, vorticity_abs, h_vertex, workpv, rho0Inv, hSum, invAreaCell
+
+      integer :: nCells, nEdges, nVertices, nVertLevels, vertexDegree
+
+
+      real (kind=RKIND), dimension(:), pointer :: &amp;
+        dvEdge, areaCell, vertCoordMovementWeights
+      real (kind=RKIND), dimension(:), allocatable:: div_hu, h_tend_col
+      real (kind=RKIND) :: div_hu_btr
+
+      integer, dimension(:,:), pointer :: cellsOnEdge, cellsOnVertex, &amp;
+        verticesOnEdge, edgesOnCell, edgesOnEdge, edgesOnVertex, &amp;
+        boundaryEdge, boundaryCell, edgeSignOnCell
+      integer, dimension(:), pointer :: nEdgesOnCell, nEdgesOnEdge, &amp;
+        maxLevelCell, maxLevelEdgeTop, maxLevelEdgeBot, &amp;
+        maxLevelVertexBot,  maxLevelVertexTop
+
+      err = 0
+
+      nEdgesOnCell      =&gt; grid % nEdgesOnCell % array
+      areaCell          =&gt; grid % areaCell % array
+      cellsOnEdge       =&gt; grid % cellsOnEdge % array
+      edgesOnCell       =&gt; grid % edgesOnCell % array
+      edgeSignOnCell    =&gt; grid % edgeSignOnCell % array
+      maxLevelCell      =&gt; grid % maxLevelCell % array
+      maxLevelEdgeBot   =&gt; grid % maxLevelEdgeBot % array
+      dvEdge            =&gt; grid % dvEdge % array
+      vertCoordMovementWeights =&gt; grid % vertCoordMovementWeights % array
+
+      nCells      = grid % nCells
+      nEdges      = grid % nEdges
+      nVertLevels = grid % nVertLevels
+
+
+      if (config_vert_coord_movement.eq.'isopycnal') then
+        ! set vertical transport to zero in isopycnal case
+        wTop=0.0
+        return
+      end if
+
+      allocate(div_hu(nVertLevels), h_tend_col(nVertLevels))
+
+      !
+      ! Compute div(h^{edge} u) for each cell
+      ! See Ringler et al. (2010) jcp paper, eqn 19, 21, and fig. 3.
+      !
+
+      do iCell=1,nCells
+        div_hu(:) = 0.0
+        div_hu_btr = 0.0
+        hSum = 0.0
+        invAreaCell = 1.0 / areaCell(iCell)
+
+        do i = 1, nEdgesOnCell(iCell)
+          iEdge = edgesOnCell(i, iCell)
+
+          do k = 1, maxLevelEdgeBot(iEdge)
+            flux = h_edge(k, iEdge) * u(k, iEdge) * dvEdge(iEdge) * edgeSignOnCell(i, iCell) * invAreaCell
+            div_hu(k) = div_hu(k) - flux
+            div_hu_btr = div_hu_btr - flux
+          end do
+        end do
+
+        do k = 1, maxLevelCell(iCell)
+           h_tend_col(k) = - vertCoordMovementWeights(k) * h(k, iCell) * div_hu_btr
+           hSum = hSum + vertCoordMovementWeights(k) * h(k, iCell)
+        end do
+
+        if(hSum &gt; 0.0) then
+           h_tend_col = h_tend_col / hSum
+        end if
+
+        ! Vertical transport through layer interface at top and bottom is zero.
+        wTop(1,iCell) = 0.0
+        wTop(maxLevelCell(iCell)+1,iCell) = 0.0
+        do k=maxLevelCell(iCell),2,-1
+           wTop(k,iCell) = wTop(k+1,iCell) - div_hu(k) - h_tend_col(k)
+        end do
+      end do
+
+      deallocate(div_hu, h_tend_col)
+
+   end subroutine ocn_wtop!}}}
+
+!***********************************************************************
+!
+!  routine ocn_fuperp
+!
+!&gt; \brief   Computes f u_perp
+!&gt; \author  Mark Petersen
+!&gt; \date    23 September 2011
+!&gt; \version SVN:$Id$
+!&gt; \details 
+!&gt;  This routine computes f u_perp for the ocean
+!
+!-----------------------------------------------------------------------
+
+   subroutine ocn_fuperp(s, grid)!{{{
+      implicit none
+
+      type (state_type), intent(inout) :: s !&lt; Input/Output: State information
+      type (mesh_type), intent(in) :: grid !&lt; Input: Grid information
+
+! mrp 110512 I just split compute_tend into compute_tend_u and compute_tend_h.
+!  Some of these variables can be removed, but at a later time.
+      integer :: iEdge, cell1, cell2, eoe, i, j, k
+
+      integer :: nEdgesSolve
+      real (kind=RKIND), dimension(:), pointer :: fEdge
+      real (kind=RKIND), dimension(:,:), pointer :: weightsOnEdge, u, uBcl
+      type (dm_info) :: dminfo
+
+      integer, dimension(:), pointer :: maxLevelEdgeTop, nEdgesOnEdge
+      integer, dimension(:,:), pointer :: cellsOnEdge, edgesOnEdge
+
+      call mpas_timer_start(&quot;ocn_fuperp&quot;)
+
+      u           =&gt; s % u % array
+      uBcl        =&gt; s % uBcl % array
+      weightsOnEdge     =&gt; grid % weightsOnEdge % array
+      fEdge             =&gt; grid % fEdge % array
+      maxLevelEdgeTop      =&gt; grid % maxLevelEdgeTop % array
+      cellsOnEdge       =&gt; grid % cellsOnEdge % array
+      nEdgesOnEdge      =&gt; grid % nEdgesOnEdge % array
+      edgesOnEdge       =&gt; grid % edgesOnEdge % array
+
+      fEdge       =&gt; grid % fEdge % array
+
+      nEdgesSolve = grid % nEdgesSolve
+
+      !
+      ! Put f*uBcl^{perp} in u as a work variable
+      !
+      do iEdge=1,nEdgesSolve
+         cell1 = cellsOnEdge(1,iEdge)
+         cell2 = cellsOnEdge(2,iEdge)
+
+         do k=1,maxLevelEdgeTop(iEdge)
+
+            u(k,iEdge) = 0.0
+            do j = 1,nEdgesOnEdge(iEdge)
+               eoe = edgesOnEdge(j,iEdge)
+               u(k,iEdge) = u(k,iEdge) + weightsOnEdge(j,iEdge) * uBcl(k,eoe) * fEdge(eoe) 
+            end do
+         end do
+      end do
+
+      call mpas_timer_stop(&quot;ocn_fuperp&quot;)
+
+   end subroutine ocn_fuperp!}}}
+
+!***********************************************************************
+!
+!  routine ocn_filter_btr_mode_u
+!
+!&gt; \brief   filters barotropic mode out of the velocity variable.
+!&gt; \author  Mark Petersen
+!&gt; \date    23 September 2011
+!&gt; \version SVN:$Id$
+!&gt; \details 
+!&gt;  This routine filters barotropic mode out of the velocity variable.
+!
+!-----------------------------------------------------------------------
+   subroutine ocn_filter_btr_mode_u(s, grid)!{{{
+      implicit none
+
+      type (state_type), intent(inout) :: s
+      type (mesh_type), intent(in) :: grid
+
+      integer :: iEdge, k, nEdges
+      real (kind=RKIND) :: vertSum, uhSum, hSum
+      real (kind=RKIND), dimension(:,:), pointer :: h_edge, u
+      integer, dimension(:), pointer :: maxLevelEdgeTop
+
+      call mpas_timer_start(&quot;ocn_filter_btr_mode_u&quot;)
+
+      u           =&gt; s % u % array
+      h_edge      =&gt; s % h_edge % array
+      maxLevelEdgeTop =&gt; grid % maxLevelEdgeTop % array
+      nEdges      = grid % nEdges
+
+      do iEdge=1,nEdges
+
+        ! hSum is initialized outside the loop because on land boundaries 
+        ! maxLevelEdgeTop=0, but I want to initialize hSum with a 
+        ! nonzero value to avoid a NaN.
+        uhSum = h_edge(1,iEdge) * u(1,iEdge)
+        hSum  = h_edge(1,iEdge)
+
+        do k=2,maxLevelEdgeTop(iEdge)
+          uhSum = uhSum + h_edge(k,iEdge) * u(k,iEdge)
+          hSum  =  hSum + h_edge(k,iEdge)
+        enddo
+
+        vertSum = uhSum/hSum
+        do k=1,maxLevelEdgeTop(iEdge)
+          u(k,iEdge) = u(k,iEdge) - vertSum
+        enddo
+      enddo ! iEdge
+
+      call mpas_timer_stop(&quot;ocn_filter_btr_mode_u&quot;)
+
+   end subroutine ocn_filter_btr_mode_u!}}}
+
+!***********************************************************************
+!
+!  routine ocn_filter_btr_mode_tend_u
+!
+!&gt; \brief   ocn_filters barotropic mode out of the u tendency
+!&gt; \author  Mark Petersen
+!&gt; \date    23 September 2011
+!&gt; \version SVN:$Id$
+!&gt; \details 
+!&gt;  This routine filters barotropic mode out of the u tendency.
+!
+!-----------------------------------------------------------------------
+   subroutine ocn_filter_btr_mode_tend_u(tend, s, grid)!{{{
+      implicit none
+
+      type (tend_type), intent(inout) :: tend
+      type (state_type), intent(in) :: s
+      type (mesh_type), intent(in) :: grid
+
+      integer :: iEdge, k, nEdges
+      real (kind=RKIND) :: vertSum, uhSum, hSum
+      real (kind=RKIND), dimension(:,:), pointer :: h_edge, tend_u
+
+      integer, dimension(:), pointer :: maxLevelEdgeTop
+
+      call mpas_timer_start(&quot;ocn_filter_btr_mode_tend_u&quot;)
+
+      tend_u      =&gt; tend % u % array
+      h_edge      =&gt; s % h_edge % array
+      maxLevelEdgeTop =&gt; grid % maxLevelEdgeTop % array
+      nEdges      = grid % nEdges
+
+      do iEdge=1,nEdges
+
+        ! hSum is initialized outside the loop because on land boundaries 
+        ! maxLevelEdgeTop=0, but I want to initialize hSum with a 
+        ! nonzero value to avoid a NaN.
+        uhSum = h_edge(1,iEdge) * tend_u(1,iEdge)
+        hSum  = h_edge(1,iEdge)
+
+        do k=2,maxLevelEdgeTop(iEdge)
+          uhSum = uhSum + h_edge(k,iEdge) * tend_u(k,iEdge)
+          hSum  =  hSum + h_edge(k,iEdge)
+        enddo
+
+        vertSum = uhSum/hSum
+        do k=1,maxLevelEdgeTop(iEdge)
+          tend_u(k,iEdge) = tend_u(k,iEdge) - vertSum
+        enddo
+      enddo ! iEdge
+
+      call mpas_timer_stop(&quot;ocn_filter_btr_mode_tend_u&quot;)
+
+   end subroutine ocn_filter_btr_mode_tend_u!}}}
+
+!***********************************************************************
+!
+!  routine ocn_diagnostics_init
+!
+!&gt; \brief   Initializes flags used within diagnostics routines.
+!&gt; \author  Mark Petersen
+!&gt; \date    4 November 2011
+!&gt; \version SVN:$Id$
+!&gt; \details 
+!&gt;  This routine initializes flags related to quantities computed within
+!&gt;  other diagnostics routines.
+!
+!-----------------------------------------------------------------------
+    subroutine ocn_diagnostics_init(err)!{{{
+        integer, intent(out) :: err !&lt; Output: Error flag
+
+        err = 0
+
+        if(config_include_KE_vertex) then
+            ke_vertex_flag = 1
+            ke_cell_flag = 0
+        else
+            ke_vertex_flag = 0
+            ke_cell_flag = 1
+        endif
+
+        if (trim(config_time_integrator) == 'RK4') then
+            ! for RK4, PV is really PV = (eta+f)/h
+            fCoef = 1
+        elseif (trim(config_time_integrator) == 'split_explicit' &amp;
+          .or.trim(config_time_integrator) == 'unsplit_explicit') then
+            ! for split explicit, PV is eta/h because f is added separately to the momentum forcing.
+            ! mrp temp, new should be:
+            fCoef = 0
+            ! old, for testing:
+            !         fCoef = 1
+        end if
+
+    end subroutine ocn_diagnostics_init!}}}
+
+!***********************************************************************
+
+end module ocn_diagnostics
+
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+! vim: foldmethod=marker

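For context on the filter routines that now live in the new ocn_diagnostics module above, the fragment below reduces ocn_filter_btr_mode_u to a single column: it removes the thickness-weighted vertical mean (the barotropic mode) from a velocity profile. This is a standalone sketch, not MPAS code; the layer count, the profile values, and the RKIND definition are invented for the example.

program filter_btr_mode_demo
   implicit none
   integer, parameter :: RKIND = selected_real_kind(13)
   integer, parameter :: nVertLevels = 4
   real (kind=RKIND), dimension(nVertLevels) :: u, h
   real (kind=RKIND) :: uhSum, hSum, vertSum
   integer :: k

   u = (/ 0.3_RKIND, 0.1_RKIND, -0.05_RKIND, -0.2_RKIND /)   ! made-up velocity profile
   h = (/ 10.0_RKIND, 20.0_RKIND, 30.0_RKIND, 40.0_RKIND /)  ! made-up layer thicknesses

   ! thickness-weighted vertical mean, i.e. the barotropic mode
   uhSum = 0.0_RKIND
   hSum  = 0.0_RKIND
   do k = 1, nVertLevels
      uhSum = uhSum + h(k)*u(k)
      hSum  = hSum  + h(k)
   end do
   vertSum = uhSum/hSum

   ! subtract it, so the column integral of h*u is (numerically) zero
   u = u - vertSum
   print '(a,es12.4)', 'residual barotropic mode: ', sum(h*u)/hSum
end program filter_btr_mode_demo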
Modified: branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_equation_of_state_linear.F
===================================================================
--- branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_equation_of_state_linear.F        2013-02-15 18:04:59 UTC (rev 2482)
+++ branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_equation_of_state_linear.F        2013-02-19 17:55:19 UTC (rev 2483)
@@ -82,27 +82,21 @@
       integer, intent(in) :: indexT, indexS
       integer, intent(out) :: err
 
-      real (kind=RKIND), parameter :: rho_ref = 1025.022 ! kg / m^3
-      real (kind=RKIND), parameter :: alpha =  2.55e-1 ! kg / m^3 / K (dT/dRho)
-      real (kind=RKIND), parameter :: beta = 7.64e-1 ! kg / m^3 / psu (dS/dRho)
-      real (kind=RKIND), parameter :: T_ref = 19.0 ! K
-      real (kind=RKIND), parameter :: S_ref = 35.0 ! psu
-      real (kind=RKIND), parameter :: rho_prime_ref = rho_ref + alpha * T_ref - beta * S_ref
-
       integer, dimension(:), pointer :: maxLevelCell
       integer :: nCells, iCell, k
       type (dm_info) :: dminfo
 
-      maxLevelCell      =&gt; grid % maxLevelCell % array
-      nCells      = grid % nCells
+      maxLevelCell  =&gt; grid % maxLevelCell % array
+      nCells        =  grid % nCells
 
       err = 0
 
       do iCell=1,nCells
          do k=1,maxLevelCell(iCell)
             ! Linear equation of state
-            ! rho = rho_ref - alpha * (T - T_ref) + beta * (S - S_ref)
-            rho(k,iCell) = rho_prime_ref - alpha*tracers(indexT,k,iCell) + beta*tracers(indexS,k,iCell)
+            rho(k,iCell) =  config_eos_linear_rhoref &amp;
+                  - config_eos_linear_alpha * (tracers(indexT,k,iCell)-config_eos_linear_Tref) &amp;
+                  + config_eos_linear_beta  * (tracers(indexS,k,iCell)-config_eos_linear_Sref)
          end do
       end do
 

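The change above replaces the hard-wired linear equation of state parameters with namelist options. The sketch below evaluates the same expression for one (T, S) pair; the config_eos_linear_* values are placeholders chosen for the example and are not the registry defaults.

program linear_eos_demo
   implicit none
   integer, parameter :: RKIND = selected_real_kind(13)
   real (kind=RKIND), parameter :: config_eos_linear_rhoref = 1000.0_RKIND  ! kg m^-3 (assumed)
   real (kind=RKIND), parameter :: config_eos_linear_alpha  = 0.2_RKIND     ! kg m^-3 K^-1 (assumed)
   real (kind=RKIND), parameter :: config_eos_linear_beta   = 0.8_RKIND     ! kg m^-3 psu^-1 (assumed)
   real (kind=RKIND), parameter :: config_eos_linear_Tref   = 5.0_RKIND     ! deg C (assumed)
   real (kind=RKIND), parameter :: config_eos_linear_Sref   = 35.0_RKIND    ! psu (assumed)
   real (kind=RKIND) :: T, S, rho

   T = 10.0_RKIND
   S = 34.5_RKIND
   rho = config_eos_linear_rhoref                                  &
       - config_eos_linear_alpha * (T - config_eos_linear_Tref)    &
       + config_eos_linear_beta  * (S - config_eos_linear_Sref)
   print '(a,f10.4)', 'rho = ', rho
end program linear_eos_demo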
Modified: branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_mpas_core.F
===================================================================
--- branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_mpas_core.F        2013-02-15 18:04:59 UTC (rev 2482)
+++ branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_mpas_core.F        2013-02-19 17:55:19 UTC (rev 2483)
@@ -10,6 +10,7 @@
    use ocn_test_cases
    use ocn_time_integration
    use ocn_tendency
+   use ocn_diagnostics
 
    use ocn_monthly_forcing
 
@@ -102,6 +103,8 @@
 
       call ocn_tendency_init(err_tmp)
       err = ior(err,err_tmp)
+      call ocn_diagnostics_init(err_tmp)
+      err = ior(err,err_tmp)
 
       call mpas_ocn_tracer_advection_init(err_tmp)
       err = ior(err,err_tmp)
@@ -1023,7 +1026,7 @@
       meshScalingDel2(:) = 1.0
       meshScalingDel4(:) = 1.0
       meshScaling(:)     = 1.0
-      if (config_h_ScaleWithMesh) then
+      if (config_hmix_ScaleWithMesh) then
          do iEdge=1,mesh%nEdges
             cell1 = mesh % cellsOnEdge % array(1,iEdge)
             cell2 = mesh % cellsOnEdge % array(2,iEdge)

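The core init change above registers the new ocn_diagnostics_init alongside ocn_tendency_init and folds its error flag into the accumulated err with ior. A minimal sketch of that accumulation pattern, with made-up error codes:

program init_err_demo
   implicit none
   integer :: err, err_tmp

   err = 0
   err_tmp = 0                    ! e.g. ocn_tendency_init succeeded
   err = ior(err, err_tmp)
   err_tmp = 2                    ! e.g. ocn_diagnostics_init flagged a problem (invented code)
   err = ior(err, err_tmp)
   print '(a,i0)', 'combined error flag: ', err   ! nonzero if any init step failed
end program init_err_demo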
Modified: branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_tendency.F
===================================================================
--- branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_tendency.F        2013-02-15 18:04:59 UTC (rev 2482)
+++ branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_tendency.F        2013-02-19 17:55:19 UTC (rev 2483)
@@ -8,9 +8,7 @@
 !&gt; \version SVN:$Id:$
 !&gt; \details
 !&gt;  This module contains the routines for computing
-!&gt;  various tendencies for the ocean. As well as routines
-!&gt;  for computing diagnostic variables, and other quantities
-!&gt;  such as wTop.
+!&gt;  tendency terms for the ocean primitive equations.
 !
 !-----------------------------------------------------------------------
 
@@ -25,29 +23,23 @@
 
    use ocn_thick_hadv
    use ocn_thick_vadv
-   use ocn_gm
 
    use ocn_vel_coriolis
    use ocn_vel_pressure_grad
    use ocn_vel_vadv
    use ocn_vel_hmix
    use ocn_vel_forcing
+   use ocn_vmix
 
    use ocn_tracer_hadv
    use ocn_tracer_vadv
    use ocn_tracer_hmix
    use ocn_restoring
 
-   use ocn_equation_of_state
-   use ocn_vmix
-
-   use ocn_time_average
-
    implicit none
    private
    save
 
-   type (timer_node), pointer :: diagEOSTimer
    type (timer_node), pointer :: thickHadvTimer, thickVadvTimer
    type (timer_node), pointer :: velCorTimer, velVadvTimer, velPgradTimer, velHmixTimer, velForceTimer, velExpVmixTimer
    type (timer_node), pointer :: tracerHadvTimer, tracerVadvTimer, tracerHmixTimer, tracerExpVmixTimer, tracerRestoringTimer
@@ -66,13 +58,8 @@
 
    public :: ocn_tend_h, &amp;
              ocn_tend_u, &amp;
-             ocn_tend_scalar, &amp;
-             ocn_diagnostic_solve, &amp;
-             ocn_wtop, &amp;
-             ocn_fuperp, &amp;
-             ocn_tendency_init, &amp;
-             ocn_filter_btr_mode_u, &amp;
-             ocn_filter_btr_mode_tend_u
+             ocn_tend_tracer, &amp;
+             ocn_tendency_init
 
    !--------------------------------------------------------------------
    !
@@ -80,10 +67,6 @@
    !
    !--------------------------------------------------------------------
 
-   integer :: ke_cell_flag, ke_vertex_flag
-   real (kind=RKIND) ::  coef_3rd_order, fCoef
-
-
 !***********************************************************************
 
 contains
@@ -238,7 +221,7 @@
       !
       ! velocity tendency: del2 dissipation, \nu_2 \nabla^2 u
       !   computed as \nu( \nabla divergence + k \times \nabla vorticity )
-      !   strictly only valid for config_h_mom_eddy_visc2 == constant
+      !   strictly only valid for config_mom_del2 == constant
       !
       call mpas_timer_start(&quot;hmix&quot;, .false., velHmixTimer)
       call ocn_vel_hmix_tend(grid, divergence, vorticity, viscosity, tend_u, err)
@@ -268,17 +251,17 @@
 
 !***********************************************************************
 !
-!  routine ocn_tendSalar
+!  routine ocn_tend_tracer
 !
-!&gt; \brief   Computes scalar tendency
+!&gt; \brief   Computes tracer tendency
 !&gt; \author  Doug Jacobsen
 !&gt; \date    23 September 2011
 !&gt; \version SVN:$Id$
 !&gt; \details 
-!&gt;  This routine computes the scalar (tracer) tendency for the ocean
+!&gt;  This routine computes tracer tendencies for the ocean
 !
 !-----------------------------------------------------------------------
-   subroutine ocn_tend_scalar(tend, s, d, grid, dt)!{{{
+   subroutine ocn_tend_tracer(tend, s, d, grid, dt)!{{{
       implicit none
 
       type (tend_type), intent(inout) :: tend !&lt; Input/Output: Tendency structure
@@ -294,7 +277,7 @@
 
       integer :: err, iEdge, k
 
-      call mpas_timer_start(&quot;ocn_tend_scalar&quot;)
+      call mpas_timer_start(&quot;ocn_tend_tracer&quot;)
 
       uTransport  =&gt; s % uTransport % array
       h           =&gt; s % h % array
@@ -376,723 +359,14 @@
       call mpas_timer_stop(&quot;restoring&quot;, tracerRestoringTimer)
 
  10   format(2i8,10e20.10)
-      call mpas_timer_stop(&quot;ocn_tend_scalar&quot;)
+      call mpas_timer_stop(&quot;ocn_tend_tracer&quot;)
 
       deallocate(uh)
 
-   end subroutine ocn_tend_scalar!}}}
+   end subroutine ocn_tend_tracer!}}}
 
 !***********************************************************************
 !
-!  routine ocn_diagnostic_solve
-!
-!&gt; \brief   Computes diagnostic variables
-!&gt; \author  Doug Jacobsen
-!&gt; \date    23 September 2011
-!&gt; \version SVN:$Id$
-!&gt; \details 
-!&gt;  This routine computes the diagnostic variables for the ocean
-!
-!-----------------------------------------------------------------------
-
-   subroutine ocn_diagnostic_solve(dt, s, grid)!{{{
-      implicit none
-
-      real (kind=RKIND), intent(in) :: dt !&lt; Input: Time step
-      type (state_type), intent(inout) :: s !&lt; Input/Output: State information
-      type (mesh_type), intent(in) :: grid !&lt; Input: Grid information
-
-
-      integer :: iEdge, iCell, iVertex, k, cell1, cell2, vertex1, vertex2, eoe, i, j
-      integer :: boundaryMask, velMask, nCells, nEdges, nVertices, nVertLevels, vertexDegree, err
-
-      integer, dimension(:), pointer :: nEdgesOnCell, nEdgesOnEdge, &amp;
-        maxLevelCell, maxLevelEdgeTop, maxLevelEdgeBot, &amp;
-        maxLevelVertexBot
-      integer, dimension(:,:), pointer :: cellsOnEdge, cellsOnVertex, &amp;
-        verticesOnEdge, edgesOnEdge, edgesOnVertex,boundaryCell, kiteIndexOnCell, verticesOnCell, edgeSignOnVertex, edgeSignOnCell, edgesOnCell
-
-      real (kind=RKIND) :: d2fdx2_cell1, d2fdx2_cell2, coef_3rd_order, r_tmp, invAreaCell1, invAreaCell2, invAreaTri1, invAreaTri2, invLength, h_vertex
-
-      real (kind=RKIND), dimension(:), allocatable:: pTop
-
-      real (kind=RKIND), dimension(:), pointer :: &amp;
-        bottomDepth, fVertex, dvEdge, dcEdge, areaCell, areaTriangle, ssh
-      real (kind=RKIND), dimension(:,:), pointer :: &amp;
-        weightsOnEdge, kiteAreasOnVertex, h_edge, h, u, v, pressure,&amp;
-        circulation, vorticity, ke, ke_edge, MontPot, wTop, zMid, &amp;
-        Vor_edge, Vor_vertex, Vor_cell, gradVor_n, gradVor_t, divergence, &amp;
-        rho, temperature, salinity, kev, kevc, uBolusGM, uTransport
-      real (kind=RKIND), dimension(:,:,:), pointer :: tracers, deriv_two
-      real (kind=RKIND), dimension(:,:), allocatable:: div_u
-      character :: c1*6
-
-      h           =&gt; s % h % array
-      u           =&gt; s % u % array
-      uTransport  =&gt; s % uTransport % array
-      uBolusGM    =&gt; s % uBolusGM % array
-      v           =&gt; s % v % array
-      h_edge      =&gt; s % h_edge % array
-      circulation =&gt; s % circulation % array
-      vorticity   =&gt; s % vorticity % array
-      divergence  =&gt; s % divergence % array
-      ke          =&gt; s % ke % array
-      kev         =&gt; s % kev % array
-      kevc        =&gt; s % kevc % array
-      ke_edge     =&gt; s % ke_edge % array
-      Vor_edge    =&gt; s % Vor_edge % array
-      Vor_vertex  =&gt; s % Vor_vertex % array
-      Vor_cell    =&gt; s % Vor_cell % array
-      gradVor_n   =&gt; s % gradVor_n % array
-      gradVor_t   =&gt; s % gradVor_t % array
-      rho         =&gt; s % rho % array
-      MontPot     =&gt; s % MontPot % array
-      pressure    =&gt; s % pressure % array
-      zMid        =&gt; s % zMid % array
-      ssh         =&gt; s % ssh % array
-      tracers     =&gt; s % tracers % array
-
-      weightsOnEdge     =&gt; grid % weightsOnEdge % array
-      kiteAreasOnVertex =&gt; grid % kiteAreasOnVertex % array
-      cellsOnEdge       =&gt; grid % cellsOnEdge % array
-      cellsOnVertex     =&gt; grid % cellsOnVertex % array
-      verticesOnEdge    =&gt; grid % verticesOnEdge % array
-      nEdgesOnCell      =&gt; grid % nEdgesOnCell % array
-      nEdgesOnEdge      =&gt; grid % nEdgesOnEdge % array
-      edgesOnCell       =&gt; grid % edgesOnCell % array
-      edgesOnEdge       =&gt; grid % edgesOnEdge % array
-      edgesOnVertex     =&gt; grid % edgesOnVertex % array
-      dcEdge            =&gt; grid % dcEdge % array
-      dvEdge            =&gt; grid % dvEdge % array
-      areaCell          =&gt; grid % areaCell % array
-      areaTriangle      =&gt; grid % areaTriangle % array
-      bottomDepth       =&gt; grid % bottomDepth % array
-      fVertex           =&gt; grid % fVertex % array
-      deriv_two         =&gt; grid % deriv_two % array
-      maxLevelCell      =&gt; grid % maxLevelCell % array
-      maxLevelEdgeTop   =&gt; grid % maxLevelEdgeTop % array
-      maxLevelEdgeBot   =&gt; grid % maxLevelEdgeBot % array
-      maxLevelVertexBot =&gt; grid % maxLevelVertexBot % array
-      kiteIndexOnCell =&gt; grid % kiteIndexOnCell % array
-      verticesOnCell =&gt; grid % verticesOnCell % array
-                  
-      nCells      = grid % nCells
-      nEdges      = grid % nEdges
-      nVertices   = grid % nVertices
-      nVertLevels = grid % nVertLevels
-      vertexDegree = grid % vertexDegree
-
-      boundaryCell =&gt; grid % boundaryCell % array
-
-      edgeSignOnVertex =&gt; grid % edgeSignOnVertex % array
-      edgeSignOnCell =&gt; grid % edgeSignOnCell % array
-
-
-      !
-      ! Compute height on cell edges at velocity locations
-      !   Namelist options control the order of accuracy of the reconstructed h_edge value
-      !
-      ! mrp 101115 note: in order to include flux boundary conditions, we will need to 
-      ! assign h_edge for maxLevelEdgeTop:maxLevelEdgeBot in the following section
-
-      ! initialize h_edge to avoid divide by zero and NaN problems.
-      h_edge = -1.0e34
-      coef_3rd_order = config_coef_3rd_order
-
-      do iEdge=1,nEdges
-         cell1 = cellsOnEdge(1,iEdge)
-         cell2 = cellsOnEdge(2,iEdge)
-         do k=1,maxLevelEdgeTop(iEdge)
-            h_edge(k,iEdge) = 0.5 * (h(k,cell1) + h(k,cell2))
-         end do
-      end do
-
-      !
-      ! set the velocity and height at dummy address
-      !    used -1e34 so error clearly occurs if these values are used.
-      !
-      u(:,nEdges+1) = -1e34
-      h(:,nCells+1) = -1e34
-      tracers(s % index_temperature,:,nCells+1) = -1e34
-      tracers(s % index_salinity,:,nCells+1) = -1e34
-
-      circulation(:,:) = 0.0
-      vorticity(:,:) = 0.0
-      divergence(:,:) = 0.0
-      ke(:,:) = 0.0
-      v(:,:) = 0.0
-      do iVertex = 1, nVertices
-         invAreaTri1 = 1.0 / areaTriangle(iVertex)
-         do i = 1, vertexDegree
-            iEdge = edgesOnVertex(i, iVertex)
-            do k = 1, maxLevelVertexBot(iVertex)
-              r_tmp = dcEdge(iEdge) * u(k, iEdge)
-
-              circulation(k, iVertex) = circulation(k, iVertex) + edgeSignOnVertex(i, iVertex) * r_tmp 
-              vorticity(k, iVertex) = vorticity(k, iVertex) + edgeSignOnVertex(i, iVertex) * r_tmp * invAreaTri1
-            end do
-         end do
-      end do
-
-      do iCell = 1, nCells
-         invAreaCell1 = 1.0 / areaCell(iCell)
-         do i = 1, nEdgesOnCell(iCell)
-            iEdge = edgesOnCell(i, iCell)
-            do k = 1, maxLevelCell(iCell)
-               r_tmp = dvEdge(iEdge) * u(k, iEdge) * invAreaCell1
-
-               divergence(k, iCell) = divergence(k, iCell) - edgeSignOnCell(i, iCell) * r_tmp
-               ke(k, iCell) = ke(k, iCell) + 0.25 * r_tmp * dcEdge(iEdge) * u(k,iEdge)
-            end do
-         end do
-      end do
-
-      do iEdge=1,nEdges
-         ! Compute v (tangential) velocities
-         do i=1,nEdgesOnEdge(iEdge)
-            eoe = edgesOnEdge(i,iEdge)
-            ! mrp 101115 note: in order to include flux boundary conditions,
-            ! the following loop may need to change to maxLevelEdgeBot
-            do k = 1,maxLevelEdgeTop(iEdge) 
-               v(k,iEdge) = v(k,iEdge) + weightsOnEdge(i,iEdge) * u(k, eoe)
-            end do
-         end do
-
-      end do
-
-      !
-      ! Compute kinetic energy in each vertex
-      !
-      kev(:,:) = 0.0; kevc(:,:) = 0.0
-      do iVertex = 1, nVertices*ke_vertex_flag
-        do i = 1, vertexDegree
-          iEdge = edgesOnVertex(i, iVertex)
-          r_tmp = dcEdge(iEdge) * dvEdge(iEdge) * 0.25 / areaTriangle(iVertex)
-          do k = 1, nVertLevels
-            kev(k, iVertex) = kev(k, iVertex) + r_tmp * u(k, iEdge)**2
-          end do
-        end do
-      end do
-
-      do iCell = 1, nCells*ke_vertex_flag
-        invAreaCell1 = 1.0 / areaCell(iCell)
-        do i = 1, nEdgesOnCell(iCell)
-          j = kiteIndexOnCell(i, iCell)
-          iVertex = verticesOnCell(i, iCell)
-          do k = 1, nVertLevels
-            kevc(k, iCell) = kevc(k, iCell) + kiteAreasOnVertex(j, iVertex) * kev(k, iVertex) * invAreaCell1
-          end do
-        end do
-      end do
-
-      !
-      ! Compute kinetic energy in each cell by blending ke and kevc
-      !
-      do iCell=1,nCells*ke_vertex_flag
-         do k=1,nVertLevels
-            ke(k,iCell) = 5.0/8.0*ke(k,iCell) + 3.0/8.0*kevc(k,iCell)
-         end do
-      end do
-
-      !
-      ! Compute ke on cell edges at velocity locations for quadratic bottom drag. 
-      !
-      ! mrp 101025 efficiency note: we could get rid of ke_edge completely by 
-      ! using sqrt(u(k,iEdge)**2 + v(k,iEdge)**2) in its place elsewhere.
-      do iEdge=1,nEdges
-         cell1 = cellsOnEdge(1,iEdge)
-         cell2 = cellsOnEdge(2,iEdge)
-         do k=1,maxLevelEdgeTop(iEdge)
-            ke_edge(k,iEdge) = 0.5 * (ke(k,cell1) + ke(k,cell2))
-         end do
-      end do
-
-      !
-      ! Compute height at vertices, pv at vertices, and average pv to edge locations
-      !  ( this computes Vor_vertex at all vertices bounding real cells and distance-1 ghost cells )
-      !
-      do iVertex = 1,nVertices
-         invAreaTri1 = 1.0 / areaTriangle(iVertex)
-         do k=1,maxLevelVertexBot(iVertex)
-            h_vertex = 0.0
-            do i=1,vertexDegree
-               h_vertex = h_vertex + h(k,cellsOnVertex(i,iVertex)) * kiteAreasOnVertex(i,iVertex)
-            end do
-            h_vertex = h_vertex * invAreaTri1
-
-            Vor_vertex(k,iVertex) = (fCoef*fVertex(iVertex) + vorticity(k,iVertex)) / h_vertex
-         end do
-      end do
-
-      Vor_cell(:,:) = 0.0
-      Vor_edge(:,:) = 0.0
-      do iEdge = 1, nEdges
-        vertex1 = verticesOnEdge(1, iEdge)
-        vertex2 = verticesOnEdge(2, iEdge)
-        do k = 1, maxLevelEdgeBot(iEdge)
-          Vor_edge(k, iEdge) = 0.5 * (Vor_vertex(k, vertex1) + Vor_vertex(k, vertex2))
-        end do
-      end do
-
-      do iCell = 1, nCells
-        invAreaCell1 = 1.0 / areaCell(iCell)
-
-        do i = 1, nEdgesOnCell(iCell)
-          j = kiteIndexOnCell(i, iCell)
-          iVertex = verticesOnCell(i, iCell)
-          do k = 1, maxLevelCell(iCell)
-            Vor_cell(k, iCell) = Vor_cell(k, iCell) + kiteAreasOnVertex(j, iVertex) * Vor_vertex(k, iVertex) * invAreaCell1
-          end do
-        end do
-      end do
-
-      do iEdge = 1,nEdges
-         cell1 = cellsOnEdge(1, iEdge)
-         cell2 = cellsOnEdge(2, iEdge)
-         vertex1 = verticesOnedge(1, iEdge)
-         vertex2 = verticesOnedge(2, iEdge)
-
-         invLength = 1.0 / dcEdge(iEdge)
-         ! Compute gradient of PV in normal direction
-         !   ( this computes gradVor_n for all edges bounding real cells )
-         do k=1,maxLevelEdgeTop(iEdge)
-            gradVor_n(k,iEdge) = (Vor_cell(k,cell2) - Vor_cell(k,cell1)) * invLength
-         enddo
-
-         invLength = 1.0 / dvEdge(iEdge)
-         ! Compute gradient of PV in the tangent direction
-         !   ( this computes gradVor_t at all edges bounding real cells and distance-1 ghost cells )
-         do k = 1,maxLevelEdgeBot(iEdge)
-           gradVor_t(k,iEdge) = (Vor_vertex(k,vertex2) - Vor_vertex(k,vertex1)) * invLength
-         enddo
-
-      enddo
-
-      !
-      ! Modify PV edge with upstream bias.
-      !
-      do iEdge = 1,nEdges
-         do k = 1,maxLevelEdgeBot(iEdge)
-           Vor_edge(k,iEdge) = Vor_edge(k,iEdge) &amp;
-             - config_apvm_scale_factor * dt* (  u(k,iEdge) * gradVor_n(k,iEdge) &amp;
-                          + v(k,iEdge) * gradVor_t(k,iEdge) )
-         enddo
-      enddo
-
-      !
-      ! equation of state
-      !
-      ! For an isopycnal model, density should remain constant.
-      ! For zlevel, calculate in-situ density
-      if (config_vert_coord_movement.ne.'isopycnal') then
-         call mpas_timer_start(&quot;equation of state&quot;, .false., diagEOSTimer)
-         call ocn_equation_of_state_rho(s, grid, 0, 'relative', err)
-      ! mrp 110324 In order to visualize rhoDisplaced, include the following
-         call ocn_equation_of_state_rho(s, grid, 1, 'relative', err)
-         call mpas_timer_stop(&quot;equation of state&quot;, diagEOSTimer)
-      endif
-
-      !
-      ! Pressure
-      ! This section must be after computing rho
-      !
-      ! dwj: 10/25/2011 - Need to explore isopycnal vs zlevel flags
-      if (config_pressure_gradient_type.eq.'MontgomeryPotential') then
-
-        ! For Isopycnal model.
-        ! Compute pressure at top of each layer, and then
-        ! Montgomery Potential.
-        allocate(pTop(nVertLevels))
-        do iCell=1,nCells
-
-           ! assume atmospheric pressure at the surface is zero for now.
-           pTop(1) = 0.0
-           ! For isopycnal mode, p is the Montgomery Potential.
-           ! At top layer it is g*SSH, where SSH may be off by a 
-           ! constant (ie, bottomDepth can be relative to top or bottom)
-           MontPot(1,iCell) = gravity &amp;
-              * (bottomDepth(iCell) + sum(h(1:nVertLevels,iCell)))
-
-           do k=2,nVertLevels
-              pTop(k) = pTop(k-1) + rho(k-1,iCell)*gravity* h(k-1,iCell)
-
-              ! from delta M = p delta / rho
-              MontPot(k,iCell) = MontPot(k-1,iCell) &amp;
-                 + pTop(k)*(1.0/rho(k,iCell) - 1.0/rho(k-1,iCell)) 
-           end do
-
-        end do
-        deallocate(pTop)
-
-      else
-
-        do iCell=1,nCells
-           ! pressure for generalized coordinates
-           ! assume atmospheric pressure at the surface is zero for now.
-           pressure(1,iCell) = rho(1,iCell)*gravity &amp;
-              * 0.5*h(1,iCell)
-
-           do k=2,maxLevelCell(iCell)
-              pressure(k,iCell) = pressure(k-1,iCell)  &amp;
-                + 0.5*gravity*(  rho(k-1,iCell)*h(k-1,iCell) &amp;
-                               + rho(k  ,iCell)*h(k  ,iCell))
-           end do
-
-           ! Compute zMid, the z-coordinate of the middle of the layer.
-           ! This is used for the rho g grad z momentum term.
-           ! Note the negative sign, since bottomDepth is positive
-           ! and z-coordinates are negative below the surface.
-           k = maxLevelCell(iCell)
-           zMid(k:nVertLevels,iCell) = -bottomDepth(iCell) + 0.5*h(k,iCell)
-
-           do k=maxLevelCell(iCell)-1, 1, -1
-              zMid(k,iCell) = zMid(k+1,iCell)  &amp;
-                + 0.5*(  h(k+1,iCell) &amp;
-                       + h(k  ,iCell))
-           end do
-
-        end do
-
-      endif
-
-      !
-      ! Sea Surface Height
-      !
-      do iCell=1,nCells
-         ! Start at the bottom where we know the depth, and go up.
-         ! The bottom depth for this cell is bottomDepth(iCell).
-         ! Note the negative sign, since bottomDepth is positive
-         ! and z-coordinates are negative below the surface.
-
-         ssh(iCell) = - bottomDepth(iCell) + sum(h(1:maxLevelCell(iCell),iCell))
-
-      end do
-
-      !
-      ! Apply the GM closure as a bolus velocity
-      !
-      if (config_h_kappa .GE. epsilon(0D0)) then
-         call ocn_gm_compute_uBolus(s,grid)
-      else
-         ! mrp efficiency note: if uBolusGM is guaranteed to be zero, this can be removed.
-         uBolusGM = 0.0
-      end if
-
-   end subroutine ocn_diagnostic_solve!}}}
-
-!***********************************************************************
-!
-!  routine ocn_wtop
-!
-!&gt; \brief   Computes vertical velocity
-!&gt; \author  Doug Jacobsen
-!&gt; \date    23 September 2011
-!&gt; \version SVN:$Id$
-!&gt; \details 
-!&gt;  This routine computes the vertical velocity in the top layer for the ocean
-!
-!-----------------------------------------------------------------------
-   subroutine ocn_wtop(grid,h,h_edge,u,wTop, err)!{{{
-
-      !-----------------------------------------------------------------
-      !
-      ! input variables
-      !
-      !-----------------------------------------------------------------
-
-      type (mesh_type), intent(in) :: &amp;
-         grid          !&lt; Input: grid information
-
-      real (kind=RKIND), dimension(:,:), intent(in) :: &amp;
-         h    !&lt; Input: thickness
-
-      real (kind=RKIND), dimension(:,:), intent(in) :: &amp;
-         h_edge     !&lt; Input: h interpolated to an edge
-
-      real (kind=RKIND), dimension(:,:), intent(in) :: &amp;
-         u     !&lt; Input: velocity
-
-      !-----------------------------------------------------------------
-      !
-      ! output variables
-      !
-      !-----------------------------------------------------------------
-
-      real (kind=RKIND), dimension(:,:), intent(out) :: &amp;
-         wTop     !&lt; Output: vertical transport at top edge
-
-      integer, intent(out) :: err !&lt; Output: error flag
-
-      !-----------------------------------------------------------------
-      !
-      ! local variables
-      !
-      !-----------------------------------------------------------------
-
-      integer :: iEdge, iCell, iVertex, k, cell1, cell2, vertex1, vertex2, eoe, i, j, cov
-      real (kind=RKIND) :: flux, vorticity_abs, h_vertex, workpv, rho0Inv, hSum, invAreaCell
-
-      integer :: nCells, nEdges, nVertices, nVertLevels, vertexDegree
-
-
-      real (kind=RKIND), dimension(:), pointer :: &amp;
-        dvEdge, areaCell, vertCoordMovementWeights
-      real (kind=RKIND), dimension(:), allocatable:: div_hu, h_tend_col
-      real (kind=RKIND) :: div_hu_btr
-
-      integer, dimension(:,:), pointer :: cellsOnEdge, cellsOnVertex, &amp;
-        verticesOnEdge, edgesOnCell, edgesOnEdge, edgesOnVertex, &amp;
-        boundaryEdge, boundaryCell, edgeSignOnCell
-      integer, dimension(:), pointer :: nEdgesOnCell, nEdgesOnEdge, &amp;
-        maxLevelCell, maxLevelEdgeTop, maxLevelEdgeBot, &amp;
-        maxLevelVertexBot,  maxLevelVertexTop
-
-      err = 0
-
-      nEdgesOnCell      =&gt; grid % nEdgesOnCell % array
-      areaCell          =&gt; grid % areaCell % array
-      cellsOnEdge       =&gt; grid % cellsOnEdge % array
-      edgesOnCell       =&gt; grid % edgesOnCell % array
-      edgeSignOnCell    =&gt; grid % edgeSignOnCell % array
-      maxLevelCell      =&gt; grid % maxLevelCell % array
-      maxLevelEdgeBot   =&gt; grid % maxLevelEdgeBot % array
-      dvEdge            =&gt; grid % dvEdge % array
-      vertCoordMovementWeights =&gt; grid % vertCoordMovementWeights % array
-
-      nCells      = grid % nCells
-      nEdges      = grid % nEdges
-      nVertLevels = grid % nVertLevels
-
-
-      if (config_vert_coord_movement.eq.'isopycnal') then
-        ! set vertical velocity to zero in isopycnal case
-        wTop=0.0_RKIND
-        return
-      end if
-
-      allocate(div_hu(nVertLevels), h_tend_col(nVertLevels))
-
-      !
-      ! Compute div(h^{edge} u) for each cell
-      ! See Ringler et al. (2010) jcp paper, eqn 19, 21, and fig. 3.
-      !
-
-      do iCell=1,nCells
-        div_hu(:) = 0.0_RKIND
-        div_hu_btr = 0.0_RKIND
-        hSum = 0.0_RKIND
-        invAreaCell = 1.0_RKIND / areaCell(iCell)
-
-        do i = 1, nEdgesOnCell(iCell)
-          iEdge = edgesOnCell(i, iCell)
-
-          do k = 1, maxLevelEdgeBot(iEdge)
-            flux = u(k, iEdge) * dvEdge(iEdge) * h_edge(k, iEdge)
-            flux = edgeSignOnCell(i, iCell) * flux * invAreaCell
-            div_hu(k) = div_hu(k) - flux
-            div_hu_btr = div_hu_btr - flux
-          end do
-        end do
-
-        do k = 1, maxLevelCell(iCell)
-           h_tend_col(k) = - vertCoordMovementWeights(k) * h(k, iCell) * div_hu_btr
-           hSum = hSum + vertCoordMovementWeights(k) * h(k, iCell)
-        end do
-
-        if(hSum &gt; 0.0) then
-           h_tend_col = h_tend_col / hSum
-        end if
-
-        ! Vertical velocity through layer interface at top and 
-        ! bottom is zero.
-        wTop(1,iCell) = 0.0_RKIND
-        wTop(maxLevelCell(iCell)+1,iCell) = 0.0_RKIND
-        do k=maxLevelCell(iCell),2,-1
-           wTop(k,iCell) = wTop(k+1,iCell) - div_hu(k) - h_tend_col(k)
-        end do
-      end do
-
-      deallocate(div_hu, h_tend_col)
-
-   end subroutine ocn_wtop!}}}
-
-!***********************************************************************
-!
-!  routine ocn_fuperp
-!
-!&gt; \brief   Computes f u_perp
-!&gt; \author  Doug Jacobsen
-!&gt; \date    23 September 2011
-!&gt; \version SVN:$Id$
-!&gt; \details 
-!&gt;  This routine computes f u_perp for the ocean
-!
-!-----------------------------------------------------------------------
-
-   subroutine ocn_fuperp(s, grid)!{{{
-      implicit none
-
-      type (state_type), intent(inout) :: s !&lt; Input/Output: State information
-      type (mesh_type), intent(in) :: grid !&lt; Input: Grid information
-
-! mrp 110512 I just split compute_tend into compute_tend_u and compute_tend_h.
-!  Some of these variables can be removed, but at a later time.
-      integer :: iEdge, cell1, cell2, eoe, i, j, k
-
-      integer :: nEdgesSolve
-      real (kind=RKIND), dimension(:), pointer :: fEdge
-      real (kind=RKIND), dimension(:,:), pointer :: weightsOnEdge, u, uBcl
-      type (dm_info) :: dminfo
-
-      integer, dimension(:), pointer :: maxLevelEdgeTop, nEdgesOnEdge
-      integer, dimension(:,:), pointer :: cellsOnEdge, edgesOnEdge
-
-      call mpas_timer_start(&quot;ocn_fuperp&quot;)
-
-      u           =&gt; s % u % array
-      uBcl        =&gt; s % uBcl % array
-      weightsOnEdge     =&gt; grid % weightsOnEdge % array
-      fEdge             =&gt; grid % fEdge % array
-      maxLevelEdgeTop      =&gt; grid % maxLevelEdgeTop % array
-      cellsOnEdge       =&gt; grid % cellsOnEdge % array
-      nEdgesOnEdge      =&gt; grid % nEdgesOnEdge % array
-      edgesOnEdge       =&gt; grid % edgesOnEdge % array
-
-      fEdge       =&gt; grid % fEdge % array
-
-      nEdgesSolve = grid % nEdgesSolve
-
-      !
-      ! Put f*uBcl^{perp} in u as a work variable
-      !
-      do iEdge=1,nEdgesSolve
-         cell1 = cellsOnEdge(1,iEdge)
-         cell2 = cellsOnEdge(2,iEdge)
-
-         do k=1,maxLevelEdgeTop(iEdge)
-
-            u(k,iEdge) = 0.0
-            do j = 1,nEdgesOnEdge(iEdge)
-               eoe = edgesOnEdge(j,iEdge)
-               u(k,iEdge) = u(k,iEdge) + weightsOnEdge(j,iEdge) * uBcl(k,eoe) * fEdge(eoe) 
-            end do
-         end do
-      end do
-
-      call mpas_timer_stop(&quot;ocn_fuperp&quot;)
-
-   end subroutine ocn_fuperp!}}}
-
-!***********************************************************************
-!
-!  routine ocn_filter_btr_mode_u
-!
-!&gt; \brief   filters barotropic mode out of the velocity variable.
-!&gt; \author  Mark Petersen
-!&gt; \date    23 September 2011
-!&gt; \version SVN:$Id$
-!&gt; \details 
-!&gt;  This routine filters barotropic mode out of the velocity variable.
-!
-!-----------------------------------------------------------------------
-   subroutine ocn_filter_btr_mode_u(s, grid)!{{{
-      implicit none
-
-      type (state_type), intent(inout) :: s
-      type (mesh_type), intent(in) :: grid
-
-      integer :: iEdge, k, nEdges
-      real (kind=RKIND) :: vertSum, uhSum, hSum
-      real (kind=RKIND), dimension(:,:), pointer :: h_edge, u
-      integer, dimension(:), pointer :: maxLevelEdgeTop
-
-      call mpas_timer_start(&quot;ocn_filter_btr_mode_u&quot;)
-
-      u           =&gt; s % u % array
-      h_edge      =&gt; s % h_edge % array
-      maxLevelEdgeTop =&gt; grid % maxLevelEdgeTop % array
-      nEdges      = grid % nEdges
-
-      do iEdge=1,nEdges
-
-        ! hSum is initialized outside the loop because on land boundaries 
-        ! maxLevelEdgeTop=0, but I want to initialize hSum with a 
-        ! nonzero value to avoid a NaN.
-        uhSum = h_edge(1,iEdge) * u(1,iEdge)
-        hSum  = h_edge(1,iEdge)
-
-        do k=2,maxLevelEdgeTop(iEdge)
-          uhSum = uhSum + h_edge(k,iEdge) * u(k,iEdge)
-          hSum  =  hSum + h_edge(k,iEdge)
-        enddo
-
-        vertSum = uhSum/hSum
-        do k=1,maxLevelEdgeTop(iEdge)
-          u(k,iEdge) = u(k,iEdge) - vertSum
-        enddo
-      enddo ! iEdge
-
-      call mpas_timer_stop(&quot;ocn_filter_btr_mode_u&quot;)
-
-   end subroutine ocn_filter_btr_mode_u!}}}
-
-!***********************************************************************
-!
-!  routine ocn_filter_btr_mode_tend_u
-!
-!&gt; \brief   ocn_filters barotropic mode out of the u tendency
-!&gt; \author  Mark Petersen
-!&gt; \date    23 September 2011
-!&gt; \version SVN:$Id$
-!&gt; \details 
-!&gt;  This routine filters barotropic mode out of the u tendency.
-!
-!-----------------------------------------------------------------------
-   subroutine ocn_filter_btr_mode_tend_u(tend, s, grid)!{{{
-      implicit none
-
-      type (tend_type), intent(inout) :: tend
-      type (state_type), intent(in) :: s
-      type (mesh_type), intent(in) :: grid
-
-      integer :: iEdge, k, nEdges
-      real (kind=RKIND) :: vertSum, uhSum, hSum
-      real (kind=RKIND), dimension(:,:), pointer :: h_edge, tend_u
-
-      integer, dimension(:), pointer :: maxLevelEdgeTop
-
-      call mpas_timer_start(&quot;ocn_filter_btr_mode_tend_u&quot;)
-
-      tend_u      =&gt; tend % u % array
-      h_edge      =&gt; s % h_edge % array
-      maxLevelEdgeTop =&gt; grid % maxLevelEdgeTop % array
-      nEdges      = grid % nEdges
-
-      do iEdge=1,nEdges
-
-        ! hSum is initialized outside the loop because on land boundaries 
-        ! maxLevelEdgeTop=0, but I want to initialize hSum with a 
-        ! nonzero value to avoid a NaN.
-        uhSum = h_edge(1,iEdge) * tend_u(1,iEdge)
-        hSum  = h_edge(1,iEdge)
-
-        do k=2,maxLevelEdgeTop(iEdge)
-          uhSum = uhSum + h_edge(k,iEdge) * tend_u(k,iEdge)
-          hSum  =  hSum + h_edge(k,iEdge)
-        enddo
-
-        vertSum = uhSum/hSum
-        do k=1,maxLevelEdgeTop(iEdge)
-          tend_u(k,iEdge) = tend_u(k,iEdge) - vertSum
-        enddo
-      enddo ! iEdge
-
-      call mpas_timer_stop(&quot;ocn_filter_btr_mode_tend_u&quot;)
-
-   end subroutine ocn_filter_btr_mode_tend_u!}}}
-
-!***********************************************************************
-!
 !  routine ocn_tendency_init
 !
 !&gt; \brief   Initializes flags used within tendency routines.
@@ -1109,28 +383,6 @@
 
         err = 0
 
-        coef_3rd_order = 0.
-
-        if(config_include_KE_vertex) then
-            ke_vertex_flag = 1
-            ke_cell_flag = 0
-        else
-            ke_vertex_flag = 0
-            ke_cell_flag = 1
-        endif
-
-        if (trim(config_time_integrator) == 'RK4') then
-            ! for RK4, PV is really PV = (eta+f)/h
-            fCoef = 1
-        elseif (trim(config_time_integrator) == 'split_explicit' &amp;
-          .or.trim(config_time_integrator) == 'unsplit_explicit') then
-            ! for split explicit, PV is eta/h because f is added separately to the momentum forcing.
-            ! mrp temp, new should be:
-            fCoef = 0
-            ! old, for testing:
-            !         fCoef = 1
-        end if
-
     end subroutine ocn_tendency_init!}}}
 
 !***********************************************************************

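Among the routines removed from ocn_tendency above (and carried by the new diagnostics module) is ocn_wtop, which integrates the layer-wise divergence of h*u upward from the bottom to obtain the vertical transport at layer interfaces. The single-column sketch below shows that integration; the divergence values are invented and chosen to sum to zero, so the h_tend_col correction applied in the real routine would vanish here.

program wtop_column_demo
   implicit none
   integer, parameter :: RKIND = selected_real_kind(13)
   integer, parameter :: nVertLevels = 4
   real (kind=RKIND), dimension(nVertLevels)   :: div_hu   ! layer-wise div(h^edge u), made up
   real (kind=RKIND), dimension(nVertLevels+1) :: wTop     ! transport through layer interfaces
   integer :: k

   div_hu = (/ 1.0e-6_RKIND, -2.0e-6_RKIND, 0.5e-6_RKIND, 0.5e-6_RKIND /)

   wTop(nVertLevels+1) = 0.0_RKIND            ! no transport through the bottom
   do k = nVertLevels, 2, -1
      wTop(k) = wTop(k+1) - div_hu(k)         ! accumulate divergence upward
   end do
   wTop(1) = 0.0_RKIND                        ! enforced at the surface
   print '(a,5es12.4)', 'wTop = ', wTop
end program wtop_column_demo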
Modified: branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_time_average.F
===================================================================
--- branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_time_average.F        2013-02-15 18:04:59 UTC (rev 2482)
+++ branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_time_average.F        2013-02-19 17:55:19 UTC (rev 2483)
@@ -15,7 +15,7 @@
 
         real (kind=RKIND), dimension(:), pointer :: acc_ssh, acc_sshVar
         real (kind=RKIND), dimension(:,:), pointer :: acc_uReconstructZonal, acc_uReconstructMeridional, acc_uReconstructZonalVar, acc_uReconstructMeridionalVar
-        real (kind=RKIND), dimension(:,:), pointer :: acc_u, acc_uVar
+        real (kind=RKIND), dimension(:,:), pointer :: acc_u, acc_uVar, acc_vertVelocityTop
 
         nAccumulate =&gt; state % nAccumulate % scalar
 
@@ -27,6 +27,7 @@
         acc_uReconstructMeridionalVar =&gt; state % acc_uReconstructMeridionalVar % array
         acc_u =&gt; state % acc_u % array
         acc_uVar =&gt; state % acc_uVar % array
+        acc_vertVelocityTop =&gt; state % acc_vertVelocityTop % array
 
         nAccumulate = 0
 
@@ -38,6 +39,7 @@
         acc_uReconstructMeridionalVar = 0.0
         acc_u = 0.0
         acc_uVar = 0.0
+        acc_vertVelocityTop = 0.0
 
     end subroutine ocn_time_average_init!}}}
 
@@ -48,13 +50,13 @@
         real (kind=RKIND), pointer :: nAccumulate, old_nAccumulate
 
         real (kind=RKIND), dimension(:), pointer :: ssh
-        real (kind=RKIND), dimension(:,:), pointer :: uReconstructZonal, uReconstructMeridional, u
+        real (kind=RKIND), dimension(:,:), pointer :: uReconstructZonal, uReconstructMeridional, u, vertVelocityTop
 
-        real (kind=RKIND), dimension(:,:), pointer :: acc_u, acc_uVar
+        real (kind=RKIND), dimension(:,:), pointer :: acc_u, acc_uVar, acc_vertVelocityTop
         real (kind=RKIND), dimension(:,:), pointer :: acc_uReconstructZonal, acc_uReconstructMeridional, acc_uReconstructZonalVar, acc_uReconstructMeridionalVar
         real (kind=RKIND), dimension(:), pointer :: acc_ssh, acc_sshVar
 
-        real (kind=RKIND), dimension(:,:), pointer :: old_acc_u, old_acc_uVar
+        real (kind=RKIND), dimension(:,:), pointer :: old_acc_u, old_acc_uVar, old_acc_vertVelocityTop
         real (kind=RKIND), dimension(:,:), pointer :: old_acc_uReconstructZonal, old_acc_uReconstructMeridional, old_acc_uReconstructZonalVar, old_acc_uReconstructMeridionalVar
         real (kind=RKIND), dimension(:), pointer :: old_acc_ssh, old_acc_sshVar
 
@@ -65,6 +67,7 @@
         uReconstructZonal =&gt; state % uReconstructZonal % array
         uReconstructMeridional =&gt; state % uReconstructMeridional % array
         u =&gt; state % u % array
+        vertVelocityTop =&gt; state % vertVelocityTop % array
 
         acc_ssh =&gt; state % acc_ssh % array
         acc_sshVar =&gt; state % acc_sshVar % array
@@ -74,6 +77,7 @@
         acc_uReconstructMeridionalVar =&gt; state % acc_uReconstructMeridionalVar % array
         acc_u =&gt; state % acc_u % array
         acc_uVar =&gt; state % acc_uVar % array
+        acc_vertVelocityTop =&gt; state % acc_vertVelocityTop % array
 
         old_acc_ssh =&gt; old_state % acc_ssh % array
         old_acc_sshVar =&gt; old_state % acc_sshVar % array
@@ -83,6 +87,7 @@
         old_acc_uReconstructMeridionalVar =&gt; old_state % acc_uReconstructMeridionalVar % array
         old_acc_u =&gt; old_state % acc_u % array
         old_acc_uVar =&gt; old_state % acc_uVar % array
+        old_acc_vertVelocityTop =&gt; old_state % acc_vertVelocityTop % array
 
         acc_ssh = old_acc_ssh + ssh
         acc_sshVar = old_acc_sshVar + ssh**2
@@ -92,6 +97,7 @@
         acc_uReconstructMeridionalVar = old_acc_uReconstructMeridionalVar + uReconstructMeridional**2
         acc_u = old_acc_u + u
         acc_uVar = old_acc_uVar + u**2
+        acc_vertVelocityTop = old_acc_vertVelocityTop + vertVelocityTop
 
         nAccumulate = old_nAccumulate + 1
     end subroutine ocn_time_average_accumulate!}}}
@@ -103,7 +109,7 @@
 
         real (kind=RKIND), dimension(:), pointer :: acc_ssh, acc_sshVar
         real (kind=RKIND), dimension(:,:), pointer :: acc_uReconstructZonal, acc_uReconstructMeridional, acc_uReconstructZonalVar, acc_uReconstructMeridionalVar
-        real (kind=RKIND), dimension(:,:), pointer :: acc_u, acc_uVar
+        real (kind=RKIND), dimension(:,:), pointer :: acc_u, acc_uVar, acc_vertVelocityTop
 
         nAccumulate =&gt; state % nAccumulate  % scalar
 
@@ -115,6 +121,7 @@
         acc_uReconstructMeridionalVar =&gt; state % acc_uReconstructMeridionalVar % array
         acc_u =&gt; state % acc_u % array
         acc_uVar =&gt; state % acc_uVar % array
+        acc_vertVelocityTop =&gt; state % acc_vertVelocityTop % array
 
         if(nAccumulate &gt; 0) then
           acc_ssh = acc_ssh / nAccumulate
@@ -125,6 +132,7 @@
           acc_uReconstructMeridionalVar = acc_uReconstructMeridionalVar / nAccumulate
           acc_u = acc_u / nAccumulate
           acc_uVar = acc_uVar / nAccumulate
+          acc_vertVelocityTop = acc_vertVelocityTop / nAccumulate
         end if
     end subroutine ocn_time_average_normalize!}}}
 

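The time-average change above adds acc_vertVelocityTop to the existing init/accumulate/normalize cycle. The sketch below reproduces that cycle for a single 2-D field; array sizes, the number of steps, and the random field values are made up (and nAccumulate is an integer here, whereas the model stores it as a real scalar).

program time_average_demo
   implicit none
   integer, parameter :: RKIND = selected_real_kind(13)
   integer, parameter :: nVertLevelsP1 = 5, nCells = 3, nSteps = 4
   real (kind=RKIND), dimension(nVertLevelsP1, nCells) :: vertVelocityTop, acc_vertVelocityTop
   integer :: nAccumulate, n

   acc_vertVelocityTop = 0.0_RKIND            ! ocn_time_average_init
   nAccumulate = 0
   do n = 1, nSteps                           ! ocn_time_average_accumulate, each step
      call random_number(vertVelocityTop)     ! stand-in for the model state
      acc_vertVelocityTop = acc_vertVelocityTop + vertVelocityTop
      nAccumulate = nAccumulate + 1
   end do
   if (nAccumulate > 0) then                  ! ocn_time_average_normalize, before output
      acc_vertVelocityTop = acc_vertVelocityTop / nAccumulate
   end if
   print '(a,f8.4)', 'mean of averaged field: ', sum(acc_vertVelocityTop)/size(acc_vertVelocityTop)
end program time_average_demo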
Modified: branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_time_integration_rk4.F
===================================================================
--- branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_time_integration_rk4.F        2013-02-15 18:04:59 UTC (rev 2482)
+++ branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_time_integration_rk4.F        2013-02-19 17:55:19 UTC (rev 2483)
@@ -22,6 +22,7 @@
    use mpas_timer
 
    use ocn_tendency
+   use ocn_diagnostics
 
    use ocn_equation_of_state
    use ocn_vmix
@@ -137,7 +138,7 @@
 
         call mpas_timer_start(&quot;RK4-diagnostic halo update&quot;)
         call mpas_dmpar_exch_halo_field(domain % blocklist % provis % Vor_edge)
-        if (config_h_mom_eddy_visc4 &gt; 0.0) then
+        if (config_mom_del4 &gt; 0.0) then
            call mpas_dmpar_exch_halo_field(domain % blocklist % provis % divergence)
            call mpas_dmpar_exch_halo_field(domain % blocklist % provis % vorticity)
         end if
@@ -166,7 +167,7 @@
                call ocn_filter_btr_mode_tend_u(block % tend, block % provis, block % mesh)
            endif
 
-           call ocn_tend_scalar(block % tend, block % provis, block % diagnostics, block % mesh, dt)
+           call ocn_tend_tracer(block % tend, block % provis, block % diagnostics, block % mesh, dt)
            block =&gt; block % next
         end do
         call mpas_timer_stop(&quot;RK4-tendency computations&quot;)
@@ -254,7 +255,7 @@
       call mpas_timer_stop(&quot;RK4-main loop&quot;)
 
       !
-      !  A little clean up at the end: decouple new scalar fields and compute diagnostics for new state
+      !  A little clean up at the end: rescale tracer fields and compute diagnostics for new state
       !
       call mpas_timer_start(&quot;RK4-cleaup phase&quot;)
 

Modified: branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_time_integration_split.F
===================================================================
--- branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_time_integration_split.F        2013-02-15 18:04:59 UTC (rev 2482)
+++ branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_time_integration_split.F        2013-02-19 17:55:19 UTC (rev 2483)
@@ -24,6 +24,7 @@
    use mpas_timer
 
    use ocn_tendency
+   use ocn_diagnostics
 
    use ocn_equation_of_state
    use ocn_vmix
@@ -167,7 +168,7 @@
 
          call mpas_timer_start(&quot;se halo diag&quot;, .false., timer_halo_diagnostic)
          call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % Vor_edge)
-         if (config_h_mom_eddy_visc4 &gt; 0.0) then
+         if (config_mom_del4 &gt; 0.0) then
            call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % divergence)
            call mpas_dmpar_exch_halo_field(domain % blocklist % state % time_levs(2) % state % vorticity)
          end if
@@ -802,7 +803,7 @@
 
          block =&gt; domain % blocklist
          do while (associated(block))
-            call ocn_tend_scalar(block % tend, block % state % time_levs(2) % state, block % diagnostics, block % mesh, dt)
+            call ocn_tend_tracer(block % tend, block % state % time_levs(2) % state, block % diagnostics, block % mesh, dt)
 
             block =&gt; block % next
          end do

Modified: branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_tracer_advection_std.F
===================================================================
--- branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_tracer_advection_std.F        2013-02-15 18:04:59 UTC (rev 2482)
+++ branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_tracer_advection_std.F        2013-02-19 17:55:19 UTC (rev 2483)
@@ -51,7 +51,7 @@
    ! Input: s - current model state
    !        grid - grid metadata
    !
-   ! Output: tend - computed scalar tendencies
+   ! Output: tend - computed tracer tendencies
    !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
 
       real (kind=RKIND), dimension(:,:,:), intent(inout) :: tend !&lt; Input/Output: Tracer tendency

Modified: branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_tracer_hmix_del2.F
===================================================================
--- branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_tracer_hmix_del2.F        2013-02-15 18:04:59 UTC (rev 2482)
+++ branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_tracer_hmix_del2.F        2013-02-19 17:55:19 UTC (rev 2483)
@@ -211,9 +211,9 @@
 
       del2on = .false.
 
-      if ( config_h_tracer_eddy_diff2 &gt; 0.0 ) then
+      if ( config_tracer_del2 &gt; 0.0 ) then
           del2On = .true.
-          eddyDiff2 = config_h_tracer_eddy_diff2
+          eddyDiff2 = config_tracer_del2
       endif
 
       if(.not.config_use_tracer_del2) del2on = .false.

Modified: branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_tracer_hmix_del4.F
===================================================================
--- branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_tracer_hmix_del4.F        2013-02-15 18:04:59 UTC (rev 2482)
+++ branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_tracer_hmix_del4.F        2013-02-19 17:55:19 UTC (rev 2483)
@@ -233,9 +233,9 @@
       err = 0
       del4on = .false.
 
-      if ( config_h_tracer_eddy_diff4 &gt; 0.0 ) then
+      if ( config_tracer_del4 &gt; 0.0 ) then
           del4On = .true.
-          eddyDiff4 = config_h_tracer_eddy_diff4
+          eddyDiff4 = config_tracer_del4
       endif
 
       if(.not.config_use_tracer_del4) del4on = .false.

Modified: branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_vel_hmix_del2.F
===================================================================
--- branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_vel_hmix_del2.F        2013-02-15 18:04:59 UTC (rev 2482)
+++ branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_vel_hmix_del2.F        2013-02-19 17:55:19 UTC (rev 2483)
@@ -201,13 +201,12 @@
 
    hmixDel2On = .false.
 
-   if ( config_h_mom_eddy_visc2 &gt; 0.0 ) then
+   if ( config_mom_del2 &gt; 0.0 ) then
       hmixDel2On = .true.
-      eddyVisc2 = config_h_mom_eddy_visc2
+      eddyVisc2 = config_mom_del2
 
-
       if (config_visc_vorticity_term) then
-         viscVortCoef = config_visc_vorticity_visc2_scale
+         viscVortCoef = config_vorticity_del2_scale
       else
          viscVortCoef = 0.0
       endif

Modified: branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_vel_hmix_del4.F
===================================================================
--- branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_vel_hmix_del4.F        2013-02-15 18:04:59 UTC (rev 2482)
+++ branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_vel_hmix_del4.F        2013-02-19 17:55:19 UTC (rev 2483)
@@ -261,11 +261,11 @@
 
    hmixDel4On = .false.
 
-   if ( config_h_mom_eddy_visc4 &gt; 0.0 ) then
+   if ( config_mom_del4 &gt; 0.0 ) then
       hmixDel4On = .true.
-      eddyVisc4 = config_h_mom_eddy_visc4
+      eddyVisc4 = config_mom_del4
       if (config_visc_vorticity_term) then
-         viscVortCoef = config_visc_vorticity_visc4_scale
+         viscVortCoef = config_vorticity_del4_scale
       else
          viscVortCoef = 0.0
       endif

Modified: branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_vel_hmix_leith.F
===================================================================
--- branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_vel_hmix_leith.F        2013-02-15 18:04:59 UTC (rev 2482)
+++ branches/ocean_projects/gm_implement/src/core_ocean/mpas_ocn_vel_hmix_leith.F        2013-02-19 17:55:19 UTC (rev 2483)
@@ -216,7 +216,7 @@
       hmixLeithOn = .true.
 
       if (config_visc_vorticity_term) then
-         viscVortCoef = config_visc_vorticity_visc2_scale
+         viscVortCoef = config_vorticity_del2_scale
       else
          viscVortCoef = 0.0
       endif

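The diffs in the momentum and tracer mixing modules above rename several horizontal-mixing namelist options. The fragment below lists the mapping and declares the new names in a namelist group so they can be read or echoed; the group name hmix and all of the numeric values are assumptions for this sketch, not values taken from the Registry.

program hmix_rename_demo
   implicit none
   integer, parameter :: RKIND = selected_real_kind(13)
   ! old option                            new option used in the diffs above
   ! config_h_mom_eddy_visc2           ->  config_mom_del2
   ! config_h_mom_eddy_visc4           ->  config_mom_del4
   ! config_h_tracer_eddy_diff2        ->  config_tracer_del2
   ! config_h_tracer_eddy_diff4        ->  config_tracer_del4
   ! config_visc_vorticity_visc2_scale ->  config_vorticity_del2_scale
   ! config_visc_vorticity_visc4_scale ->  config_vorticity_del4_scale
   ! config_h_ScaleWithMesh            ->  config_hmix_ScaleWithMesh
   real (kind=RKIND) :: config_mom_del2 = 1.0e3_RKIND        ! placeholder values,
   real (kind=RKIND) :: config_mom_del4 = 5.0e10_RKIND       ! not registry defaults
   real (kind=RKIND) :: config_tracer_del2 = 1.0e3_RKIND
   real (kind=RKIND) :: config_tracer_del4 = 0.0_RKIND
   real (kind=RKIND) :: config_vorticity_del2_scale = 1.0_RKIND
   real (kind=RKIND) :: config_vorticity_del4_scale = 1.0_RKIND
   logical :: config_hmix_ScaleWithMesh = .false.
   namelist /hmix/ config_mom_del2, config_mom_del4,                         &
                   config_tracer_del2, config_tracer_del4,                   &
                   config_vorticity_del2_scale, config_vorticity_del4_scale, &
                   config_hmix_ScaleWithMesh

   write (*, nml=hmix)        ! echo the group in namelist form
end program hmix_rename_demo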
Modified: branches/ocean_projects/gm_implement/src/framework/mpas_block_decomp.F
===================================================================
--- branches/ocean_projects/gm_implement/src/framework/mpas_block_decomp.F        2013-02-15 18:04:59 UTC (rev 2482)
+++ branches/ocean_projects/gm_implement/src/framework/mpas_block_decomp.F        2013-02-19 17:55:19 UTC (rev 2483)
@@ -165,7 +165,7 @@
            allocate(block_id(blocks_per_proc))
            allocate(block_start(blocks_per_proc))
            allocate(block_count(blocks_per_proc))
-   
+
            do i = 1, blocks_per_proc
              block_start = 0
              block_count = 0
@@ -436,11 +436,11 @@
        end if
      else
        blocks_per_proc = 0
-       do i = 1, total_blocks
+       do i = 0, total_blocks-1
          call mpas_get_owning_proc(dminfo, i, owning_proc)
          if(owning_proc == proc_number) then
            call mpas_get_local_block_id(dminfo, i, local_block_id)
-           blocks_per_proc = max(blocks_per_proc, local_block_id)
+           blocks_per_proc = max(blocks_per_proc, local_block_id+1)
          end if
        end do
      end if

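The mpas_block_decomp change above adjusts how blocks_per_proc is counted: global block IDs are 0-based, so the loop now runs from 0 to total_blocks-1 and the per-processor count is taken as local_block_id + 1. A toy example with an invented round-robin ownership rule (the real code queries mpas_get_owning_proc and mpas_get_local_block_id):

program block_count_demo
   implicit none
   integer, parameter :: total_blocks = 7, nProcs = 3, proc_number = 1
   integer :: i, owning_proc, local_block_id, blocks_per_proc

   blocks_per_proc = 0
   do i = 0, total_blocks-1                 ! global block IDs are 0-based
      owning_proc = mod(i, nProcs)          ! stand-in for mpas_get_owning_proc
      if (owning_proc == proc_number) then
         local_block_id = i / nProcs        ! stand-in for mpas_get_local_block_id
         blocks_per_proc = max(blocks_per_proc, local_block_id + 1)
      end if
   end do
   ! proc 1 owns global blocks 1 and 4 -> local IDs 0 and 1 -> 2 blocks
   print '(a,i0)', 'blocks_per_proc = ', blocks_per_proc
end program block_count_demo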
Modified: branches/ocean_projects/gm_implement/src/framework/mpas_dmpar.F
===================================================================
--- branches/ocean_projects/gm_implement/src/framework/mpas_dmpar.F        2013-02-15 18:04:59 UTC (rev 2482)
+++ branches/ocean_projects/gm_implement/src/framework/mpas_dmpar.F        2013-02-19 17:55:19 UTC (rev 2483)
@@ -25,6 +25,8 @@
       module procedure mpas_dmpar_alltoall_field1d_real
       module procedure mpas_dmpar_alltoall_field2d_real
       module procedure mpas_dmpar_alltoall_field3d_real
+      module procedure mpas_dmpar_alltoall_field4d_real
+      module procedure mpas_dmpar_alltoall_field5d_real
    end interface
 
    private :: mpas_dmpar_alltoall_field1d_integer
@@ -32,6 +34,8 @@
    private :: mpas_dmpar_alltoall_field1d_real
    private :: mpas_dmpar_alltoall_field2d_real
    private :: mpas_dmpar_alltoall_field3d_real
+   private :: mpas_dmpar_alltoall_field4d_real
+   private :: mpas_dmpar_alltoall_field5d_real
 
 
    interface mpas_dmpar_exch_halo_field
@@ -41,6 +45,8 @@
       module procedure mpas_dmpar_exch_halo_field1d_real
       module procedure mpas_dmpar_exch_halo_field2d_real
       module procedure mpas_dmpar_exch_halo_field3d_real
+      module procedure mpas_dmpar_exch_halo_field4d_real
+      module procedure mpas_dmpar_exch_halo_field5d_real
    end interface
 
    private :: mpas_dmpar_exch_halo_field1d_integer
@@ -49,6 +55,8 @@
    private :: mpas_dmpar_exch_halo_field1d_real
    private :: mpas_dmpar_exch_halo_field2d_real
    private :: mpas_dmpar_exch_halo_field3d_real
+   private :: mpas_dmpar_exch_halo_field4d_real
+   private :: mpas_dmpar_exch_halo_field5d_real
 
    interface mpas_dmpar_copy_field
       module procedure mpas_dmpar_copy_field1d_integer
@@ -57,6 +65,8 @@
       module procedure mpas_dmpar_copy_field1d_real
       module procedure mpas_dmpar_copy_field2d_real
       module procedure mpas_dmpar_copy_field3d_real
+      module procedure mpas_dmpar_copy_field4d_real
+      module procedure mpas_dmpar_copy_field5d_real
    end interface
 
    private :: mpas_dmpar_copy_field1d_integer
@@ -65,6 +75,8 @@
    private :: mpas_dmpar_copy_field1d_real
    private :: mpas_dmpar_copy_field2d_real
    private :: mpas_dmpar_copy_field3d_real
+   private :: mpas_dmpar_copy_field4d_real
+   private :: mpas_dmpar_copy_field5d_real
 
    contains
 
@@ -2810,7 +2822,610 @@
 
    end subroutine mpas_dmpar_alltoall_field3d_real!}}}
 
+   subroutine mpas_dmpar_alltoall_field4d_real(fieldIn, fieldout, haloLayersIn)!{{{
 
+     implicit none
+
+     type (field4dReal), pointer :: fieldIn
+     type (field4dReal), pointer :: fieldOut
+     integer, dimension(:), pointer, optional :: haloLayersIn
+
+     type (field4dReal), pointer :: fieldInPtr, fieldOutPtr
+     type (mpas_exchange_list), pointer :: exchListPtr, exchListPtr2
+     type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
+     type (dm_info), pointer :: dminfo
+
+     logical :: comm_list_found
+
+     integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+     integer :: nAdded, bufferOffset
+     integer :: mpi_ierr
+     integer :: iHalo, iBuffer, i, j, k, l
+     integer :: nHaloLayers
+     integer, dimension(:), pointer :: haloLayers
+
+     dminfo =&gt; fieldIn % block % domain % dminfo
+
+     if(present(haloLayersIn)) then
+       nHaloLayers = size(haloLayersIn)
+       allocate(haloLayers(nHaloLayers))
+       do iHalo = 1, nHaloLayers
+         haloLayers(iHalo) = haloLayersIn(iHalo)
+       end do
+     else
+       nHaloLayers = size(fieldIn % sendList % halos)
+       allocate(haloLayers(nHaloLayers))
+       do iHalo = 1, nHaloLayers
+         haloLayers(iHalo) = iHalo
+       end do
+     end if
+
+#ifdef _MPI
+     nullify(sendList)
+     nullify(recvList)
+
+     ! Set up receive lists.
+     do iHalo = 1, nHaloLayers
+       fieldOutPtr =&gt; fieldOut
+       do while(associated(fieldOutPtr))
+         exchListPtr =&gt; fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+         do while(associated(exchListPtr))
+           comm_list_found = .false.
+  
+           ! Search for an already created commList to this processor.
+           commListPtr =&gt; recvList
+           do while(associated(commListPtr))
+             if(commListPtr % procID == exchListPtr % endPointID) then
+               commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3)
+               comm_list_found = .true.
+               exit
+             end if
+  
+             commListPtr =&gt; commListPtr % next
+           end do
+  
+           ! If no comm list exists, create a new one.
+           if(.not. comm_list_found) then
+             if(.not.associated(recvList)) then
+               allocate(recvList)
+               nullify(recvList % next)
+               commListPtr =&gt; recvList
+             else
+               commListPtr =&gt; recvList
+               commListPtr2 =&gt; commListPtr % next
+               do while(associated(commListPtr2))
+                 commListPtr =&gt; commListPtr % next
+                 commListPtr2 =&gt; commListPtr % next
+               end do
+
+               allocate(commListPtr % next)
+               commListPtr =&gt; commListPtr % next
+               nullify(commListPtr % next)
+             end if
+  
+             commListPtr % procID = exchListPtr % endPointID
+             commListPtr % nList = exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3)
+           end if
+
+           exchListPtr =&gt; exchListPtr % next
+         end do
+  
+         fieldOutPtr =&gt; fieldOutPtr % next
+       end do
+     end do
+
+     ! Determine size of receive list buffers.
+     commListPtr =&gt; recvList
+     do while(associated(commListPtr))
+
+       bufferOffset = 0
+       do iHalo = 1, nHaloLayers
+         nAdded = 0
+         fieldOutPtr =&gt; fieldOut
+         do while(associated(fieldOutPtr))
+           exchListPtr =&gt; fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+           do while(associated(exchListPtr))
+             if(exchListPtr % endPointID == commListPtr % procID) then
+               nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3))
+             end if
+             exchListPtr =&gt; exchListPtr % next
+           end do
+  
+           fieldOutPtr =&gt; fieldOutPtr % next
+         end do
+         bufferOffset = bufferOffset + nAdded
+       end do
+       commListPtr % nList = bufferOffset
+
+       commListPtr =&gt; commListPtr % next
+     end do
+
+     ! Allocate buffers for receives, and initiate mpi_irecv calls.
+     commListPtr =&gt; recvList
+     do while(associated(commListPtr))
+       allocate(commListPtr % rbuffer(commListPtr % nList))
+       nullify(commListPtr % ibuffer)
+       call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+       commListPtr =&gt; commListPtr % next
+     end do
+
+     ! Setup send lists, and determine the size of their buffers.
+     do iHalo = 1, nHaloLayers
+       fieldInPtr =&gt; fieldIn
+       do while(associated(fieldInPtr))
+         exchListPtr =&gt; fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+         do while(associated(exchListPtr))
+           comm_list_found = .false.
+  
+           ! Search for an already created commList to this processor.
+           commListPtr =&gt; sendList
+           do while(associated(commListPtr))
+             if(commListPtr % procID == exchListPtr % endPointID) then
+               commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldInPtr % dimSizes(3)
+               comm_list_found = .true.
+               exit
+             end if
+  
+             commListPtr =&gt; commListPtr % next
+           end do
+  
+           ! If no comm list exists, create a new one.
+           if(.not. comm_list_found) then
+             if(.not.associated(sendList)) then
+               allocate(sendList)
+               nullify(sendList % next)
+               commListPtr =&gt; sendList
+             else
+               commListPtr =&gt; sendList
+               commListPtr2 =&gt; commListPtr % next
+               do while(associated(commListPtr2))
+                 commListPtr =&gt; commListPtr % next
+                 commListPtr2 =&gt; commListPtr % next
+               end do
+    
+               allocate(commListPtr % next)
+               commListPtr =&gt; commListPtr % next
+               nullify(commListPtr % next)
+             end if
+             commListPtr % procID = exchListPtr % endPointID
+             commListPtr % nList = exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldInPtr % dimSizes(3)
+           end if
+  
+           exchListPtr =&gt; exchListPtr % next
+         end do
+  
+         fieldInPtr =&gt; fieldInPtr % next
+       end do
+     end do
+
+     ! Allocate sendLists, copy data into buffer, and initiate mpi_isends
+     commListPtr =&gt; sendList
+     do while(associated(commListPtr))
+       allocate(commListPtr % rbuffer(commListPtr % nList))
+       nullify(commListPtr % ibuffer)
+       bufferOffset = 0
+       do iHalo = 1, nHaloLayers
+         nAdded = 0
+         fieldInPtr =&gt; fieldIn
+         do while(associated(fieldInPtr))
+           exchListPtr =&gt; fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+           do while(associated(exchListPtr))
+             if(exchListPtr % endPointID == commListPtr % procID) then
+               do i = 1, exchListPtr % nList
+                 do j = 1, fieldInPtr % dimSizes(3)
+                   do k = 1, fieldInPtr % dimSizes(2)
+                     do l = 1, fieldInPtr % dimSizes(1)
+                       iBuffer = (exchListPtr % destList(i)-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldInPtr % dimSizes(3) &amp;
+                               + (j-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) &amp;
+                               + (k-1) * fieldInPtr % dimSizes(1) + l + bufferOffset
+                       commListPtr % rbuffer(iBuffer) = fieldInPtr % array(l, k, j, exchListPtr % srcList(i))
+                       nAdded = nAdded + 1
+                     end do
+                   end do
+                 end do
+               end do
+             end if
+  
+             exchListPtr =&gt; exchListPtr % next
+           end do
+  
+           fieldInPtr =&gt; fieldInPtr % next
+         end do
+         bufferOffset = bufferOffset + nAdded
+       end do
+
+     call MPI_Isend(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, &amp;
+                      commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
+
+       commListPtr =&gt; commListPtr % next
+     end do
+
+#endif     
+
+     ! Handle local copies. If MPI is off, only local copies are performed.
+     do iHalo = 1, nHaloLayers
+       fieldInPtr =&gt; fieldIn
+       do while(associated(fieldInPtr))
+         exchListPtr =&gt; fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList
+         do while(associated(exchListPtr))
+           fieldOutPtr =&gt; fieldOut
+           do while(associated(fieldOutPtr))
+             if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then
+               do i = 1, exchListPtr % nList
+                 fieldOutPtr % array(:, :, :, exchListPtr % destList(i)) = fieldInPtr % array(:, :, :, exchListPtr % srcList(i))
+               end do
+             end if
+             fieldOutPtr =&gt; fieldOutPtr % next
+           end do
+  
+           exchListPtr =&gt; exchListPtr % next
+         end do
+         fieldInPtr =&gt; fieldInPtr % next
+       end do
+     end do
+
+#ifdef _MPI
+     ! Wait for MPI_Irecv's to finish, and unpack data.
+     commListPtr =&gt; recvList
+     do while(associated(commListPtr))
+       call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+
+       bufferOffset = 0
+       do iHalo = 1, nHaloLayers
+         nAdded = 0
+         fieldOutPtr =&gt; fieldOut
+         do while(associated(fieldOutPtr))
+           exchListPtr =&gt; fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+           do while(associated(exchListPtr))
+             if(exchListPtr % endPointID == commListPtr % procID) then
+               do i = 1, exchListPtr % nList
+                 do j = 1, fieldOutPtr % dimSizes(3)
+                   do k = 1, fieldOutPtr % dimSizes(2)
+                     do l = 1, fieldOutPtr % dimSizes(1)
+                       iBuffer = (exchListPtr % srcList(i)-1) * fieldOutPtr % dimSizes(3) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(1)  &amp;
+                               + (j-1) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2)  &amp;
+                               + (k-1) * fieldOutPtr % dimSizes(1) + l + bufferOffset
+                       fieldOutPtr % array(l, k, j, exchListPtr % destList(i)) = commListPtr % rbuffer(iBuffer)
+                     end do
+                   end do
+                 end do
+               end do
+               nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3))
+             end if
+             exchListPtr =&gt; exchListPtr % next
+           end do
+  
+           fieldOutPtr =&gt; fieldOutPtr % next
+         end do
+         bufferOffset = bufferOffset + nAdded
+       end do
+
+       commListPtr =&gt; commListPtr % next
+     end do
+
+     ! Wait for MPI_Isend's to finish.
+     commListPtr =&gt; sendList
+     do while(associated(commListPtr))
+       call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+       commListPtr =&gt; commListPtr % next
+     end do
+
+     ! Destroy commLists.
+     call mpas_dmpar_destroy_communication_list(sendList)
+     call mpas_dmpar_destroy_communication_list(recvList)
+#endif
+
+     deallocate(haloLayers)
+
+   end subroutine mpas_dmpar_alltoall_field4d_real!}}}
+
+   subroutine mpas_dmpar_alltoall_field5d_real(fieldIn, fieldOut, haloLayersIn)!{{{
+
+     implicit none
+
+     type (field5dReal), pointer :: fieldIn
+     type (field5dReal), pointer :: fieldOut
+     integer, dimension(:), pointer, optional :: haloLayersIn
+
+     type (field5dReal), pointer :: fieldInPtr, fieldOutPtr
+     type (mpas_exchange_list), pointer :: exchListPtr, exchListPtr2
+     type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
+     type (dm_info), pointer :: dminfo
+
+     logical :: comm_list_found
+
+     integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+     integer :: nAdded, bufferOffset
+     integer :: mpi_ierr
+     integer :: iHalo, iBuffer, i, j, k, l, m
+     integer :: nHaloLayers
+     integer, dimension(:), pointer :: haloLayers
+
+     dminfo =&gt; fieldIn % block % domain % dminfo
+
+     if(present(haloLayersIn)) then
+       nHaloLayers = size(haloLayersIn)
+       allocate(haloLayers(nHaloLayers))
+       do iHalo = 1, nHaloLayers
+         haloLayers(iHalo) = haloLayersIn(iHalo)
+       end do
+     else
+       nHaloLayers = size(fieldIn % sendList % halos)
+       allocate(haloLayers(nHaloLayers))
+       do iHalo = 1, nHaloLayers
+         haloLayers(iHalo) = iHalo
+       end do
+     end if
+
+#ifdef _MPI
+     nullify(sendList)
+     nullify(recvList)
+
+     ! Setup receive lists.
+     do iHalo = 1, nHaloLayers
+       fieldOutPtr =&gt; fieldOut
+       do while(associated(fieldOutPtr))
+         exchListPtr =&gt; fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+         do while(associated(exchListPtr))
+           comm_list_found = .false.
+  
+           ! Search for an already created commList to this processor.
+           commListPtr =&gt; recvList
+           do while(associated(commListPtr))
+             if(commListPtr % procID == exchListPtr % endPointID) then
+               commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) * fieldOutPtr % dimSizes(4)
+               comm_list_found = .true.
+               exit
+             end if
+  
+             commListPtr =&gt; commListPtr % next
+           end do
+  
+           ! If no comm list exists, create a new one.
+           if(.not. comm_list_found) then
+             if(.not.associated(recvList)) then
+               allocate(recvList)
+               nullify(recvList % next)
+               commListPtr =&gt; recvList
+             else
+               commListPtr =&gt; recvList
+               commListPtr2 =&gt; commListPtr % next
+               do while(associated(commListPtr2))
+                 commListPtr =&gt; commListPtr % next
+                 commListPtr2 =&gt; commListPtr % next
+               end do
+
+               allocate(commListPtr % next)
+               commListPtr =&gt; commListPtr % next
+               nullify(commListPtr % next)
+             end if
+  
+             commListPtr % procID = exchListPtr % endPointID
+             commListPtr % nList = exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) * fieldOutPtr % dimSizes(4)
+           end if
+
+           exchListPtr =&gt; exchListPtr % next
+         end do
+  
+         fieldOutPtr =&gt; fieldOutPtr % next
+       end do
+     end do
+
+     ! Determine size of receive list buffers.
+     commListPtr =&gt; recvList
+     do while(associated(commListPtr))
+
+       bufferOffset = 0
+       do iHalo = 1, nHaloLayers
+         nAdded = 0
+         fieldOutPtr =&gt; fieldOut
+         do while(associated(fieldOutPtr))
+           exchListPtr =&gt; fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+           do while(associated(exchListPtr))
+             if(exchListPtr % endPointID == commListPtr % procID) then
+               nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) * fieldOutPtr % dimSizes(4))
+             end if
+             exchListPtr =&gt; exchListPtr % next
+           end do
+  
+           fieldOutPtr =&gt; fieldOutPtr % next
+         end do
+         bufferOffset = bufferOffset + nAdded
+       end do
+       commListPtr % nList = bufferOffset
+
+       commListPtr =&gt; commListPtr % next
+     end do
+
+     ! Allocate buffers for receives, and initiate mpi_irecv calls.
+     commListPtr =&gt; recvList
+     do while(associated(commListPtr))
+       allocate(commListPtr % rbuffer(commListPtr % nList))
+       nullify(commListPtr % ibuffer)
+       call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+       commListPtr =&gt; commListPtr % next
+     end do
+
+     ! Setup send lists, and determine the size of their buffers.
+     do iHalo = 1, nHaloLayers
+       fieldInPtr =&gt; fieldIn
+       do while(associated(fieldInPtr))
+         exchListPtr =&gt; fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+         do while(associated(exchListPtr))
+           comm_list_found = .false.
+  
+           ! Search for an already created commList to this processor.
+           commListPtr =&gt; sendList
+           do while(associated(commListPtr))
+             if(commListPtr % procID == exchListPtr % endPointID) then
+               commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldInPtr % dimSizes(3) * fieldInPtr % dimSizes(4)
+               comm_list_found = .true.
+               exit
+             end if
+  
+             commListPtr =&gt; commListPtr % next
+           end do
+  
+           ! If no comm list exists, create a new one.
+           if(.not. comm_list_found) then
+             if(.not.associated(sendList)) then
+               allocate(sendList)
+               nullify(sendList % next)
+               commListPtr =&gt; sendList
+             else
+               commListPtr =&gt; sendList
+               commListPtr2 =&gt; commListPtr % next
+               do while(associated(commListPtr2))
+                 commListPtr =&gt; commListPtr % next
+                 commListPtr2 =&gt; commListPtr % next
+               end do
+    
+               allocate(commListPtr % next)
+               commListPtr =&gt; commListPtr % next
+               nullify(commListPtr % next)
+             end if
+             commListPtr % procID = exchListPtr % endPointID
+             commListPtr % nList = exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldInPtr % dimSizes(3) * fieldInPtr % dimSizes(4)
+           end if
+  
+           exchListPtr =&gt; exchListPtr % next
+         end do
+  
+         fieldInPtr =&gt; fieldInPtr % next
+       end do
+     end do
+
+     ! Allocate sendLists, copy data into buffer, and initiate mpi_isends
+     commListPtr =&gt; sendList
+     do while(associated(commListPtr))
+       allocate(commListPtr % rbuffer(commListPtr % nList))
+       nullify(commListPtr % ibuffer)
+       bufferOffset = 0
+       do iHalo = 1, nHaloLayers
+         nAdded = 0
+         fieldInPtr =&gt; fieldIn
+         do while(associated(fieldInPtr))
+           exchListPtr =&gt; fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+           do while(associated(exchListPtr))
+             if(exchListPtr % endPointID == commListPtr % procID) then
+               do i = 1, exchListPtr % nList
+                 do j = 1, fieldInPtr % dimSizes(4)
+                   do k = 1, fieldInPtr % dimSizes(3)
+                     do l = 1, fieldInPtr % dimSizes(2)
+                       do m = 1, fieldInPtr % dimSizes(1)
+                         iBuffer = (exchListPtr % destList(i)-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldInPtr % dimSizes(3) * fieldInPtr % dimSizes(4) &amp;
+                                 + (j-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldInPtr % dimSizes(3) &amp;
+                                 + (k-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) &amp;
+                                 + (l-1) * fieldInPtr % dimSizes(1) + m + bufferOffset
+                         commListPtr % rbuffer(iBuffer) = fieldInPtr % array(m, l, k, j, exchListPtr % srcList(i))
+                         nAdded = nAdded + 1
+                       end do
+                     end do
+                   end do
+                 end do
+               end do
+             end if
+  
+             exchListPtr =&gt; exchListPtr % next
+           end do
+  
+           fieldInPtr =&gt; fieldInPtr % next
+         end do
+         bufferOffset = bufferOffset + nAdded
+       end do
+
+     call MPI_Isend(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, &amp;
+                      commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
+
+       commListPtr =&gt; commListPtr % next
+     end do
+
+#endif     
+
+     ! Handle local copies. If MPI is off, only local copies are performed.
+     do iHalo = 1, nHaloLayers
+       fieldInPtr =&gt; fieldIn
+       do while(associated(fieldInPtr))
+         exchListPtr =&gt; fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList
+         do while(associated(exchListPtr))
+           fieldOutPtr =&gt; fieldOut
+           do while(associated(fieldOutPtr))
+             if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then
+               do i = 1, exchListPtr % nList
+                 fieldOutPtr % array(:, :, :, :, exchListPtr % destList(i)) = fieldInPtr % array(:, :, :, :, exchListPtr % srcList(i))
+               end do
+             end if
+             fieldOutPtr =&gt; fieldOutPtr % next
+           end do
+  
+           exchListPtr =&gt; exchListPtr % next
+         end do
+         fieldInPtr =&gt; fieldInPtr % next
+       end do
+     end do
+
+#ifdef _MPI
+     ! Wait for MPI_Irecv's to finish, and unpack data.
+     commListPtr =&gt; recvList
+     do while(associated(commListPtr))
+       call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+
+       bufferOffset = 0
+       do iHalo = 1, nHaloLayers
+         nAdded = 0
+         fieldOutPtr =&gt; fieldOut
+         do while(associated(fieldOutPtr))
+           exchListPtr =&gt; fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+           do while(associated(exchListPtr))
+             if(exchListPtr % endPointID == commListPtr % procID) then
+               do i = 1, exchListPtr % nList
+                 do j = 1, fieldOutPtr % dimSizes(4)
+                   do k = 1, fieldOutPtr % dimSizes(3)
+                     do l = 1, fieldOutPtr % dimSizes(2)
+                       do m = 1, fieldOutPtr % dimSizes(1)
+                         iBuffer = (exchListPtr % srcList(i)-1) * fieldOutPtr % dimSizes(4) * fieldOutPtr % dimSizes(3) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(1) &amp;
+                                 + (j-1) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) &amp;
+                                 + (k-1) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) &amp;
+                                 + (l-1) * fieldOutPtr % dimSizes(1) + m + bufferOffset
+                         fieldOutPtr % array(m, l, k, j, exchListPtr % destList(i)) = commListPtr % rbuffer(iBuffer)
+                       end do
+                     end do
+                   end do
+                 end do
+               end do
+               nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) * fieldOutPtr % dimSizes(4))
+             end if
+             exchListPtr =&gt; exchListPtr % next
+           end do
+  
+           fieldOutPtr =&gt; fieldOutPtr % next
+         end do
+         bufferOffset = bufferOffset + nAdded
+       end do
+
+       commListPtr =&gt; commListPtr % next
+     end do
+
+     ! Wait for MPI_Isend's to finish.
+     commListPtr =&gt; sendList
+     do while(associated(commListPtr))
+       call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+       commListPtr =&gt; commListPtr % next
+     end do
+
+     ! Destroy commLists.
+     call mpas_dmpar_destroy_communication_list(sendList)
+     call mpas_dmpar_destroy_communication_list(recvList)
+#endif
+
+     deallocate(haloLayers)
+
+   end subroutine mpas_dmpar_alltoall_field5d_real!}}}
+
+
+
    subroutine mpas_dmpar_exch_halo_field1d_integer(field, haloLayersIn)!{{{
 
       implicit none
@@ -4507,6 +5122,602 @@
 
    end subroutine mpas_dmpar_exch_halo_field3d_real!}}}
 
+   subroutine mpas_dmpar_exch_halo_field4d_real(field, haloLayersIn)!{{{
+
+      implicit none
+
+      type (field4dReal), pointer :: field
+      integer, dimension(:), intent(in), optional :: haloLayersIn
+
+      type (dm_info), pointer :: dminfo
+      type (field4dReal), pointer :: fieldCursor, fieldCursor2
+      type (mpas_exchange_list), pointer :: exchListPtr
+      type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
+      integer :: mpi_ierr
+      integer :: nHaloLayers, iHalo, i, j, k, l
+      integer :: bufferOffset, nAdded
+      integer, dimension(:), pointer :: haloLayers
+
+      logical :: comm_list_found
+
+      do i = 1, 4
+        if(field % dimSizes(i) &lt;= 0) then
+          return
+        end if
+      end do
+
+      dminfo =&gt; field % block % domain % dminfo
+
+      if(present(haloLayersIn)) then
+        nHaloLayers = size(haloLayersIn)
+        allocate(haloLayers(nHaloLayers))
+        do iHalo = 1, nHaloLayers
+          haloLayers(iHalo) = haloLayersIn(iHalo)
+        end do
+      else
+        nHaloLayers = size(field % sendList % halos)
+        allocate(haloLayers(nHaloLayers))
+        do iHalo = 1, nHaloLayers
+          haloLayers(iHalo) = iHalo
+        end do
+      end if
+
+#ifdef _MPI
+      ! Allocate communication lists, and setup dead header nodes
+      allocate(sendList)
+      nullify(sendList % next)
+      sendList % procID = -1
+      sendList % nList = 0
+
+      allocate(recvList)
+      nullify(recvList % next)
+      recvList % procID = -1
+      recvList % nList = 0
+
+      dminfo   = field % block % domain % dminfo
+
+      ! Determine size of buffers for communication lists
+      fieldCursor =&gt; field
+      do while(associated(fieldCursor))
+
+        ! Need to aggregate across halo layers
+        do iHalo = 1, nHaloLayers
+          
+          ! Determine size from send lists
+          exchListPtr =&gt; fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+          do while(associated(exchListPtr))
+            comm_list_found = .false.
+
+            commListPtr =&gt; sendList
+            do while(associated(commListPtr))
+              if(commListPtr % procID == exchListPtr % endPointId) then
+                comm_list_found = .true.
+                commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3)
+                exit
+              end if
+
+              commListPtr =&gt; commListPtr % next
+            end do
+
+            if(.not. comm_list_found) then
+              commListPtr =&gt; sendList
+              commListPtr2 =&gt; commListPtr % next
+              do while(associated(commListPtr2))
+                commListPtr =&gt; commListPtr % next
+                commListPtr2 =&gt; commListPtr % next
+              end do
+
+              allocate(commListPtr % next)
+              commListPtr =&gt; commListPtr % next
+              nullify(commListPtr % next)
+              commListPtr % procID = exchListPtr % endPointID
+              commListPtr % nList = exchListPtr % nList * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3)
+            end if
+
+            exchListPtr =&gt; exchListPtr % next
+          end do
+
+          ! Setup recv lists
+          exchListPtr =&gt; fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+          do while(associated(exchListPtr))
+            comm_list_found = .false.
+
+            commListPtr =&gt; recvList
+            do while(associated(commListPtr))
+              if(commListPtr % procID == exchListPtr % endPointId) then
+                comm_list_found = .true.
+                exit
+              end if
+
+              commListPtr =&gt; commListPtr % next
+            end do
+
+            if(.not. comm_list_found) then
+              commListPtr =&gt; recvList
+              commListPtr2 =&gt; commListPtr % next
+              do while(associated(commListPtr2))
+                commListPtr =&gt; commListPtr % next
+                commListPtr2 =&gt; commListPtr % next
+              end do
+
+              allocate(commListPtr % next)
+              commListPtr =&gt; commListPtr % next
+              nullify(commListPtr % next)
+              commListPtr % procID = exchListPtr % endPointID
+            end if
+
+            exchListPtr =&gt; exchListPtr % next
+          end do
+        end do
+
+        fieldCursor =&gt; fieldCursor % next
+      end do
+
+      ! Remove the dead head pointer on send and recv list
+      commListPtr =&gt; sendList
+      sendList =&gt; sendList % next
+      deallocate(commListPtr)
+
+      commListPtr =&gt; recvList
+      recvList =&gt; recvList % next
+      deallocate(commListPtr)
+
+      ! Determine size of recv lists
+      commListPtr =&gt; recvList
+      do while(associated(commListPtr))
+        bufferOffset = 0
+        do iHalo = 1, nHaloLayers
+          nAdded = 0
+          fieldCursor =&gt; field
+          do while(associated(fieldCursor))
+            exchListPtr =&gt; fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+            do while(associated(exchListPtr))
+              if(exchListPtr % endPointID == commListPtr % procID) then
+                nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3))
+              end if
+              exchListPtr =&gt; exchListPtr % next
+            end do
+            
+            fieldCursor =&gt; fieldCursor % next
+          end do
+          bufferOffset = bufferOffset + nAdded
+        end do
+        commListPtr % nList = bufferOffset
+
+        commListPtr =&gt; commListPtr % next
+      end do
+
+      ! Allocate space in recv lists, and initiate mpi_irecv calls
+      commListPtr =&gt; recvList
+      do while(associated(commListPtr))
+        allocate(commListPtr % rbuffer(commListPtr % nList))
+        nullify(commListPtr % ibuffer)
+        call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+
+        commListPtr =&gt; commListPtr % next
+      end do
+
+      ! Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls
+      commListPtr =&gt; sendList
+      do while(associated(commListPtr))
+        allocate(commListPtr % rbuffer(commListPtr % nList))
+        nullify(commListPtr % ibuffer)
+        bufferOffset = 0
+        do iHalo = 1, nHaloLayers
+          nAdded = 0
+          fieldCursor =&gt; field
+          do while(associated(fieldCursor))
+            exchListPtr =&gt; fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+            do while(associated(exchListPtr))
+              if(exchListPtr % endPointID == commListPtr % procID) then
+                do  i = 1, exchListPtr % nList
+                  do j = 1, fieldCursor % dimSizes(3)
+                    do k = 1, fieldCursor % dimSizes(2)
+                      do l = 1, fieldCursor % dimSizes(1)
+                        commListPtr % rbuffer((exchListPtr % destList(i)-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) &amp;
+                            + (j-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) &amp;
+                            + (k-1) * fieldCursor % dimSizes(1) + l  + bufferOffset) &amp;
+                            = fieldCursor % array(l, k, j, exchListPtr % srcList(i))
+                        nAdded = nAdded + 1
+                      end do
+                    end do
+                  end do
+                end do
+              end if
+
+              exchListPtr =&gt; exchListPtr % next
+            end do
+
+            fieldCursor =&gt; fieldCursor % next
+          end do
+          bufferOffset = bufferOffset + nAdded
+        end do
+
+        call MPI_Isend(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
+        commListPtr =&gt; commListPtr % next
+      end do
+#endif
+
+      ! Handle local copy. If MPI is off, then only local copies are performed.
+      fieldCursor =&gt; field
+      do while(associated(fieldCursor))
+        do iHalo = 1, nHaloLayers
+          exchListPtr =&gt; fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList
+
+          do while(associated(exchListPtr))
+            fieldCursor2 =&gt; field
+            do while(associated(fieldCursor2))
+              if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then
+                do i = 1, exchListPtr % nList
+                  fieldCursor2 % array(:, :, :, exchListPtr % destList(i)) = fieldCursor % array(:, :, :, exchListPtr % srcList(i))
+                end do
+              end if
+              
+              fieldCursor2 =&gt; fieldCursor2 % next
+            end do
+
+            exchListPtr =&gt; exchListPtr % next
+          end do
+        end do
+
+        fieldCursor =&gt; fieldCursor % next
+      end do
+
+#ifdef _MPI
+
+      ! Wait for mpi_irecv to finish, and unpack data from buffer
+      commListPtr =&gt; recvList
+      do while(associated(commListPtr))
+        call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+        bufferOffset = 0
+        do iHalo = 1, nHaloLayers
+          nAdded = 0
+          fieldCursor =&gt; field
+          do while(associated(fieldCursor))
+            exchListPtr =&gt; fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+            do while(associated(exchListPtr))
+              if(exchListPtr % endPointID == commListPtr % procID) then
+                do i = 1, exchListPtr % nList
+                  do j = 1, fieldCursor % dimSizes(3)
+                    do k = 1, fieldCursor % dimSizes(2)
+                      do l = 1, fieldCursor % dimSizes(1)
+                        fieldCursor % array(l, k, j, exchListPtr % destList(i)) = commListPtr % rbuffer((exchListPtr % srcList(i)-1)&amp;
+                                                                               *fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) *fieldCursor % dimSizes(3)&amp;
+                                                                             + (j-1)*fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) &amp;
+                                                                             + (k-1)*fieldCursor % dimSizes(1) + l + bufferOffset)
+                      end do
+                    end do
+                  end do
+                end do
+                nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3))
+              end if
+              exchListPtr =&gt; exchListPtr % next
+            end do
+            
+            fieldCursor =&gt; fieldCursor % next
+          end do
+          bufferOffset = bufferOffset + nAdded
+        end do
+        commListPtr =&gt; commListPtr % next
+      end do
+
+      ! wait for mpi_isend to finish.
+      commListPtr =&gt; sendList
+      do while(associated(commListPtr))
+        call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+        commListPtr =&gt; commListPtr % next
+      end do
+
+     ! Destroy commLists.
+     call mpas_dmpar_destroy_communication_list(sendList)
+     call mpas_dmpar_destroy_communication_list(recvList)
+#endif
+
+     deallocate(haloLayers)
+
+   end subroutine mpas_dmpar_exch_halo_field4d_real!}}}
+
+   subroutine mpas_dmpar_exch_halo_field5d_real(field, haloLayersIn)!{{{
+
+      implicit none
+
+      type (field5dReal), pointer :: field
+      integer, dimension(:), intent(in), optional :: haloLayersIn
+
+      type (dm_info), pointer :: dminfo
+      type (field5dReal), pointer :: fieldCursor, fieldCursor2
+      type (mpas_exchange_list), pointer :: exchListPtr
+      type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
+      integer :: mpi_ierr
+      integer :: nHaloLayers, iHalo, i, j, k, l, m
+      integer :: bufferOffset, nAdded
+      integer, dimension(:), pointer :: haloLayers
+
+      logical :: comm_list_found
+
+      do i = 1, 5
+        if(field % dimSizes(i) &lt;= 0) then
+          return
+        end if
+      end do
+
+      dminfo =&gt; field % block % domain % dminfo
+
+      if(present(haloLayersIn)) then
+        nHaloLayers = size(haloLayersIn)
+        allocate(haloLayers(nHaloLayers))
+        do iHalo = 1, nHaloLayers
+          haloLayers(iHalo) = haloLayersIn(iHalo)
+        end do
+      else
+        nHaloLayers = size(field % sendList % halos)
+        allocate(haloLayers(nHaloLayers))
+        do iHalo = 1, nHaloLayers
+          haloLayers(iHalo) = iHalo
+        end do
+      end if
+
+#ifdef _MPI
+      ! Allocate communication lists, and setup dead header nodes
+      allocate(sendList)
+      nullify(sendList % next)
+      sendList % procID = -1
+      sendList % nList = 0
+
+      allocate(recvList)
+      nullify(recvList % next)
+      recvList % procID = -1
+      recvList % nList = 0
+
+      dminfo   = field % block % domain % dminfo
+
+      ! Determine size of buffers for communication lists
+      fieldCursor =&gt; field
+      do while(associated(fieldCursor))
+
+        ! Need to aggregate across halo layers
+        do iHalo = 1, nHaloLayers
+          
+          ! Determine size from send lists
+          exchListPtr =&gt; fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+          do while(associated(exchListPtr))
+            comm_list_found = .false.
+
+            commListPtr =&gt; sendList
+            do while(associated(commListPtr))
+              if(commListPtr % procID == exchListPtr % endPointId) then
+                comm_list_found = .true.
+                commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) * fieldCursor % dimSizes(4)
+                exit
+              end if
+
+              commListPtr =&gt; commListPtr % next
+            end do
+
+            if(.not. comm_list_found) then
+              commListPtr =&gt; sendList
+              commListPtr2 =&gt; commListPtr % next
+              do while(associated(commListPtr2))
+                commListPtr =&gt; commListPtr % next
+                commListPtr2 =&gt; commListPtr % next
+              end do
+
+              allocate(commListPtr % next)
+              commListPtr =&gt; commListPtr % next
+              nullify(commListPtr % next)
+              commListPtr % procID = exchListPtr % endPointID
+              commListPtr % nList = exchListPtr % nList * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) * fieldCursor % dimSizes(4)
+            end if
+
+            exchListPtr =&gt; exchListPtr % next
+          end do
+
+          ! Setup recv lists
+          exchListPtr =&gt; fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+          do while(associated(exchListPtr))
+            comm_list_found = .false.
+
+            commListPtr =&gt; recvList
+            do while(associated(commListPtr))
+              if(commListPtr % procID == exchListPtr % endPointId) then
+                comm_list_found = .true.
+                exit
+              end if
+
+              commListPtr =&gt; commListPtr % next
+            end do
+
+            if(.not. comm_list_found) then
+              commListPtr =&gt; recvList
+              commListPtr2 =&gt; commListPtr % next
+              do while(associated(commListPtr2))
+                commListPtr =&gt; commListPtr % next
+                commListPtr2 =&gt; commListPtr % next
+              end do
+
+              allocate(commListPtr % next)
+              commListPtr =&gt; commListPtr % next
+              nullify(commListPtr % next)
+              commListPtr % procID = exchListPtr % endPointID
+            end if
+
+            exchListPtr =&gt; exchListPtr % next
+          end do
+        end do
+
+        fieldCursor =&gt; fieldCursor % next
+      end do
+
+      ! Remove the dead head pointer on send and recv list
+      commListPtr =&gt; sendList
+      sendList =&gt; sendList % next
+      deallocate(commListPtr)
+
+      commListPtr =&gt; recvList
+      recvList =&gt; recvList % next
+      deallocate(commListPtr)
+
+      ! Determine size of recv lists
+      commListPtr =&gt; recvList
+      do while(associated(commListPtr))
+        bufferOffset = 0
+        do iHalo = 1, nHaloLayers
+          nAdded = 0
+          fieldCursor =&gt; field
+          do while(associated(fieldCursor))
+            exchListPtr =&gt; fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+            do while(associated(exchListPtr))
+              if(exchListPtr % endPointID == commListPtr % procID) then
+                nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) * fieldCursor % dimSizes(4))
+              end if
+              exchListPtr =&gt; exchListPtr % next
+            end do
+            
+            fieldCursor =&gt; fieldCursor % next
+          end do
+          bufferOffset = bufferOffset + nAdded
+        end do
+        commListPtr % nList = bufferOffset
+
+        commListPtr =&gt; commListPtr % next
+      end do
+
+      ! Allocate space in recv lists, and initiate mpi_irecv calls
+      commListPtr =&gt; recvList
+      do while(associated(commListPtr))
+        allocate(commListPtr % rbuffer(commListPtr % nList))
+        nullify(commListPtr % ibuffer)
+        call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+
+        commListPtr =&gt; commListPtr % next
+      end do
+
+      ! Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls
+      commListPtr =&gt; sendList
+      do while(associated(commListPtr))
+        allocate(commListPtr % rbuffer(commListPtr % nList))
+        nullify(commListPtr % ibuffer)
+        bufferOffset = 0
+        do iHalo = 1, nHaloLayers
+          nAdded = 0
+          fieldCursor =&gt; field
+          do while(associated(fieldCursor))
+            exchListPtr =&gt; fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+            do while(associated(exchListPtr))
+              if(exchListPtr % endPointID == commListPtr % procID) then
+                do  i = 1, exchListPtr % nList
+                  do j = 1, fieldCursor % dimSizes(4)
+                    do k = 1, fieldCursor % dimSizes(3)
+                      do l = 1, fieldCursor % dimSizes(2)
+                        do m = 1, fieldCursor % dimSizes(1)
+                          commListPtr % rbuffer((exchListPtr % destList(i)-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) * fieldCursor % dimSizes(4) &amp;
+                              + (j-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) &amp;
+                              + (k-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) &amp;
+                              + (l-1) * fieldCursor % dimSizes(1) + m + bufferOffset) &amp;
+                              = fieldCursor % array(m, l, k, j, exchListPtr % srcList(i))
+                          nAdded = nAdded + 1
+                        end do
+                      end do
+                    end do
+                  end do
+                end do
+              end if
+
+              exchListPtr =&gt; exchListPtr % next
+            end do
+
+            fieldCursor =&gt; fieldCursor % next
+          end do
+          bufferOffset = bufferOffset + nAdded
+        end do
+
+        call MPI_Isend(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
+        commListPtr =&gt; commListPtr % next
+      end do
+#endif
+
+      ! Handle local copy. If MPI is off, then only local copies are performed.
+      fieldCursor =&gt; field
+      do while(associated(fieldCursor))
+        do iHalo = 1, nHaloLayers
+          exchListPtr =&gt; fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList
+
+          do while(associated(exchListPtr))
+            fieldCursor2 =&gt; field
+            do while(associated(fieldCursor2))
+              if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then
+                do i = 1, exchListPtr % nList
+                  fieldCursor2 % array(:, :, :, :, exchListPtr % destList(i)) = fieldCursor % array(:, :, :, :, exchListPtr % srcList(i))
+                end do
+              end if
+              
+              fieldCursor2 =&gt; fieldCursor2 % next
+            end do
+
+            exchListPtr =&gt; exchListPtr % next
+          end do
+        end do
+
+        fieldCursor =&gt; fieldCursor % next
+      end do
+
+#ifdef _MPI
+
+      ! Wait for mpi_irecv to finish, and unpack data from buffer
+      commListPtr =&gt; recvList
+      do while(associated(commListPtr))
+        call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+        bufferOffset = 0
+        do iHalo = 1, nHaloLayers
+          nAdded = 0
+          fieldCursor =&gt; field
+          do while(associated(fieldCursor))
+            exchListPtr =&gt; fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+            do while(associated(exchListPtr))
+              if(exchListPtr % endPointID == commListPtr % procID) then
+                do i = 1, exchListPtr % nList
+                  do j = 1, fieldCursor % dimSizes(4)
+                    do k = 1, fieldCursor % dimSizes(3)
+                      do l = 1, fieldCursor % dimSizes(2)
+                        do m = 1, fieldCursor % dimSizes(1)
+                          fieldCursor % array(m, l, k, j, exchListPtr % destList(i)) = commListPtr % rbuffer((exchListPtr % srcList(i)-1)&amp;
+                                                                                 *fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) *fieldCursor % dimSizes(3) * fieldCursor % dimSizes(4)&amp;
+                                                                               + (j-1)*fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) &amp;
+                                                                               + (k-1)*fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) &amp;
+                                                                               + (l-1)*fieldCursor % dimSizes(1) + m + bufferOffset)
+                        end do
+                      end do
+                    end do
+                  end do
+                end do
+                nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) * fieldCursor % dimSizes(4))
+              end if
+              exchListPtr =&gt; exchListPtr % next
+            end do
+            
+            fieldCursor =&gt; fieldCursor % next
+          end do
+          bufferOffset = bufferOffset + nAdded
+        end do
+        commListPtr =&gt; commListPtr % next
+      end do
+
+      ! wait for mpi_isend to finish.
+      commListPtr =&gt; sendList
+      do while(associated(commListPtr))
+        call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+        commListPtr =&gt; commListPtr % next
+      end do
+
+     ! Destroy commLists.
+     call mpas_dmpar_destroy_communication_list(sendList)
+     call mpas_dmpar_destroy_communication_list(recvList)
+#endif
+
+     deallocate(haloLayers)
+
+   end subroutine mpas_dmpar_exch_halo_field5d_real!}}}
+
    subroutine mpas_dmpar_init_mulithalo_exchange_list(exchList, nHalos)!{{{
      type (mpas_multihalo_exchange_list), pointer :: exchList
      integer, intent(in) :: nHalos
@@ -4668,4 +5879,30 @@
        end if
    end subroutine mpas_dmpar_copy_field3d_real!}}}
 
+   subroutine mpas_dmpar_copy_field4d_real(field)!{{{
+       type (field4dReal), pointer :: field
+       type (field4dReal), pointer :: fieldCursor
+
+       if(associated(field % next)) then
+         fieldCursor =&gt; field % next
+         do while(associated(fieldCursor))
+           fieldCursor % array = field % array
+           fieldCursor =&gt; fieldCursor % next
+         end do
+       end if
+   end subroutine mpas_dmpar_copy_field4d_real!}}}
+
+   subroutine mpas_dmpar_copy_field5d_real(field)!{{{
+       type (field5dReal), pointer :: field
+       type (field5dReal), pointer :: fieldCursor
+
+       if(associated(field % next)) then
+         fieldCursor =&gt; field % next
+         do while(associated(fieldCursor))
+           fieldCursor % array = field % array
+           fieldCursor =&gt; fieldCursor % next
+         end do
+       end if
+   end subroutine mpas_dmpar_copy_field5d_real!}}}
+
 end module mpas_dmpar
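
For reference, a rough caller-side sketch of how the new 4-d overloads might be exercised through the generic interfaces declared above (mpas_dmpar_exch_halo_field and mpas_dmpar_copy_field; the scratch-field interfaces are assumed here to be named mpas_allocate_scratch_field and mpas_deallocate_scratch_field). The field, its wiring to a block and to its multihalo exchange lists, and the halo-layer choice are illustrative assumptions, not part of this commit. Note that for a field4dReal the outermost (fourth) dimension is the decomposed element index; dimensions 1-3 are packed contiguously for each exchanged element.

    ! Illustrative sketch only: gmField is assumed to be associated elsewhere
    ! (e.g. by registry-generated code) with a 4-d real field whose block and
    ! send/recv/copy multihalo exchange lists are already set up.
    type (field4DReal), pointer :: gmField
    integer, dimension(1) :: innerHalo

    innerHalo(1) = 1

    call mpas_allocate_scratch_field(gmField)            ! resolves to the new 4-d overload
    call mpas_dmpar_exch_halo_field(gmField)             ! update every halo layer
    call mpas_dmpar_exch_halo_field(gmField, innerHalo)  ! or only halo layer 1
    call mpas_dmpar_copy_field(gmField)                  ! replicate the head block's data
    call mpas_deallocate_scratch_field(gmField)
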

Modified: branches/ocean_projects/gm_implement/src/framework/mpas_grid_types.F
===================================================================
--- branches/ocean_projects/gm_implement/src/framework/mpas_grid_types.F        2013-02-15 18:04:59 UTC (rev 2482)
+++ branches/ocean_projects/gm_implement/src/framework/mpas_grid_types.F        2013-02-19 17:55:19 UTC (rev 2483)
@@ -20,8 +20,66 @@
       logical :: output
    end type io_info
 
+   ! Derived type for storing fields
+   type field5DReal
+  
+      ! Back-pointer to the containing block
+      type (block_type), pointer :: block
 
+      ! Raw array holding field data on this block
+      real (kind=RKIND), dimension(:,:,:,:,:), pointer :: array
+
+      ! Information used by the I/O layer
+      type (io_info), pointer :: ioinfo       ! to be removed later
+      character (len=StrKIND) :: fieldName
+      character (len=StrKIND), dimension(:), pointer :: constituentNames =&gt; null()
+      character (len=StrKIND), dimension(5) :: dimNames
+      integer, dimension(5) :: dimSizes
+      logical :: hasTimeDimension
+      logical :: isSuperArray
+      type (att_list_type), pointer :: attList =&gt; null()     
+
+      ! Pointers to the prev and next blocks for this field on this task
+      type (field5DReal), pointer :: prev, next
+
+      ! Halo communication lists
+      type (mpas_multihalo_exchange_list), pointer :: sendList
+      type (mpas_multihalo_exchange_list), pointer :: recvList
+      type (mpas_multihalo_exchange_list), pointer :: copyList
+   end type field5DReal
+
+
    ! Derived type for storing fields
+   type field4DReal
+  
+      ! Back-pointer to the containing block
+      type (block_type), pointer :: block
+
+      ! Raw array holding field data on this block
+      real (kind=RKIND), dimension(:,:,:,:), pointer :: array
+
+      ! Information used by the I/O layer
+      type (io_info), pointer :: ioinfo       ! to be removed later
+      character (len=StrKIND) :: fieldName
+      character (len=StrKIND), dimension(:), pointer :: constituentNames =&gt; null()
+      character (len=StrKIND), dimension(4) :: dimNames
+      integer, dimension(4) :: dimSizes
+      logical :: hasTimeDimension
+      logical :: isSuperArray
+      type (att_list_type), pointer :: attList =&gt; null()     
+
+      ! Pointers to the prev and next blocks for this field on this task
+      type (field4DReal), pointer :: prev, next
+
+      ! Halo communication lists
+      type (mpas_multihalo_exchange_list), pointer :: sendList
+      type (mpas_multihalo_exchange_list), pointer :: recvList
+      type (mpas_multihalo_exchange_list), pointer :: copyList
+   end type field4DReal
+
+
+
+   ! Derived type for storing fields
    type field3DReal
   
       ! Back-pointer to the containing block
@@ -370,6 +428,8 @@
      module procedure mpas_allocate_scratch_field1d_real
      module procedure mpas_allocate_scratch_field2d_real
      module procedure mpas_allocate_scratch_field3d_real
+     module procedure mpas_allocate_scratch_field4d_real
+     module procedure mpas_allocate_scratch_field5d_real
      module procedure mpas_allocate_scratch_field1d_char
    end interface
 
@@ -380,6 +440,8 @@
      module procedure mpas_deallocate_scratch_field1d_real
      module procedure mpas_deallocate_scratch_field2d_real
      module procedure mpas_deallocate_scratch_field3d_real
+     module procedure mpas_deallocate_scratch_field4d_real
+     module procedure mpas_deallocate_scratch_field5d_real
      module procedure mpas_deallocate_scratch_field1d_char
    end interface
 
@@ -392,6 +454,8 @@
      module procedure mpas_deallocate_field1d_real
      module procedure mpas_deallocate_field2d_real
      module procedure mpas_deallocate_field3d_real
+     module procedure mpas_deallocate_field4d_real
+     module procedure mpas_deallocate_field5d_real
      module procedure mpas_deallocate_field0d_char
      module procedure mpas_deallocate_field1d_char
    end interface
@@ -632,6 +696,62 @@
 
    end subroutine mpas_allocate_scratch_field3d_real!}}}
 
+   subroutine mpas_allocate_scratch_field4d_real(f, single_block_in)!{{{
+       type (field4dReal), pointer :: f
+       logical, intent(in), optional :: single_block_in
+       logical :: single_block
+       type (field4dReal), pointer :: f_cursor
+
+       if(present(single_block_in)) then
+          single_block = single_block_in
+       else
+          single_block = .false.
+       end if
+
+       if(.not. single_block) then
+          f_cursor =&gt; f
+          do while(associated(f_cursor))
+             if(.not.associated(f_cursor % array)) then
+                allocate(f_cursor % array(f_cursor % dimSizes(1), f_cursor % dimSizes(2), f_cursor % dimSizes(3), f_cursor % dimSizes(4)))
+             end if
+             f_cursor =&gt; f_cursor % next
+          end do
+       else
+          if(.not.associated(f % array)) then
+            allocate(f % array(f % dimSizes(1), f % dimSizes(2), f % dimSizes(3), f % dimSizes(4)))
+          end if
+       end if
+
+   end subroutine mpas_allocate_scratch_field4d_real!}}}
+
+   subroutine mpas_allocate_scratch_field5d_real(f, single_block_in)!{{{
+       type (field5dReal), pointer :: f
+       logical, intent(in), optional :: single_block_in
+       logical :: single_block
+       type (field5dReal), pointer :: f_cursor
+
+       if(present(single_block_in)) then
+          single_block = single_block_in
+       else
+          single_block = .false.
+       end if
+
+       if(.not. single_block) then
+          f_cursor =&gt; f
+          do while(associated(f_cursor))
+             if(.not.associated(f_cursor % array)) then
+                allocate(f_cursor % array(f_cursor % dimSizes(1), f_cursor % dimSizes(2), f_cursor % dimSizes(3), f_cursor % dimSizes(4), f_cursor % dimSizes(5)))
+             end if
+             f_cursor =&gt; f_cursor % next
+          end do
+       else
+          if(.not.associated(f % array)) then
+            allocate(f % array(f % dimSizes(1), f % dimSizes(2), f % dimSizes(3), f % dimSizes(4), f % dimSizes(5)))
+          end if
+       end if
+
+   end subroutine mpas_allocate_scratch_field5d_real!}}}
+
    subroutine mpas_allocate_scratch_field1d_char(f, single_block_in)!{{{
        type (field1dChar), pointer :: f
        logical, intent(in), optional :: single_block_in
@@ -834,6 +954,64 @@
 
    end subroutine mpas_deallocate_scratch_field3d_real!}}}
 
+   subroutine mpas_deallocate_scratch_field4d_real(f, single_block_in)!{{{
+       type (field4dReal), pointer :: f
+       logical, intent(in), optional :: single_block_in
+       logical :: single_block
+       type (field4dReal), pointer :: f_cursor
+
+       if(present(single_block_in)) then
+          single_block = single_block_in
+       else
+          single_block = .false.
+       end if
+
+       if(.not.single_block) then
+          f_cursor =&gt; f
+          do while(associated(f_cursor))
+            if(associated(f_cursor % array)) then
+              deallocate(f_cursor % array)
+            end if
+   
+            f_cursor =&gt; f_cursor % next
+          end do
+       else
+          if(associated(f % array)) then
+             deallocate(f % array)
+          end if
+       end if
+
+   end subroutine mpas_deallocate_scratch_field4d_real!}}}
+
+   subroutine mpas_deallocate_scratch_field5d_real(f, single_block_in)!{{{
+       type (field5dReal), pointer :: f
+       logical, intent(in), optional :: single_block_in
+       logical :: single_block
+       type (field5dReal), pointer :: f_cursor
+
+       if(present(single_block_in)) then
+          single_block = single_block_in
+       else
+          single_block = .false.
+       end if
+
+       if(.not.single_block) then
+          f_cursor =&gt; f
+          do while(associated(f_cursor))
+            if(associated(f_cursor % array)) then
+              deallocate(f_cursor % array)
+            end if
+   
+            f_cursor =&gt; f_cursor % next
+          end do
+       else
+          if(associated(f % array)) then
+             deallocate(f % array)
+          end if
+       end if
+
+   end subroutine mpas_deallocate_scratch_field5d_real!}}}
+
    subroutine mpas_deallocate_scratch_field1d_char(f, single_block_in)!{{{
        type (field1dChar), pointer :: f
        logical, intent(in), optional :: single_block_in
@@ -1073,6 +1251,60 @@
 
    end subroutine mpas_deallocate_field3d_real!}}}
 
+   subroutine mpas_deallocate_field4d_real(f)!{{{
+       type (field4dReal), pointer :: f
+       type (field4dReal), pointer :: f_cursor
+
+       f_cursor =&gt; f
+       do while(associated(f_cursor))
+         if(associated(f % next)) then
+           f =&gt; f % next
+         else
+           nullify(f)
+         end if
+
+         if(associated(f_cursor % ioinfo)) then
+           deallocate(f_cursor % ioinfo)
+         end if
+
+         if(associated(f_cursor % array)) then
+           deallocate(f_cursor % array)
+         end if
+
+         deallocate(f_cursor)
+
+         f_cursor =&gt; f
+       end do
+
+   end subroutine mpas_deallocate_field4d_real!}}}
+
+   subroutine mpas_deallocate_field5d_real(f)!{{{
+       type (field5dReal), pointer :: f
+       type (field5dReal), pointer :: f_cursor
+
+       f_cursor =&gt; f
+       do while(associated(f_cursor))
+         if(associated(f % next)) then
+           f =&gt; f % next
+         else
+           nullify(f)
+         end if
+
+         if(associated(f_cursor % ioinfo)) then
+           deallocate(f_cursor % ioinfo)
+         end if
+
+         if(associated(f_cursor % array)) then
+           deallocate(f_cursor % array)
+         end if
+
+         deallocate(f_cursor)
+
+         f_cursor =&gt; f
+       end do
+
+   end subroutine mpas_deallocate_field5d_real!}}}
+
    subroutine mpas_deallocate_field0d_char(f)!{{{
        type (field0dChar), pointer :: f
        type (field0dChar), pointer :: f_cursor

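For context, here is a minimal usage sketch of the new 5-d scratch routines added above (not part of the commit). The module names mpas_kind_types and mpas_grid_types, the single-block call, and the literal extents are assumptions for illustration; in a core, the field head and its dimSizes would come from the Registry-generated types, and callers would normally go through the generic mpas_allocate_scratch_field / mpas_deallocate_scratch_field interfaces if those were extended to cover the new specifics.

   subroutine example_scratch_5d()
      use mpas_kind_types
      use mpas_grid_types
      implicit none
      type (field5dReal), pointer :: f

      ! build a one-node field list with hypothetical extents
      allocate(f)
      nullify(f % next)
      f % dimSizes(:) = (/ 2, 3, 4, 5, 10 /)

      ! allocate, use, and release the scratch storage for this block only
      call mpas_allocate_scratch_field5d_real(f, single_block_in=.true.)
      f % array(:,:,:,:,:) = 0.0_RKIND
      call mpas_deallocate_scratch_field5d_real(f, single_block_in=.true.)

      deallocate(f)
   end subroutine example_scratch_5d
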
Modified: branches/ocean_projects/gm_implement/src/framework/mpas_io.F
===================================================================
--- branches/ocean_projects/gm_implement/src/framework/mpas_io.F        2013-02-15 18:04:59 UTC (rev 2482)
+++ branches/ocean_projects/gm_implement/src/framework/mpas_io.F        2013-02-19 17:55:19 UTC (rev 2483)
@@ -77,6 +77,7 @@
       module procedure MPAS_io_get_var_real2d
       module procedure MPAS_io_get_var_real3d
       module procedure MPAS_io_get_var_real4d
+      module procedure MPAS_io_get_var_real5d
       module procedure MPAS_io_get_var_char0d
    end interface MPAS_io_get_var
 
@@ -91,6 +92,7 @@
       module procedure MPAS_io_put_var_real2d
       module procedure MPAS_io_put_var_real3d
       module procedure MPAS_io_put_var_real4d
+      module procedure MPAS_io_put_var_real5d
       module procedure MPAS_io_put_var_char0d
    end interface MPAS_io_put_var
 
@@ -1146,7 +1148,7 @@
 
 
    subroutine MPAS_io_get_var_generic(handle, fieldname, intVal, intArray1d, intArray2d, intArray3d, intArray4d, &amp;
-                                                        realVal, realArray1d, realArray2d, realArray3d, realArray4d, &amp;
+                                                        realVal, realArray1d, realArray2d, realArray3d, realArray4d, realArray5d, &amp;
                                                         charVal, ierr)
 
       implicit none
@@ -1163,6 +1165,7 @@
       real (kind=RKIND), dimension(:,:), intent(out), optional :: realArray2d
       real (kind=RKIND), dimension(:,:,:), intent(out), optional :: realArray3d
       real (kind=RKIND), dimension(:,:,:,:), intent(out), optional :: realArray4d
+      real (kind=RKIND), dimension(:,:,:,:,:), intent(out), optional :: realArray5d
       character (len=*), intent(out), optional :: charVal
       integer, intent(out), optional :: ierr
 
@@ -1263,6 +1266,10 @@
 !         write (0,*) '  value is real4'
          call PIO_read_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &amp;
                               realArray4d, pio_ierr)
+      else if (present(realArray5d)) then
+!         write (0,*) '  value is real5'
+         call PIO_read_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &amp;
+                              realArray5d, pio_ierr)
       else if (present(intArray1d)) then
 !         write (0,*) '  value is int1'
          call PIO_read_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &amp;
@@ -1492,6 +1499,26 @@
    end subroutine MPAS_io_get_var_real4d
 
 
+   subroutine MPAS_io_get_var_real5d(handle, fieldname, array, ierr)
+
+      implicit none
+
+      type (MPAS_IO_Handle_type), intent(inout) :: handle
+      character (len=*), intent(in) :: fieldname
+      real (kind=RKIND), dimension(:,:,:,:,:), intent(out) :: array
+      integer, intent(out), optional :: ierr
+
+      integer :: pio_ierr
+      type (fieldlist_type), pointer :: field_cursor
+
+!      write(0,*) 'Called MPAS_io_get_var_real5d()'
+      if (present(ierr)) ierr = MPAS_IO_NOERR
+
+      call MPAS_io_get_var_generic(handle, fieldname, realArray5d=array, ierr=ierr)
+
+   end subroutine MPAS_io_get_var_real5d
+
+
    subroutine MPAS_io_get_var_char0d(handle, fieldname, val, ierr)
 
       implicit none
@@ -1513,7 +1540,7 @@
 
 
    subroutine MPAS_io_put_var_generic(handle, fieldname, intVal, intArray1d, intArray2d, intArray3d, intArray4d, &amp;
-                                                        realVal, realArray1d, realArray2d, realArray3d, realArray4d, &amp;
+                                                        realVal, realArray1d, realArray2d, realArray3d, realArray4d, realArray5d, &amp;
                                                         charVal, ierr)
 
       implicit none
@@ -1530,6 +1557,7 @@
       real (kind=RKIND), dimension(:,:), intent(in), optional :: realArray2d
       real (kind=RKIND), dimension(:,:,:), intent(in), optional :: realArray3d
       real (kind=RKIND), dimension(:,:,:,:), intent(in), optional :: realArray4d
+      real (kind=RKIND), dimension(:,:,:,:,:), intent(in), optional :: realArray5d
       character (len=*), intent(in), optional :: charVal
       integer, intent(out), optional :: ierr
 
@@ -1629,6 +1657,9 @@
       else if (present(realArray4d)) then
          call PIO_write_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &amp;
                                realArray4d, pio_ierr)
+      else if (present(realArray5d)) then
+         call PIO_write_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &amp;
+                               realArray5d, pio_ierr)
       else if (present(intArray1d)) then
          call PIO_write_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &amp;
                                intArray1d, pio_ierr)
@@ -1852,6 +1883,26 @@
    end subroutine MPAS_io_put_var_real4d
 
 
+   subroutine MPAS_io_put_var_real5d(handle, fieldname, array, ierr)
+
+      implicit none
+
+      type (MPAS_IO_Handle_type), intent(inout) :: handle
+      character (len=*), intent(in) :: fieldname
+      real (kind=RKIND), dimension(:,:,:,:,:), intent(in) :: array
+      integer, intent(out), optional :: ierr
+
+      integer :: pio_ierr
+      type (fieldlist_type), pointer :: field_cursor
+
+!      write(0,*) 'Called MPAS_io_put_var_real5d()'
+      if (present(ierr)) ierr = MPAS_IO_NOERR
+
+      call MPAS_io_put_var_generic(handle, fieldname, realArray5d=array, ierr=ierr)
+
+   end subroutine MPAS_io_put_var_real5d
+
+
    subroutine MPAS_io_put_var_char0d(handle, fieldname, val, ierr)
 
       implicit none

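As a usage sketch (not part of the commit), the new 5-d overloads are reached through the generic MPAS_io_put_var / MPAS_io_get_var interfaces extended above. The handle is assumed to be already opened with a variable named 'hypothetical5dVar' defined and given a decomposition; the name and extents below are made up for illustration.

   subroutine example_io_5d(handle)
      use mpas_kind_types
      use mpas_io
      implicit none
      type (MPAS_IO_Handle_type), intent(inout) :: handle
      real (kind=RKIND), dimension(:,:,:,:,:), allocatable :: buf
      integer :: ierr

      allocate(buf(2,3,4,5,6))
      buf(:,:,:,:,:) = 1.0_RKIND

      ! resolves to MPAS_io_put_var_real5d via the extended generic interface
      call MPAS_io_put_var(handle, 'hypothetical5dVar', buf, ierr)

      ! and reads the data back through MPAS_io_get_var_real5d
      call MPAS_io_get_var(handle, 'hypothetical5dVar', buf, ierr)

      deallocate(buf)
   end subroutine example_io_5d
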
Modified: branches/ocean_projects/gm_implement/src/framework/mpas_io_streams.F
===================================================================
--- branches/ocean_projects/gm_implement/src/framework/mpas_io_streams.F        2013-02-15 18:04:59 UTC (rev 2482)
+++ branches/ocean_projects/gm_implement/src/framework/mpas_io_streams.F        2013-02-19 17:55:19 UTC (rev 2483)
@@ -19,6 +19,8 @@
       type (field1dReal), pointer :: real1dField =&gt; null()
       type (field2dReal), pointer :: real2dField =&gt; null()
       type (field3dReal), pointer :: real3dField =&gt; null()
+      type (field4dReal), pointer :: real4dField =&gt; null()
+      type (field5dReal), pointer :: real5dField =&gt; null()
       type (field0dChar), pointer :: char0dField =&gt; null()
       type (field1dChar), pointer :: char1dField =&gt; null()
       type (field_list_type), pointer :: next =&gt; null()
@@ -44,6 +46,8 @@
       module procedure MPAS_streamAddField_1dReal
       module procedure MPAS_streamAddField_2dReal
       module procedure MPAS_streamAddField_3dReal
+      module procedure MPAS_streamAddField_4dReal
+      module procedure MPAS_streamAddField_5dReal
       module procedure MPAS_streamAddField_0dChar
    end interface MPAS_streamAddField
 
@@ -82,8 +86,10 @@
                          FIELD_1D_REAL  =  6, &amp;
                          FIELD_2D_REAL  =  7, &amp;
                          FIELD_3D_REAL  =  8, &amp;
-                         FIELD_0D_CHAR  =  9, &amp;
-                         FIELD_1D_CHAR  =  10
+                         FIELD_4D_REAL  =  9, &amp;
+                         FIELD_5D_REAL  =  10, &amp;
+                         FIELD_0D_CHAR  =  11, &amp;
+                         FIELD_1D_CHAR  =  12
 
    private mergeArrays
 
@@ -996,6 +1002,208 @@
    end subroutine MPAS_streamAddField_3dReal
 
 
+   subroutine MPAS_streamAddField_4dReal(stream, field, ierr)
+
+      implicit none
+
+      type (MPAS_Stream_type), intent(inout) :: stream
+      type (field4DReal), intent(in), target :: field
+      integer, intent(out), optional :: ierr
+
+      integer :: io_err
+      integer :: i
+      integer :: idim
+      integer :: totalDimSize, globalDimSize
+      logical :: isDecomposed
+      integer :: ndims
+      type (field4dReal), pointer :: field_ptr
+      character (len=StrKIND), dimension(5) :: dimNames
+      character (len=StrKIND), dimension(:), pointer :: dimNamesInq
+      integer, dimension(:), pointer :: indices
+      type (field_list_type), pointer :: field_list_cursor
+      type (field_list_type), pointer :: new_field_list_node
+      logical :: any_success
+      logical, dimension(:), pointer :: isAvailable
+
+      if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+      !
+      ! Sanity checks
+      !
+      if (.not. stream % isInitialized) then
+         if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED
+         return
+      end if
+
+!write(0,*) '... Adding field '//trim(field % fieldName)//' to stream'
+
+      ndims = size(field % dimSizes)
+
+!write(0,*) '... field has ', ndims, ' dimensions'
+
+      ! 
+      ! Determine whether the field is decomposed, the indices that are owned by this task's blocks,
+      !    and the total number of outer-indices owned by this task
+      ! 
+#include &quot;add_field_indices.inc&quot;
+
+      
+      any_success = .false.
+      if (field % isSuperArray) then
+!write(0,*) '^^^^^^^^^^^^^^^^^^^^^^^^^^^ we are adding a super-array'
+         allocate(isAvailable(size(field % constituentNames)))
+         isAvailable(:) = .false.
+         do i=1,size(field % constituentNames)
+            call MPAS_streamAddField_generic(stream, trim(field % constituentNames(i)), MPAS_IO_DOUBLE, field % dimNames(2:ndims), &amp;
+                                             field % dimSizes(2:ndims), field % hasTimeDimension, isDecomposed, totalDimSize, globalDimSize, &amp;
+                                             indices, io_err)
+            if (io_err == MPAS_STREAM_NOERR) then
+               isAvailable(i) = .true.
+               any_success = .true.
+            end if
+         end do
+      else
+         nullify(isAvailable)
+         call MPAS_streamAddField_generic(stream, trim(field % fieldName), MPAS_IO_DOUBLE, field % dimNames, field % dimSizes, &amp;
+                                          field % hasTimeDimension, isDecomposed, totalDimSize, globalDimSize, indices, io_err)
+         if (io_err == MPAS_STREAM_NOERR) then
+            any_success = .true.
+         end if
+      end if
+
+      deallocate(indices)
+      if (.not. any_success) then
+          if (present(ierr)) ierr = MPAS_IO_ERR
+          return
+      end if
+
+      if (field % isSuperArray) then
+         do i=1,size(field % constituentNames)
+            call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attList)
+         end do
+      else
+         call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attList)
+      end if
+
+
+      !
+      ! Set field pointer and type in fieldList
+      !
+      new_field_list_node =&gt; stream % fieldList
+      do while (associated(new_field_list_node % next))
+         new_field_list_node =&gt; new_field_list_node % next
+      end do
+      new_field_list_node % field_type = FIELD_4D_REAL
+      new_field_list_node % real4dField =&gt; field
+      new_field_list_node % isAvailable =&gt; isAvailable
+
+!write(0,*) '... done adding field'
+!write(0,*) 'DEBUGGING : Finished adding 4d real field '//trim(field % fieldName)
+
+   end subroutine MPAS_streamAddField_4dReal
+
+
+   subroutine MPAS_streamAddField_5dReal(stream, field, ierr)
+
+      implicit none
+
+      type (MPAS_Stream_type), intent(inout) :: stream
+      type (field5DReal), intent(in), target :: field
+      integer, intent(out), optional :: ierr
+
+      integer :: io_err
+      integer :: i
+      integer :: idim
+      integer :: totalDimSize, globalDimSize
+      logical :: isDecomposed
+      integer :: ndims
+      type (field5dReal), pointer :: field_ptr
+      character (len=StrKIND), dimension(5) :: dimNames
+      character (len=StrKIND), dimension(:), pointer :: dimNamesInq
+      integer, dimension(:), pointer :: indices
+      type (field_list_type), pointer :: field_list_cursor
+      type (field_list_type), pointer :: new_field_list_node
+      logical :: any_success
+      logical, dimension(:), pointer :: isAvailable
+
+      if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+      !
+      ! Sanity checks
+      !
+      if (.not. stream % isInitialized) then
+         if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED
+         return
+      end if
+
+!write(0,*) '... Adding field '//trim(field % fieldName)//' to stream'
+
+      ndims = size(field % dimSizes)
+
+!write(0,*) '... field has ', ndims, ' dimensions'
+
+      ! 
+      ! Determine whether the field is decomposed, the indices that are owned by this task's blocks,
+      !    and the total number of outer-indices owned by this task
+      ! 
+#include &quot;add_field_indices.inc&quot;
+
+      
+      any_success = .false.
+      if (field % isSuperArray) then
+!write(0,*) '^^^^^^^^^^^^^^^^^^^^^^^^^^^ we are adding a super-array'
+         allocate(isAvailable(size(field % constituentNames)))
+         isAvailable(:) = .false.
+         do i=1,size(field % constituentNames)
+            call MPAS_streamAddField_generic(stream, trim(field % constituentNames(i)), MPAS_IO_DOUBLE, field % dimNames(2:ndims), &amp;
+                                             field % dimSizes(2:ndims), field % hasTimeDimension, isDecomposed, totalDimSize, globalDimSize, &amp;
+                                             indices, io_err)
+            if (io_err == MPAS_STREAM_NOERR) then
+               isAvailable(i) = .true.
+               any_success = .true.
+            end if
+         end do
+      else
+         nullify(isAvailable)
+         call MPAS_streamAddField_generic(stream, trim(field % fieldName), MPAS_IO_DOUBLE, field % dimNames, field % dimSizes, &amp;
+                                          field % hasTimeDimension, isDecomposed, totalDimSize, globalDimSize, indices, io_err)
+         if (io_err == MPAS_STREAM_NOERR) then
+            any_success = .true.
+         end if
+      end if
+
+      deallocate(indices)
+      if (.not. any_success) then
+          if (present(ierr)) ierr = MPAS_IO_ERR
+          return
+      end if
+
+      if (field % isSuperArray) then
+         do i=1,size(field % constituentNames)
+            call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attList)
+         end do
+      else
+         call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attList)
+      end if
+
+
+      !
+      ! Set field pointer and type in fieldList
+      !
+      new_field_list_node =&gt; stream % fieldList
+      do while (associated(new_field_list_node % next))
+         new_field_list_node =&gt; new_field_list_node % next
+      end do
+      new_field_list_node % field_type = FIELD_5D_REAL
+      new_field_list_node % real5dField =&gt; field
+      new_field_list_node % isAvailable =&gt; isAvailable
+
+!write(0,*) '... done adding field'
+!write(0,*) 'DEBUGGING : Finished adding 5d real field '//trim(field % fieldName)
+
+   end subroutine MPAS_streamAddField_5dReal
+
+
    subroutine MPAS_streamAddField_0dChar(stream, field, ierr)
 
       implicit none
@@ -1313,6 +1521,8 @@
       type (field1dReal), pointer :: field_1dreal_ptr
       type (field2dReal), pointer :: field_2dreal_ptr
       type (field3dReal), pointer :: field_3dreal_ptr
+      type (field4dReal), pointer :: field_4dreal_ptr
+      type (field5dReal), pointer :: field_5dreal_ptr
       type (field0dChar), pointer :: field_0dchar_ptr
       type (field1dChar), pointer :: field_1dchar_ptr
       type (field_list_type), pointer :: field_cursor
@@ -1324,6 +1534,8 @@
       real (kind=RKIND), dimension(:),     pointer :: real1d_temp
       real (kind=RKIND), dimension(:,:),   pointer :: real2d_temp
       real (kind=RKIND), dimension(:,:,:), pointer :: real3d_temp
+      real (kind=RKIND), dimension(:,:,:,:), pointer :: real4d_temp
+      real (kind=RKIND), dimension(:,:,:,:,:), pointer :: real5d_temp
 
       if (present(ierr)) ierr = MPAS_STREAM_NOERR
 
@@ -1876,7 +2088,185 @@
             else
                deallocate(real3d_temp)
             end if
+         else if (field_cursor % field_type == FIELD_4D_REAL) then
 
+!write(0,*) 'DEBUGGING : *************** '//trim(field_cursor % real4dField % fieldName)
+!write(0,*) 'DEBUGGING : reading a 4d real array'
+            if (field_cursor % real4dField % isSuperArray) then
+!write(0,*) 'DEBUGGING : reading a 4d real super-array'
+               ncons = size(field_cursor % real4dField % constituentNames)
+               allocate(real3d_temp(field_cursor % real4dField % dimSizes(2), &amp;
+                                    field_cursor % real4dField % dimSizes(3), &amp;
+                                    field_cursor % totalDimSize))
+            else
+               ncons = 1
+               allocate(real4d_temp(field_cursor % real4dField % dimSizes(1), &amp;
+                                    field_cursor % real4dField % dimSizes(2), &amp;
+                                    field_cursor % real4dField % dimSizes(3), &amp;
+                                    field_cursor % totalDimSize))
+            end if
+
+            do j=1,ncons
+               if (field_cursor % real4dField % isSuperArray) then
+                  if (.not. field_cursor % isAvailable(j)) cycle
+!write(0,*) 'DEBUGGING : calling get_var for a constituent'
+                  call MPAS_io_get_var(stream % fileHandle, field_cursor % real4dField % constituentNames(j), real3d_temp, io_err)
+               else
+                  call MPAS_io_get_var(stream % fileHandle, field_cursor % real4dField % fieldName, real4d_temp, io_err)
+               end if
+               call MPAS_io_err_mesg(io_err, .false.)
+               if (io_err /= MPAS_IO_NOERR) then
+                   if (present(ierr)) ierr = MPAS_IO_ERR
+                   if (field_cursor % real4dField % isSuperArray) then
+                      deallocate(real3d_temp)
+                   else
+                      deallocate(real4d_temp)
+                   end if
+                   return
+               end if
+   
+               if (field_cursor % isDecomposed) then
+                  ! Distribute field to multiple blocks
+                  field_4dreal_ptr =&gt; field_cursor % real4dField
+                  i = 1
+                  do while (associated(field_4dreal_ptr))
+                     if (trim(field_4dreal_ptr % dimNames(4)) == 'nCells') then
+                        ownedSize = field_4dreal_ptr % block % mesh % nCellsSolve
+                     else if (trim(field_4dreal_ptr % dimNames(4)) == 'nEdges') then
+                        ownedSize = field_4dreal_ptr % block % mesh % nEdgesSolve
+                     else if (trim(field_4dreal_ptr % dimNames(4)) == 'nVertices') then
+                        ownedSize = field_4dreal_ptr % block % mesh % nVerticesSolve
+                     else
+                        ownedSize = field_4dreal_ptr % dimSizes(4)
+                     end if
+
+                     if (field_cursor % real4dField % isSuperArray) then
+!write(0,*) 'DEBUGGING : copying the temporary array'
+                        field_4dreal_ptr % array(j, :,:,1:ownedSize) = real3d_temp(:,:,i:i+ownedSize-1)
+                     else
+                        field_4dreal_ptr % array(:,:,:,1:ownedSize) = real4d_temp(:,:,:,i:i+ownedSize-1)
+                     end if
+                     i = i + ownedSize
+                     field_4dreal_ptr =&gt; field_4dreal_ptr % next
+                  end do
+
+               else
+   
+                  if (field_cursor % real4dField % isSuperArray) then
+                     call mpas_dmpar_bcast_reals(field_cursor % real4dField % block % domain % dminfo, size(real3d_temp), real3d_temp(:,1,1))
+                     field_4dreal_ptr =&gt; field_cursor % real4dField
+                     do while (associated(field_4dreal_ptr))
+                        field_4dreal_ptr % array(j,:,:,:) = real3d_temp(:,:,:)
+                        field_4dreal_ptr =&gt; field_4dreal_ptr % next
+                     end do
+                  else
+                     call mpas_dmpar_bcast_reals(field_cursor % real4dField % block % domain % dminfo, size(real4d_temp), real4d_temp(:,1,1,1))
+                     field_4dreal_ptr =&gt; field_cursor % real4dField
+                     do while (associated(field_4dreal_ptr))
+                        field_4dreal_ptr % array(:,:,:,:) = real4d_temp(:,:,:,:)
+                        field_4dreal_ptr =&gt; field_4dreal_ptr % next
+                     end do
+                  end if
+               end if
+            end do
+
+            if (field_cursor % real4dField % isSuperArray) then
+               deallocate(real3d_temp)
+            else
+               deallocate(real4d_temp)
+            end if
+
+         else if (field_cursor % field_type == FIELD_5D_REAL) then
+
+!write(0,*) 'DEBUGGING : *************** '//trim(field_cursor % real5dField % fieldName)
+!write(0,*) 'DEBUGGING : reading a 5d real array'
+            if (field_cursor % real5dField % isSuperArray) then
+!write(0,*) 'DEBUGGING : reading a 5d real super-array'
+               ncons = size(field_cursor % real5dField % constituentNames)
+               allocate(real4d_temp(field_cursor % real5dField % dimSizes(2), &amp;
+                                    field_cursor % real5dField % dimSizes(3), &amp;
+                                    field_cursor % real5dField % dimSizes(4), &amp;
+                                    field_cursor % totalDimSize))
+            else
+               ncons = 1
+               allocate(real5d_temp(field_cursor % real5dField % dimSizes(1), &amp;
+                                    field_cursor % real5dField % dimSizes(2), &amp;
+                                    field_cursor % real5dField % dimSizes(3), &amp;
+                                    field_cursor % real5dField % dimSizes(4), &amp;
+                                    field_cursor % totalDimSize))
+            end if
+
+            do j=1,ncons
+               if (field_cursor % real5dField % isSuperArray) then
+                  if (.not. field_cursor % isAvailable(j)) cycle
+!write(0,*) 'DEBUGGING : calling get_var for a constituent'
+                  call MPAS_io_get_var(stream % fileHandle, field_cursor % real5dField % constituentNames(j), real4d_temp, io_err)
+               else
+                  call MPAS_io_get_var(stream % fileHandle, field_cursor % real5dField % fieldName, real5d_temp, io_err)
+               end if
+               call MPAS_io_err_mesg(io_err, .false.)
+               if (io_err /= MPAS_IO_NOERR) then
+                   if (present(ierr)) ierr = MPAS_IO_ERR
+                   if (field_cursor % real5dField % isSuperArray) then
+                      deallocate(real4d_temp)
+                   else
+                      deallocate(real5d_temp)
+                   end if
+                   return
+               end if
+   
+               if (field_cursor % isDecomposed) then
+                  ! Distribute field to multiple blocks
+                  field_5dreal_ptr =&gt; field_cursor % real5dField
+                  i = 1
+                  do while (associated(field_5dreal_ptr))
+                     if (trim(field_5dreal_ptr % dimNames(5)) == 'nCells') then
+                        ownedSize = field_5dreal_ptr % block % mesh % nCellsSolve
+                     else if (trim(field_5dreal_ptr % dimNames(5)) == 'nEdges') then
+                        ownedSize = field_5dreal_ptr % block % mesh % nEdgesSolve
+                     else if (trim(field_5dreal_ptr % dimNames(5)) == 'nVertices') then
+                        ownedSize = field_5dreal_ptr % block % mesh % nVerticesSolve
+                     else
+                        ownedSize = field_5dreal_ptr % dimSizes(5)
+                     end if
+
+                     if (field_cursor % real5dField % isSuperArray) then
+!write(0,*) 'DEBUGGING : copying the temporary array'
+                        field_5dreal_ptr % array(j,:,:,:,1:ownedSize) = real4d_temp(:,:,:,i:i+ownedSize-1)
+                     else
+                        field_5dreal_ptr % array(:,:,:,:,1:ownedSize) = real5d_temp(:,:,:,:,i:i+ownedSize-1)
+                     end if
+                     i = i + ownedSize
+                     field_5dreal_ptr =&gt; field_5dreal_ptr % next
+                  end do
+
+               else
+   
+                  if (field_cursor % real5dField % isSuperArray) then
+                     call mpas_dmpar_bcast_reals(field_cursor % real5dField % block % domain % dminfo, size(real4d_temp), real4d_temp(:,1,1,1))
+                     field_5dreal_ptr =&gt; field_cursor % real5dField
+                     do while (associated(field_5dreal_ptr))
+                        field_5dreal_ptr % array(j,:,:,:,:) = real4d_temp(:,:,:,:)
+                        field_5dreal_ptr =&gt; field_5dreal_ptr % next
+                     end do
+                  else
+                     call mpas_dmpar_bcast_reals(field_cursor % real5dField % block % domain % dminfo, size(real5d_temp), real5d_temp(:,1,1,1,1))
+                     field_5dreal_ptr =&gt; field_cursor % real5dField
+                     do while (associated(field_5dreal_ptr))
+                        field_5dreal_ptr % array(:,:,:,:,:) = real5d_temp(:,:,:,:,:)
+                        field_5dreal_ptr =&gt; field_5dreal_ptr % next
+                     end do
+                  end if
+               end if
+            end do
+
+            if (field_cursor % real5dField % isSuperArray) then
+               deallocate(real4d_temp)
+            else
+               deallocate(real5d_temp)
+            end if
+
+
          else if (field_cursor % field_type == FIELD_0D_CHAR) then
 
 !write(0,*) 'Reading in field '//trim(field_cursor % char0dField % fieldName)

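As a usage sketch (not part of the commit), registering a 4-d real field with a stream goes through the extended MPAS_streamAddField generic; the new overload tags the field-list node with FIELD_4D_REAL, which selects the 4-d temporary-buffer read/write paths shown above. The wrapper below assumes MPAS_Stream_type, field4dReal, and MPAS_STREAM_NOERR are accessible to callers.

   subroutine example_stream_add_4d(stream, field4d, ierr)
      use mpas_grid_types
      use mpas_io_streams
      implicit none
      type (MPAS_Stream_type), intent(inout) :: stream
      type (field4dReal), intent(in), target :: field4d
      integer, intent(out) :: ierr

      ! the generic interface dispatches to MPAS_streamAddField_4dReal
      call MPAS_streamAddField(stream, field4d, ierr)
      if (ierr /= MPAS_STREAM_NOERR) then
         write(0,*) 'could not add 4-d field '//trim(field4d % fieldName)
      end if
   end subroutine example_stream_add_4d
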
Modified: branches/ocean_projects/gm_implement/src/framework/mpas_timer.F
===================================================================
--- branches/ocean_projects/gm_implement/src/framework/mpas_timer.F        2013-02-15 18:04:59 UTC (rev 2482)
+++ branches/ocean_projects/gm_implement/src/framework/mpas_timer.F        2013-02-19 17:55:19 UTC (rev 2483)
@@ -269,7 +269,7 @@
               timer_ptr%avg_time = 0.0d0
               percent = 0.0d0
             else
-              timer_ptr%avg_time = timer_ptr%avg_time/timer_ptr%calls
+              timer_ptr%avg_time = timer_ptr%total_time/timer_ptr%calls
               percent = timer_ptr%total_time/total_ptr%total_time
             endif
 

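To illustrate the timer change with hypothetical numbers (not part of the commit): the per-call average is now recomputed from the accumulated total_time instead of the previously stored avg_time.

   program timer_average_sketch
      implicit none
      real (kind=8) :: total_time, avg_time
      integer :: calls

      total_time = 12.0d0   ! hypothetical accumulated time in seconds
      calls = 4
      avg_time = total_time/calls
      write(0,*) 'avg_time = ', avg_time   ! 3.0 s per call: total divided by call count
   end program timer_average_sketch
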
</font>
</pre>