<p><b>duda</b> 2012-02-27 15:33:23 -0700 (Mon, 27 Feb 2012)</p><p>BRANCH COMMIT<br>
<br>
Copy updated Makefile from trunk r1536.<br>
<br>
Also, merge ocean core changes from trunk.<br>
<br>
<br>
M Makefile<br>
M src/core_ocean/...<br>
M namelist.input.ocean<br>
</p><hr noshade><pre><font color="gray">Modified: branches/atmos_physics/Makefile
===================================================================
--- branches/atmos_physics/Makefile        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/Makefile        2012-02-27 22:33:23 UTC (rev 1538)
@@ -14,234 +14,185 @@
dummy:
-        @( echo "try one of:"; \
-        echo " make xlf"; \
-        echo " make pgi"; \
-        echo " make ifort"; \
-        echo " make gfortran"; \
-        )
+        ( make error )
-xlf-serial:
+xlf:
        ( make all \
-        "FC = xlf90" \
-        "CC = xlc" \
-        "SFC = xlf90" \
-        "SCC = xlc" \
-        "FFLAGS = -qrealsize=8 -g -C " \
-        "CFLAGS = -g" \
-        "LDFLAGS = -g -C" \
+        "FC_PARALLEL = mpxlf90" \
+        "CC_PARALLEL = mpcc" \
+        "FC_SERIAL = xlf90" \
+        "CC_SERIAL = xlc" \
+        "FFLAGS_OPT = -O3 -qrealsize=8" \
+        "CFLAGS_OPT = -O3" \
+        "LDFLAGS_OPT = -O3" \
        "CORE = $(CORE)" \
+        "DEBUG = $(DEBUG)" \
+        "SERIAL = $(SERIAL)" \
+        "USE_PAPI = $(USE_PAPI)" \
        "CPPFLAGS = $(MODEL_FORMULATION) $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
-
-xlf:
-        ( make all \
-        "FC = mpxlf90" \
-        "CC = mpcc" \
-        "SFC = xlf90" \
-        "SCC = xlc" \
-        "FFLAGS = -O3 -qrealsize=8" \
-        "CFLAGS = -O3" \
-        "LDFLAGS = -O3" \
-        "CORE = $(CORE)" \
-        "CPPFLAGS = $(MODEL_FORMULATION) -D_MPI $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
ftn:
        ( make all \
-        "FC = ftn" \
-        "CC = cc" \
-        "SFC = ftn" \
-        "SCC = cc" \
-        "FFLAGS = -i4 -r8 -gopt -O2 -Mvect=nosse -Kieee -convert big_endian" \
-        "CFLAGS = -fast" \
-        "LDFLAGS = " \
+        "FC_PARALLEL = ftn" \
+        "CC_PARALLEL = cc" \
+        "FC_SERIAL = ftn" \
+        "CC_SERIAL = cc" \
+        "FFLAGS_OPT = -i4 -r8 -gopt -O2 -Mvect=nosse -Kieee -convert big_endian" \
+        "CFLAGS_OPT = -fast" \
+        "LDFLAGS_OPT = " \
        "CORE = $(CORE)" \
-        "CPPFLAGS = $(MODEL_FORMULATION) -D_MPI -DUNDERSCORE $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
+        "DEBUG = $(DEBUG)" \
+        "SERIAL = $(SERIAL)" \
+        "USE_PAPI = $(USE_PAPI)" \
+        "CPPFLAGS = $(MODEL_FORMULATION) -DUNDERSCORE $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
pgi:
        ( make all \
-        "FC = mpif90" \
-        "CC = mpicc" \
-        "SFC = pgf90" \
-        "SCC = pgcc" \
-        "FFLAGS = -r8 -O3 -byteswapio -Mfree" \
-        "CFLAGS = -O3" \
-        "LDFLAGS = -O3" \
+        "FC_PARALLEL = mpif90" \
+        "CC_PARALLEL = mpicc" \
+        "FC_SERIAL = pgf90" \
+        "CC_SERIAL = pgcc" \
+        "FFLAGS_OPT = -r8 -O3 -byteswapio -Mfree" \
+        "CFLAGS_OPT = -O3" \
+        "LDFLAGS_OPT = -O3" \
+        "FFLAGS_DEBUG = -r8 -O0 -g -Mbounds -Mchkptr -byteswapio -Mfree" \
+        "CFLAGS_DEBUG = -O0 -g" \
+        "LDFLAGS_DEBUG = -O0 -g -Mbounds -Mchkptr" \
        "CORE = $(CORE)" \
-        "CPPFLAGS = $(MODEL_FORMULATION) -D_MPI -DUNDERSCORE $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
+        "DEBUG = $(DEBUG)" \
+        "SERIAL = $(SERIAL)" \
+        "USE_PAPI = $(USE_PAPI)" \
+        "CPPFLAGS = $(MODEL_FORMULATION) -DUNDERSCORE $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
pgi-nersc:
        ( make all \
-        "FC = ftn" \
-        "CC = cc" \
-        "SFC = ftn" \
-        "SCC = cc" \
-        "FFLAGS = -r8 -O3 -byteswapio -Mfree" \
-        "CFLAGS = -O3" \
-        "LDFLAGS = -O3" \
+        "FC_PARALLEL = ftn" \
+        "CC_PARALLEL = cc" \
+        "FC_SERIAL = ftn" \
+        "CC_SERIAL = cc" \
+        "FFLAGS_OPT = -r8 -O3 -byteswapio -Mfree" \
+        "CFLAGS_OPT = -O3" \
+        "LDFLAGS_OPT = -O3" \
        "CORE = $(CORE)" \
-        "CPPFLAGS = $(MODEL_FORMULATION) -D_MPI -DUNDERSCORE $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
+        "DEBUG = $(DEBUG)" \
+        "SERIAL = $(SERIAL)" \
+        "USE_PAPI = $(USE_PAPI)" \
+        "CPPFLAGS = $(MODEL_FORMULATION) -DUNDERSCORE $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
pgi-llnl:
        ( make all \
-        "FC = mpipgf90" \
-        "CC = pgcc" \
-        "SFC = pgf90" \
-        "SCC = pgcc" \
-        "FFLAGS = -i4 -r8 -g -O2 -byteswapio" \
-        "CFLAGS = -fast" \
-        "LDFLAGS = " \
+        "FC_PARALLEL = mpipgf90" \
+        "CC_PARALLEL = pgcc" \
+        "FC_SERIAL = pgf90" \
+        "CC_SERIAL = pgcc" \
+        "FFLAGS_OPT = -i4 -r8 -g -O2 -byteswapio" \
+        "CFLAGS_OPT = -fast" \
+        "LDFLAGS_OPT = " \
        "CORE = $(CORE)" \
-        "CPPFLAGS = $(MODEL_FORMULATION) -D_MPI -DUNDERSCORE $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
-
-pgi-serial:
-        ( make all \
-        "FC = pgf90" \
-        "CC = pgcc" \
-        "SFC = pgf90" \
-        "SCC = pgcc" \
-        "FFLAGS = -r8 -O0 -g -Mbounds -Mchkptr -byteswapio -Mfree" \
-        "CFLAGS = -O0 -g" \
-        "LDFLAGS = -O0 -g -Mbounds -Mchkptr" \
-        "CORE = $(CORE)" \
+        "DEBUG = $(DEBUG)" \
+        "SERIAL = $(SERIAL)" \
+        "USE_PAPI = $(USE_PAPI)" \
        "CPPFLAGS = $(MODEL_FORMULATION) -DUNDERSCORE $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
-ifort-serial:
+ifort:
        ( make all \
-        "FC = ifort" \
-        "CC = gcc" \
-        "SFC = ifort" \
-        "SCC = gcc" \
-        "FFLAGS = -real-size 64 -O3 -convert big_endian -FR" \
-        "CFLAGS = -O3 -m64" \
-        "LDFLAGS = -O3" \
+        "FC_PARALLEL = mpif90" \
+        "CC_PARALLEL = gcc" \
+        "FC_SERIAL = ifort" \
+        "CC_SERIAL = gcc" \
+        "FFLAGS_OPT = -real-size 64 -O3 -convert big_endian -FR" \
+        "CFLAGS_OPT = -O3 -m64" \
+        "LDFLAGS_OPT = -O3" \
+        "FFLAGS_DEBUG = -real-size 64 -g -convert big_endian -FR -CU -CB -check all" \
+        "CFLAGS_DEBUG = -g -m64" \
+        "LDFLAGS_DEBUG = -g" \
        "CORE = $(CORE)" \
+        "DEBUG = $(DEBUG)" \
+        "SERIAL = $(SERIAL)" \
+        "USE_PAPI = $(USE_PAPI)" \
        "CPPFLAGS = $(MODEL_FORMULATION) -DUNDERSCORE -m64 $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
-ifort-papi:
-        ( make all \
-        "FC = mpif90" \
-        "CC = gcc" \
-        "SFC = ifort" \
-        "SCC = gcc" \
-        "FFLAGS = -real-size 64 -O3 -convert big_endian -FR" \
-        "CFLAGS = -O3 -m64" \
-        "LDFLAGS = -O3" \
-        "CORE = $(CORE)" \
-        "CPPFLAGS = $(MODEL_FORMULATION) -D_PAPI -D_MPI -DUNDERSCORE -m64 $(FILE_OFFSET) $(ZOLTAN_DEFINE)" \
-        "PAPILIBS = -L$(PAPI)/lib -lpapi" )
-
-ifort-papi-serial:
-        ( make all \
-        "FC = ifort" \
-        "CC = gcc" \
-        "SFC = ifort" \
-        "SCC = gcc" \
-        "FFLAGS = -real-size 64 -O3 -convert big_endian -FR" \
-        "CFLAGS = -O3 -m64" \
-        "LDFLAGS = -O3" \
-        "CORE = $(CORE)" \
-        "CPPFLAGS = $(MODEL_FORMULATION) -D_PAPI -DUNDERSCORE -m64 $(FILE_OFFSET) $(ZOLTAN_DEFINE)" \
-        "PAPILIBS = -L$(PAPI)/lib -lpapi" )
-
-ifort:
-        ( make all \
-        "FC = mpif90" \
-        "CC = gcc" \
-        "SFC = ifort" \
-        "SCC = gcc" \
-        "FFLAGS = -real-size 64 -O3 -convert big_endian -FR" \
-        "CFLAGS = -O3 -m64" \
-        "LDFLAGS = -O3" \
-        "CORE = $(CORE)" \
-        "CPPFLAGS = $(MODEL_FORMULATION) -D_MPI -DUNDERSCORE -m64 $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
-
gfortran:
        ( make all \
-        "FC = mpif90" \
-        "CC = mpicc" \
-        "SFC = gfortran" \
-        "SCC = gcc" \
-        "FFLAGS = -O3 -m64 -ffree-line-length-none -fdefault-real-8 -fconvert=big-endian -ffree-form" \
-        "CFLAGS = -O3 -m64" \
-        "LDFLAGS = -O3 -m64" \
+        "FC_PARALLEL = mpif90" \
+        "CC_PARALLEL = mpicc" \
+        "FC_SERIAL = gfortran" \
+        "CC_SERIAL = gcc" \
+        "FFLAGS_OPT = -O3 -m64 -ffree-line-length-none -fdefault-real-8 -fconvert=big-endian -ffree-form" \
+        "CFLAGS_OPT = -O3 -m64" \
+        "LDFLAGS_OPT = -O3 -m64" \
+        "FFLAGS_DEBUG = -g -m64 -ffree-line-length-none -fdefault-real-8 -fconvert=big-endian -ffree-form -fbounds-check" \
+        "CFLAGS_DEBUG = -g -m64" \
+        "LDFLAGS_DEBUG = -g -m64" \
        "CORE = $(CORE)" \
-        "CPPFLAGS = $(MODEL_FORMULATION) -D_MPI -DUNDERSCORE -m64 $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
-
-gfortran-serial:
-        ( make all \
-        "FC = gfortran" \
-        "CC = gcc" \
-        "SFC = gfortran" \
-        "SCC = gcc" \
-        "FFLAGS = -O3 -m64 -ffree-line-length-none -fdefault-real-8 -fconvert=big-endian -ffree-form" \
-        "CFLAGS = -O3 -m64" \
-        "LDFLAGS = -O3 -m64" \
-        "CORE = $(CORE)" \
+        "DEBUG = $(DEBUG)" \
+        "SERIAL = $(SERIAL)" \
+        "USE_PAPI = $(USE_PAPI)" \
        "CPPFLAGS = $(MODEL_FORMULATION) -DUNDERSCORE -m64 $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
g95:
        ( make all \
-        "FC = mpif90" \
-        "CC = mpicc" \
-        "SFC = g95" \
-        "SCC = gcc" \
-        "FFLAGS = -O3 -ffree-line-length-huge -r8 -fendian=big -ffree-form" \
-        "CFLAGS = -O3 -m32" \
-        "LDFLAGS = -O3" \
+        "FC_PARALLEL = mpif90" \
+        "CC_PARALLEL = mpicc" \
+        "FC_SERIAL = g95" \
+        "CC_SERIAL = gcc" \
+        "FFLAGS_OPT = -O3 -ffree-line-length-huge -r8 -fendian=big" \
+        "CFLAGS_OPT = -O3" \
+        "LDFLAGS_OPT = -O3" \
        "CORE = $(CORE)" \
-        "CPPFLAGS = $(MODEL_FORMULATION) -D_MPI -DDOUBLEUNDERSCORE $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
+        "DEBUG = $(DEBUG)" \
+        "SERIAL = $(SERIAL)" \
+        "USE_PAPI = $(USE_PAPI)" \
+        "CPPFLAGS = $(MODEL_FORMULATION) -DUNDERSCORE $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
-g95-serial:
-        ( make all \
-        "FC = g95" \
-        "CC = gcc" \
-        "SFC = g95" \
-        "SCC = gcc" \
-        "FFLAGS = -O3 -ffree-line-length-huge -r8 -fendian=big -ffree-form" \
-        "CFLAGS = -O3 -m32" \
-        "LDFLAGS = -O3" \
-        "CORE = $(CORE)" \
-        "CPPFLAGS = $(MODEL_FORMULATION) -DDOUBLEUNDERSCORE $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
-
pathscale-nersc:
        ( make all \
-        "FC = ftn" \
-        "CC = cc" \
-        "SFC = ftn" \
-        "SCC = cc" \
-        "FFLAGS = -r8 -O3 -freeform -extend-source" \
-        "CFLAGS = -O3" \
-        "LDFLAGS = -O3" \
+        "FC_PARALLEL = ftn" \
+        "CC_PARALLEL = cc" \
+        "FC_SERIAL = ftn" \
+        "CC_SERIAL = cc" \
+        "FFLAGS_OPT = -r8 -O3 -freeform -extend-source" \
+        "CFLAGS_OPT = -O3" \
+        "LDFLAGS_OPT = -O3" \
        "CORE = $(CORE)" \
-        "CPPFLAGS = $(MODEL_FORMULATION) -D_MPI -DUNDERSCORE $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
+        "DEBUG = $(DEBUG)" \
+        "SERIAL = $(SERIAL)" \
+        "USE_PAPI = $(USE_PAPI)" \
+        "CPPFLAGS = $(MODEL_FORMULATION) -DUNDERSCORE $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
cray-nersc:
        ( make all \
-        "FC = ftn" \
-        "CC = cc" \
-        "SFC = ftn" \
-        "SCC = cc" \
-        "FFLAGS = -default64 -O3 -f free" \
-        "CFLAGS = -O3" \
-        "LDFLAGS = -O3" \
+        "FC_PARALLEL = ftn" \
+        "CC_PARALLEL = cc" \
+        "FC_SERIAL = ftn" \
+        "CC_SERIAL = cc" \
+        "FFLAGS_OPT = -default64 -O3 -f free" \
+        "CFLAGS_OPT = -O3" \
+        "LDFLAGS_OPT = -O3" \
        "CORE = $(CORE)" \
-        "CPPFLAGS = $(MODEL_FORMULATION) -D_MPI -DUNDERSCORE $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
+        "DEBUG = $(DEBUG)" \
+        "SERIAL = $(SERIAL)" \
+        "USE_PAPI = $(USE_PAPI)" \
+        "CPPFLAGS = $(MODEL_FORMULATION) -DUNDERSCORE $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
intel-nersc:
        ( make all \
-        "FC = ftn" \
-        "CC = cc" \
-        "SFC = ftn" \
-        "SCC = cc" \
-        "FFLAGS = -real-size 64 -O3 -FR" \
-        "CFLAGS = -O3" \
-        "LDFLAGS = -O3" \
+        "FC_PARALLEL = ftn" \
+        "CC_PARALLEL = cc" \
+        "FC_SERIAL = ftn" \
+        "CC_SERIAL = cc" \
+        "FFLAGS_OPT = -real-size 64 -O3 -FR" \
+        "CFLAGS_OPT = -O3" \
+        "LDFLAGS_OPT = -O3" \
        "CORE = $(CORE)" \
-        "CPPFLAGS = $(MODEL_FORMULATION) -D_MPI -DUNDERSCORE $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
+        "DEBUG = $(DEBUG)" \
+        "SERIAL = $(SERIAL)" \
+        "USE_PAPI = $(USE_PAPI)" \
+        "CPPFLAGS = $(MODEL_FORMULATION) -DUNDERSCORE $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
-CPPINCLUDES = -I../inc -I$(NETCDF)/include -I$(PAPI)/include
-FCINCLUDES = -I../inc -I$(NETCDF)/include -I$(PAPI)/include
-LIBS = -L$(NETCDF)/lib -lnetcdf $(PAPILIBS)
+CPPINCLUDES = -I../inc -I$(NETCDF)/include
+FCINCLUDES = -I../inc -I$(NETCDF)/include
+LIBS = -L$(NETCDF)/lib -lnetcdf
RM = rm -f
CPP = cpp -C -P -traditional
@@ -268,34 +219,109 @@
ifdef CORE
+ifeq "$(DEBUG)" "true"
+
+ifndef FFLAGS_DEBUG
+        FFLAGS=$(FFLAGS_OPT)
+        CFLAGS=$(CFLAGS_OPT)
+        LDFLAGS=$(LDFLAGS_OPT)
+        DEBUG_MESSAGE="Debug flags are not defined for this compile group. Defaulting to Optimized flags"
+else # FFLAGS_DEBUG IF
+        FFLAGS=$(FFLAGS_DEBUG)
+        CFLAGS=$(CFLAGS_DEBUG)
+        LDFLAGS=$(LDFLAGS_DEBUG)
+        DEBUG_MESSAGE="Debugging is on."
+endif # FFLAGS_DEBUG IF
+
+else # DEBUG IF
+        FFLAGS=$(FFLAGS_OPT)
+        CFLAGS=$(CFLAGS_OPT)
+        LDFLAGS=$(LDFLAGS_OPT)
+        DEBUG_MESSAGE="Debugging is off."
+endif # DEBUG IF
+
+ifeq "$(SERIAL)" "true"
+        FC=$(FC_SERIAL)
+        CC=$(CC_SERIAL)
+        SFC=$(FC_SERIAL)
+        SCC=$(CC_SERIAL)
+        SERIAL_MESSAGE="Serial version is on."
+else # SERIAL IF
+        FC=$(FC_PARALLEL)
+        CC=$(CC_PARALLEL)
+        SFC=$(FC_SERIAL)
+        SCC=$(CC_SERIAL)
+        CPPFLAGS += -D_MPI
+        SERIAL_MESSAGE="Parallel version is on."
+endif # SERIAL IF
+
+ifeq "$(USE_PAPI)" "true"
+        CPPINCLUDES += -I$(PAPI)/include -D_PAPI
+        FCINCLUDES += -I$(PAPI)/include
+        LIBS += -L$(PAPI)/lib -lpapi
+        PAPI_MESSAGE="PAPI libraries are on."
+else # USE_PAPI IF
+        PAPI_MESSAGE="PAPI libraries are off."
+endif # USE_PAPI IF
+
all: mpas_main
mpas_main:
        cd src; make FC="$(FC)" \
- CC="$(CC)" \
- CFLAGS="$(CFLAGS)" \
- FFLAGS="$(FFLAGS)" \
- LDFLAGS="$(LDFLAGS)" \
- RM="$(RM)" \
- CPP="$(CPP)" \
- CPPFLAGS="$(CPPFLAGS)" \
- LIBS="$(LIBS)" \
- CPPINCLUDES="$(CPPINCLUDES)" \
- FCINCLUDES="$(FCINCLUDES)" \
- CORE="$(CORE)"
+ CC="$(CC)" \
+ SFC="$(SFC)" \
+ SCC="$(SCC)" \
+ CFLAGS="$(CFLAGS)" \
+ FFLAGS="$(FFLAGS)" \
+ LDFLAGS="$(LDFLAGS)" \
+ RM="$(RM)" \
+ CPP="$(CPP)" \
+ CPPFLAGS="$(CPPFLAGS)" \
+ LIBS="$(LIBS)" \
+ CPPINCLUDES="$(CPPINCLUDES)" \
+ FCINCLUDES="$(FCINCLUDES)" \
+ CORE="$(CORE)"
        if [ ! -e $(CORE)_model.exe ]; then ln -s src/$(CORE)_model.exe .; fi
-
+        @echo ""
+        @echo $(DEBUG_MESSAGE)
+        @echo $(SERIAL_MESSAGE)
+        @echo $(PAPI_MESSAGE)
clean:
        cd src; make clean RM="$(RM)" CORE="$(CORE)"
        $(RM) $(CORE)_model.exe
+error: errmsg
-else
+else # CORE IF
-all: errmsg
+all: error
clean: errmsg
-errmsg:
+error: errmsg
        @echo "************ ERROR ************"
        @echo "No CORE specified. Quitting."
        @echo "************ ERROR ************"
+        @echo ""
-endif
+endif # CORE IF
+
+errmsg:
+        @echo ""
+        @echo "Usage: make target CORE=[core] [options]"
+        @echo ""
+        @echo "Example targets:"
+        @echo " ifort"
+        @echo " gfortran"
+        @echo " xlf"
+        @echo " pgi"
+        @echo ""
+        @echo "Available Cores:"
+        @cd src; ls -d core_* | grep ".*" | sed "s/core_/ /g"
+        @echo ""
+        @echo "Available Options:"
+        @echo " SERIAL=true - builds serial version. Default is parallel version."
+        @echo " DEBUG=true - builds debug version. Default is optimized version."
+        @echo " USE_PAPI=true - builds version using PAPI for timers and hardware counters. Default is off."
+        @echo ""
+        @echo "Ensure that NETCDF (and PAPI if USE_PAPI=true) are environment variables"
+        @echo "that point to the absolute paths for the libraries."
+        @echo ""
+
Modified: branches/atmos_physics/namelist.input.ocean
===================================================================
--- branches/atmos_physics/namelist.input.ocean        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/namelist.input.ocean        2012-02-27 22:33:23 UTC (rev 1538)
@@ -1,8 +1,8 @@
&sw_model
config_test_case = 0
- config_time_integration = 'split_explicit'
+ config_time_integration = 'RK4'
config_rk_filter_btr_mode = .false.
- config_dt = 10.0
+ config_dt = 100.0
config_start_time = '0000-01-01_00:00:00'
config_run_duration = '2000_00:00:00'
config_stats_interval = 1920
@@ -20,6 +20,7 @@
/
&grid
config_vert_grid_type = 'zlevel'
+ config_pressure_type = 'pressure'
config_rho0 = 1000
/
&split_explicit_ts
@@ -31,12 +32,7 @@
config_n_btr_cor_iter = 2
config_u_correction = .true.
config_filter_btr_mode = .false.
- config_btr_mom_decay = .false.
- config_btr_mom_decay_time = 3600.0
- config_btr_mom_eddy_visc2 = 0.0
config_btr_subcycle_loop_factor = 2
- config_SSH_from = 'avg_flux'
- config_new_btr_variables_from = 'btr_avg'
config_btr_gam1_uWt1 = 0.5
config_btr_gam2_SSHWt1 = 1.0
config_btr_gam3_uWt2 = 1.0
@@ -46,7 +42,7 @@
config_h_mom_eddy_visc2 = 1.0e5
config_h_mom_eddy_visc4 = 0.0
config_visc_vorticity_term = .true.
- config_h_tracer_eddy_diff2 = 1.0e4
+ config_h_tracer_eddy_diff2 = 1.0e5
config_h_tracer_eddy_diff4 = 0.0
/
&vmix
@@ -58,11 +54,11 @@
config_bottom_drag_coeff = 1.0e-3
/
&vmix_const
- config_vert_visc = 2.5e-5
- config_vert_diff = 2.5e-5
+ config_vert_visc = 1.0e-5
+ config_vert_diff = 1.0e-5
/
&vmix_rich
- config_bkrd_vert_visc = 1.0e-4
+ config_bkrd_vert_visc = 1.0e-5
config_bkrd_vert_diff = 1.0e-5
config_rich_mix = 0.005
/
Modified: branches/atmos_physics/src/core_ocean/Registry
===================================================================
--- branches/atmos_physics/src/core_ocean/Registry        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/Registry        2012-02-27 22:33:23 UTC (rev 1538)
@@ -10,6 +10,7 @@
namelist character sw_model config_stop_time none
namelist character sw_model config_run_duration none
namelist integer sw_model config_stats_interval 100
+namelist logical sw_model config_initial_stats false
namelist character io config_input_name grid.nc
namelist character io config_output_name output.nc
namelist character io config_restart_name restart.nc
@@ -19,21 +20,17 @@
namelist logical restart config_do_restart false
namelist character restart config_restart_interval none
namelist character grid config_vert_grid_type isopycnal
+namelist character grid config_pressure_type pressure
namelist real grid config_rho0 1028
namelist integer split_explicit_ts config_n_ts_iter 2
-namelist integer split_explicit_ts config_n_bcl_iter_beg 4
-namelist integer split_explicit_ts config_n_bcl_iter_mid 4
-namelist integer split_explicit_ts config_n_bcl_iter_end 4
-namelist integer split_explicit_ts config_n_btr_subcycles 10
-namelist integer split_explicit_ts config_n_btr_cor_iter 1
+namelist integer split_explicit_ts config_n_bcl_iter_beg 2
+namelist integer split_explicit_ts config_n_bcl_iter_mid 2
+namelist integer split_explicit_ts config_n_bcl_iter_end 2
+namelist integer split_explicit_ts config_n_btr_subcycles 20
+namelist integer split_explicit_ts config_n_btr_cor_iter 2
namelist logical split_explicit_ts config_u_correction true
namelist logical split_explicit_ts config_filter_btr_mode false
-namelist logical split_explicit_ts config_btr_mom_decay false
-namelist real split_explicit_ts config_btr_mom_decay_time 3600.0
-namelist real split_explicit_ts config_btr_mom_eddy_visc2 0.0
namelist integer split_explicit_ts config_btr_subcycle_loop_factor 2
-namelist character split_explicit_ts config_SSH_from avg_flux
-namelist character split_explicit_ts config_new_btr_variables_from btr_avg
namelist real split_explicit_ts config_btr_gam1_uWt1 0.5
namelist real split_explicit_ts config_btr_gam2_SSHWt1 1.0
namelist real split_explicit_ts config_btr_gam3_uWt2 1.0
@@ -171,24 +168,24 @@
var persistent integer maxLevelEdgeBot ( nEdges ) 0 - maxLevelEdgeBot mesh - -
var persistent integer maxLevelVertexTop ( nVertices ) 0 - maxLevelVertexTop mesh - -
var persistent integer maxLevelVertexBot ( nVertices ) 0 - maxLevelVertexBot mesh - -
+var persistent real referenceBottomDepth ( nVertLevels ) 0 iro referenceBottomDepth mesh - -
+var persistent real referenceBottomDepthTopOfCell ( nVertLevelsP1 ) 0 - referenceBottomDepthTopOfCell mesh - -
var persistent real hZLevel ( nVertLevels ) 0 iro hZLevel mesh - -
-var persistent real zMidZLevel ( nVertLevels ) 0 - zMidZLevel mesh - -
-var persistent real zTopZLevel ( nVertLevelsP1 ) 0 - zTopZLevel mesh - -
-var persistent real hMeanTopZLevel ( nVertLevels ) 0 - hMeanTopZLevel mesh - -
-var persistent real hRatioZLevelK ( nVertLevels ) 0 - hRatioZLevelK mesh - -
-var persistent real hRatioZLevelKm1 ( nVertLevels ) 0 - hRatioZLevelKm1 mesh - -
% Boundary conditions: read from input, saved in restart and written to output
var persistent integer boundaryEdge ( nVertLevels nEdges ) 0 iro boundaryEdge mesh - -
var persistent integer boundaryVertex ( nVertLevels nVertices ) 0 iro boundaryVertex mesh - -
var persistent integer boundaryCell ( nVertLevels nCells ) 0 iro boundaryCell mesh - -
+var persistent integer edgeMask ( nVertLevels nEdges ) 0 o edgeMask mesh - -
+var persistent integer vertexMask ( nVertLevels nVertices ) 0 o vertexMask mesh - -
+var persistent integer cellMask ( nVertLevels nCells ) 0 o cellMask mesh - -
var persistent real u_src ( nVertLevels nEdges ) 0 ir u_src mesh - -
var persistent real temperatureRestore ( nCells ) 0 ir temperatureRestore mesh - -
var persistent real salinityRestore ( nCells ) 0 ir salinityRestore mesh - -
% Prognostic variables: read from input, saved in restart, and written to output
var persistent real u ( nVertLevels nEdges Time ) 2 ir u state - -
-var persistent real h ( nVertLevels nCells Time ) 2 ir h state - -
+var persistent real h ( nVertLevels nCells Time ) 2 iro h state - -
var persistent real rho ( nVertLevels nCells Time ) 2 iro rho state - -
var persistent real temperature ( nVertLevels nCells Time ) 2 iro temperature state tracers dynamics
var persistent real salinity ( nVertLevels nCells Time ) 2 iro salinity state tracers dynamics
@@ -210,12 +207,9 @@
var persistent real FBtr ( nEdges Time ) 2 - FBtr state - -
var persistent real GBtrForcing ( nEdges Time ) 2 - GBtrForcing state - -
var persistent real uBcl ( nVertLevels nEdges Time ) 2 - uBcl state - -
-var persistent real circulationBtr ( nVertices Time ) 2 - circulationBtr state - -
-var persistent real divergenceBtr ( nCells Time ) 2 - divergenceBtr state - -
-var persistent real vorticityBtr ( nVertices Time ) 2 - vorticityBtr state - -
-var persistent real u_diffusionBtr ( nEdges Time ) 2 - u_diffusionBtr state - -
% Diagnostic fields: only written to output
+var persistent real zMid ( nVertLevels nCells Time ) 2 io zMid state - -
var persistent real v ( nVertLevels nEdges Time ) 2 - v state - -
var persistent real divergence ( nVertLevels nCells Time ) 2 o divergence state - -
var persistent real vorticity ( nVertLevels nVertices Time ) 2 o vorticity state - -
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_advection.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_advection.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_advection.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -9,7 +9,7 @@
contains
- subroutine ocn_initialize_advection_rk( grid )
+ subroutine ocn_initialize_advection_rk( grid )!{{{
!
! compute the cell coefficients for the polynomial fit.
@@ -382,7 +382,7 @@
! end do
! stop
- end subroutine ocn_initialize_advection_rk
+ end subroutine ocn_initialize_advection_rk!}}}
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -391,7 +391,7 @@
! Computes the angle between arcs AB and AC, given points A, B, and C
! Equation numbers w.r.t. http://mathworld.wolfram.com/SphericalTrigonometry.html
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- real (kind=RKIND) function sphere_angle(ax, ay, az, bx, by, bz, cx, cy, cz)
+ real (kind=RKIND) function sphere_angle(ax, ay, az, bx, by, bz, cx, cy, cz)!{{{
implicit none
@@ -437,7 +437,7 @@
sphere_angle = -2.0 * asin(max(min(sin_angle,1.0_RKIND),-1.0_RKIND))
end if
- end function sphere_angle
+ end function sphere_angle!}}}
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -446,7 +446,7 @@
! Computes the angle between vectors AB and AC, given points A, B, and C, and
! a vector (u,v,w) normal to the plane.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- real (kind=RKIND) function plane_angle(ax, ay, az, bx, by, bz, cx, cy, cz, u, v, w)
+ real (kind=RKIND) function plane_angle(ax, ay, az, bx, by, bz, cx, cy, cz, u, v, w)!{{{
implicit none
@@ -486,7 +486,7 @@
plane_angle = -acos(max(min(cos_angle,1.0_RKIND),-1.0_RKIND))
end if
- end function plane_angle
+ end function plane_angle!}}}
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -496,7 +496,7 @@
! B=(bx, by, bz). It is assumed that both A and B lie on the surface of the
! same sphere centered at the origin.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- real (kind=RKIND) function arc_length(ax, ay, az, bx, by, bz)
+ real (kind=RKIND) function arc_length(ax, ay, az, bx, by, bz)!{{{
implicit none
@@ -519,7 +519,7 @@
! arc_length = sqrt(r) * 2.0 * asin(c/(2.0*r))
arc_length = r * 2.0 * asin(c/(2.0*r))
- end function arc_length
+ end function arc_length!}}}
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -529,7 +529,7 @@
! A=(ax, ay, az) to B=(bx, by, bz). It is assumed that A and B lie on the
! surface of a sphere centered at the origin.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- subroutine ocn_arc_bisect(ax, ay, az, bx, by, bz, cx, cy, cz)
+ subroutine ocn_arc_bisect(ax, ay, az, bx, by, bz, cx, cy, cz)!{{{
implicit none
@@ -554,10 +554,10 @@
cz = r * cz / d
end if
- end subroutine ocn_arc_bisect
+ end subroutine ocn_arc_bisect!}}}
- subroutine ocn_poly_fit_2(a_in,b_out,weights_in,m,n,ne)
+ subroutine ocn_poly_fit_2(a_in,b_out,weights_in,m,n,ne)!{{{
implicit none
@@ -612,7 +612,7 @@
!
! write(6,*) ' '
- end subroutine ocn_poly_fit_2
+ end subroutine ocn_poly_fit_2!}}}
! Updated 10/24/2001.
@@ -631,119 +631,119 @@
! !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!
-SUBROUTine ocn_migs (A,N,X,INDX)
+subroutine ocn_migs (a,n,x,indx)!{{{
!
-! Subroutine to invert matrix A(N,N) with the inverse stored
-! in X(N,N) in the output. Copyright (c) Tao Pang 2001.
+! subroutine to invert matrix a(n,n) with the inverse stored
+! in x(n,n) in the output. copyright (c) tao pang 2001.
!
- IMPLICIT NONE
- INTEGER, INTENT (IN) :: N
- INTEGER :: I,J,K
- INTEGER, INTENT (OUT), DIMENSION (N) :: INDX
- REAL (kind=RKIND), INTENT (INOUT), DIMENSION (N,N):: A
- REAL (kind=RKIND), INTENT (OUT), DIMENSION (N,N):: X
- REAL (kind=RKIND), DIMENSION (N,N) :: B
+ implicit none
+ integer, intent (in) :: n
+ integer :: i,j,k
+ integer, intent (out), dimension (n) :: indx
+ real (kind=RKIND), intent (inout), dimension (n,n):: a
+ real (kind=RKIND), intent (out), dimension (n,n):: x
+ real (kind=RKIND), dimension (n,n) :: b
!
- DO I = 1, N
- DO J = 1, N
- B(I,J) = 0.0
- END DO
- END DO
- DO I = 1, N
- B(I,I) = 1.0
- END DO
+ do i = 1, n
+ do j = 1, n
+ b(i,j) = 0.0
+ end do
+ end do
+ do i = 1, n
+ b(i,i) = 1.0
+ end do
!
- call ocn_elgs (A,N,INDX)
+ call ocn_elgs (a,n,indx)
!
- DO I = 1, N-1
- DO J = I+1, N
- DO K = 1, N
- B(INDX(J),K) = B(INDX(J),K)-A(INDX(J),I)*B(INDX(I),K)
- END DO
- END DO
- END DO
+ do i = 1, n-1
+ do j = i+1, n
+ do k = 1, n
+ b(indx(j),k) = b(indx(j),k)-a(indx(j),i)*b(indx(i),k)
+ end do
+ end do
+ end do
!
- DO I = 1, N
- X(N,I) = B(INDX(N),I)/A(INDX(N),N)
- DO J = N-1, 1, -1
- X(J,I) = B(INDX(J),I)
- DO K = J+1, N
- X(J,I) = X(J,I)-A(INDX(J),K)*X(K,I)
- END DO
- X(J,I) = X(J,I)/A(INDX(J),J)
- END DO
- END DO
-END SUBROUTine ocn_migs
+ do i = 1, n
+ x(n,i) = b(indx(n),i)/a(indx(n),n)
+ do j = n-1, 1, -1
+ x(j,i) = b(indx(j),i)
+ do k = j+1, n
+ x(j,i) = x(j,i)-a(indx(j),k)*x(k,i)
+ end do
+ x(j,i) = x(j,i)/a(indx(j),j)
+ end do
+ end do
+end subroutine ocn_migs!}}}
-SUBROUTine ocn_elgs (A,N,INDX)
+subroutine ocn_elgs (a,n,indx)!{{{
!
-! Subroutine to perform the partial-pivoting Gaussian elimination.
-! A(N,N) is the original matrix in the input and transformed matrix
+! subroutine to perform the partial-pivoting gaussian elimination.
+! a(n,n) is the original matrix in the input and transformed matrix
! plus the pivoting element ratios below the diagonal in the output.
-! INDX(N) records the pivoting order. Copyright (c) Tao Pang 2001.
+! indx(n) records the pivoting order. copyright (c) tao pang 2001.
!
- IMPLICIT NONE
- INTEGER, INTENT (IN) :: N
- INTEGER :: I,J,K,ITMP
- INTEGER, INTENT (OUT), DIMENSION (N) :: INDX
- REAL (kind=RKIND) :: C1,PI,PI1,PJ
- REAL (kind=RKIND), INTENT (INOUT), DIMENSION (N,N) :: A
- REAL (kind=RKIND), DIMENSION (N) :: C
+ implicit none
+ integer, intent (in) :: n
+ integer :: i,j,k,itmp
+ integer, intent (out), dimension (n) :: indx
+ real (kind=RKIND) :: c1,pi,pi1,pj
+ real (kind=RKIND), intent (inout), dimension (n,n) :: a
+ real (kind=RKIND), dimension (n) :: c
!
-! Initialize the index
+! initialize the index
!
- DO I = 1, N
- INDX(I) = I
- END DO
+ do i = 1, n
+ indx(i) = i
+ end do
!
-! Find the rescaling factors, one from each row
+! find the rescaling factors, one from each row
!
- DO I = 1, N
- C1= 0.0
- DO J = 1, N
- C1 = MAX(C1,ABS(A(I,J)))
- END DO
- C(I) = C1
- END DO
+ do i = 1, n
+ c1= 0.0
+ do j = 1, n
+ c1 = max(c1,abs(a(i,j)))
+ end do
+ c(i) = c1
+ end do
!
-! Search the pivoting (largest) element from each column
+! search the pivoting (largest) element from each column
!
- DO J = 1, N-1
- PI1 = 0.0
- DO I = J, N
- PI = ABS(A(INDX(I),J))/C(INDX(I))
- IF (PI.GT.PI1) THEN
- PI1 = PI
- K = I
- ENDIF
- END DO
+ do j = 1, n-1
+ pi1 = 0.0
+ do i = j, n
+ pi = abs(a(indx(i),j))/c(indx(i))
+ if (pi.gt.pi1) then
+ pi1 = pi
+ k = i
+ endif
+ end do
!
-! Interchange the rows via INDX(N) to record pivoting order
+! interchange the rows via indx(n) to record pivoting order
!
- ITMP = INDX(J)
- INDX(J) = INDX(K)
- INDX(K) = ITMP
- DO I = J+1, N
- PJ = A(INDX(I),J)/A(INDX(J),J)
+ itmp = indx(j)
+ indx(j) = indx(k)
+ indx(k) = itmp
+ do i = j+1, n
+ pj = a(indx(i),j)/a(indx(j),j)
!
-! Record pivoting ratios below the diagonal
+! record pivoting ratios below the diagonal
!
- A(INDX(I),J) = PJ
+ a(indx(i),j) = pj
!
-! Modify other elements accordingly
+! modify other elements accordingly
!
- DO K = J+1, N
- A(INDX(I),K) = A(INDX(I),K)-PJ*A(INDX(J),K)
- END DO
- END DO
- END DO
+ do k = j+1, n
+ a(indx(i),k) = a(indx(i),k)-pj*a(indx(j),k)
+ end do
+ end do
+ end do
!
-END SUBROUTine ocn_elgs
+end subroutine ocn_elgs!}}}
!-------------------------------------------------------------
- subroutine ocn_initialize_deformation_weights( grid )
+ subroutine ocn_initialize_deformation_weights( grid )!{{{
!
! compute the cell coefficients for the deformation calculations
@@ -930,6 +930,6 @@
if (debug) write(0,*) ' exiting def weight calc '
- end subroutine ocn_initialize_deformation_weights
+ end subroutine ocn_initialize_deformation_weights!}}}
end module ocn_advection
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_equation_of_state.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_equation_of_state.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_equation_of_state.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -16,7 +16,6 @@
use mpas_grid_types
use mpas_configure
- use mpas_timer
use ocn_equation_of_state_linear
use ocn_equation_of_state_jm
@@ -99,8 +98,6 @@
if(.not.eosOn) return
- call mpas_timer_start("ocn_equation_of_state_rho")
-
tracers => s % tracers % array
indexT = s % index_temperature
indexS = s % index_salinity
@@ -122,8 +119,6 @@
endif
- call mpas_timer_stop("ocn_equation_of_state_rho")
-
end subroutine ocn_equation_of_state_rho!}}}
!***********************************************************************
@@ -159,7 +154,7 @@
linearEos = .false.
jmEos = .false.
- if(config_vert_grid_type.eq.'zlevel') then
+ if(config_vert_grid_type.ne.'isopycnal') then
eosON = .true.
if (config_eos_type.eq.'linear') then
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_equation_of_state_jm.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_equation_of_state_jm.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_equation_of_state_jm.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -16,7 +16,6 @@
use mpas_grid_types
use mpas_configure
- use mpas_timer
implicit none
private
@@ -96,30 +95,30 @@
real (kind=RKIND), dimension(:), pointer :: &
- zMidZLevel, pRefEOS
+ referenceBottomDepth, pRefEOS
real (kind=RKIND), dimension(:,:), intent(inout) :: &
rho
real (kind=RKIND), dimension(:,:,:), intent(in) :: tracers
integer, dimension(:), pointer :: maxLevelCell
- real (kind=RKIND) :: &
- TQ,SQ, &! adjusted T,S
- BULK_MOD, &! Bulk modulus
- RHO_S, &! density at the surface
- DRDT0, &! d(density)/d(temperature), for surface
- DRDS0, &! d(density)/d(salinity ), for surface
- DKDT, &! d(bulk modulus)/d(pot. temp.)
- DKDS, &! d(bulk modulus)/d(salinity )
- SQR,DENOMK, &! work arrays
- WORK1, WORK2, WORK3, WORK4, T2, depth
+ real (kind=RKIND) :: &
+ TQ,SQ, &! adjusted T,S
+ BULK_MOD, &! Bulk modulus
+ RHO_S, &! density at the surface
+ DRDT0, &! d(density)/d(temperature), for surface
+ DRDS0, &! d(density)/d(salinity ), for surface
+ DKDT, &! d(bulk modulus)/d(pot. temp.)
+ DKDS, &! d(bulk modulus)/d(salinity )
+ SQR,DENOMK, &! work arrays
+ WORK1, WORK2, WORK3, WORK4, T2, depth
- real (kind=RKIND) :: &
- tmin, tmax, &! valid temperature range for level k
- smin, smax ! valid salinity range for level k
+ real (kind=RKIND) :: &
+ tmin, tmax, &! valid temperature range for level k
+ smin, smax ! valid salinity range for level k
- real (kind=RKIND), dimension(:), allocatable :: &
- p, p2 ! temporary pressure scalars
+ real (kind=RKIND), dimension(:), allocatable :: &
+ p, p2 ! temporary pressure scalars
!-----------------------------------------------------------------------
!
@@ -127,79 +126,77 @@
!
!-----------------------------------------------------------------------
- !*** for density of fresh water (standard UNESCO)
+ !*** for density of fresh water (standard UNESCO)
- real (kind=RKIND), parameter :: &
- unt0 = 999.842594, &
- unt1 = 6.793952e-2, &
- unt2 = -9.095290e-3, &
- unt3 = 1.001685e-4, &
- unt4 = -1.120083e-6, &
- unt5 = 6.536332e-9
+ real (kind=RKIND), parameter :: &
+ unt0 = 999.842594, &
+ unt1 = 6.793952e-2, &
+ unt2 = -9.095290e-3, &
+ unt3 = 1.001685e-4, &
+ unt4 = -1.120083e-6, &
+ unt5 = 6.536332e-9
+
+ !*** for dependence of surface density on salinity (UNESCO)
- !*** for dependence of surface density on salinity (UNESCO)
-
- real (kind=RKIND), parameter :: &
- uns1t0 = 0.824493 , &
- uns1t1 = -4.0899e-3, &
- uns1t2 = 7.6438e-5, &
- uns1t3 = -8.2467e-7, &
- uns1t4 = 5.3875e-9, &
- unsqt0 = -5.72466e-3, &
- unsqt1 = 1.0227e-4, &
- unsqt2 = -1.6546e-6, &
- uns2t0 = 4.8314e-4
-
- !*** from Table A1 of Jackett and McDougall
-
- real (kind=RKIND), parameter :: &
- bup0s0t0 = 1.965933e+4, &
- bup0s0t1 = 1.444304e+2, &
- bup0s0t2 = -1.706103 , &
- bup0s0t3 = 9.648704e-3, &
- bup0s0t4 = -4.190253e-5
-
- real (kind=RKIND), parameter :: &
- bup0s1t0 = 5.284855e+1, &
- bup0s1t1 = -3.101089e-1, &
- bup0s1t2 = 6.283263e-3, &
- bup0s1t3 = -5.084188e-5
-
- real (kind=RKIND), parameter :: &
- bup0sqt0 = 3.886640e-1, &
- bup0sqt1 = 9.085835e-3, &
- bup0sqt2 = -4.619924e-4
-
- real (kind=RKIND), parameter :: &
- bup1s0t0 = 3.186519 , &
- bup1s0t1 = 2.212276e-2, &
- bup1s0t2 = -2.984642e-4, &
- bup1s0t3 = 1.956415e-6
-
- real (kind=RKIND), parameter :: &
- bup1s1t0 = 6.704388e-3, &
- bup1s1t1 = -1.847318e-4, &
- bup1s1t2 = 2.059331e-7, &
- bup1sqt0 = 1.480266e-4
-
- real (kind=RKIND), parameter :: &
- bup2s0t0 = 2.102898e-4, &
- bup2s0t1 = -1.202016e-5, &
- bup2s0t2 = 1.394680e-7, &
- bup2s1t0 = -2.040237e-6, &
- bup2s1t1 = 6.128773e-8, &
- bup2s1t2 = 6.207323e-10
-
- integer :: k_test, k_ref
-
+ real (kind=RKIND), parameter :: &
+ uns1t0 = 0.824493 , &
+ uns1t1 = -4.0899e-3, &
+ uns1t2 = 7.6438e-5, &
+ uns1t3 = -8.2467e-7, &
+ uns1t4 = 5.3875e-9, &
+ unsqt0 = -5.72466e-3, &
+ unsqt1 = 1.0227e-4, &
+ unsqt2 = -1.6546e-6, &
+ uns2t0 = 4.8314e-4
+
+ !*** from Table A1 of Jackett and McDougall
+
+ real (kind=RKIND), parameter :: &
+ bup0s0t0 = 1.965933e+4, &
+ bup0s0t1 = 1.444304e+2, &
+ bup0s0t2 = -1.706103 , &
+ bup0s0t3 = 9.648704e-3, &
+ bup0s0t4 = -4.190253e-5
+
+ real (kind=RKIND), parameter :: &
+ bup0s1t0 = 5.284855e+1, &
+ bup0s1t1 = -3.101089e-1, &
+ bup0s1t2 = 6.283263e-3, &
+ bup0s1t3 = -5.084188e-5
+
+ real (kind=RKIND), parameter :: &
+ bup0sqt0 = 3.886640e-1, &
+ bup0sqt1 = 9.085835e-3, &
+ bup0sqt2 = -4.619924e-4
+
+ real (kind=RKIND), parameter :: &
+ bup1s0t0 = 3.186519 , &
+ bup1s0t1 = 2.212276e-2, &
+ bup1s0t2 = -2.984642e-4, &
+ bup1s0t3 = 1.956415e-6
+
+ real (kind=RKIND), parameter :: &
+ bup1s1t0 = 6.704388e-3, &
+ bup1s1t1 = -1.847318e-4, &
+ bup1s1t2 = 2.059331e-7, &
+ bup1sqt0 = 1.480266e-4
+
+ real (kind=RKIND), parameter :: &
+ bup2s0t0 = 2.102898e-4, &
+ bup2s0t1 = -1.202016e-5, &
+ bup2s0t2 = 1.394680e-7, &
+ bup2s1t0 = -2.040237e-6, &
+ bup2s1t1 = 6.128773e-8, &
+ bup2s1t2 = 6.207323e-10
+
+ integer :: k_test, k_ref
+
err = 0
-
- call mpas_timer_start("equation_of_state_jm")
-
+
nCells = grid % nCells
maxLevelCell => grid % maxLevelCell % array
nVertLevels = grid % nVertLevels
- zMidZLevel => grid % zMidZLevel % array
+ referenceBottomDepth => grid % referenceBottomDepth % array
! Jackett and McDougall
@@ -208,109 +205,110 @@
smin = 0.0 ! valid salinity, in psu
smax = 42.0
- ! This could be put in a startup routine.
- ! Note I am using zMidZlevel, so pressure on top level does
- ! not include SSH contribution. I am not sure if that matters.
-
! This function computes pressure in bars from depth in meters
! using a mean density derived from depth-dependent global
! average temperatures and salinities from Levitus 1994, and
! integrating using hydrostatic balance.
allocate(pRefEOS(nVertLevels),p(nVertLevels),p2(nVertLevels))
- do k = 1,nVertLevels
- depth = -zMidZLevel(k)
- pRefEOS(k) = 0.059808*(exp(-0.025*depth) - 1.0) &
- + 0.100766*depth + 2.28405e-7*depth**2
- enddo
- ! If k_displaced=0, in-situ density is returned (no displacement)
- ! If k_displaced/=0, potential density is returned
-
- ! if displacement_type = 'relative', potential density is calculated
- ! referenced to level k + k_displaced
- ! if displacement_type = 'absolute', potential density is calculated
- ! referenced to level k_displaced for all k
- ! NOTE: k_displaced = 0 or > nVertLevels is incompatible with 'absolute'
- ! so abort if necessary
-
- if (displacement_type == 'absolute' .and. &
- (k_displaced <= 0 .or. k_displaced > nVertLevels) ) then
- write(0,*) 'Abort: In equation_of_state_jm', &
- ' k_displaced must be between 1 and nVertLevels for ', &
- 'displacement_type = absolute'
- call mpas_dmpar_abort(dminfo)
- endif
-
- if (k_displaced == 0) then
- do k=1,nVertLevels
- p(k) = pRefEOS(k)
- p2(k) = p(k)*p(k)
+ ! This could be put in the init routine.
+ ! Note I am using referenceBottomDepth, so pressure on top level does
+ ! not include SSH contribution. I am not sure if that matters, but
+ ! POP does it the same way.
+ depth = 0.5*referenceBottomDepth(1)
+ pRefEOS(1) = 0.059808*(exp(-0.025*depth) - 1.0) &
+ + 0.100766*depth + 2.28405e-7*depth**2
+ do k = 2,nVertLevels
+ depth = 0.5*(referenceBottomDepth(k)+referenceBottomDepth(k-1))
+ pRefEOS(k) = 0.059808*(exp(-0.025*depth) - 1.0) &
+ + 0.100766*depth + 2.28405e-7*depth**2
enddo
- else ! k_displaced /= 0
- do k=1,nVertLevels
- if (displacement_type == 'relative') then
- k_test = min(k + k_displaced, nVertLevels)
- k_ref = max(k_test, 1)
- else
- k_test = min(k_displaced, nVertLevels)
- k_ref = max(k_test, 1)
- endif
- p(k) = pRefEOS(k_ref)
- p2(k) = p(k)*p(k)
- enddo
- endif
- do iCell=1,nCells
- do k=1,maxLevelCell(iCell)
+ ! If k_displaced=0, in-situ density is returned (no displacement)
+ ! If k_displaced/=0, potential density is returned
- SQ = max(min(tracers(indexS,k,iCell),smax),smin)
- TQ = max(min(tracers(indexT,k,iCell),tmax),tmin)
+ ! if displacement_type = 'relative', potential density is calculated
+ ! referenced to level k + k_displaced
+ ! if displacement_type = 'absolute', potential density is calculated
+ ! referenced to level k_displaced for all k
+ ! NOTE: k_displaced = 0 or > nVertLevels is incompatible with 'absolute'
+ ! so abort if necessary
- SQR = sqrt(SQ)
- T2 = TQ*TQ
+ if (displacement_type == 'absolute' .and. &
+ (k_displaced <= 0 .or. k_displaced > nVertLevels) ) then
- !***
- !*** first calculate surface (p=0) values from UNESCO eqns.
- !***
+ write(0,*) 'Abort: In equation_of_state_jm', &
+ ' k_displaced must be between 1 and nVertLevels for ', &
+ 'displacement_type = absolute'
+ call mpas_dmpar_abort(dminfo)
+ endif
- WORK1 = uns1t0 + uns1t1*TQ + &
- (uns1t2 + uns1t3*TQ + uns1t4*T2)*T2
- WORK2 = SQR*(unsqt0 + unsqt1*TQ + unsqt2*T2)
+ if (k_displaced == 0) then
+ do k=1,nVertLevels
+ p(k) = pRefEOS(k)
+ p2(k) = p(k)*p(k)
+ enddo
+ else ! k_displaced /= 0
+ do k=1,nVertLevels
+ if (displacement_type == 'relative') then
+ k_test = min(k + k_displaced, nVertLevels)
+ k_ref = max(k_test, 1)
+ else
+ k_test = min(k_displaced, nVertLevels)
+ k_ref = max(k_test, 1)
+ endif
+ p(k) = pRefEOS(k_ref)
+ p2(k) = p(k)*p(k)
+ enddo
+ endif
- RHO_S = unt1*TQ + (unt2 + unt3*TQ + (unt4 + unt5*TQ)*T2)*T2 &
- + (uns2t0*SQ + WORK1 + WORK2)*SQ
+ do iCell=1,nCells
+ do k=1,maxLevelCell(iCell)
+ SQ = max(min(tracers(indexS,k,iCell),smax),smin)
+ TQ = max(min(tracers(indexT,k,iCell),tmax),tmin)
+
+ SQR = sqrt(SQ)
+ T2 = TQ*TQ
- !***
- !*** now calculate bulk modulus at pressure p from
- !*** Jackett and McDougall formula
- !***
+ !***
+ !*** first calculate surface (p=0) values from UNESCO eqns.
+ !***
- WORK3 = bup0s1t0 + bup0s1t1*TQ + &
- (bup0s1t2 + bup0s1t3*TQ)*T2 + &
- p(k) *(bup1s1t0 + bup1s1t1*TQ + bup1s1t2*T2) + &
- p2(k)*(bup2s1t0 + bup2s1t1*TQ + bup2s1t2*T2)
- WORK4 = SQR*(bup0sqt0 + bup0sqt1*TQ + bup0sqt2*T2 + &
- bup1sqt0*p(k))
+ WORK1 = uns1t0 + uns1t1*TQ + &
+ (uns1t2 + uns1t3*TQ + uns1t4*T2)*T2
+ WORK2 = SQR*(unsqt0 + unsqt1*TQ + unsqt2*T2)
- BULK_MOD = bup0s0t0 + bup0s0t1*TQ + &
- (bup0s0t2 + bup0s0t3*TQ + bup0s0t4*T2)*T2 + &
- p(k) *(bup1s0t0 + bup1s0t1*TQ + &
- (bup1s0t2 + bup1s0t3*TQ)*T2) + &
- p2(k)*(bup2s0t0 + bup2s0t1*TQ + bup2s0t2*T2) + &
- SQ*(WORK3 + WORK4)
+ RHO_S = unt1*TQ + (unt2 + unt3*TQ + (unt4 + unt5*TQ)*T2)*T2 &
+ + (uns2t0*SQ + WORK1 + WORK2)*SQ
- DENOMK = 1.0/(BULK_MOD - p(k))
+ !***
+ !*** now calculate bulk modulus at pressure p from
+ !*** Jackett and McDougall formula
+ !***
- rho(k,iCell) = (unt0 + RHO_S)*BULK_MOD*DENOMK
+ WORK3 = bup0s1t0 + bup0s1t1*TQ + &
+ (bup0s1t2 + bup0s1t3*TQ)*T2 + &
+ p(k) *(bup1s1t0 + bup1s1t1*TQ + bup1s1t2*T2) + &
+ p2(k)*(bup2s1t0 + bup2s1t1*TQ + bup2s1t2*T2)
+ WORK4 = SQR*(bup0sqt0 + bup0sqt1*TQ + bup0sqt2*T2 + &
+ bup1sqt0*p(k))
+
+ BULK_MOD = bup0s0t0 + bup0s0t1*TQ + &
+ (bup0s0t2 + bup0s0t3*TQ + bup0s0t4*T2)*T2 + &
+ p(k) *(bup1s0t0 + bup1s0t1*TQ + &
+ (bup1s0t2 + bup1s0t3*TQ)*T2) + &
+ p2(k)*(bup2s0t0 + bup2s0t1*TQ + bup2s0t2*T2) + &
+ SQ*(WORK3 + WORK4)
+
+ DENOMK = 1.0/(BULK_MOD - p(k))
+
+ rho(k,iCell) = (unt0 + RHO_S)*BULK_MOD*DENOMK
- end do
- end do
+ end do
+ end do
- deallocate(pRefEOS,p,p2)
-
- call mpas_timer_stop("equation_of_state_jm")
-
+ deallocate(pRefEOS,p,p2)
end subroutine ocn_equation_of_state_jm_rho!}}}
!***********************************************************************
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_equation_of_state_linear.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_equation_of_state_linear.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_equation_of_state_linear.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -16,7 +16,6 @@
use mpas_grid_types
use mpas_configure
- use mpas_timer
implicit none
private
@@ -87,8 +86,6 @@
integer :: nCells, iCell, k
type (dm_info) :: dminfo
- call mpas_timer_start("ocn_equation_of_state_linear")
-
maxLevelCell => grid % maxLevelCell % array
nCells = grid % nCells
@@ -103,8 +100,6 @@
end do
end do
- call mpas_timer_stop("ocn_equation_of_state_linear")
-
end subroutine ocn_equation_of_state_linear_rho!}}}
!***********************************************************************
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_global_diagnostics.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_global_diagnostics.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_global_diagnostics.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -4,14 +4,17 @@
use mpas_configure
use mpas_constants
use mpas_dmpar
+ use mpas_timer
implicit none
save
public
+ type (timer_node), pointer :: diagBlockTimer, diagMPITimer
+
contains
- subroutine ocn_compute_global_diagnostics(dminfo, state, grid, timeIndex, dt)
+ subroutine ocn_compute_global_diagnostics(domain, timeLevel, timeIndex, dt)!{{{
! Note: this routine assumes that there is only one block per processor. No looping
! is preformed over blocks.
@@ -26,197 +29,300 @@
implicit none
- type (dm_info), intent(in) :: dminfo
- type (state_type), intent(inout) :: state
- type (mesh_type), intent(in) :: grid
+ type (domain_type), intent(inout) :: domain !< Input/Output: domain information
integer, intent(in) :: timeIndex
real (kind=RKIND), intent(in) :: dt
+ type (block_type), pointer :: block
+ type (dm_info), pointer :: dminfo
+ type (state_type), pointer :: state
+ type (mesh_type), pointer :: grid
+
integer :: nVertLevels, nCellsSolve, nEdgesSolve, nVerticesSolve, nCellsGlobal, nEdgesGlobal, nVerticesGlobal, iTracer
+ integer :: elementIndex, variableIndex, nVariables, nSums, nMaxes, nMins
+ integer :: timeLevel,k,i, num_tracers
+ integer :: fileID
+ integer, parameter :: kMaxVariables = 1024 ! this must be a little more than double the number of variables to be reduced
- real (kind=RKIND) :: areaCellGlobal, areaEdgeGlobal, areaTriangleGlobal
+ real (kind=RKIND) :: volumeCellGlobal, volumeEdgeGlobal, CFLNumberGlobal, localCFL, localSum, areaCellGlobal, areaEdgeGlobal, areaTriangleGlobal
real (kind=RKIND), dimension(:), pointer :: areaCell, dcEdge, dvEdge, areaTriangle, areaEdge
real (kind=RKIND), dimension(:,:), pointer :: h, u, v, h_edge, circulation, vorticity, ke, pv_edge, pv_vertex, &
pv_cell, gradPVn, gradPVt, pressure, MontPot, wTop, rho, tracerTemp
real (kind=RKIND), dimension(:,:,:), pointer :: tracers
+
+ real (kind=RKIND), dimension(kMaxVariables) :: sums, mins, maxes, averages, verticalSumMins, verticalSumMaxes, reductions
+ real (kind=RKIND), dimension(kMaxVariables) :: sums_tmp, mins_tmp, maxes_tmp, averages_tmp, verticalSumMins_tmp, verticalSumMaxes_tmp
- real (kind=RKIND) :: volumeCellGlobal, volumeEdgeGlobal, CFLNumberGlobal
- real (kind=RKIND) :: localCFL, localSum
- integer :: elementIndex, variableIndex, nVariables, nSums, nMaxes, nMins
- integer :: timeLevel,k,i, num_tracers
+ block => domain % blocklist
+ dminfo => domain % dminfo
- integer, parameter :: kMaxVariables = 1024 ! this must be a little more than double the number of variables to be reduced
+ sums = 0.0
+ mins = 0.0
+ maxes = 0.0
+ averages = 0.0
+ verticalSumMins = 0.0
+ verticalSumMaxes = 0.0
+ reductions = 0.0
- real (kind=RKIND), dimension(kMaxVariables) :: sums, mins, maxes, averages, verticalSumMins, verticalSumMaxes, reductions
+ call mpas_timer_start("diagnostic block loop", .false., diagBlockTimer)
+ do while (associated(block))
+ state => block % state % time_levs(timeLevel) % state
+ grid => block % mesh
+
+ num_tracers = state % num_tracers
- integer :: fileID
+ nVertLevels = grid % nVertLevels
+ nCellsSolve = grid % nCellsSolve
+ nEdgesSolve = grid % nEdgesSolve
+ nVerticesSolve = grid % nVerticesSolve
- num_tracers = state % num_tracers
+ areaCell => grid % areaCell % array
+ dcEdge => grid % dcEdge % array
+ dvEdge => grid % dvEdge % array
+ areaTriangle => grid % areaTriangle % array
+ allocate(areaEdge(1:nEdgesSolve))
+ areaEdge = dcEdge(1:nEdgesSolve)*dvEdge(1:nEdgesSolve)
- nVertLevels = grid % nVertLevels
- nCellsSolve = grid % nCellsSolve
- nEdgesSolve = grid % nEdgesSolve
- nVerticesSolve = grid % nVerticesSolve
+ h => state % h % array
+ u => state % u % array
+ rho => state % rho % array
+ tracers => state % tracers % array
+ v => state % v % array
+ wTop => state % wTop % array
+ h_edge => state % h_edge % array
+ circulation => state % circulation % array
+ vorticity => state % vorticity % array
+ ke => state % ke % array
+ pv_edge => state % pv_edge % array
+ pv_vertex => state % pv_vertex % array
+ pv_cell => state % pv_cell % array
+ gradPVn => state % gradPVn % array
+ gradPVt => state % gradPVt % array
+ MontPot => state % MontPot % array
+ pressure => state % pressure % array
- areaCell => grid % areaCell % array
- dcEdge => grid % dcEdge % array
- dvEdge => grid % dvEdge % array
- areaTriangle => grid % areaTriangle % array
- allocate(areaEdge(1:nEdgesSolve))
- areaEdge = dcEdge(1:nEdgesSolve)*dvEdge(1:nEdgesSolve)
+ variableIndex = 0
+ ! h
+ variableIndex = variableIndex + 1
+ call ocn_compute_field_area_weighted_local_stats(dminfo, nVertLevels, nCellsSolve, areaCell(1:nCellsSolve), h(:,1:nCellsSolve), &
+ sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), verticalSumMaxes_tmp(variableIndex))
+ sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex)
+ mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex))
+ maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex))
+ verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex))
+ verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex))
- h => state % h % array
- u => state % u % array
- rho => state % rho % array
- tracers => state % tracers % array
- v => state % v % array
- wTop => state % wTop % array
- h_edge => state % h_edge % array
- circulation => state % circulation % array
- vorticity => state % vorticity % array
- ke => state % ke % array
- pv_edge => state % pv_edge % array
- pv_vertex => state % pv_vertex % array
- pv_cell => state % pv_cell % array
- gradPVn => state % gradPVn % array
- gradPVt => state % gradPVt % array
- MontPot => state % MontPot % array
- pressure => state % pressure % array
+ ! u
+ variableIndex = variableIndex + 1
+ call ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels, nEdgesSolve, areaEdge(1:nEdgesSolve), h_edge(:,1:nEdgesSolve), &
+ u(:,1:nEdgesSolve), sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), &
+ verticalSumMaxes_tmp(variableIndex))
+ sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex)
+ mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex))
+ maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex))
+ verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex))
+ verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex))
- variableIndex = 0
- ! h
- variableIndex = variableIndex + 1
- call ocn_compute_field_area_weighted_local_stats(dminfo, nVertLevels, nCellsSolve, areaCell(1:nCellsSolve), h(:,1:nCellsSolve), &
- sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), verticalSumMaxes(variableIndex))
+ ! v
+ variableIndex = variableIndex + 1
+ call ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels, nEdgesSolve, areaEdge(1:nEdgesSolve), h_edge(:,1:nEdgesSolve), &
+ v(:,1:nEdgesSolve), sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), &
+ verticalSumMaxes_tmp(variableIndex))
+ sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex)
+ mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex))
+ maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex))
+ verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex))
+ verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex))
- ! u
- variableIndex = variableIndex + 1
- call ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels, nEdgesSolve, areaEdge(1:nEdgesSolve), h_edge(:,1:nEdgesSolve), &
- u(:,1:nEdgesSolve), sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), &
- verticalSumMaxes(variableIndex))
+ ! h_edge
+ variableIndex = variableIndex + 1
+ call ocn_compute_field_area_weighted_local_stats(dminfo, nVertLevels, nEdgesSolve, areaEdge(1:nEdgesSolve), h_edge(:,1:nEdgesSolve), &
+ sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), verticalSumMaxes_tmp(variableIndex))
+ sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex)
+ mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex))
+ maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex))
+ verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex))
+ verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex))
- ! v
- variableIndex = variableIndex + 1
- call ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels, nEdgesSolve, areaEdge(1:nEdgesSolve), h_edge(:,1:nEdgesSolve), &
- v(:,1:nEdgesSolve), sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), &
- verticalSumMaxes(variableIndex))
+ ! circulation
+ variableIndex = variableIndex + 1
+ call ocn_compute_field_local_stats(dminfo, nVertLevels, nVerticesSolve, circulation(:,1:nVerticesSolve), &
+ sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), verticalSumMaxes_tmp(variableIndex))
+ sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex)
+ mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex))
+ maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex))
+ verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex))
+ verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex))
- ! h_edge
- variableIndex = variableIndex + 1
- call ocn_compute_field_area_weighted_local_stats(dminfo, nVertLevels, nEdgesSolve, areaEdge(1:nEdgesSolve), h_edge(:,1:nEdgesSolve), &
- sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), verticalSumMaxes(variableIndex))
+ ! vorticity
+ variableIndex = variableIndex + 1
+ call ocn_compute_field_area_weighted_local_stats(dminfo, nVertLevels, nVerticesSolve, areaTriangle(1:nVerticesSolve), &
+ vorticity(:,1:nVerticesSolve), sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), &
+ verticalSumMins_tmp(variableIndex), verticalSumMaxes_tmp(variableIndex))
+ sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex)
+ mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex))
+ maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex))
+ verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex))
+ verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex))
- ! circulation
- variableIndex = variableIndex + 1
- call ocn_compute_field_local_stats(dminfo, nVertLevels, nVerticesSolve, circulation(:,1:nVerticesSolve), &
- sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), verticalSumMaxes(variableIndex))
+ ! ke
+ variableIndex = variableIndex + 1
+ call ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels, nCellsSolve, areaCell(1:nCellsSolve), h(:,1:nCellsSolve), &
+ ke(:,1:nCellsSolve), sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), &
+ verticalSumMaxes_tmp(variableIndex))
+ sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex)
+ mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex))
+ maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex))
+ verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex))
+ verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex))
- ! vorticity
- variableIndex = variableIndex + 1
- call ocn_compute_field_area_weighted_local_stats(dminfo, nVertLevels, nVerticesSolve, areaTriangle(1:nVerticesSolve), &
- vorticity(:,1:nVerticesSolve), sums(variableIndex), mins(variableIndex), maxes(variableIndex), &
- verticalSumMins(variableIndex), verticalSumMaxes(variableIndex))
+ ! pv_edge
+ variableIndex = variableIndex + 1
+ call ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels, nEdgesSolve, areaEdge(1:nEdgesSolve), h_edge(:,1:nEdgesSolve), &
+ pv_edge(:,1:nEdgesSolve), sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), &
+ verticalSumMaxes_tmp(variableIndex))
+ sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex)
+ mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex))
+ maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex))
+ verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex))
+ verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex))
- ! ke
- variableIndex = variableIndex + 1
- call ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels, nCellsSolve, areaCell(1:nCellsSolve), h(:,1:nCellsSolve), &
- ke(:,1:nCellsSolve), sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), &
- verticalSumMaxes(variableIndex))
+ ! pv_vertex
+ variableIndex = variableIndex + 1
+ call ocn_compute_field_area_weighted_local_stats(dminfo, nVertLevels, nVerticesSolve, areaTriangle(1:nVerticesSolve), &
+ pv_vertex(:,1:nVerticesSolve), sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), &
+ verticalSumMins_tmp(variableIndex), verticalSumMaxes_tmp(variableIndex))
+ sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex)
+ mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex))
+ maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex))
+ verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex))
+ verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex))
- ! pv_edge
- variableIndex = variableIndex + 1
- call ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels, nEdgesSolve, areaEdge(1:nEdgesSolve), h_edge(:,1:nEdgesSolve), &
- pv_edge(:,1:nEdgesSolve), sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), &
- verticalSumMaxes(variableIndex))
+ ! pv_cell
+ variableIndex = variableIndex + 1
+ call ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels, nCellsSolve, areaCell(1:nCellsSolve), h(:,1:nCellsSolve), &
+ pv_cell(:,1:nCellsSolve), sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), &
+ verticalSumMaxes_tmp(variableIndex))
+ sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex)
+ mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex))
+ maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex))
+ verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex))
+ verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex))
- ! pv_vertex
- variableIndex = variableIndex + 1
- call ocn_compute_field_area_weighted_local_stats(dminfo, nVertLevels, nVerticesSolve, areaTriangle(1:nVerticesSolve), &
- pv_vertex(:,1:nVerticesSolve), sums(variableIndex), mins(variableIndex), maxes(variableIndex), &
- verticalSumMins(variableIndex), verticalSumMaxes(variableIndex))
+ ! gradPVn
+ variableIndex = variableIndex + 1
+ call ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels, nEdgesSolve, areaEdge(1:nEdgesSolve), h_edge(:,1:nEdgesSolve), &
+ gradPVn(:,1:nEdgesSolve), sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), &
+ verticalSumMaxes_tmp(variableIndex))
+ sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex)
+ mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex))
+ maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex))
+ verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex))
+ verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex))
- ! pv_cell
- variableIndex = variableIndex + 1
- call ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels, nCellsSolve, areaCell(1:nCellsSolve), h(:,1:nCellsSolve), &
- pv_cell(:,1:nCellsSolve), sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), &
- verticalSumMaxes(variableIndex))
+ ! gradPVt
+ variableIndex = variableIndex + 1
+ call ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels, nEdgesSolve, areaEdge(1:nEdgesSolve), h_edge(:,1:nEdgesSolve), &
+ gradPVt(:,1:nEdgesSolve), sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), &
+ verticalSumMaxes_tmp(variableIndex))
+ sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex)
+ mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex))
+ maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex))
+ verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex))
+ verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex))
- ! gradPVn
- variableIndex = variableIndex + 1
- call ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels, nEdgesSolve, areaEdge(1:nEdgesSolve), h_edge(:,1:nEdgesSolve), &
- gradPVn(:,1:nEdgesSolve), sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), &
- verticalSumMaxes(variableIndex))
+ ! pressure
+ variableIndex = variableIndex + 1
+ call ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels, nCellsSolve, areaCell(1:nCellsSolve), h(:,1:nCellsSolve), &
+ pressure(:,1:nCellsSolve), sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), &
+ verticalSumMaxes_tmp(variableIndex))
+ sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex)
+ mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex))
+ maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex))
+ verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex))
+ verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex))
- ! gradPVt
- variableIndex = variableIndex + 1
- call ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels, nEdgesSolve, areaEdge(1:nEdgesSolve), h_edge(:,1:nEdgesSolve), &
- gradPVt(:,1:nEdgesSolve), sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), &
- verticalSumMaxes(variableIndex))
+ ! MontPot
+ variableIndex = variableIndex + 1
+ call ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels, nCellsSolve, areaCell(1:nCellsSolve), h(:,1:nCellsSolve), &
+ MontPot(:,1:nCellsSolve), sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), &
+ verticalSumMaxes_tmp(variableIndex))
+ sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex)
+ mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex))
+ maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex))
+ verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex))
+ verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex))
- ! pressure
- variableIndex = variableIndex + 1
- call ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels, nCellsSolve, areaCell(1:nCellsSolve), h(:,1:nCellsSolve), &
- pressure(:,1:nCellsSolve), sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), &
- verticalSumMaxes(variableIndex))
+ ! wTop vertical velocity
+ variableIndex = variableIndex + 1
+ call ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels+1, nCellsSolve, areaCell(1:nCellsSolve), h(:,1:nCellsSolve), &
+ wTop(:,1:nCellsSolve), sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), &
+ verticalSumMaxes_tmp(variableIndex))
+ sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex)
+ mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex))
+ maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex))
+ verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex))
+ verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex))
- ! MontPot
- variableIndex = variableIndex + 1
- call ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels, nCellsSolve, areaCell(1:nCellsSolve), h(:,1:nCellsSolve), &
- MontPot(:,1:nCellsSolve), sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), &
- verticalSumMaxes(variableIndex))
+ ! Tracers
+ allocate(tracerTemp(nVertLevels,nCellsSolve))
+ do iTracer=1,num_tracers
+ variableIndex = variableIndex + 1
+ tracerTemp = Tracers(iTracer,:,1:nCellsSolve)
+ call ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels, nCellsSolve, areaCell(1:nCellsSolve), h(:,1:nCellsSolve), &
+ tracerTemp, sums_tmp(variableIndex), mins_tmp(variableIndex), maxes_tmp(variableIndex), verticalSumMins_tmp(variableIndex), &
+ verticalSumMaxes_tmp(variableIndex))
+ sums(variableIndex) = sums(variableIndex) + sums_tmp(variableIndex)
+ mins(variableIndex) = min(mins(variableIndex), mins_tmp(variableIndex))
+ maxes(variableIndex) = max(maxes(variableIndex), maxes_tmp(variableIndex))
+ verticalSumMins(variableIndex) = min(verticalSumMins(variableIndex), verticalSumMins_tmp(variableIndex))
+ verticalSumMaxes(variableIndex) = max(verticalSumMaxes(variableIndex), verticalSumMaxes_tmp(variableIndex))
+ enddo
+ deallocate(tracerTemp)
- ! wTop vertical velocity
- variableIndex = variableIndex + 1
- call ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels+1, nCellsSolve, areaCell(1:nCellsSolve), h(:,1:nCellsSolve), &
- wTop(:,1:nCellsSolve), sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), &
- verticalSumMaxes(variableIndex))
+ nVariables = variableIndex
+ nSums = nVariables
+ nMins = nVariables
+ nMaxes = nVariables
- ! Tracers
- allocate(tracerTemp(nVertLevels,nCellsSolve))
- do iTracer=1,num_tracers
- variableIndex = variableIndex + 1
- tracerTemp = Tracers(iTracer,:,1:nCellsSolve)
- call ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels, nCellsSolve, areaCell(1:nCellsSolve), h(:,1:nCellsSolve), &
- tracerTemp, sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), &
- verticalSumMaxes(variableIndex))
- enddo
- deallocate(tracerTemp)
+ nSums = nSums + 1
+ sums(nSums) = sums(nSums) + sum(areaCell(1:nCellsSolve))
- nVariables = variableIndex
- nSums = nVariables
- nMins = nVariables
- nMaxes = nVariables
+ nSums = nSums + 1
+ sums(nSums) = sums(nSums) + sum(dcEdge(1:nEdgesSolve)*dvEdge(1:nEdgesSolve))
- nSums = nSums + 1
- sums(nSums) = sum(areaCell(1:nCellsSolve))
+ nSums = nSums + 1
+ sums(nSums) = sums(nSums) + sum(areaTriangle(1:nVerticesSolve))
- nSums = nSums + 1
- sums(nSums) = sum(dcEdge(1:nEdgesSolve)*dvEdge(1:nEdgesSolve))
+ nSums = nSums + 1
+ sums(nSums) = sums(nSums) + nCellsSolve
- nSums = nSums + 1
- sums(nSums) = sum(areaTriangle(1:nVerticesSolve))
+ nSums = nSums + 1
+ sums(nSums) = sums(nSums) + nEdgesSolve
- nSums = nSums + 1
- sums(nSums) = nCellsSolve
+ nSums = nSums + 1
+ sums(nSums) = sums(nSums) + nVerticesSolve
- nSums = nSums + 1
- sums(nSums) = nEdgesSolve
+ localCFL = 0.0
+ do elementIndex = 1,nEdgesSolve
+ localCFL = max(localCFL, maxval(dt*u(:,elementIndex)/dcEdge(elementIndex)))
+ end do
+ nMaxes = nMaxes + 1
+ maxes(nMaxes) = localCFL
- nSums = nSums + 1
- sums(nSums) = nVerticesSolve
+ do i = 1, nVariables
+ mins(nMins+i) = min(mins(nMins+i),verticalSumMins_tmp(i))
+ maxes(nMaxes+i) = max(maxes(nMaxes+i),verticalSumMaxes_tmp(i))
+ end do
- localCFL = 0.0
- do elementIndex = 1,nEdgesSolve
- localCFL = max(localCFL, maxval(dt*u(:,elementIndex)/dcEdge(elementIndex)))
+ nMins = nMins + nVariables
+ nMaxes = nMaxes + nVariables
+
+ block => block % next
end do
- nMaxes = nMaxes + 1
- maxes(nMaxes) = localCFL
+ call mpas_timer_stop("diagnostic block loop", diagBlockTimer)
+ call mpas_timer_start("diagnostics mpi", .false., diagMPITimer)
- mins(nMins+1:nMins+nVariables) = verticalSumMins(1:nVariables)
- nMins = nMins + nVariables
- maxes(nMaxes+1:nMaxes+nVariables) = verticalSumMaxes(1:nVariables)
- nMaxes = nMaxes + nVariables
-
! global reduction of the 5 arrays (packed into 3 to minimize global communication)
call mpas_dmpar_sum_real_array(dminfo, nSums, sums(1:nSums), reductions(1:nSums))
sums(1:nVariables) = reductions(1:nVariables)
@@ -306,6 +412,8 @@
averages(variableIndex) = sums(variableIndex)/volumeCellGlobal
enddo
+ call mpas_timer_stop("diagnostics mpi", diagMPITimer)
+
! write out the data to files
if (dminfo % my_proc_id == IO_NODE) then
fileID = getFreeUnit()
@@ -322,7 +430,7 @@
write (fileID,'(100es24.14)') averages(1:nVariables)
close (fileID)
open(fileID,file='stats_time.txt',ACCESS='append')
- write (fileID,'(i5,10x,a,100es24.14)') timeIndex, &
+ write (fileID,'(i10,10x,a,100es24.14)') timeIndex, &
state % xtime % scalar, dt, &
CFLNumberGlobal
close (fileID)
@@ -343,9 +451,9 @@
state % CFLNumberGlobal % scalar = CFLNumberGlobal
deallocate(areaEdge)
- end subroutine ocn_compute_global_diagnostics
+ end subroutine ocn_compute_global_diagnostics!}}}
- integer function getFreeUnit()
+ integer function getFreeUnit()!{{{
implicit none
integer :: index
@@ -361,9 +469,9 @@
end if
end if
end do
- end function getFreeUnit
+ end function getFreeUnit!}}}
- subroutine ocn_compute_field_local_stats(dminfo, nVertLevels, nElements, field, localSum, localMin, localMax, localVertSumMin, &
+ subroutine ocn_compute_field_local_stats(dminfo, nVertLevels, nElements, field, localSum, localMin, localMax, localVertSumMin, &!{{{
localVertSumMax)
implicit none
@@ -380,9 +488,9 @@
localVertSumMin = minval(sum(field,1))
localVertSumMax = maxval(sum(field,1))
- end subroutine ocn_compute_field_local_stats
+ end subroutine ocn_compute_field_local_stats!}}}
- subroutine ocn_compute_field_area_weighted_local_stats(dminfo, nVertLevels, nElements, areas, field, localSum, localMin, &
+ subroutine ocn_compute_field_area_weighted_local_stats(dminfo, nVertLevels, nElements, areas, field, localSum, localMin, &!{{{
localMax, localVertSumMin, localVertSumMax)
implicit none
@@ -406,9 +514,9 @@
localVertSumMin = minval(sum(field,1))
localVertSumMax = maxval(sum(field,1))
- end subroutine ocn_compute_field_area_weighted_local_stats
+ end subroutine ocn_compute_field_area_weighted_local_stats!}}}
- subroutine ocn_compute_field_thickness_weighted_local_stats(dminfo, nVertLevels, nElements, h, field, &
+ subroutine ocn_compute_field_thickness_weighted_local_stats(dminfo, nVertLevels, nElements, h, field, &!{{{
localSum, localMin, localMax, localVertSumMin, localVertSumMax)
implicit none
@@ -430,9 +538,9 @@
localVertSumMin = minval(sum(h*field,1))
localVertSumMax = maxval(sum(h*field,1))
- end subroutine ocn_compute_field_thickness_weighted_local_stats
+ end subroutine ocn_compute_field_thickness_weighted_local_stats!}}}
- subroutine ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels, nElements, areas, h, field, &
+ subroutine ocn_compute_field_volume_weighted_local_stats(dminfo, nVertLevels, nElements, areas, h, field, &!{{{
localSum, localMin, localMax, localVertSumMin, localVertSumMax)
implicit none
@@ -459,11 +567,10 @@
localVertSumMin = minval(sum(h*field,1))
localVertSumMax = maxval(sum(h*field,1))
- end subroutine ocn_compute_field_volume_weighted_local_stats
+ end subroutine ocn_compute_field_volume_weighted_local_stats!}}}
+ subroutine ocn_compute_global_sum(dminfo, nVertLevels, nElements, field, globalSum)!{{{
- subroutine ocn_compute_global_sum(dminfo, nVertLevels, nElements, field, globalSum)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -476,9 +583,9 @@
localSum = sum(field)
call mpas_dmpar_sum_real(dminfo, localSum, globalSum)
- end subroutine ocn_compute_global_sum
+ end subroutine ocn_compute_global_sum!}}}
- subroutine ocn_compute_area_weighted_global_sum(dminfo, nVertLevels, nElements, areas, field, globalSum)
+ subroutine ocn_compute_area_weighted_global_sum(dminfo, nVertLevels, nElements, areas, field, globalSum)!{{{
implicit none
@@ -498,9 +605,9 @@
call mpas_dmpar_sum_real(dminfo, localSum, globalSum)
- end subroutine ocn_compute_area_weighted_global_sum
+ end subroutine ocn_compute_area_weighted_global_sum!}}}
- subroutine ocn_compute_volume_weighted_global_sum(dminfo, nVertLevels, nElements, areas, h, field, globalSum)
+ subroutine ocn_compute_volume_weighted_global_sum(dminfo, nVertLevels, nElements, areas, h, field, globalSum)!{{{
implicit none
@@ -517,9 +624,9 @@
call ocn_compute_area_weighted_global_sum(dminfo, nVertLevels, nElements, areas, hTimesField, globalSum)
- end subroutine ocn_compute_volume_weighted_global_sum
+ end subroutine ocn_compute_volume_weighted_global_sum!}}}
- subroutine ocn_compute_global_min(dminfo, nVertLevels, nElements, field, globalMin)
+ subroutine ocn_compute_global_min(dminfo, nVertLevels, nElements, field, globalMin)!{{{
implicit none
@@ -533,9 +640,9 @@
localMin = minval(field)
call mpas_dmpar_min_real(dminfo, localMin, globalMin)
- end subroutine ocn_compute_global_min
+ end subroutine ocn_compute_global_min!}}}
- subroutine ocn_compute_global_max(dminfo, nVertLevels, nElements, field, globalMax)
+ subroutine ocn_compute_global_max(dminfo, nVertLevels, nElements, field, globalMax)!{{{
implicit none
@@ -549,9 +656,9 @@
localMax = maxval(field)
call mpas_dmpar_max_real(dminfo, localMax, globalMax)
- end subroutine ocn_compute_global_max
+ end subroutine ocn_compute_global_max!}}}
- subroutine ocn_compute_global_vert_sum_horiz_min(dminfo, nVertLevels, nElements, field, globalMin)
+ subroutine ocn_compute_global_vert_sum_horiz_min(dminfo, nVertLevels, nElements, field, globalMin)!{{{
implicit none
@@ -565,9 +672,9 @@
localMin = minval(sum(field,1))
call mpas_dmpar_min_real(dminfo, localMin, globalMin)
- end subroutine ocn_compute_global_vert_sum_horiz_min
+ end subroutine ocn_compute_global_vert_sum_horiz_min!}}}
- subroutine ocn_compute_global_vert_sum_horiz_max(dminfo, nVertLevels, nElements, field, globalMax)
+ subroutine ocn_compute_global_vert_sum_horiz_max(dminfo, nVertLevels, nElements, field, globalMax)!{{{
implicit none
@@ -581,9 +688,9 @@
localMax = maxval(sum(field,1))
call mpas_dmpar_max_real(dminfo, localMax, globalMax)
- end subroutine ocn_compute_global_vert_sum_horiz_max
+ end subroutine ocn_compute_global_vert_sum_horiz_max!}}}
- subroutine ocn_compute_global_vert_thickness_weighted_sum_horiz_min(dminfo, nVertLevels, nElements, h, field, globalMin)
+ subroutine ocn_compute_global_vert_thickness_weighted_sum_horiz_min(dminfo, nVertLevels, nElements, h, field, globalMin)!{{{
implicit none
@@ -597,9 +704,9 @@
localMin = minval(sum(h*field,1))
call mpas_dmpar_min_real(dminfo, localMin, globalMin)
- end subroutine ocn_compute_global_vert_thickness_weighted_sum_horiz_min
+ end subroutine ocn_compute_global_vert_thickness_weighted_sum_horiz_min!}}}
- subroutine ocn_compute_global_vert_thickness_weighted_sum_horiz_max(dminfo, nVertLevels, nElements, h, field, globalMax)
+ subroutine ocn_compute_global_vert_thickness_weighted_sum_horiz_max(dminfo, nVertLevels, nElements, h, field, globalMax)!{{{
implicit none
@@ -613,6 +720,6 @@
localMax = maxval(sum(h*field,1))
call mpas_dmpar_max_real(dminfo, localMax, globalMax)
- end subroutine ocn_compute_global_vert_thickness_weighted_sum_horiz_max
+ end subroutine ocn_compute_global_vert_thickness_weighted_sum_horiz_max!}}}
end module ocn_global_diagnostics
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_mpas_core.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_mpas_core.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_mpas_core.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -1,10 +1,12 @@
module mpas_core
+ use mpas_configure
use mpas_framework
use mpas_timekeeping
use mpas_dmpar
use mpas_timer
+ use ocn_global_diagnostics
use ocn_test_cases
use ocn_time_integration
use ocn_tendency
@@ -35,11 +37,13 @@
integer, parameter :: restartAlarmID = 2
integer, parameter :: statsAlarmID = 3
+ type (timer_node), pointer :: globalDiagTimer, timeIntTimer
+ type (timer_node), pointer :: initDiagSolveTimer
+
contains
subroutine mpas_core_init(domain, startTimeStamp)!{{{
- use mpas_configure
use mpas_grid_types
implicit none
@@ -81,6 +85,9 @@
call ocn_equation_of_state_init(err_tmp)
err = ior(err, err_tmp)
+ call ocn_tendency_init(err_tmp)
+ err = ior(err,err_tmp)
+
call mpas_timer_init(domain)
if(err.eq.1) then
@@ -89,58 +96,66 @@
if (.not. config_do_restart) call setup_sw_test_case(domain)
- call compute_maxLevel(domain)
+ call ocn_compute_max_level(domain)
- if (config_vert_grid_type.eq.'isopycnal') then
- print *, ' Using isopycnal coordinates'
- elseif (config_vert_grid_type.eq.'zlevel') then
- print *, ' Using z-level coordinates'
- call init_ZLevel(domain)
- else
- print *, ' Incorrect choice of config_vert_grid_type:',&
- config_vert_grid_type
+ call ocn_init_z_level(domain)
+
+ print *, ' Vertical grid type is: ',config_vert_grid_type
+
+ if (config_vert_grid_type.ne.'isopycnal'.and. &
+ config_vert_grid_type.ne.'zlevel'.and. &
+ config_vert_grid_type.ne.'zstar1'.and. &
+ config_vert_grid_type.ne.'zstar'.and. &
+ config_vert_grid_type.ne.'zstarWeights') then
+ print *, ' Incorrect choice of config_vert_grid_type.'
call mpas_dmpar_abort(dminfo)
endif
- if (trim(config_new_btr_variables_from) == 'btr_avg' &
- .and.trim(config_time_integration) == 'unsplit_explicit') then
- print *, ' unsplit_explicit option must use',&
- ' config_new_btr_variables_from==last_subcycle'
+ print *, ' Pressure type is: ',config_pressure_type
+ if (config_pressure_type.ne.'pressure'.and. &
+ config_pressure_type.ne.'MontgomeryPotential') then
+ print *, ' Incorrect choice of config_pressure_type.'
call mpas_dmpar_abort(dminfo)
endif
+ if (config_filter_btr_mode.and. &
+ config_vert_grid_type.ne.'zlevel')then
+ print *, 'filter_btr_mode has only been tested with'// &
+ ' config_vert_grid_type=zlevel.'
+ call mpas_dmpar_abort(dminfo)
+ endif
+
!
! Initialize core
!
dt = config_dt
- call simulation_clock_init(domain, dt, startTimeStamp)
+ call ocn_simulation_clock_init(domain, dt, startTimeStamp)
block => domain % blocklist
do while (associated(block))
call mpas_init_block(block, block % mesh, dt)
block % state % time_levs(1) % state % xtime % scalar = startTimeStamp
block => block % next
-
- !dwj 110919 This allows the restorings to grab the indices for
- ! temperature and salinity tracers from state.
end do
! mrp 100316 In order for this to work, we need to pass domain % dminfo as an
! input argument into mpas_init. Ask about that later. For now, there will be
! no initial statistics write.
- ! call mpas_timer_start("global diagnostics")
- ! call ocn_compute_global_diagnostics(domain % dminfo, block % state % time_levs(1) % state, mesh, 0, dt)
- ! call mpas_timer_stop("global diagnostics")
- ! call mpas_output_state_init(output_obj, domain, "OUTPUT")
- ! call write_output_frame(output_obj, domain)
+ if (config_initial_stats) then
+ call mpas_timer_start("global diagnostics", .false., globalDiagTimer)
+ call ocn_compute_global_diagnostics(domain, 1 , 0, dt)
+ call mpas_timer_stop("global diagnostics", globalDiagTimer)
+! call mpas_output_state_init(output_obj, domain, "OUTPUT")
+! call ocn_write_output_frame(output_obj, output_frame, domain)
+ endif
current_outfile_frames = 0
end subroutine mpas_core_init!}}}
- subroutine simulation_clock_init(domain, dt, startTimeStamp)!{{{
+ subroutine ocn_simulation_clock_init(domain, dt, startTimeStamp)!{{{
implicit none
@@ -196,7 +211,7 @@
call mpas_get_time(curr_time=startTime, dateTimeString=startTimeStamp, ierr=ierr)
- end subroutine simulation_clock_init!}}}
+ end subroutine ocn_simulation_clock_init!}}}
subroutine mpas_init_block(block, mesh, dt)!{{{
@@ -213,9 +228,13 @@
call ocn_time_average_init(block % state % time_levs(1) % state)
+ call mpas_timer_start("diagnostic solve", .false., initDiagSolveTimer)
call ocn_diagnostic_solve(dt, block % state % time_levs(1) % state, mesh)
+ call mpas_timer_stop("diagnostic solve", initDiagSolveTimer)
- call compute_mesh_scaling(mesh)
+ call ocn_wtop(block % state % time_levs(1) % state,block % state % time_levs(1) % state, mesh)
+
+ call ocn_compute_mesh_scaling(mesh)
call mpas_rbf_interp_initialize(mesh)
call mpas_init_reconstruct(mesh)
@@ -231,6 +250,8 @@
! The reconstructed velocity on land will have values not exactly
! -1e34 due to the interpolation of reconstruction.
+ block % mesh % areaCell % array(block % mesh % nCells+1) = -1.0e34
+
do iEdge=1,block % mesh % nEdges
! mrp 101115 note: in order to include flux boundary conditions, the following
! line will need to change. Right now, set boundary edges between land and
@@ -253,7 +274,7 @@
! :block % mesh % nVertLevels,iCell) = -1e34
! mrp 110516, added just to test for conservation of tracers
-! block % state % time_levs(1) % state % tracers % array(block % state % time_levs(1) % state % index_tracer1,:,iCell) = 1.0
+ block % state % time_levs(1) % state % tracers % array(block % state % time_levs(1) % state % index_tracer1,:,iCell) = 1.0
end do
@@ -303,7 +324,7 @@
call mpas_get_time(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr)
write(0,*) 'Initial time ', timeStamp
- call write_output_frame(output_obj, output_frame, domain)
+ call ocn_write_output_frame(output_obj, output_frame, domain)
block_ptr => domain % blocklist
do while(associated(block_ptr))
@@ -323,9 +344,9 @@
call mpas_get_time(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr)
write(0,*) 'Doing timestep ', timeStamp
- call mpas_timer_start("time integration")
+ call mpas_timer_start("time integration", .false., timeIntTimer)
call mpas_timestep(domain, itimestep, dt, timeStamp)
- call mpas_timer_stop("time integration")
+ call mpas_timer_stop("time integration", timeIntTimer)
! Move time level 2 fields back into time level 1 for next time step
call mpas_shift_time_levels_state(domain % blocklist % state)
@@ -343,7 +364,9 @@
call ocn_time_average_normalize(block_ptr % state % time_levs(1) % state)
block_ptr => block_ptr % next
end do
- call write_output_frame(output_obj, output_frame, domain)
+
+ call ocn_write_output_frame(output_obj, output_frame, domain)
+
block_ptr => domain % blocklist
do while (associated(block_ptr))
call ocn_time_average_init(block_ptr % state % time_levs(1) % state)
@@ -364,7 +387,7 @@
end subroutine mpas_core_run!}}}
- subroutine write_output_frame(output_obj, output_frame, domain)!{{{
+ subroutine ocn_write_output_frame(output_obj, output_frame, domain)!{{{
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! Compute diagnostic fields for a domain and write model state to output file
!
@@ -387,7 +410,7 @@
block_ptr => domain % blocklist
do while (associated(block_ptr))
- call compute_output_diagnostics(block_ptr % state % time_levs(1) % state, block_ptr % mesh)
+ call ocn_compute_output_diagnostics(block_ptr % state % time_levs(1) % state, block_ptr % mesh)
block_ptr => block_ptr % next
end do
@@ -403,9 +426,9 @@
end if
end if
- end subroutine write_output_frame!}}}
+ end subroutine ocn_write_output_frame!}}}
- subroutine compute_output_diagnostics(state, grid)!{{{
+ subroutine ocn_compute_output_diagnostics(state, grid)!{{{
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! Compute diagnostic fields for a domain
!
@@ -425,13 +448,11 @@
integer :: i, eoe
integer :: iEdge, k
- end subroutine compute_output_diagnostics!}}}
+ end subroutine ocn_compute_output_diagnostics!}}}
subroutine mpas_timestep(domain, itimestep, dt, timeStamp)!{{{
use mpas_grid_types
- use mpas_timer
- use ocn_global_diagnostics
implicit none
@@ -447,17 +468,9 @@
if (config_stats_interval > 0) then
if (mod(itimestep, config_stats_interval) == 0) then
- block_ptr => domain % blocklist
- if (associated(block_ptr % next)) then
- write(0,*) 'Error: computeGlobalDiagnostics assumes ',&
- 'that there is only one block per processor.'
- end if
-
- call mpas_timer_start("global diagnostics")
- call ocn_compute_global_diagnostics(domain % dminfo, &
- block_ptr % state % time_levs(2) % state, block_ptr % mesh, &
- itimestep, dt)
- call mpas_timer_stop("global diagnostics")
+ call mpas_timer_start("global diagnostics", .false., globalDiagTimer)
+ call ocn_compute_global_diagnostics(domain, 2, itimestep, dt);
+ call mpas_timer_stop("global diagnostics", globalDiagTimer)
end if
end if
@@ -480,76 +493,54 @@
end subroutine mpas_timestep!}}}
-subroutine init_ZLevel(domain)!{{{
-! Initialize maxLevel and bouncary grid variables.
+ subroutine ocn_init_z_level(domain)!{{{
+ ! Initialize maxLevel and boundary grid variables.
- use mpas_grid_types
- use mpas_configure
+ use mpas_grid_types
+ use mpas_configure
- implicit none
+ implicit none
- type (domain_type), intent(inout) :: domain
+ type (domain_type), intent(inout) :: domain
- integer :: i, iCell, iEdge, iVertex, k
- type (block_type), pointer :: block
+ integer :: i, iCell, iEdge, iVertex, k
+ type (block_type), pointer :: block
- integer :: iTracer, cell, cell1, cell2
- real (kind=RKIND) :: uhSum, hSum, sshEdge
- real (kind=RKIND), dimension(:), pointer :: &
- hZLevel, zMidZLevel, zTopZLevel, &
- hMeanTopZLevel, hRatioZLevelK, hRatioZLevelKm1
- real (kind=RKIND), dimension(:,:), pointer :: h
- integer :: nVertLevels
+ integer :: iTracer, cell, cell1, cell2
+ real (kind=RKIND) :: uhSum, hSum, hEdge1
+ real (kind=RKIND), dimension(:), pointer :: &
+ referenceBottomDepth, referenceBottomDepthTopOfCell
+ real (kind=RKIND), dimension(:,:), pointer :: h
+ integer :: nVertLevels
- ! Initialize z-level grid variables from h, read in from input file.
- block => domain % blocklist
- do while (associated(block))
+ ! Initialize z-level grid variables from h, read in from input file.
+ block => domain % blocklist
+ do while (associated(block))
- h => block % state % time_levs(1) % state % h % array
- hZLevel => block % mesh % hZLevel % array
- zMidZLevel => block % mesh % zMidZLevel % array
- zTopZLevel => block % mesh % zTopZLevel % array
- nVertLevels = block % mesh % nVertLevels
- hMeanTopZLevel => block % mesh % hMeanTopZLevel % array
- hRatioZLevelK => block % mesh % hRatioZLevelK % array
- hRatioZLevelKm1 => block % mesh % hRatioZLevelKm1 % array
+ h => block % state % time_levs(1) % state % h % array
+ referenceBottomDepth => block % mesh % referenceBottomDepth % array
+ referenceBottomDepthTopOfCell => block % mesh % referenceBottomDepthTopOfCell % array
+ nVertLevels = block % mesh % nVertLevels
- ! These should eventually be in an input file. For now
- ! I just read them in from h(:,1).
- ! Upon restart, the correct hZLevel should be in restart.nc
- if (.not. config_do_restart) hZLevel = h(:,1)
+ ! mrp 120208 right now hZLevel is in the grid.nc file.
+ ! We would like to transition to using referenceBottomDepth
+ ! as the defining variable instead, and will transition soon.
+ ! When the transition is done, hZLevel can be removed from
+ ! registry and the following four lines deleted.
+ referenceBottomDepth(1) = block % mesh % hZLevel % array(1)
+ do k = 2,nVertLevels
+ referenceBottomDepth(k) = referenceBottomDepth(k-1) + block % mesh % hZLevel % array(k)
+ end do
- ! hZLevel should be in the grid.nc and restart.nc file,
- ! and h for k=1 must be specified there as well.
-
- zTopZLevel(1) = 0.0
- do k = 1,nVertLevels
- zMidZLevel(k) = zTopZLevel(k)-0.5*hZLevel(k)
- zTopZLevel(k+1) = zTopZLevel(k)- hZLevel(k)
- end do
+ ! TopOfCell needed where zero depth for the very top may be referenced.
+ referenceBottomDepthTopOfCell(1) = 0.0
+ do k = 1,nVertLevels
+ referenceBottomDepthTopOfCell(k+1) = referenceBottomDepth(k)
+ end do
- hMeanTopZLevel(1) = 0.0
- hRatioZLevelK(1) = 0.0
- hRatioZLevelKm1(1) = 0.0
- do k = 2,nVertLevels
- hMeanTopZLevel(k) = 0.5*(hZLevel(k-1) + hZLevel(k))
- hRatioZLevelK(k) = 0.5*hZLevel(k)/hMeanTopZLevel(k)
- hRatioZLevelKm1(k) = 0.5*hZLevel(k-1)/hMeanTopZLevel(k)
- end do
-
- ! mrp 110601 For now, h is the variable saved in the restart file
- ! I am computing SSH here. In the future, could make smaller
- ! restart files for z-Level runs by saving SSH only.
- do iCell=1,block % mesh % nCells
-
- block % state % time_levs(1) % state % ssh % array(iCell) &
- = block % state % time_levs(1) % state % h % array(1,iCell) &
- - block % mesh % hZLevel % array(1)
- enddo
-
! Compute barotropic velocity at first timestep
! This is only done upon start-up.
- if (trim(config_time_integration) == 'unsplit_explicit') then
+ if (trim(config_time_integration) == 'unsplit_explicit') then
block % state % time_levs(1) % state % uBtr % array(:) = 0.0
block % state % time_levs(1) % state % uBcl % array(:,:) &
@@ -560,9 +551,7 @@
if (config_filter_btr_mode) then
do iCell=1,block % mesh % nCells
block % state % time_levs(1) % state % h % array(1,iCell) &
- = block % mesh % hZLevel % array(1)
-
- block % state % time_levs(1) % state % ssh % array(iCell) = 0.0
+ = block % mesh % referenceBottomDepth % array(1)
enddo
endif
@@ -570,21 +559,30 @@
cell1 = block % mesh % cellsOnEdge % array(1,iEdge)
cell2 = block % mesh % cellsOnEdge % array(2,iEdge)
- sshEdge = 0.5*( &
- block % state % time_levs(1) % state % ssh % array(cell1) &
- + block % state % time_levs(1) % state % ssh % array(cell2) )
-
! uBtr = sum(u)/sum(h) on each column
- uhSum = (sshEdge + block % mesh % hZLevel % array(1)) &
- * block % state % time_levs(1) % state % u % array(1,iEdge)
- hSum = sshEdge + block % mesh % hZLevel % array(1)
+ ! ocn_diagnostic_solve has not yet been called, so compute hEdge
+ ! just for this edge.
+ ! hSum is initialized outside the loop because on land boundaries
+ ! maxLevelEdgeTop=0, but I want to initialize hSum with a
+ ! nonzero value to avoid a NaN.
+ hEdge1 = 0.5*( &
+ block % state % time_levs(1) % state % h % array(1,cell1) &
+ + block % state % time_levs(1) % state % h % array(1,cell2) )
+ uhSum = hEdge1*block % state % time_levs(1) % state % u % array(1,iEdge)
+ hSum = hEdge1
+
do k=2,block % mesh % maxLevelEdgeTop % array(iEdge)
+ ! ocn_diagnostic_solve has not yet been called, so compute hEdge
+ ! just for this edge.
+ hEdge1 = 0.5*( &
+ block % state % time_levs(1) % state % h % array(k,cell1) &
+ + block % state % time_levs(1) % state % h % array(k,cell2) )
+
uhSum = uhSum &
- + block % mesh % hZLevel % array(k) &
- *block % state % time_levs(1) % state % u % array(k,iEdge)
- hSum = hSum &
- + block % mesh % hZLevel % array(k)
+ + hEdge1*block % state % time_levs(1) % state % u % array(k,iEdge)
+ hSum = hSum + hEdge1
+
enddo
block % state % time_levs(1) % state % uBtr % array(iEdge) = uhSum/hsum
@@ -612,39 +610,12 @@
endif
-!print *, '11 u ',minval(domain % blocklist % state % time_levs(1) % state % u % array(:,1:domain % blocklist % mesh % nEdgesSolve)), &
-! maxval(domain % blocklist % state % time_levs(1) % state % u % array(:,1:domain % blocklist % mesh % nEdgesSolve))
-!print *, '11 uBtr ',minval(domain % blocklist % state % time_levs(1) % state % uBtr % array(1:domain % blocklist % mesh % nEdgesSolve)), &
-! maxval(domain % blocklist % state % time_levs(1) % state % uBtr % array(1:domain % blocklist % mesh % nEdgesSolve))
-!print *, '11 uBcl ',minval(domain % blocklist % state % time_levs(1) % state % uBcl % array(:,1:domain % blocklist % mesh % nEdgesSolve)), &
-! maxval(domain % blocklist % state % time_levs(1) % state % uBcl % array(:,1:domain % blocklist % mesh % nEdgesSolve))
-
-
-! mrp temp testing - is uBcl vert sum zero?
-! do iEdge=1,block % mesh % nEdges
-! uhSum = (sshEdge + block % mesh % hZLevel % array(1)) * block % state % time_levs(1) % state % uBcl % array(1,iEdge)
-! hSum = sshEdge + block % mesh % hZLevel % array(1)
-
-! do k=2,block % mesh % maxLevelEdgeTop % array(iEdge)
-! uhSum = uhSum + block % mesh % hZLevel % array(k) * block % state % time_levs(1) % state % uBcl % array(k,iEdge)
-! hSum = hSum + block % mesh % hZLevel % array(k)
-! enddo
-! block % state % time_levs(1) % state % FBtr % array(iEdge) = uhSum/hSum
-
-! enddo ! iEdge
-
-!print *, 'uBcl vert sum IC',minval(block % state % time_levs(1) % state % FBtr % array(1:block % mesh % nEdgesSolve)), &
-! maxval(block % state % time_levs(1) % state % FBtr % array(1:block % mesh % nEdgesSolve))
-
-! mrp temp testing - is uBcl vert sum zero? end
-
block => block % next
+ end do
- end do
+ end subroutine ocn_init_z_level!}}}
-end subroutine init_ZLevel!}}}
-
-subroutine compute_maxLevel(domain)!{{{
+subroutine ocn_compute_max_level(domain)!{{{
! Initialize maxLevel and boundary grid variables.
use mpas_grid_types
@@ -669,7 +640,7 @@
maxLevelVertexTop, maxLevelVertexBot
integer, dimension(:,:), pointer :: &
cellsOnEdge, cellsOnVertex, boundaryEdge, boundaryCell, &
- boundaryVertex, verticesOnEdge
+ boundaryVertex, verticesOnEdge, edgeMask, cellMask, vertexMask
! Initialize z-level grid variables from h, read in from input file.
block => domain % blocklist
@@ -686,6 +657,9 @@
boundaryEdge => block % mesh % boundaryEdge % array
boundaryCell => block % mesh % boundaryCell % array
boundaryVertex => block % mesh % boundaryVertex % array
+ edgeMask => block % mesh % edgeMask % array
+ cellMask => block % mesh % cellMask % array
+ vertexMask => block % mesh % vertexMask % array
nCells = block % mesh % nCells
nEdges = block % mesh % nEdges
@@ -739,22 +713,31 @@
maxLevelVertexTop(nVertices+1) = 0
! set boundary edge
- boundaryEdge=1
+ boundaryEdge(:,1:nEdges+1)=1
+ edgeMask(:,1:nEdges+1)=0
do iEdge=1,nEdges
boundaryEdge(1:maxLevelEdgeTop(iEdge),iEdge)=0
+ edgeMask(1:maxLevelEdgeTop(iEdge),iEdge)=1
end do
!
! Find cells and vertices that have an edge on the boundary
!
- boundaryCell(:,:) = 0
+ boundaryCell(:,1:nCells+1) = 0
+ cellMask(:,1:nCells+1) = 1
+ boundaryVertex(:,1:nVertices+1) = 0
+ vertexMask(:,1:nVertices+1) = 1
do iEdge=1,nEdges
do k=1,nVertLevels
if (boundaryEdge(k,iEdge).eq.1) then
boundaryCell(k,cellsOnEdge(1,iEdge)) = 1
boundaryCell(k,cellsOnEdge(2,iEdge)) = 1
+ cellMask(k,cellsOnEdge(1,iEdge)) = 0
+ cellMask(k,cellsOnEdge(2,iEdge)) = 0
boundaryVertex(k,verticesOnEdge(1,iEdge)) = 1
boundaryVertex(k,verticesOnEdge(2,iEdge)) = 1
+ vertexMask(k,verticesOnEdge(1,iEdge)) = 0
+ vertexMask(k,verticesOnEdge(2,iEdge)) = 0
endif
end do
end do
@@ -765,7 +748,7 @@
! Note: We do not update halos on maxLevel* variables. I want the
! outside edge of a halo to be zero on each processor.
-end subroutine compute_maxLevel!}}}
+end subroutine ocn_compute_max_level!}}}
subroutine mpas_core_finalize(domain)!{{{
@@ -780,7 +763,7 @@
end subroutine mpas_core_finalize!}}}
- subroutine compute_mesh_scaling(mesh)!{{{
+ subroutine ocn_compute_mesh_scaling(mesh)!{{{
use mpas_grid_types
use mpas_configure
@@ -810,7 +793,7 @@
end do
end if
- end subroutine compute_mesh_scaling!}}}
+ end subroutine ocn_compute_mesh_scaling!}}}
end module mpas_core
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_restoring.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_restoring.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_restoring.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -111,6 +111,7 @@
integer :: iCell, nCellsSolve, k
real (kind=RKIND), dimension(:), pointer :: temperatureRestore, salinityRestore
+ real (kind=RKIND) :: invTemp, invSalinity
err = 0
@@ -121,17 +122,14 @@
temperatureRestore => grid % temperatureRestore % array
salinityRestore => grid % salinityRestore % array
+ invTemp = 1.0 / (temperatureTimeScale * 86400.0)
+ invSalinity = 1.0 / (salinityTimeScale * 86400.0)
+
k = 1 ! restoring only in top layer
do iCell=1,nCellsSolve
+ tend(indexT, k, iCell) = tend(indexT, k, iCell) - h(k,iCell)*(tracers(indexT, k, iCell) - temperatureRestore(iCell)) * invTemp
+ tend(indexS, k, iCell) = tend(indexS, k, iCell) - h(k,iCell)*(tracers(indexS, k, iCell) - salinityRestore(iCell)) * invSalinity
- tend(indexT, k, iCell) = tend(indexT, k, iCell) &
- - h(k,iCell)*(tracers(indexT, k, iCell) - temperatureRestore(iCell)) &
- / (temperatureTimeScale * 86400.0)
-
- tend(indexS, k, iCell) = tend(indexS, k, iCell) &
- - h(k,iCell)*(tracers(indexS, k, iCell) - salinityRestore(iCell)) &
- / (salinityTimeScale * 86400.0)
-
! write(6,10) iCell, tracers(indexT, k, iCell), &
! temperatureRestore(iCell), tracers(indexT, k, iCell), &
! (tracers(indexT, k, iCell) - temperatureRestore(iCell)) &
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_tendency.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tendency.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tendency.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -44,6 +44,11 @@
private
save
+ type (timer_node), pointer :: diagEOSTimer
+ type (timer_node), pointer :: thickHadvTimer, thickVadvTimer
+ type (timer_node), pointer :: velCorTimer, velVadvTimer, velPgradTimer, velHmixTimer, velForceTimer, velExpVmixTimer
+ type (timer_node), pointer :: tracerHadvTimer, tracerVadvTimer, tracerHmixTimer, tracerExpVmixTimer, tracerRestoringTimer
+
!--------------------------------------------------------------------
!
! Public parameters
@@ -61,7 +66,8 @@
ocn_tend_scalar, &
ocn_diagnostic_solve, &
ocn_wtop, &
- ocn_fuperp
+ ocn_fuperp, &
+ ocn_tendency_init
!--------------------------------------------------------------------
!
@@ -69,7 +75,11 @@
!
!--------------------------------------------------------------------
+ integer :: hadv2nd, hadv3rd, hadv4th
+ integer :: ke_cell_flag, ke_vertex_flag
+ real (kind=RKIND) :: coef_3rd_order, fCoef
+
!***********************************************************************
contains
@@ -104,83 +114,18 @@
type (diagnostics_type), intent(in) :: d
type (mesh_type), intent(in) :: grid
- integer :: iEdge, iCell, iVertex, k, cell1, cell2, &
- vertex1, vertex2, eoe, i, j, err
+ real (kind=RKIND), dimension(:,:), pointer :: h_edge, u, wTop, tend_h
-! mrp 110512 I just split compute_tend into compute_tend_u and ocn_tend_h.
-! Most of these variables can be removed, but at a later time.
- integer :: nCells, nEdges, nVertices, nVertLevels, nEdgesSolve
- real (kind=RKIND) :: flux, vorticity_abs, h_vertex, workpv, q, &
- upstream_bias, wTopEdge, rho0Inv, r
- real (kind=RKIND), dimension(:), pointer :: &
- h_s, fVertex, fEdge, dvEdge, dcEdge, areaCell, areaTriangle, &
- zMidZLevel, zTopZLevel
- real (kind=RKIND), dimension(:,:), pointer :: &
- weightsOnEdge, kiteAreasOnVertex, h_edge, h, u, v, pressure, &
- tend_h, circulation, vorticity, ke, ke_edge, pv_edge, &
- MontPot, wTop, divergence, vertViscTopOfEdge
- type (dm_info) :: dminfo
+ integer :: err
- integer, dimension(:), pointer :: nEdgesOnCell, nEdgesOnEdge, &
- maxLevelCell, maxLevelEdgeTop, maxLevelVertexBot
- integer, dimension(:,:), pointer :: &
- cellsOnEdge, cellsOnVertex, verticesOnEdge, edgesOnCell, &
- edgesOnEdge, edgesOnVertex
- real (kind=RKIND) :: u_diffusion
- real (kind=RKIND), dimension(:), allocatable:: fluxVertTop,w_dudzTopEdge
-
- real (kind=RKIND), allocatable, dimension(:,:) :: delsq_divergence
- real (kind=RKIND), allocatable, dimension(:,:) :: delsq_u
- real (kind=RKIND), allocatable, dimension(:,:) :: delsq_circulation, delsq_vorticity
-
call mpas_timer_start("ocn_tend_h")
- h => s % h % array
u => s % u % array
- v => s % v % array
wTop => s % wTop % array
h_edge => s % h_edge % array
- circulation => s % circulation % array
- vorticity => s % vorticity % array
- divergence => s % divergence % array
- ke => s % ke % array
- ke_edge => s % ke_edge % array
- pv_edge => s % pv_edge % array
- MontPot => s % MontPot % array
- pressure => s % pressure % array
- vertViscTopOfEdge => d % vertViscTopOfEdge % array
- weightsOnEdge => grid % weightsOnEdge % array
- kiteAreasOnVertex => grid % kiteAreasOnVertex % array
- cellsOnEdge => grid % cellsOnEdge % array
- cellsOnVertex => grid % cellsOnVertex % array
- verticesOnEdge => grid % verticesOnEdge % array
- nEdgesOnCell => grid % nEdgesOnCell % array
- edgesOnCell => grid % edgesOnCell % array
- nEdgesOnEdge => grid % nEdgesOnEdge % array
- edgesOnEdge => grid % edgesOnEdge % array
- edgesOnVertex => grid % edgesOnVertex % array
- dcEdge => grid % dcEdge % array
- dvEdge => grid % dvEdge % array
- areaCell => grid % areaCell % array
- areaTriangle => grid % areaTriangle % array
- h_s => grid % h_s % array
- fVertex => grid % fVertex % array
- fEdge => grid % fEdge % array
- zMidZLevel => grid % zMidZLevel % array
- zTopZLevel => grid % zTopZLevel % array
- maxLevelCell => grid % maxLevelCell % array
- maxLevelEdgeTop => grid % maxLevelEdgeTop % array
- maxLevelVertexBot => grid % maxLevelVertexBot % array
-
tend_h => tend % h % array
- nCells = grid % nCells
- nEdges = grid % nEdges
- nEdgesSolve = grid % nEdgesSolve
- nVertices = grid % nVertices
- nVertLevels = grid % nVertLevels
-
!
! height tendency: start accumulating tendency terms
!
@@ -192,23 +137,17 @@
! See Ringler et al. (2010) jcp paper, eqn 19, 21, and fig. 3.
! for explanation of divergence operator.
!
- ! for z-level, only compute height tendency for top layer.
-
- call mpas_timer_start("ocn_tend_h-horiz adv")
-
+ call mpas_timer_start("hadv", .false., thickHadvTimer)
call ocn_thick_hadv_tend(grid, u, h_edge, tend_h, err)
+ call mpas_timer_stop("hadv", thickHadvTimer)
- call mpas_timer_stop("ocn_tend_h-horiz adv")
-
!
! height tendency: vertical advection term -d/dz(hw)
!
- ! Vertical advection computed for top layer of a z grid only.
- call mpas_timer_start("ocn_tend_h-vert adv")
-
+ call mpas_timer_start("vadv", .false., thickVadvTimer)
call ocn_thick_vadv_tend(grid, wtop, tend_h, err)
+ call mpas_timer_stop("vadv", thickVadvTimer)
- call mpas_timer_stop("ocn_tend_h-vert adv")
call mpas_timer_stop("ocn_tend_h")
end subroutine ocn_tend_h!}}}
@@ -243,47 +182,22 @@
type (diagnostics_type), intent(in) :: d
type (mesh_type), intent(in) :: grid
-! mrp 110512 I just split compute_tend into ocn_tend_u and compute_tend_h.
-! Some of these variables can be removed, but at a later time.
- integer :: iEdge, iCell, iVertex, k, cell1, cell2, &
- vertex1, vertex2, eoe, i, j
-
- integer :: nCells, nEdges, nVertices, nVertLevels, nEdgesSolve, err
- real (kind=RKIND) :: flux, vorticity_abs, h_vertex, workpv, q, &
- upstream_bias, wTopEdge, rho0Inv, r, visc_vorticity_coef
- real (kind=RKIND), dimension(:), pointer :: &
- h_s, fVertex, fEdge, dvEdge, dcEdge, areaCell, areaTriangle, &
- zMidZLevel, zTopZLevel, meshScalingDel2, meshScalingDel4
real (kind=RKIND), dimension(:,:), pointer :: &
- weightsOnEdge, kiteAreasOnVertex, h_edge, h, u, v, pressure, &
+ h_edge, h, u, rho, zMid, pressure, &
tend_u, circulation, vorticity, ke, ke_edge, pv_edge, &
MontPot, wTop, divergence, vertViscTopOfEdge
- type (dm_info) :: dminfo
- integer, dimension(:), pointer :: nEdgesOnCell, nEdgesOnEdge, &
- maxLevelCell, maxLevelEdgeTop, maxLevelVertexBot
- integer, dimension(:,:), pointer :: &
- cellsOnEdge, cellsOnVertex, verticesOnEdge, edgesOnCell, &
- edgesOnEdge, edgesOnVertex
- real (kind=RKIND) :: u_diffusion
- real (kind=RKIND), dimension(:), allocatable:: fluxVertTop,w_dudzTopEdge
-
- real (kind=RKIND), allocatable, dimension(:,:) :: delsq_divergence
- real (kind=RKIND), allocatable, dimension(:,:) :: delsq_u
- real (kind=RKIND), allocatable, dimension(:,:) :: delsq_circulation, delsq_vorticity
-
-
real (kind=RKIND), dimension(:,:), pointer :: u_src
- real (kind=RKIND), parameter :: rho_ref = 1000.0
+ integer :: err
+
call mpas_timer_start("ocn_tend_u")
- h => s % h % array
u => s % u % array
- v => s % v % array
+ rho => s % rho % array
wTop => s % wTop % array
+ zMid => s % zMid % array
h_edge => s % h_edge % array
- circulation => s % circulation % array
vorticity => s % vorticity % array
divergence => s % divergence % array
ke => s % ke % array
@@ -293,43 +207,10 @@
pressure => s % pressure % array
vertViscTopOfEdge => d % vertViscTopOfEdge % array
- weightsOnEdge => grid % weightsOnEdge % array
- kiteAreasOnVertex => grid % kiteAreasOnVertex % array
- cellsOnEdge => grid % cellsOnEdge % array
- cellsOnVertex => grid % cellsOnVertex % array
- verticesOnEdge => grid % verticesOnEdge % array
- nEdgesOnCell => grid % nEdgesOnCell % array
- edgesOnCell => grid % edgesOnCell % array
- nEdgesOnEdge => grid % nEdgesOnEdge % array
- edgesOnEdge => grid % edgesOnEdge % array
- edgesOnVertex => grid % edgesOnVertex % array
- dcEdge => grid % dcEdge % array
- dvEdge => grid % dvEdge % array
- areaCell => grid % areaCell % array
- areaTriangle => grid % areaTriangle % array
- h_s => grid % h_s % array
-! mrp 110516 cleanup fvertex fedge not used in this subroutine
- fVertex => grid % fVertex % array
- fEdge => grid % fEdge % array
- zMidZLevel => grid % zMidZLevel % array
- zTopZLevel => grid % zTopZLevel % array
- maxLevelCell => grid % maxLevelCell % array
- maxLevelEdgeTop => grid % maxLevelEdgeTop % array
- maxLevelVertexBot => grid % maxLevelVertexBot % array
-
tend_u => tend % u % array
- nCells = grid % nCells
- nEdges = grid % nEdges
- nEdgesSolve = grid % nEdgesSolve
- nVertices = grid % nVertices
- nVertLevels = grid % nVertLevels
-
u_src => grid % u_src % array
- meshScalingDel2 => grid % meshScalingDel2 % array
- meshScalingDel4 => grid % meshScalingDel4 % array
-
!
! velocity tendency: start accumulating tendency terms
!
@@ -340,66 +221,54 @@
! velocity tendency: nonlinear Coriolis term and grad of kinetic energy
!
- call mpas_timer_start("ocn_tend_u-coriolis")
-
+ call mpas_timer_start("coriolis", .false., velCorTimer)
call ocn_vel_coriolis_tend(grid, pv_edge, h_edge, u, ke, tend_u, err)
+ call mpas_timer_stop("coriolis", velCorTimer)
- call mpas_timer_stop("ocn_tend_u-coriolis")
-
!
! velocity tendency: vertical advection term -w du/dz
!
- call mpas_timer_start("ocn_tend_u-vert adv")
+ call mpas_timer_start("vadv", .false., velVadvTimer)
+ call ocn_vel_vadv_tend(grid, u, h_edge, wtop, tend_u, err)
+ call mpas_timer_stop("vadv", velVadvTimer)
- call ocn_vel_vadv_tend(grid, u, wtop, tend_u, err)
-
- call mpas_timer_stop("ocn_tend_u-vert adv")
-
!
! velocity tendency: pressure gradient
!
- call mpas_timer_start("ocn_tend_u-pressure grad")
-
- if (config_vert_grid_type.eq.'isopycnal') then
- call ocn_vel_pressure_grad_tend(grid, MontPot, tend_u, err)
- elseif (config_vert_grid_type.eq.'zlevel') then
- call ocn_vel_pressure_grad_tend(grid, pressure, tend_u, err)
+ call mpas_timer_start("pressure grad", .false., velPgradTimer)
+ if (config_pressure_type.eq.'MontgomeryPotential') then
+ call ocn_vel_pressure_grad_tend(grid, MontPot, zMid, rho, tend_u, err)
+ else
+ call ocn_vel_pressure_grad_tend(grid, pressure, zMid, rho, tend_u, err)
end if
+ call mpas_timer_stop("pressure grad", velPgradTimer)
- call mpas_timer_stop("ocn_tend_u-pressure grad")
-
!
! velocity tendency: del2 dissipation, </font>
<font color="black">u_2 </font>
<font color="black">abla^2 u
! computed as </font>
<font color="black">u( </font>
<font color="black">abla divergence + k \times </font>
<font color="gray">abla vorticity )
! strictly only valid for config_h_mom_eddy_visc2 == constant
!
- call mpas_timer_start("ocn_tend_u-horiz mix")
-
+ call mpas_timer_start("hmix", .false., velHmixTimer)
call ocn_vel_hmix_tend(grid, divergence, vorticity, tend_u, err)
+ call mpas_timer_stop("hmix", velHmixTimer)
- call mpas_timer_stop("ocn_tend_u-horiz mix")
-
!
! velocity tendency: forcing and bottom drag
!
! mrp 101115 note: in order to include flux boundary conditions, we will need to
! know the bottom edge with nonzero velocity and place the drag there.
- call mpas_timer_start("ocn_tend_u-forcings")
-
+ call mpas_timer_start("forcings", .false., velForceTimer)
call ocn_vel_forcing_tend(grid, u, u_src, ke_edge, h_edge, tend_u, err)
+ call mpas_timer_stop("forcings", velForceTimer)
- call mpas_timer_stop("ocn_tend_u-forcings")
-
!
! velocity tendency: vertical mixing d/dz( nu_v du/dz))
!
if (.not.config_implicit_vertical_mix) then
- call mpas_timer_start("ocn_tend_u-explicit vert mix")
-
+ call mpas_timer_start("explicit vmix", .false., velExpVmixTimer)
call ocn_vel_vmix_tend_explicit(grid, u, h_edge, vertvisctopofedge, tend_u, err)
-
- call mpas_timer_stop("ocn_tend_u-explicit vert mix")
+ call mpas_timer_stop("explicit vmix", velExpVmixTimer)
endif
call mpas_timer_stop("ocn_tend_u")
@@ -436,74 +305,24 @@
type (diagnostics_type), intent(in) :: d
type (mesh_type), intent(in) :: grid
- integer :: i, k, iCell, iEdge, iTracer, cell1, cell2, upwindCell,&
- nEdges, nCells, nCellsSolve, nVertLevels, num_tracers, err
- real (kind=RKIND) :: invAreaCell1, invAreaCell2, tracer_turb_flux
- real (kind=RKIND) :: flux, tracer_edge, r
- real (kind=RKIND), dimension(:), pointer :: &
- h_s, fVertex, fEdge, dvEdge, dcEdge, areaCell, areaTriangle
real (kind=RKIND), dimension(:,:), pointer :: &
u,h,wTop, h_edge, vertDiffTopOfCell
real (kind=RKIND), dimension(:,:,:), pointer :: &
tracers, tend_tr
- integer, dimension(:,:), pointer :: boundaryEdge
- type (dm_info) :: dminfo
- integer, dimension(:), pointer :: nEdgesOnCell, nEdgesOnEdge, &
- maxLevelCell, maxLevelEdgeTop, maxLevelVertexBot
- integer, dimension(:,:), pointer :: cellsOnEdge, boundaryCell
- real (kind=RKIND), dimension(:), pointer :: zTopZLevel,zMidZLevel, &
- hRatioZLevelK, hRatioZLevelKm1, meshScalingDel2, meshScalingDel4
- real (kind=RKIND), dimension(:), allocatable:: tracer2ndDer, tracersIn, tracersOut, posZMidZLevel, &
- posZTopZLevel
- real (kind=RKIND), dimension(:,:), allocatable:: fluxVertTop, boundaryMask
- real (kind=RKIND), dimension(:,:,:), allocatable::tr_flux, tr_div, delsq_tracer, tracerTop
+ integer :: err
-
- real (kind=RKIND) :: d2fdx2_cell1, d2fdx2_cell2
- real (kind=RKIND), dimension(:,:,:), pointer :: deriv_two
- real (kind=RKIND) :: coef_3rd_order, flux3Coef, cSignWTop
-
- integer :: index_temperature, index_salinity, rrr
- real (kind=RKIND), dimension(:), pointer :: temperatureRestore, salinityRestore
-
call mpas_timer_start("ocn_tend_scalar")
u => s % u % array
h => s % h % array
- boundaryCell=> grid % boundaryCell % array
wTop => s % wTop % array
tracers => s % tracers % array
h_edge => s % h_edge % array
vertDiffTopOfCell => d % vertDiffTopOfCell % array
tend_tr => tend % tracers % array
-
- areaCell => grid % areaCell % array
- cellsOnEdge => grid % cellsOnEdge % array
- dvEdge => grid % dvEdge % array
- dcEdge => grid % dcEdge % array
- zTopZLevel => grid % zTopZLevel % array
- zMidZLevel => grid % zMidZLevel % array
- hRatioZLevelK => grid % hRatioZLevelK % array
- hRatioZLevelKm1 => grid % hRatioZLevelKm1 % array
- boundaryEdge => grid % boundaryEdge % array
- maxLevelCell => grid % maxLevelCell % array
- maxLevelEdgeTop => grid % maxLevelEdgeTop % array
- maxLevelVertexBot => grid % maxLevelVertexBot % array
- nEdges = grid % nEdges
- nCells = grid % nCells
- nCellsSolve = grid % nCellsSolve
- nVertLevels = grid % nVertLevels
- num_tracers = s % num_tracers
-
- meshScalingDel2 => grid % meshScalingDel2 % array
- meshScalingDel4 => grid % meshScalingDel4 % array
-
-
- deriv_two => grid % deriv_two % array
-
!
! initialize tracer tendency (RHS of tracer equation) to zero.
!
@@ -517,32 +336,26 @@
! and then change maxLevelEdgeTop to maxLevelEdgeBot in the following section.
! tracer_edge at the boundary will also need to be defined for flux boundaries.
- call mpas_timer_start("ocn_tend_scalar-horiz adv")
-
+ call mpas_timer_start("hadv", .false., tracerHadvTimer)
call ocn_tracer_hadv_tend(grid, u, h_edge, tracers, tend_tr, err)
+ call mpas_timer_stop("hadv", tracerHadvTimer)
- call mpas_timer_stop("ocn_tend_scalar-horiz adv")
-
!
! tracer tendency: vertical advection term -d/dz( h \phi w)
!
- call mpas_timer_start("ocn_tend_scalar-vert adv")
+ call mpas_timer_start("vadv", .false., tracerVadvTimer)
+ call ocn_tracer_vadv_tend(grid, h, wtop, tracers, tend_tr, err)
+ call mpas_timer_stop("vadv", tracerVadvTimer)
- call ocn_tracer_vadv_tend(grid, wtop, tracers, tend_tr, err)
-
- call mpas_timer_stop("ocn_tend_scalar-vert adv")
-
!
! tracer tendency: del2 horizontal tracer diffusion, div(h \kappa_2 </font>
<font color="gray">abla \phi)
!
- call mpas_timer_start("ocn_tend_scalar-horiz diff")
-
+ call mpas_timer_start("hmix", .false., tracerHmixTimer)
call ocn_tracer_hmix_tend(grid, h_edge, tracers, tend_tr, err)
+ call mpas_timer_stop("hmix", tracerHmixTimer)
- call mpas_timer_stop("ocn_tend_scalar-horiz diff")
-
! mrp 110516 printing
!print *, 'tend_tr 1',minval(tend_tr(3,1,1:nCells)),&
! maxval(tend_tr(3,1,1:nCells))
@@ -554,11 +367,11 @@
! tracer tendency: vertical diffusion h d/dz( \kappa_v d\phi/dz)
!
if (.not.config_implicit_vertical_mix) then
- call mpas_timer_start("ocn_tend_scalar-explicit vert diff")
+ call mpas_timer_start("explicit vmix", .false., tracerExpVmixTimer)
call ocn_tracer_vmix_tend_explicit(grid, h, vertdifftopofcell, tracers, tend_tr, err)
- call mpas_timer_stop("ocn_tend_scalar-explicit vert diff")
+ call mpas_timer_stop("explicit vmix", tracerExpVmixTimer)
endif
! mrp 110516 printing
@@ -569,11 +382,11 @@
!
! add restoring to T and S in top model layer
!
- call mpas_timer_start("ocn_tend_scalar-restoring")
+ call mpas_timer_start("restoring", .false., tracerRestoringTimer)
call ocn_restoring_tend(grid, h, s%index_temperature, s%index_salinity, tracers, tend_tr, err)
- call mpas_timer_stop("ocn_tend_scalar-restoring")
+ call mpas_timer_stop("restoring", tracerRestoringTimer)
10 format(2i8,10e20.10)
call mpas_timer_stop("ocn_tend_scalar")
@@ -609,42 +422,34 @@
type (mesh_type), intent(in) :: grid
- integer :: iEdge, iCell, iVertex, k, cell1, cell2, vertex1, vertex2, eoe, i, j, cov
- real (kind=RKIND) :: flux, vorticity_abs, h_vertex, workpv, rho0Inv
+ integer :: iEdge, iCell, iVertex, k, cell1, cell2, vertex1, vertex2, eoe, i, j
+ integer :: boundaryMask, velMask, nCells, nEdges, nVertices, nVertLevels, vertexDegree, err
- integer :: nCells, nEdges, nVertices, nVertLevels, vertexDegree, fCoef, err
+ integer, dimension(:), pointer :: nEdgesOnCell, nEdgesOnEdge, &
+ maxLevelCell, maxLevelEdgeTop, maxLevelEdgeBot, &
+ maxLevelVertexBot
+ integer, dimension(:,:), pointer :: cellsOnEdge, cellsOnVertex, &
+ verticesOnEdge, edgesOnEdge, edgesOnVertex,boundaryCell
+ real (kind=RKIND) :: d2fdx2_cell1, d2fdx2_cell2, coef_3rd_order, r_tmp, invAreaCell1, invAreaCell2, invAreaTri1, invAreaTri2, invLength, h_vertex
+ real (kind=RKIND), dimension(:), allocatable:: pTop
+
real (kind=RKIND), dimension(:), pointer :: &
- h_s, fVertex, fEdge, dvEdge, dcEdge, areaCell, areaTriangle, &
- hZLevel
+ h_s, fVertex, dvEdge, dcEdge, areaCell, areaTriangle, &
+ referenceBottomDepth, ssh
real (kind=RKIND), dimension(:,:), pointer :: &
- weightsOnEdge, kiteAreasOnVertex, h_edge, h, u, v, w, pressure,&
- circulation, vorticity, ke, ke_edge, MontPot, wTop, &
+ weightsOnEdge, kiteAreasOnVertex, h_edge, h, u, v, pressure,&
+ circulation, vorticity, ke, ke_edge, MontPot, wTop, zMid, &
pv_edge, pv_vertex, pv_cell, gradPVn, gradPVt, divergence, &
rho, temperature, salinity, kev, kevc
- real (kind=RKIND), dimension(:,:,:), pointer :: tracers
- real (kind=RKIND), dimension(:), allocatable:: pTop
+ real (kind=RKIND), dimension(:,:,:), pointer :: tracers, deriv_two
real (kind=RKIND), dimension(:,:), allocatable:: div_u
character :: c1*6
- integer, dimension(:,:), pointer :: cellsOnEdge, cellsOnVertex, &
- verticesOnEdge, edgesOnCell, edgesOnEdge, edgesOnVertex, &
- boundaryEdge, boundaryCell
- integer, dimension(:), pointer :: nEdgesOnCell, nEdgesOnEdge, &
- maxLevelCell, maxLevelEdgeTop, maxLevelEdgeBot, &
- maxLevelVertexBot, maxLevelVertexTop
- real (kind=RKIND) :: d2fdx2_cell1, d2fdx2_cell2
- real (kind=RKIND), dimension(:,:,:), pointer :: deriv_two
- real (kind=RKIND) :: coef_3rd_order
- real (kind=RKIND) :: r, h1, h2
-
- call mpas_timer_start("ocn_diagnostic_solve")
-
h => s % h % array
u => s % u % array
v => s % v % array
- wTop => s % wTop % array
h_edge => s % h_edge % array
circulation => s % circulation % array
vorticity => s % vorticity % array
@@ -659,9 +464,11 @@
gradPVn => s % gradPVn % array
gradPVt => s % gradPVt % array
rho => s % rho % array
- tracers => s % tracers % array
MontPot => s % MontPot % array
pressure => s % pressure % array
+ zMid => s % zMid % array
+ ssh => s % ssh % array
+ tracers => s % tracers % array
weightsOnEdge => grid % weightsOnEdge % array
kiteAreasOnVertex => grid % kiteAreasOnVertex % array
@@ -669,7 +476,6 @@
cellsOnVertex => grid % cellsOnVertex % array
verticesOnEdge => grid % verticesOnEdge % array
nEdgesOnCell => grid % nEdgesOnCell % array
- edgesOnCell => grid % edgesOnCell % array
nEdgesOnEdge => grid % nEdgesOnEdge % array
edgesOnEdge => grid % edgesOnEdge % array
edgesOnVertex => grid % edgesOnVertex % array
@@ -679,14 +485,12 @@
areaTriangle => grid % areaTriangle % array
h_s => grid % h_s % array
fVertex => grid % fVertex % array
- fEdge => grid % fEdge % array
- hZLevel => grid % hZLevel % array
+ referenceBottomDepth => grid % referenceBottomDepth % array
deriv_two => grid % deriv_two % array
maxLevelCell => grid % maxLevelCell % array
maxLevelEdgeTop => grid % maxLevelEdgeTop % array
maxLevelEdgeBot => grid % maxLevelEdgeBot % array
maxLevelVertexBot => grid % maxLevelVertexBot % array
- maxLevelVertexTop => grid % maxLevelVertexTop % array
nCells = grid % nCells
nEdges = grid % nEdges
@@ -694,7 +498,6 @@
nVertLevels = grid % nVertLevels
vertexDegree = grid % vertexDegree
- boundaryEdge => grid % boundaryEdge % array
boundaryCell => grid % boundaryCell % array
!
@@ -704,205 +507,165 @@
! mrp 101115 note: in order to include flux boundary conditions, we will need to
! assign h_edge for maxLevelEdgeTop:maxLevelEdgeBot in the following section
- ! mrp 110516 efficiency note: For z-level, only do this on level 1. h_edge for all
- ! lower levels is defined by hZlevel.
+ ! initialize h_edge to avoid divide by zero and NaN problems.
+ h_edge = -1.0e34
- call mpas_timer_start("ocn_diagnostic_solve-hEdge")
-
- coef_3rd_order = 0.
- if (config_thickness_adv_order == 3) coef_3rd_order = 1.0
- if (config_thickness_adv_order == 3 .and. config_monotonic) coef_3rd_order = 0.25
-
- if (config_thickness_adv_order == 2) then
- call mpas_timer_start("ocn_diagnostic_solve-hEdge 2")
-
- do iEdge=1,nEdges
- cell1 = cellsOnEdge(1,iEdge)
- cell2 = cellsOnEdge(2,iEdge)
- do k=1,maxLevelEdgeTop(iEdge)
- h_edge(k,iEdge) = 0.5 * (h(k,cell1) + h(k,cell2))
- end do
+ do iEdge=1,nEdges*hadv2nd
+ cell1 = cellsOnEdge(1,iEdge)
+ cell2 = cellsOnEdge(2,iEdge)
+ do k=1,maxLevelEdgeTop(iEdge)
+ h_edge(k,iEdge) = 0.5 * (h(k,cell1) + h(k,cell2))
end do
- call mpas_timer_stop("ocn_diagnostic_solve-hEdge 2")
+ end do
- else if (config_thickness_adv_order == 3) then
- call mpas_timer_start("ocn_diagnostic_solve-hEdge 3")
+ do iEdge=1,nEdges*hadv3rd
+ cell1 = cellsOnEdge(1,iEdge)
+ cell2 = cellsOnEdge(2,iEdge)
- do iEdge=1,nEdges
- cell1 = cellsOnEdge(1,iEdge)
- cell2 = cellsOnEdge(2,iEdge)
+ do k=1,maxLevelEdgeTop(iEdge)
- do k=1,maxLevelEdgeTop(iEdge)
+ d2fdx2_cell1 = 0.0
+ d2fdx2_cell2 = 0.0
- d2fdx2_cell1 = 0.0
- d2fdx2_cell2 = 0.0
+ boundaryMask = abs(transfer(.not.(boundaryCell(k,cell1) == 0 .and. boundaryCell(k,cell2) == 0), boundaryMask))
- !-- if not a boundary cell
- if(boundaryCell(k,cell1).eq.0.and.boundaryCell(k,cell2).eq.0) then
+ d2fdx2_cell1 = deriv_two(1,1,iEdge) * h(k,cell1) * boundaryMask
+ d2fdx2_cell2 = deriv_two(1,2,iEdge) * h(k,cell2) * boundaryMask
- d2fdx2_cell1 = deriv_two(1,1,iEdge) * h(k,cell1)
- d2fdx2_cell2 = deriv_two(1,2,iEdge) * h(k,cell2)
+ !-- all edges of cell 1
+ do i=1, nEdgesOnCell(cell1) * boundaryMask
+ d2fdx2_cell1 = d2fdx2_cell1 + &
+ deriv_two(i+1,1,iEdge) * h(k,grid % CellsOnCell % array (i,cell1))
+ end do
- !-- all edges of cell 1
- do i=1, grid % nEdgesOnCell % array (cell1)
- d2fdx2_cell1 = d2fdx2_cell1 + &
- deriv_two(i+1,1,iEdge) * h(k,grid % CellsOnCell % array (i,cell1))
- end do
+ !-- all edges of cell 2
+ do i=1, nEdgesOnCell(cell2) * boundaryMask
+ d2fdx2_cell2 = d2fdx2_cell2 + &
+ deriv_two(i+1,2,iEdge) * h(k,grid % CellsOnCell % array (i,cell2))
+ end do
- !-- all edges of cell 2
- do i=1, grid % nEdgesOnCell % array (cell2)
- d2fdx2_cell2 = d2fdx2_cell2 + &
- deriv_two(i+1,2,iEdge) * h(k,grid % CellsOnCell % array (i,cell2))
- end do
+ velMask = 2*(abs(transfer(u(k,iEdge) <= 0, velMask))) - 1
- endif
+ h_edge(k,iEdge) = 0.5*(h(k,cell1) + h(k,cell2)) - (dcEdge(iEdge) **2) * (d2fdx2_cell1 + d2fdx2_cell2) / 12. &
+ + velMask * (dcEdge(iEdge) **2) * coef_3rd_order*(d2fdx2_cell1 - d2fdx2_cell2) / 12.
- !-- if u > 0:
- if (u(k,iEdge) > 0) then
- h_edge(k,iEdge) = &
- 0.5*(h(k,cell1) + h(k,cell2)) &
- -(dcEdge(iEdge) **2) * (d2fdx2_cell1 + d2fdx2_cell2) / 12. &
- -(dcEdge(iEdge) **2) * coef_3rd_order*(d2fdx2_cell1 - d2fdx2_cell2) / 12.
- !-- else u <= 0:
- else
- h_edge(k,iEdge) = &
- 0.5*(h(k,cell1) + h(k,cell2)) &
- -(dcEdge(iEdge) **2) * (d2fdx2_cell1 + d2fdx2_cell2) / 12. &
- +(dcEdge(iEdge) **2) * coef_3rd_order*(d2fdx2_cell1 - d2fdx2_cell2) / 12.
- end if
+ end do ! do k
+ end do ! do iEdge
- end do ! do k
- end do ! do iEdge
+ do iEdge=1,nEdges*hadv4th
+ cell1 = cellsOnEdge(1,iEdge)
+ cell2 = cellsOnEdge(2,iEdge)
- call mpas_timer_stop("ocn_diagnostic_solve-hEdge 3")
- else if (config_thickness_adv_order == 4) then
- call mpas_timer_start("ocn_diagnostic_solve-hEdge 4")
+ do k=1,maxLevelEdgeTop(iEdge)
- do iEdge=1,nEdges
- cell1 = cellsOnEdge(1,iEdge)
- cell2 = cellsOnEdge(2,iEdge)
+ d2fdx2_cell1 = 0.0
+ d2fdx2_cell2 = 0.0
- do k=1,maxLevelEdgeTop(iEdge)
+ boundaryMask = abs(transfer(.not.(boundaryCell(k,cell1) == 0 .and. boundaryCell(k,cell2) == 0), boundaryMask))
- d2fdx2_cell1 = 0.0
- d2fdx2_cell2 = 0.0
+ d2fdx2_cell1 = deriv_two(1,1,iEdge) * h(k,cell1) * boundaryMask
+ d2fdx2_cell2 = deriv_two(1,2,iEdge) * h(k,cell2) * boundaryMask
- !-- if not a boundary cell
- if(boundaryCell(k,cell1).eq.0.and.boundaryCell(k,cell2).eq.0) then
+ !-- all edges of cell 1
+ do i=1, nEdgesOnCell(cell1) * boundaryMask
+ d2fdx2_cell1 = d2fdx2_cell1 + &
+ deriv_two(i+1,1,iEdge) * h(k,grid % CellsOnCell % array (i,cell1))
+ end do
- d2fdx2_cell1 = deriv_two(1,1,iEdge) * h(k,cell1)
- d2fdx2_cell2 = deriv_two(1,2,iEdge) * h(k,cell2)
+ !-- all edges of cell 2
+ do i=1, nEdgesOnCell(cell2) * boundaryMask
+ d2fdx2_cell2 = d2fdx2_cell2 + &
+ deriv_two(i+1,2,iEdge) * h(k,grid % CellsOnCell % array (i,cell2))
+ end do
- !-- all edges of cell 1
- do i=1, grid % nEdgesOnCell % array (cell1)
- d2fdx2_cell1 = d2fdx2_cell1 + &
- deriv_two(i+1,1,iEdge) * h(k,grid % CellsOnCell % array (i,cell1))
- end do
+ h_edge(k,iEdge) = &
+ 0.5*(h(k,cell1) + h(k,cell2)) &
+ -(dcEdge(iEdge) **2) * (d2fdx2_cell1 + d2fdx2_cell2) / 12.
- !-- all edges of cell 2
- do i=1, grid % nEdgesOnCell % array (cell2)
- d2fdx2_cell2 = d2fdx2_cell2 + &
- deriv_two(i+1,2,iEdge) * h(k,grid % CellsOnCell % array (i,cell2))
- end do
+ end do ! do k
+ end do ! do iEdge
- endif
-
- h_edge(k,iEdge) = &
- 0.5*(h(k,cell1) + h(k,cell2)) &
- -(dcEdge(iEdge) **2) * (d2fdx2_cell1 + d2fdx2_cell2) / 12.
-
- end do ! do k
- end do ! do iEdge
-
- call mpas_timer_stop("ocn_diagnostic_solve-hEdge 4")
- endif ! if(config_thickness_adv_order == 2)
- call mpas_timer_stop("ocn_diagnostic_solve-hEdge")
-
!
! set the velocity and height at dummy address
! used -1e34 so error clearly occurs if these values are used.
!
-!mrp 110516 change to zero, change back later:
u(:,nEdges+1) = -1e34
h(:,nCells+1) = -1e34
tracers(s % index_temperature,:,nCells+1) = -1e34
tracers(s % index_salinity,:,nCells+1) = -1e34
- !
- ! Compute circulation and relative vorticity at each vertex
- !
circulation(:,:) = 0.0
+ vorticity(:,:) = 0.0
+ divergence(:,:) = 0.0
+ ke(:,:) = 0.0
+ v(:,:) = 0.0
do iEdge=1,nEdges
vertex1 = verticesOnEdge(1,iEdge)
vertex2 = verticesOnEdge(2,iEdge)
- do k=1,maxLevelEdgeBot(iEdge)
- circulation(k,vertex1) = circulation(k,vertex1) - dcEdge(iEdge) * u(k,iEdge)
- circulation(k,vertex2) = circulation(k,vertex2) + dcEdge(iEdge) * u(k,iEdge)
- end do
- end do
- do iVertex=1,nVertices
- do k=1,maxLevelVertexBot(iVertex)
- vorticity(k,iVertex) = circulation(k,iVertex) / areaTriangle(iVertex)
- end do
- end do
- !
- ! Compute the divergence at each cell center
- !
- divergence(:,:) = 0.0
- do iEdge=1,nEdges
cell1 = cellsOnEdge(1,iEdge)
cell2 = cellsOnEdge(2,iEdge)
- do k=1,maxLevelEdgeBot(iEdge)
- divergence(k,cell1) = divergence(k,cell1) + u(k,iEdge)*dvEdge(iEdge)
- divergence(k,cell2) = divergence(k,cell2) - u(k,iEdge)*dvEdge(iEdge)
- enddo
- end do
- do iCell = 1,nCells
- r = 1.0 / areaCell(iCell)
- do k = 1,maxLevelCell(iCell)
- divergence(k,iCell) = divergence(k,iCell) * r
- enddo
- enddo
- !
- ! Compute kinetic energy in each cell
- !
- ke(:,:) = 0.0
- do iEdge=1,nEdges
- cell1 = cellsOnEdge(1,iEdge)
- cell2 = cellsOnEdge(2,iEdge)
+ invAreaTri1 = 1.0 / areaTriangle(vertex1)
+ invAreaTri2 = 1.0 / areaTriangle(vertex2)
+
+ invAreaCell1 = 1.0 / areaCell(cell1)
+ invAreaCell2 = 1.0 / areaCell(cell2)
+
do k=1,maxLevelEdgeBot(iEdge)
- ke(k,cell1) = ke(k,cell1) + 0.25 * dcEdge(iEdge) * dvEdge(iEdge) * u(k,iEdge)**2.0
- ke(k,cell2) = ke(k,cell2) + 0.25 * dcEdge(iEdge) * dvEdge(iEdge) * u(k,iEdge)**2.0
- enddo
+ ! Compute circulation and relative vorticity at each vertex
+ r_tmp = dcEdge(iEdge) * u(k,iEdge)
+ circulation(k,vertex1) = circulation(k,vertex1) - r_tmp
+ circulation(k,vertex2) = circulation(k,vertex2) + r_tmp
+
+ vorticity(k, vertex1) = vorticity(k, vertex1) - r_tmp * invAreaTri1
+ vorticity(k, vertex2) = vorticity(k, vertex2) + r_tmp * invAreaTri2
+
+ ! Compute the divergence at each cell center
+ r_tmp = dvEdge(iEdge) * u(k, iEdge)
+ divergence(k,cell1) = divergence(k,cell1) + r_tmp * invAreaCell1
+ divergence(k,cell2) = divergence(k,cell2) - r_tmp * invAreaCell2
+
+ ! Compute kinetic energy in each cell
+ r_tmp = r_tmp * dcEdge(iEdge) * u(k,iEdge)
+ ke(k,cell1) = ke(k,cell1) + 0.25 * r_tmp * invAreaCell1
+ ke(k,cell2) = ke(k,cell2) + 0.25 * r_tmp * invAreaCell2
+ end do
+
+ ! Compute v (tangential) velocities
+ do i=1,nEdgesOnEdge(iEdge)
+ eoe = edgesOnEdge(i,iEdge)
+ ! mrp 101115 note: in order to include flux boundary conditions,
+ ! the following loop may need to change to maxLevelEdgeBot
+ do k = 1,maxLevelEdgeTop(iEdge)
+ v(k,iEdge) = v(k,iEdge) + weightsOnEdge(i,iEdge) * u(k, eoe)
+ end do
+ end do
+
end do
- do iCell = 1,nCells
- do k = 1,maxLevelCell(iCell)
- ke(k,iCell) = ke(k,iCell) / areaCell(iCell)
- enddo
- enddo
!
! Compute kinetic energy in each vertex
!
kev(:,:) = 0.0; kevc(:,:) = 0.0
- do iEdge=1,nEdges
+ do iEdge=1,nEdges*ke_vertex_flag
do k=1,nVertLevels
- kev(k,verticesOnEdge(1,iEdge)) = kev(k,verticesOnEdge(1,iEdge)) + dcEdge(iEdge) * dvEdge(iEdge) * u(k,iEdge)**2
- kev(k,verticesOnEdge(2,iEdge)) = kev(k,verticesOnEdge(2,iEdge)) + dcEdge(iEdge) * dvEdge(iEdge) * u(k,iEdge)**2
+ r_tmp = dcEdge(iEdge) * dvEdge(iEdge) * u(k, iEdge)**2
+ kev(k,verticesOnEdge(1,iEdge)) = kev(k,verticesOnEdge(1,iEdge)) + r_tmp
+ kev(k,verticesOnEdge(2,iEdge)) = kev(k,verticesOnEdge(2,iEdge)) + r_tmp
end do
end do
- do iVertex = 1,nVertices
+ do iVertex = 1,nVertices*ke_vertex_flag
do k=1,nVertLevels
- kev(k,iVertex) = kev(k,iVertex) / areaTriangle(iVertex) / 4.0
+ kev(k,iVertex) = kev(k,iVertex) / areaTriangle(iVertex) * 0.25
enddo
enddo
- do iVertex = 1, nVertices
+ do iVertex = 1, nVertices*ke_vertex_flag
do i=1,grid % vertexDegree
iCell = cellsOnVertex(i,iVertex)
+ invAreaCell1 = 1.0 / areaCell(iCell)
do k=1,nVertLevels
- kevc(k,iCell) = kevc(k,iCell) + kiteAreasOnVertex(i, iVertex) * kev(k, iVertex) / areaCell(iCell)
+ kevc(k,iCell) = kevc(k,iCell) + kiteAreasOnVertex(i, iVertex) * kev(k, iVertex) * invAreaCell1
enddo
enddo
enddo
@@ -910,35 +673,17 @@
!
! Compute kinetic energy in each cell by blending ke and kevc
!
- if(config_include_KE_vertex) then
- do iCell=1,nCells
+ do iCell=1,nCells*ke_vertex_flag
do k=1,nVertLevels
ke(k,iCell) = 5.0/8.0*ke(k,iCell) + 3.0/8.0*kevc(k,iCell)
end do
end do
- endif
!
- ! Compute v (tangential) velocities
- !
- v(:,:) = 0.0
- do iEdge = 1,nEdges
- do i=1,nEdgesOnEdge(iEdge)
- eoe = edgesOnEdge(i,iEdge)
- ! mrp 101115 note: in order to include flux boundary conditions,
- ! the following loop may need to change to maxLevelEdgeBot
- do k = 1,maxLevelEdgeTop(iEdge)
- v(k,iEdge) = v(k,iEdge) + weightsOnEdge(i,iEdge) * u(k, eoe)
- end do
- end do
- end do
-
- !
! Compute ke on cell edges at velocity locations for quadratic bottom drag.
!
! mrp 101025 efficiency note: we could get rid of ke_edge completely by
! using sqrt(u(k,iEdge)**2 + v(k,iEdge)**2) in its place elsewhere.
- ke_edge = 0.0 !mrp remove 0 for efficiency
do iEdge=1,nEdges
cell1 = cellsOnEdge(1,iEdge)
cell2 = cellsOnEdge(2,iEdge)
@@ -951,83 +696,64 @@
! Compute height at vertices, pv at vertices, and average pv to edge locations
! ( this computes pv_vertex at all vertices bounding real cells and distance-1 ghost cells )
!
- if (trim(config_time_integration) == 'RK4') then
- ! for RK4, PV is really PV = (eta+f)/h
- fCoef = 1
- elseif (trim(config_time_integration) == 'split_explicit' &
- .or.trim(config_time_integration) == 'unsplit_explicit') then
- ! for split explicit, PV is eta/h because f is added separately to the momentum forcing.
-! mrp temp, new should be:
- fCoef = 0
-! old, for testing:
-! fCoef = 1
- end if
-
do iVertex = 1,nVertices
+ invAreaTri1 = 1.0 / areaTriangle(iVertex)
do k=1,maxLevelVertexBot(iVertex)
h_vertex = 0.0
do i=1,vertexDegree
h_vertex = h_vertex + h(k,cellsOnVertex(i,iVertex)) * kiteAreasOnVertex(i,iVertex)
end do
- h_vertex = h_vertex / areaTriangle(iVertex)
+ h_vertex = h_vertex * invAreaTri1
pv_vertex(k,iVertex) = (fCoef*fVertex(iVertex) + vorticity(k,iVertex)) / h_vertex
end do
end do
- !
- ! Compute pv at cell centers
- ! ( this computes pv_cell for all real cells and distance-1 ghost cells )
- !
pv_cell(:,:) = 0.0
+ pv_edge(:,:) = 0.0
do iVertex = 1,nVertices
do i=1,vertexDegree
iCell = cellsOnVertex(i,iVertex)
+ iEdge = edgesOnVertex(i,iVertex)
+
+ invAreaCell1 = 1.0 / areaCell(iCell)
+
+ ! Compute pv at cell centers
+ ! ( this computes pv_cell for all real cells and distance-1 ghost cells )
do k = 1,maxLevelCell(iCell)
- pv_cell(k,iCell) = pv_cell(k,iCell) &
- + kiteAreasOnVertex(i, iVertex) * pv_vertex(k, iVertex) &
- / areaCell(iCell)
+ pv_cell(k,iCell) = pv_cell(k,iCell) + kiteAreasOnVertex(i, iVertex) * pv_vertex(k, iVertex) * invAreaCell1
enddo
- enddo
- enddo
- !
- ! Compute pv at the edges
- ! ( this computes pv_edge at all edges bounding real cells )
- !
- pv_edge(:,:) = 0.0
- do iVertex = 1,nVertices
- do i=1,vertexDegree
- iEdge = edgesOnVertex(i,iVertex)
+ ! Compute pv at the edges
+ ! ( this computes pv_edge at all edges bounding real cells )
do k=1,maxLevelEdgeBot(iEdge)
- pv_edge(k,iEdge) = pv_edge(k,iEdge) + 0.5 * pv_vertex(k,iVertex)
+ pv_edge(k,iEdge) = pv_edge(k,iEdge) + 0.5 * pv_vertex(k,iVertex)
enddo
- end do
- end do
+ enddo
+ enddo
- !
- ! Compute gradient of PV in normal direction
- ! ( this computes gradPVn for all edges bounding real cells )
- !
- gradPVn(:,:) = 0.0
+! gradPVn(:,:) = 0.0
+! gradPVt(:,:) = 0.0
do iEdge = 1,nEdges
+ cell1 = cellsOnEdge(1, iEdge)
+ cell2 = cellsOnEdge(2, iEdge)
+ vertex1 = verticesOnedge(1, iEdge)
+ vertex2 = verticesOnedge(2, iEdge)
+
+ invLength = 1.0 / dcEdge(iEdge)
+ ! Compute gradient of PV in normal direction
+ ! ( this computes gradPVn for all edges bounding real cells )
do k=1,maxLevelEdgeTop(iEdge)
- gradPVn(k,iEdge) = ( pv_cell(k,cellsOnEdge(2,iEdge)) &
- - pv_cell(k,cellsOnEdge(1,iEdge))) &
- / dcEdge(iEdge)
+ gradPVn(k,iEdge) = (pv_cell(k,cell2) - pv_cell(k,cell1)) * invLength
enddo
- enddo
- !
- ! Compute gradient of PV in the tangent direction
- ! ( this computes gradPVt at all edges bounding real cells and distance-1 ghost cells )
- !
- do iEdge = 1,nEdges
+ invLength = 1.0 / dvEdge(iEdge)
+ ! Compute gradient of PV in the tangent direction
+ ! ( this computes gradPVt at all edges bounding real cells and distance-1 ghost cells )
do k = 1,maxLevelEdgeBot(iEdge)
- gradPVt(k,iEdge) = ( pv_vertex(k,verticesOnEdge(2,iEdge)) &
- - pv_vertex(k,verticesOnEdge(1,iEdge))) &
- /dvEdge(iEdge)
+ gradPVt(k,iEdge) = (pv_vertex(k,vertex2) - pv_vertex(k,vertex1)) * invLength
enddo
+
enddo
!
@@ -1046,17 +772,20 @@
!
! For an isopycnal model, density should remain constant.
! For zlevel, calculate in-situ density
- if (config_vert_grid_type.eq.'zlevel') then
+ if (config_vert_grid_type.ne.'isopycnal') then
+ call mpas_timer_start("equation of state", .false., diagEOSTimer)
call ocn_equation_of_state_rho(s, grid, 0, 'relative', err)
! mrp 110324 In order to visualize rhoDisplaced, include the following
call ocn_equation_of_state_rho(s, grid, 1, 'relative', err)
+ call mpas_timer_stop("equation of state", diagEOSTimer)
endif
!
! Pressure
! This section must be after computing rho
!
- if (config_vert_grid_type.eq.'isopycnal') then
+ ! dwj: 10/25/2011 - Need to explore isopycnal vs zlevel flags
+ if (config_pressure_type.eq.'MontgomeryPotential') then
! For Isopycnal model.
! Compute pressure at top of each layer, and then
@@ -1083,34 +812,52 @@
end do
deallocate(pTop)
- elseif (config_vert_grid_type.eq.'zlevel') then
+ else
- ! For z-level model.
- ! Compute pressure at middle of each level.
- ! At k=1, where p is pressure at a depth of hZLevel(1)/2, not
- ! pressure at middle of layer including SSH.
-
do iCell=1,nCells
- ! compute pressure for z-level coordinates
+ ! pressure for generalized coordinates
! assume atmospheric pressure at the surface is zero for now.
-
pressure(1,iCell) = rho(1,iCell)*gravity &
- * (h(1,iCell)-0.5*hZLevel(1))
+ * 0.5*h(1,iCell)
do k=2,maxLevelCell(iCell)
pressure(k,iCell) = pressure(k-1,iCell) &
- + 0.5*gravity*( rho(k-1,iCell)*hZLevel(k-1) &
- + rho(k ,iCell)*hZLevel(k ))
+ + 0.5*gravity*( rho(k-1,iCell)*h(k-1,iCell) &
+ + rho(k ,iCell)*h(k ,iCell))
end do
+ ! Compute zMid, the z-coordinate of the middle of the layer.
+ ! This is used for the rho g grad z momentum term.
+ ! Note the negative sign, since referenceBottomDepth is positive
+ ! and z-coordinates are negative below the surface.
+ k = maxLevelCell(iCell)
+ zMid(k:nVertLevels,iCell) = -referenceBottomDepth(k) + 0.5*h(k,iCell)
+
+ do k=maxLevelCell(iCell)-1, 1, -1
+ zMid(k,iCell) = zMid(k+1,iCell) &
+ + 0.5*( h(k+1,iCell) &
+ + h(k ,iCell))
+ end do
+
end do
endif
- call ocn_wtop(s,grid)
+ !
+ ! Sea Surface Height
+ !
+ do iCell=1,nCells
+ ! Start at the bottom where we know the depth, and go up.
+ ! The bottom depth for this cell is
+ ! referenceBottomDepth(maxLevelCell(iCell)).
+ ! Note the negative sign, since referenceBottomDepth is positive
+ ! and z-coordinates are negative below the surface.
- call mpas_timer_stop("ocn_diagnostic_solve")
+ ssh(iCell) = -referenceBottomDepth(maxLevelCell(iCell)) &
+ + sum(h(1:maxLevelCell(iCell),iCell))
+ end do
+
end subroutine ocn_diagnostic_solve!}}}
!***********************************************************************
@@ -1126,7 +873,7 @@
!
!-----------------------------------------------------------------------
- subroutine ocn_wtop(s, grid)!{{{
+ subroutine ocn_wtop(s1,s2, grid)!{{{
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! Compute diagnostic fields used in the tendency computations
!
@@ -1137,21 +884,22 @@
implicit none
- type (state_type), intent(inout) :: s
+ type (state_type), intent(inout) :: s1
+ type (state_type), intent(inout) :: s2
type (mesh_type), intent(in) :: grid
! mrp 110512 could clean this out, remove pointers?
integer :: iEdge, iCell, iVertex, k, cell1, cell2, vertex1, vertex2, eoe, i, j, cov
- real (kind=RKIND) :: flux, vorticity_abs, h_vertex, workpv, rho0Inv
+ real (kind=RKIND) :: flux, vorticity_abs, h_vertex, workpv, rho0Inv, hSum
integer :: nCells, nEdges, nVertices, nVertLevels, vertexDegree
real (kind=RKIND), dimension(:), pointer :: &
- h_s, fVertex, fEdge, dvEdge, dcEdge, areaCell, areaTriangle, &
- hZLevel
- real (kind=RKIND), dimension(:,:), pointer :: u,wTop
- real (kind=RKIND), dimension(:,:), allocatable:: div_u
+ h_s, fVertex, fEdge, dvEdge, dcEdge, areaCell, areaTriangle
+ real (kind=RKIND), dimension(:,:), pointer :: u,h,wTop, h_edge
+ real (kind=RKIND), dimension(:,:), allocatable:: div_hu
+ real (kind=RKIND), dimension(:), allocatable:: div_hu_btr, h_tend_col, h_weights
integer, dimension(:,:), pointer :: cellsOnEdge, cellsOnVertex, &
verticesOnEdge, edgesOnCell, edgesOnEdge, edgesOnVertex, &
@@ -1160,14 +908,13 @@
maxLevelCell, maxLevelEdgeTop, maxLevelEdgeBot, &
maxLevelVertexBot, maxLevelVertexTop
- call mpas_timer_start("wTop")
+ h => s1 % h % array
+ h_edge => s1 % h_edge % array
+ u => s2 % u % array
+ wTop => s2 % wTop % array
- u => s % u % array
- wTop => s % wTop % array
-
areaCell => grid % areaCell % array
cellsOnEdge => grid % cellsOnEdge % array
- hZLevel => grid % hZLevel % array
maxLevelCell => grid % maxLevelCell % array
maxLevelEdgeBot => grid % maxLevelEdgeBot % array
dvEdge => grid % dvEdge % array
@@ -1176,46 +923,128 @@
nEdges = grid % nEdges
nVertLevels = grid % nVertLevels
+ allocate(div_hu(nVertLevels,nCells+1), div_hu_btr(nCells+1), &
+ h_tend_col(nVertLevels), h_weights(nVertLevels))
+
!
+ ! Compute div(h^{edge} u) for each cell
+ ! See Ringler et al. (2010) jcp paper, eqn 19, 21, and fig. 3.
+ !
+ div_hu(:,:) = 0.0
+ do iEdge=1,nEdges
+ cell1 = cellsOnEdge(1,iEdge)
+ cell2 = cellsOnEdge(2,iEdge)
+ do k=1,maxLevelEdgeBot(iEdge)
+ flux = u(k,iEdge) * dvEdge(iEdge) * h_edge(k,iEdge)
+ div_hu(k,cell1) = div_hu(k,cell1) + flux
+ div_hu(k,cell2) = div_hu(k,cell2) - flux
+ end do
+ end do
+
+ do iCell=1,nCells
+ div_hu_btr(iCell) = 0.0
+ do k=1,maxLevelCell(iCell)
+ div_hu(k,iCell) = div_hu(k,iCell) / areaCell(iCell)
+ div_hu_btr(iCell) = div_hu_btr(iCell) + div_hu(k,iCell)
+ end do
+ end do
+
+ !
! vertical velocity through layer interface
!
+ !dwj: 10/25/2011 - Need to explore isopycnal vs zlevel flags
if (config_vert_grid_type.eq.'isopycnal') then
! set vertical velocity to zero in isopycnal case
wTop=0.0
elseif (config_vert_grid_type.eq.'zlevel') then
- !
- ! Compute div(u) for each cell
- ! See Ringler et al. (2010) jcp paper, eqn 19, 21, and fig. 3.
- !
- allocate(div_u(nVertLevels,nCells+1))
- div_u(:,:) = 0.0
- do iEdge=1,nEdges
- cell1 = cellsOnEdge(1,iEdge)
- cell2 = cellsOnEdge(2,iEdge)
- do k=2,maxLevelEdgeBot(iEdge)
- flux = u(k,iEdge) * dvEdge(iEdge)
- div_u(k,cell1) = div_u(k,cell1) + flux
- div_u(k,cell2) = div_u(k,cell2) - flux
- end do
- end do
+ do iCell=1,nCells
+ ! Vertical velocity through layer interface at top and
+ ! bottom is zero.
+ wTop(1,iCell) = 0.0
+ wTop(maxLevelCell(iCell)+1,iCell) = 0.0
+ do k=maxLevelCell(iCell),2,-1
+ wTop(k,iCell) = wTop(k+1,iCell) - div_hu(k,iCell)
+ end do
+ end do
+ elseif (config_vert_grid_type.eq.'zstar1') then
+
+ ! This is a testing setting. The computation is similar to zstar,
+ ! but the weights are all in the top layer, so is a bit-for-bit
+ ! match with zlevel.
+
do iCell=1,nCells
+
+ h_tend_col = 0.0
+ h_tend_col(1) = - div_hu_btr(iCell)
+
! Vertical velocity through layer interface at top and
! bottom is zero.
wTop(1,iCell) = 0.0
wTop(maxLevelCell(iCell)+1,iCell) = 0.0
+
do k=maxLevelCell(iCell),2,-1
- wTop(k,iCell) = wTop(k+1,iCell) &
- - div_u(k,iCell)/areaCell(iCell)*hZLevel(k)
+ wTop(k,iCell) = wTop(k+1,iCell) - div_hu(k,iCell) - h_tend_col(k)
end do
end do
- deallocate(div_u)
+ elseif (config_vert_grid_type.eq.'zstar') then
+
+ ! Distribute the change in total column height due to the external
+ ! mode, div_hu_btr, among all the layers. Distribute in proportion
+ ! to the layer thickness.
+
+ do iCell=1,nCells
+
+ hSum = 0.0
+ do k=1,maxLevelCell(iCell)
+ h_tend_col(k) = - h(k,iCell)*div_hu_btr(iCell)
+ hSum = hSum + h(k,iCell)
+ end do
+ h_tend_col = h_tend_col / hSum
+
+ ! Vertical velocity through layer interface at top and
+ ! bottom is zero.
+ wTop(1,iCell) = 0.0
+ wTop(maxLevelCell(iCell)+1,iCell) = 0.0
+ do k=maxLevelCell(iCell),2,-1
+ wTop(k,iCell) = wTop(k+1,iCell) - div_hu(k,iCell) - h_tend_col(k)
+ end do
+ end do
+
+ elseif (config_vert_grid_type.eq.'zstarWeights') then
+
+ ! This is a test with other weights, not meant to be permanent.
+
+ h_weights = 0.0
+ h_weights(1:5) = 1.0
+ do k=1,10
+ h_weights(5+k) = 1.0-k*0.1
+ end do
+
+ do iCell=1,nCells
+
+ hSum = 0.0
+ do k=1,maxLevelCell(iCell)
+ h_tend_col(k) = - h_weights(k)*h(k,iCell)*div_hu_btr(iCell)
+ hSum = hSum + h_weights(k)*h(k,iCell)
+ end do
+ h_tend_col = h_tend_col / hSum
+
+ ! Vertical velocity through layer interface at top and
+ ! bottom is zero.
+ wTop(1,iCell) = 0.0
+ wTop(maxLevelCell(iCell)+1,iCell) = 0.0
+ do k=maxLevelCell(iCell),2,-1
+ wTop(k,iCell) = wTop(k+1,iCell) - div_hu(k,iCell) - h_tend_col(k)
+ end do
+ end do
+
endif
- call mpas_timer_stop("wTop")
+ deallocate(div_hu, div_hu_btr, h_tend_col, h_weights)
end subroutine ocn_wtop!}}}
@@ -1249,88 +1078,35 @@
! mrp 110512 I just split compute_tend into compute_tend_u and compute_tend_h.
! Some of these variables can be removed, but at a later time.
- integer :: iEdge, iCell, iVertex, k, cell1, cell2, &
- vertex1, vertex2, eoe, i, j
+ integer :: iEdge, cell1, cell2, eoe, i, j, k
- integer :: nCells, nEdges, nVertices, nVertLevels, nEdgesSolve
- real (kind=RKIND) :: flux, vorticity_abs, h_vertex, workpv, q, &
- upstream_bias, wTopEdge, rho0Inv, r
- real (kind=RKIND), dimension(:), pointer :: &
- h_s, fVertex, fEdge, dvEdge, dcEdge, areaCell, areaTriangle, &
- zMidZLevel, zTopZLevel
- real (kind=RKIND), dimension(:,:), pointer :: &
- weightsOnEdge, kiteAreasOnVertex, h_edge, h, u, uBcl, v, pressure, &
- tend_u, circulation, vorticity, ke, ke_edge, pv_edge, &
- MontPot, wTop, divergence, vertViscTopOfEdge
+ integer :: nEdgesSolve
+ real (kind=RKIND), dimension(:), pointer :: fEdge
+ real (kind=RKIND), dimension(:,:), pointer :: weightsOnEdge, u, uBcl
type (dm_info) :: dminfo
- integer, dimension(:), pointer :: nEdgesOnCell, nEdgesOnEdge, &
- maxLevelCell, maxLevelEdgeTop, maxLevelVertexBot
- integer, dimension(:,:), pointer :: &
- cellsOnEdge, cellsOnVertex, verticesOnEdge, edgesOnCell, &
- edgesOnEdge, edgesOnVertex
- real (kind=RKIND) :: u_diffusion
- real (kind=RKIND), dimension(:), allocatable:: fluxVertTop,w_dudzTopEdge
+ integer, dimension(:), pointer :: maxLevelEdgeTop, nEdgesOnEdge
+ integer, dimension(:,:), pointer :: cellsOnEdge, edgesOnEdge
- real (kind=RKIND), allocatable, dimension(:,:) :: delsq_divergence
- real (kind=RKIND), allocatable, dimension(:,:) :: delsq_u
- real (kind=RKIND), allocatable, dimension(:,:) :: delsq_circulation, delsq_vorticity
-
-
- real (kind=RKIND), dimension(:,:), pointer :: u_src
- real (kind=RKIND), parameter :: rho_ref = 1000.0
-
call mpas_timer_start("ocn_fuperp")
- h => s % h % array
u => s % u % array
uBcl => s % uBcl % array
- v => s % v % array
- wTop => s % wTop % array
- h_edge => s % h_edge % array
- circulation => s % circulation % array
- vorticity => s % vorticity % array
- divergence => s % divergence % array
- ke => s % ke % array
- ke_edge => s % ke_edge % array
- pv_edge => s % pv_edge % array
- MontPot => s % MontPot % array
- pressure => s % pressure % array
-
weightsOnEdge => grid % weightsOnEdge % array
- kiteAreasOnVertex => grid % kiteAreasOnVertex % array
+ fEdge => grid % fEdge % array
+ maxLevelEdgeTop => grid % maxLevelEdgeTop % array
cellsOnEdge => grid % cellsOnEdge % array
- cellsOnVertex => grid % cellsOnVertex % array
- verticesOnEdge => grid % verticesOnEdge % array
- nEdgesOnCell => grid % nEdgesOnCell % array
- edgesOnCell => grid % edgesOnCell % array
nEdgesOnEdge => grid % nEdgesOnEdge % array
edgesOnEdge => grid % edgesOnEdge % array
- edgesOnVertex => grid % edgesOnVertex % array
- dcEdge => grid % dcEdge % array
- dvEdge => grid % dvEdge % array
- areaCell => grid % areaCell % array
- areaTriangle => grid % areaTriangle % array
- h_s => grid % h_s % array
- fVertex => grid % fVertex % array
- fEdge => grid % fEdge % array
- zMidZLevel => grid % zMidZLevel % array
- zTopZLevel => grid % zTopZLevel % array
- maxLevelCell => grid % maxLevelCell % array
- maxLevelEdgeTop => grid % maxLevelEdgeTop % array
- maxLevelVertexBot => grid % maxLevelVertexBot % array
-
- nCells = grid % nCells
- nEdges = grid % nEdges
+ fEdge => grid % fEdge % array
+
nEdgesSolve = grid % nEdgesSolve
- nVertices = grid % nVertices
- nVertLevels = grid % nVertLevels
!
! Put f*uBcl^{perp} in u as a work variable
!
- do iEdge=1,grid % nEdgesSolve
+ do iEdge=1,nEdgesSolve
cell1 = cellsOnEdge(1,iEdge)
cell2 = cellsOnEdge(2,iEdge)
@@ -1349,7 +1125,71 @@
end subroutine ocn_fuperp!}}}
!***********************************************************************
+!
+! routine ocn_tendency_init
+!
+!> \brief Initializes flags used within tendency routines.
+!> \author Doug Jacobsen
+!> \date 4 November 2011
+!> \version SVN:$Id$
+!> \details
+!> This routine initializes flags related to quantities computed within
+!> other tendency routines.
+!
+!-----------------------------------------------------------------------
+ subroutine ocn_tendency_init(err)!{{{
+ integer, intent(out) :: err
+
+ err = 0
+
+ coef_3rd_order = 0.
+
+ if (config_thickness_adv_order == 2) then
+ hadv2nd = 1
+ hadv3rd = 0
+ hadv4th = 0
+ else if (config_thickness_adv_order == 3) then
+ hadv2nd = 0
+ hadv3rd = 1
+ hadv4th = 0
+
+ if(config_monotonic) then
+ coef_3rd_order = 0.25
+ else
+ coef_3rd_order = 1.0
+ endif
+ else if (config_thickness_adv_order == 4) then
+ hadv2nd = 0
+ hadv3rd = 0
+ hadv4th = 1
+ end if
+
+
+ if(config_include_KE_vertex) then
+ ke_vertex_flag = 1
+ ke_cell_flag = 0
+ else
+ ke_vertex_flag = 0
+ ke_cell_flag = 1
+ endif
+
+ if (trim(config_time_integration) == 'RK4') then
+ ! for RK4, PV is really PV = (eta+f)/h
+ fCoef = 1
+ elseif (trim(config_time_integration) == 'split_explicit' &
+ .or.trim(config_time_integration) == 'unsplit_explicit') then
+ ! for split explicit, PV is eta/h because f is added separately to the momentum forcing.
+ ! mrp temp, new should be:
+ fCoef = 0
+ ! old, for testing:
+ ! fCoef = 1
+ end if
+
+ end subroutine ocn_tendency_init!}}}
+
+!***********************************************************************
+
end module ocn_tendency
!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_thick_hadv.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_thick_hadv.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_thick_hadv.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -104,10 +104,10 @@
integer :: iEdge, nEdges, cell1, cell2, nVertLevels, k
integer :: iCell, nCells
- integer, dimension(:), pointer :: maxLevelEdgeTop
+ integer, dimension(:), pointer :: maxLevelEdgeBot, MaxLevelCell
integer, dimension(:,:), pointer :: cellsOnEdge
- real (kind=RKIND) :: flux
+ real (kind=RKIND) :: flux, invAreaCell1, invAreaCell2
real (kind=RKIND), dimension(:), pointer :: dvEdge, areaCell
!-----------------------------------------------------------------
@@ -124,46 +124,27 @@
nCells = grid % nCells
nVertLevels = grid % nVertLevels
- maxLevelEdgeTop => grid % maxLevelEdgeTop % array
+ maxLevelCell => grid % maxLevelCell % array
+ maxLevelEdgeBot => grid % maxLevelEdgeBot % array
cellsOnEdge => grid % cellsOnEdge % array
dvEdge => grid % dvEdge % array
areaCell => grid % areaCell % array
- if (config_vert_grid_type.eq.'isopycnal') then
-
- do iEdge=1,nEdges
- cell1 = cellsOnEdge(1,iEdge)
- cell2 = cellsOnEdge(2,iEdge)
- do k=1,nVertLevels
- flux = u(k,iEdge) * dvEdge(iEdge) * h_edge(k,iEdge)
- tend(k,cell1) = tend(k,cell1) - flux
- tend(k,cell2) = tend(k,cell2) + flux
- end do
+ do iEdge=1,nEdges
+ cell1 = cellsOnEdge(1,iEdge)
+ cell2 = cellsOnEdge(2,iEdge)
+ do k=1,maxLevelEdgeBot(iEdge)
+ flux = u(k,iEdge) * dvEdge(iEdge) * h_edge(k,iEdge)
+ tend(k,cell1) = tend(k,cell1) - flux
+ tend(k,cell2) = tend(k,cell2) + flux
end do
- do iCell=1,nCells
- do k=1,nVertLevels
- tend(k,iCell) = tend(k,iCell) / areaCell(iCell)
- end do
+ end do
+ do iCell=1,nCells
+ do k=1,maxLevelCell(iCell)
+ tend(k,iCell) = tend(k,iCell) / areaCell(iCell)
end do
+ end do
- elseif (config_vert_grid_type.eq.'zlevel') then
-
- do iEdge=1,nEdges
- cell1 = cellsOnEdge(1,iEdge)
- cell2 = cellsOnEdge(2,iEdge)
- do k=1,min(1,maxLevelEdgeTop(iEdge))
- flux = u(k,iEdge) * dvEdge(iEdge) * h_edge(k,iEdge)
- tend(k,cell1) = tend(k,cell1) - flux
- tend(k,cell2) = tend(k,cell2) + flux
- end do
- end do
- do iCell=1,nCells
- tend(1,iCell) = tend(1,iCell) / areaCell(iCell)
- end do
-
- endif ! config_vert_grid_type
-
-
!--------------------------------------------------------------------
end subroutine ocn_thick_hadv_tend!}}}
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_thick_vadv.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_thick_vadv.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_thick_vadv.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -98,7 +98,8 @@
!
!-----------------------------------------------------------------
- integer :: iCell, nCells
+ integer :: iCell, nCells, nVertLevels, k
+ integer, dimension(:), pointer :: MaxLevelCell
!-----------------------------------------------------------------
!
@@ -110,15 +111,17 @@
err = 0
+ maxLevelCell => grid % maxLevelCell % array
+
nCells = grid % nCells
+ nVertLevels = grid % nVertLevels
- if (config_vert_grid_type.eq.'zlevel') then
- do iCell=1,nCells
- tend(1,iCell) = tend(1,iCell) + wTop(2,iCell)
- end do
- endif ! coordinate type
+ do iCell=1,nCells
+ do k=1,maxLevelCell(iCell)
+ tend(k,iCell) = tend(k,iCell) + wTop(k+1,iCell) - wTop(k,iCell)
+ end do
+ end do
-
!--------------------------------------------------------------------
end subroutine ocn_thick_vadv_tend!}}}
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_time_integration_rk4.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_time_integration_rk4.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_time_integration_rk4.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -166,6 +166,10 @@
call mpas_timer_start("RK4-tendency computations")
block => domain % blocklist
do while (associated(block))
+
+ ! mrp 111206 put ocn_wtop call at top for ALE
+ call ocn_wtop(provis, provis, block % mesh)
+
if (.not.config_implicit_vertical_mix) then
call ocn_vmix_coefs(block % mesh, provis, block % diagnostics, err)
end if
@@ -179,7 +183,6 @@
endif
call ocn_tend_scalar(block % tend, provis, block % diagnostics, block % mesh)
- call enforce_boundaryEdge(block % tend, block % mesh)
block => block % next
end do
call mpas_timer_stop("RK4-tendency computations")
@@ -296,8 +299,6 @@
if (config_implicit_vertical_mix) then
call mpas_timer_start("RK4-implicit vert mix")
- allocate(A(nVertLevels),C(nVertLevels),uTemp(nVertLevels), &
- tracersTemp(num_tracers,nVertLevels))
call ocn_vmix_coefs(block % mesh, block % state % time_levs(2) % state, block % diagnostics, err)
@@ -369,8 +370,8 @@
integer :: nCells, nEdges, nVertices, nVertLevels, nEdgesSolve
real (kind=RKIND) :: vertSum, uhSum, hSum, sshEdge
real (kind=RKIND), dimension(:), pointer :: &
- h_s, fVertex, fEdge, dvEdge, dcEdge, areaCell, areaTriangle, &
- zMidZLevel, zTopZLevel, meshScalingDel2, meshScalingDel4
+ h_s, dvEdge, dcEdge, areaCell, areaTriangle, &
+ meshScalingDel2, meshScalingDel4
real (kind=RKIND), dimension(:,:), pointer :: &
weightsOnEdge, kiteAreasOnVertex, h_edge, h, u, v, pressure, &
tend_u, circulation, vorticity, ke, ke_edge, pv_edge, &
@@ -425,11 +426,6 @@
areaCell => grid % areaCell % array
areaTriangle => grid % areaTriangle % array
h_s => grid % h_s % array
-! mrp 110516 cleanup fvertex fedge not used in this subroutine
- fVertex => grid % fVertex % array
- fEdge => grid % fEdge % array
- zMidZLevel => grid % zMidZLevel % array
- zTopZLevel => grid % zTopZLevel % array
maxLevelCell => grid % maxLevelCell % array
maxLevelEdgeTop => grid % maxLevelEdgeTop % array
maxLevelVertexBot => grid % maxLevelVertexBot % array
@@ -446,15 +442,12 @@
do iEdge=1,grid % nEdges
- ! I am using hZLevel here. This assumes that SSH is zero everywhere already,
- ! which should be the case if the barotropic mode is filtered.
- ! The more general case is to use sshEdge or h_edge.
- uhSum = (grid % hZLevel % array(1)) * tend_u(1,iEdge)
- hSum = grid % hZLevel % array(1)
+ uhSum = (h_edge(1,iEdge)) * tend_u(1,iEdge)
+ hSum = h_edge(1,iEdge)
do k=2,grid % maxLevelEdgeTop % array(iEdge)
- uhSum = uhSum + grid % hZLevel % array(k) * tend_u(k,iEdge)
- hSum = hSum + grid % hZLevel % array(k)
+ uhSum = uhSum + h_edge(k,iEdge) * tend_u(k,iEdge)
+ hSum = hSum + h_edge(k,iEdge)
enddo
vertSum = uhSum/hSum
@@ -492,8 +485,8 @@
integer :: nCells, nEdges, nVertices, nVertLevels, nEdgesSolve
real (kind=RKIND) :: vertSum, uhSum, hSum, sshEdge
real (kind=RKIND), dimension(:), pointer :: &
- h_s, fVertex, fEdge, dvEdge, dcEdge, areaCell, areaTriangle, &
- zMidZLevel, zTopZLevel, meshScalingDel2, meshScalingDel4
+ h_s, dvEdge, dcEdge, areaCell, areaTriangle, &
+ meshScalingDel2, meshScalingDel4
real (kind=RKIND), dimension(:,:), pointer :: &
weightsOnEdge, kiteAreasOnVertex, h_edge, h, u, v, pressure, &
tend_u, circulation, vorticity, ke, ke_edge, pv_edge, &
@@ -547,11 +540,6 @@
areaCell => grid % areaCell % array
areaTriangle => grid % areaTriangle % array
h_s => grid % h_s % array
-! mrp 110516 cleanup fvertex fedge not used in this subroutine
- fVertex => grid % fVertex % array
- fEdge => grid % fEdge % array
- zMidZLevel => grid % zMidZLevel % array
- zTopZLevel => grid % zTopZLevel % array
maxLevelCell => grid % maxLevelCell % array
maxLevelEdgeTop => grid % maxLevelEdgeTop % array
maxLevelVertexBot => grid % maxLevelVertexBot % array
@@ -566,15 +554,12 @@
do iEdge=1,grid % nEdges
- ! I am using hZLevel here. This assumes that SSH is zero everywhere already,
- ! which should be the case if the barotropic mode is filtered.
- ! The more general case is to use sshedge or h_edge.
- uhSum = (grid % hZLevel % array(1)) * u(1,iEdge)
- hSum = grid % hZLevel % array(1)
+ uhSum = (h_edge(1,iEdge)) * u(1,iEdge)
+ hSum = h_edge(1,iEdge)
do k=2,grid % maxLevelEdgeTop % array(iEdge)
- uhSum = uhSum + grid % hZLevel % array(k) * u(k,iEdge)
- hSum = hSum + grid % hZLevel % array(k)
+ uhSum = uhSum + h_edge(k,iEdge) * u(k,iEdge)
+ hSum = hSum + h_edge(k,iEdge)
enddo
vertSum = uhSum/hSum
@@ -588,51 +573,6 @@
end subroutine filter_btr_mode_u!}}}
- subroutine enforce_boundaryEdge(tend, grid)!{{{
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! Enforce any boundary conditions on the normal velocity at each edge
- !
- ! Input: grid - grid metadata
- !
- ! Output: tend_u set to zero at boundaryEdge == 1 locations
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
-
- implicit none
-
- type (tend_type), intent(inout) :: tend
- type (mesh_type), intent(in) :: grid
-
- integer, dimension(:,:), pointer :: boundaryEdge
- real (kind=RKIND), dimension(:,:), pointer :: tend_u
- integer :: nCells, nEdges, nVertices, nVertLevels
- integer :: iEdge, k
-
- call mpas_timer_start("enforce_boundaryEdge")
-
- nCells = grid % nCells
- nEdges = grid % nEdges
- nVertices = grid % nVertices
- nVertLevels = grid % nVertLevels
-
- boundaryEdge => grid % boundaryEdge % array
- tend_u => tend % u % array
-
- if(maxval(boundaryEdge).le.0) return
-
- do iEdge = 1,nEdges
- do k = 1,nVertLevels
-
- if(boundaryEdge(k,iEdge).eq.1) then
- tend_u(k,iEdge) = 0.0
- endif
-
- enddo
- enddo
- call mpas_timer_stop("enforce_boundaryEdge")
-
- end subroutine enforce_boundaryEdge!}}}
-
end module ocn_time_integration_rk4
! vim: foldmethod=marker
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_time_integration_split.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_time_integration_split.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_time_integration_split.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -47,6 +47,10 @@
public :: ocn_time_integrator_split
+ type (timer_node), pointer :: timer_main, timer_prep, timer_bcl_vel, timer_btr_vel, timer_diagnostic_update, timer_implicit_vmix, &
+ timer_halo_diagnostic, timer_halo_ubtr, timer_halo_ssh, timer_halo_f, timer_halo_h, &
+ timer_halo_tracers, timer_halo_ubcl
+
contains
!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
@@ -54,7 +58,7 @@
! ocn_time_integration_split
!
!> \brief MPAS ocean split explicit time integration scheme
-!> \author Doug Jacobsen
+!> \author Mark Petersen
!> \date 26 September 2011
!> \version SVN:$Id:$
!> \details
@@ -63,16 +67,16 @@
!
!-----------------------------------------------------------------------
-subroutine ocn_time_integrator_split(domain, dt)!{{{
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! Advance model state forward in time by the specified time step using
- ! Split_Explicit timestepping scheme
- !
- ! Input: domain - current model state in time level 1 (e.g., time_levs(1)state%h(:,:))
- ! plus grid meta-data
- ! Output: domain - upon exit, time level 2 (e.g., time_levs(2)%state%h(:,:)) contains
- ! model state advanced forward in time by dt seconds
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ subroutine ocn_time_integrator_split(domain, dt)!{{{
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ! Advance model state forward in time by the specified time step using
+ ! Split_Explicit timestepping scheme
+ !
+ ! Input: domain - current model state in time level 1 (e.g., time_levs(1)state%h(:,:))
+ ! plus grid meta-data
+ ! Output: domain - upon exit, time level 2 (e.g., time_levs(2)%state%h(:,:)) contains
+ ! model state advanced forward in time by dt seconds
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
implicit none
@@ -81,72 +85,73 @@
type (dm_info) :: dminfo
integer :: iCell, i,k,j, iEdge, cell1, cell2, split_explicit_step, split, &
- eoe, oldBtrSubcycleTime, newBtrSubcycleTime, uPerpTime, BtrCorIter, &
- n_bcl_iter(config_n_ts_iter), &
- vertex1, vertex2, iVertex
-
+ eoe, oldBtrSubcycleTime, newBtrSubcycleTime, uPerpTime, BtrCorIter, &
+ n_bcl_iter(config_n_ts_iter)
type (block_type), pointer :: block
- real (kind=RKIND) :: uhSum, hSum, sshEdge, flux, &
- uPerp, uCorr, tracerTemp, coef, FBtr_coeff, sshCell1, sshCell2
- real (kind=RKIND), dimension(:), pointer :: sshNew
-
+ real (kind=RKIND) :: uhSum, hSum, flux, sshEdge, &
+ CoriolisTerm, uCorr, temp, temp_h, coef, FBtr_coeff, sshCell1, sshCell2
integer :: num_tracers, ucorr_coef, err
real (kind=RKIND), dimension(:,:), pointer :: &
- u, h, h_edge, ke_edge, vertViscTopOfEdge, vertDiffTopOfCell
+ u, h, h_edge, ke_edge, vertViscTopOfEdge, vertDiffTopOfCell
real (kind=RKIND), dimension(:,:,:), pointer :: tracers
integer, dimension(:), pointer :: &
- maxLevelCell, maxLevelEdgeTop
- real (kind=RKIND), dimension(:), allocatable:: A,C,uTemp, hNew
+ maxLevelCell, maxLevelEdgeTop
+ real (kind=RKIND), dimension(:), allocatable:: uTemp
real (kind=RKIND), dimension(:,:), allocatable:: tracersTemp
- call mpas_timer_start("split_explicit_timestep")
+ call mpas_timer_start("se timestep", .false., timer_main)
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!
! Prep variables before first iteration
!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ call mpas_timer_start("se prep", .false., timer_prep)
block => domain % blocklist
do while (associated(block))
+ ! Initialize * variables that are used to compute baroclinic tendencies below.
do iEdge=1,block % mesh % nEdges
+ do k=1,block % mesh % nVertLevels !maxLevelEdgeTop % array(iEdge)
- ! The baroclinic velocity needs be recomputed at the beginning of a
- ! timestep because the implicit vertical mixing is conducted on the
- ! total u. We keep uBtr from the previous timestep.
- block % state % time_levs(1) % state % uBcl % array(:,iEdge) &
- = block % state % time_levs(1) % state % u % array(:,iEdge) &
- - block % state % time_levs(1) % state % uBtr % array(iEdge)
+ ! The baroclinic velocity needs be recomputed at the beginning of a
+ ! timestep because the implicit vertical mixing is conducted on the
+ ! total u. We keep uBtr from the previous timestep.
+ block % state % time_levs(1) % state % uBcl % array(k,iEdge) &
+ = block % state % time_levs(1) % state % u % array(k,iEdge) &
+ - block % state % time_levs(1) % state % uBtr % array( iEdge)
- block % state % time_levs(2) % state % u % array(:,iEdge) &
- = block % state % time_levs(1) % state % u % array(:,iEdge)
+ block % state % time_levs(2) % state % u % array(k,iEdge) &
+ = block % state % time_levs(1) % state % u % array(k,iEdge)
- block % state % time_levs(2) % state % uBcl % array(:,iEdge) &
- = block % state % time_levs(1) % state % uBcl % array(:,iEdge)
+ block % state % time_levs(2) % state % uBcl % array(k,iEdge) &
+ = block % state % time_levs(1) % state % uBcl % array(k,iEdge)
- enddo ! iEdge
+ block % state % time_levs(2) % state % h_edge % array(k,iEdge) &
+ = block % state % time_levs(1) % state % h_edge % array(k,iEdge)
- ! Initialize * variables that are used compute baroclinic tendencies below.
+ end do
+ end do
+
block % state % time_levs(2) % state % ssh % array(:) &
= block % state % time_levs(1) % state % ssh % array(:)
- block % state % time_levs(2) % state % h_edge % array(:,:) &
- = block % state % time_levs(1) % state % h_edge % array(:,:)
+ do iCell=1,block % mesh % nCells
+ do k=1,block % mesh % maxLevelCell % array(iCell)
- do iCell=1,block % mesh % nCells ! couple tracers to h
- ! change to maxLevelCell % array(iCell) ?
- do k=1,block % mesh % nVertLevels
+ block % state % time_levs(2) % state % h % array(k,iCell) &
+ = block % state % time_levs(1) % state % h % array(k,iCell)
- block % state % time_levs(2) % state % tracers % array(:,k,iCell) &
- = block % state % time_levs(1) % state % tracers % array(:,k,iCell)
- end do
+ block % state % time_levs(2) % state % tracers % array(:,k,iCell) &
+ = block % state % time_levs(1) % state % tracers % array(:,k,iCell)
+ end do
end do
block => block % next
end do
-
+ call mpas_timer_stop("se prep", timer_prep)
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! BEGIN large iteration loop
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -155,671 +160,484 @@
n_bcl_iter(config_n_ts_iter) = config_n_bcl_iter_end
do split_explicit_step = 1, config_n_ts_iter
-! --- update halos for diagnostic variables
+ ! --- update halos for diagnostic variables
- block => domain % blocklist
- do while (associated(block))
-
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % pv_edge % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
-
- if (config_h_mom_eddy_visc4 > 0.0) then
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % divergence % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % vorticity % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nVertices, &
- block % parinfo % verticesToSend, block % parinfo % verticesToRecv)
- end if
-
- block => block % next
- end do
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !
- ! Stage 1: Baroclinic velocity (3D) prediction, explicit with long timestep
- !
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- ! compute velocity tendencies, T(u*,w*,p*)
-
- block => domain % blocklist
- do while (associated(block))
- if (.not.config_implicit_vertical_mix) then
- call ocn_vmix_coefs(block % mesh, block % state % time_levs(2) % state, block % diagnostics, err)
- end if
- call ocn_tend_u(block % tend, block % state % time_levs(2) % state , block % diagnostics, block % mesh)
- call enforce_boundaryEdge(block % tend, block % mesh)
- block => block % next
- end do
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! BEGIN baroclinic iterations on linear Coriolis term
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- do j=1,n_bcl_iter(split_explicit_step)
-
- ! Use this G coefficient to avoid an if statement within the iEdge loop.
- if (trim(config_time_integration) == 'unsplit_explicit') then
- split = 0
- elseif (trim(config_time_integration) == 'split_explicit') then
- split = 1
- endif
-
+ call mpas_timer_start("se halo diag", .false., timer_halo_diagnostic)
block => domain % blocklist
do while (associated(block))
- allocate(uTemp(block % mesh % nVertLevels))
- ! Put f*uBcl^{perp} in uNew as a work variable
- call ocn_fuperp(block % state % time_levs(2) % state , block % mesh)
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, &
+ block % state % time_levs(2) % state % pv_edge % array(:,:), &
+ block % mesh % nVertLevels, block % mesh % nEdges, &
+ block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
- do iEdge=1,block % mesh % nEdges
- cell1 = block % mesh % cellsOnEdge % array(1,iEdge)
- cell2 = block % mesh % cellsOnEdge % array(2,iEdge)
+ if (config_h_mom_eddy_visc4 > 0.0) then
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, &
+ block % state % time_levs(2) % state % divergence % array(:,:), &
+ block % mesh % nVertLevels, block % mesh % nCells, &
+ block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, &
+ block % state % time_levs(2) % state % vorticity % array(:,:), &
+ block % mesh % nVertLevels, block % mesh % nVertices, &
+ block % parinfo % verticesToSend, block % parinfo % verticesToRecv)
+ end if
- uTemp = 0.0 ! could put this after with uTemp(maxleveledgetop+1:nvertlevels)=0
- do k=1,block % mesh % maxLevelEdgeTop % array(iEdge)
-
- ! uBclNew = uBclOld + dt*(-f*uBclPerp + T(u*,w*,p*) + g*grad(SSH*) )
- ! Here uNew is a work variable containing -fEdge(iEdge)*uBclPerp(k,iEdge)
- uTemp(k) &
- = block % state % time_levs(1) % state % uBcl % array(k,iEdge) &
- + dt * (block % tend % u % array (k,iEdge) &
- + block % state % time_levs(2) % state % u % array (k,iEdge) & ! this is f*uBcl^{perp}
- + split*gravity &
- *( block % state % time_levs(2) % state % ssh % array(cell2) &
- - block % state % time_levs(2) % state % ssh % array(cell1) ) &
- /block % mesh % dcEdge % array(iEdge) )
- enddo
-
- ! Compute GBtrForcing, the vertically averaged forcing
- sshEdge = 0.5*( &
- block % state % time_levs(1) % state % ssh % array(cell1) &
- + block % state % time_levs(1) % state % ssh % array(cell2) )
-
- uhSum = (sshEdge + block % mesh % hZLevel % array(1)) * uTemp(1)
- hSum = sshEdge + block % mesh % hZLevel % array(1)
-
- do k=2,block % mesh % maxLevelEdgeTop % array(iEdge)
- uhSum = uhSum + block % mesh % hZLevel % array(k) * uTemp(k)
- hSum = hSum + block % mesh % hZLevel % array(k)
- enddo
- block % state % time_levs(1) % state % GBtrForcing % array(iEdge) = split*uhSum/hSum/dt
-
-
- do k=1,block % mesh % maxLevelEdgeTop % array(iEdge)
- ! These two steps are together here:
- !{\bf u}'_{k,n+1} = {\bf u}'_{k,n} - \Delta t {\overline {\bf G}}
- !{\bf u}'_{k,n+1/2} = \frac{1}{2}\left({\bf u}^{'}_{k,n} +{\bf u}'_{k,n+1}\right)
- ! so that uBclNew is at time n+1/2
- block % state % time_levs(2) % state % uBcl % array(k,iEdge) &
- = 0.5*( &
- block % state % time_levs(1) % state % uBcl % array(k,iEdge) &
- + uTemp(k) - dt * block % state % time_levs(1) % state % GBtrForcing % array(iEdge))
- enddo
-
- enddo ! iEdge
-
- deallocate(uTemp)
-
- block => block % next
+ block => block % next
end do
+ call mpas_timer_stop("se halo diag", timer_halo_diagnostic)
- block => domain % blocklist
- do while (associated(block))
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % uBcl % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !
+ ! Stage 1: Baroclinic velocity (3D) prediction, explicit with long timestep
+ !
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- block => block % next
- end do
+ ! compute velocity tendencies, T(u*,w*,p*)
+ call mpas_timer_start("se bcl vel", .false., timer_bcl_vel)
- enddo ! do j=1,config_n_bcl_iter
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! END baroclinic iterations on linear Coriolis term
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !
- ! Stage 2: Barotropic velocity (2D) prediction, explicitly subcycled
- !
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- oldBtrSubcycleTime = 1
- newBtrSubcycleTime = 2
-
- if (trim(config_time_integration) == 'unsplit_explicit') then
-
block => domain % blocklist
do while (associated(block))
-
- ! For Split_Explicit unsplit, simply set uBtrNew=0, uBtrSubcycle=0, and uNew=uBclNew
- block % state % time_levs(2) % state % uBtr % array(:) = 0.0
-
- block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(:) = 0.0
-
- block % state % time_levs(2) % state % u % array(:,:) &
- = block % state % time_levs(2) % state % uBcl % array(:,:)
-
+ if (.not.config_implicit_vertical_mix) then
+ call ocn_vmix_coefs(block % mesh, block % state % time_levs(2) % state, block % diagnostics, err)
+ end if
+ call ocn_tend_u(block % tend, block % state % time_levs(2) % state , block % diagnostics, block % mesh)
block => block % next
- end do ! block
+ end do
- elseif (trim(config_time_integration) == 'split_explicit') then
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ! BEGIN baroclinic iterations on linear Coriolis term
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ do j=1,n_bcl_iter(split_explicit_step)
- ! Initialize variables for barotropic subcycling
- block => domain % blocklist
- do while (associated(block))
+ ! Use this G coefficient to avoid an if statement within the iEdge loop.
+ if (trim(config_time_integration) == 'unsplit_explicit') then
+ split = 0
+ elseif (trim(config_time_integration) == 'split_explicit') then
+ split = 1
+ endif
- if (config_filter_btr_mode) then
- block % state % time_levs(1) % state % GBtrForcing % array(:) = 0.0
- endif
-
- do iCell=1,block % mesh % nCells
- ! sshSubcycleOld = sshOld
- block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(iCell) &
- = block % state % time_levs(1) % state % ssh % array(iCell)
-
- ! sshNew = sshOld This is the first for the summation
- block % state % time_levs(2) % state % ssh % array(iCell) &
- = block % state % time_levs(1) % state % ssh % array(iCell)
- enddo
-
- do iEdge=1,block % mesh % nEdges
-
- ! uBtrSubcycleOld = uBtrOld
- block % state % time_levs(oldBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) &
- = block % state % time_levs(1) % state % uBtr % array(iEdge)
-
- ! uBtrNew = BtrOld This is the first for the summation
- block % state % time_levs(2) % state % uBtr % array(iEdge) &
- = block % state % time_levs(1) % state % uBtr % array(iEdge)
-
- ! FBtr = 0
- block % state % time_levs(1) % state % FBtr % array(iEdge) = 0.0
- enddo
-
- block => block % next
- end do ! block
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! BEGIN Barotropic subcycle loop
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- do j=1,config_n_btr_subcycles*config_btr_subcycle_loop_factor
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! Barotropic subcycle: initial solve for velecity
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- if (config_btr_gam1_uWt1>1.0e-12) then ! only do this part if it is needed in next SSH solve
- uPerpTime = oldBtrSubcycleTime
-
block => domain % blocklist
do while (associated(block))
+ allocate(uTemp(block % mesh % nVertLevels))
- do iEdge=1,block % mesh % nEdges
+ ! Put f*uBcl^{perp} in uNew as a work variable
+ call ocn_fuperp(block % state % time_levs(2) % state , block % mesh)
- cell1 = block % mesh % cellsOnEdge % array(1,iEdge)
- cell2 = block % mesh % cellsOnEdge % array(2,iEdge)
+ do iEdge=1,block % mesh % nEdges
+ cell1 = block % mesh % cellsOnEdge % array(1,iEdge)
+ cell2 = block % mesh % cellsOnEdge % array(2,iEdge)
- ! Compute -f*uPerp
- uPerp = 0.0
- do i = 1,block % mesh % nEdgesOnEdge % array(iEdge)
- eoe = block % mesh % edgesOnEdge % array(i,iEdge)
- uPerp = uPerp + block % mesh % weightsOnEdge % array(i,iEdge) &
- * block % state % time_levs(uPerpTime) % state % uBtrSubcycle % array(eoe) &
- * block % mesh % fEdge % array(eoe)
- end do
+ uTemp = 0.0 ! could put this after with uTemp(maxleveledgetop+1:nvertlevels)=0
+ do k=1,block % mesh % maxLevelEdgeTop % array(iEdge)
- ! mrp 110606 efficiency note: could make this a 1D integer factor instead of an if statement.
- if (block % mesh % boundaryEdge % array(1,iEdge).eq.1) then
- block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) = 0.0
- else
+ ! uBclNew = uBclOld + dt*(-f*uBclPerp + T(u*,w*,p*) + g*grad(SSH*) )
+ ! Here uNew is a work variable containing -fEdge(iEdge)*uBclPerp(k,iEdge)
+ uTemp(k) = block % state % time_levs(1) % state % uBcl % array(k,iEdge) &
+ + dt * (block % tend % u % array (k,iEdge) &
+ + block % state % time_levs(2) % state % u % array (k,iEdge) & ! this is f*uBcl^{perp}
+ + split * gravity * ( block % state % time_levs(2) % state % ssh % array(cell2) &
+ - block % state % time_levs(2) % state % ssh % array(cell1) ) &
+ /block % mesh % dcEdge % array(iEdge) )
+ enddo
- ! uBtrNew = uBtrOld + dt*(-f*uBtroldPerp - g*grad(SSH) + G)
- block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) &
- = block % state % time_levs(oldBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) &
- + dt/config_n_btr_subcycles *( &
- uPerp &
- - gravity &
- *( block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(cell2) &
- - block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(cell1) ) &
- /block % mesh % dcEdge % array(iEdge) &
- + block % state % time_levs(1) % state % GBtrForcing % array(iEdge) )
+ ! hSum is initialized outside the loop because on land boundaries
+ ! maxLevelEdgeTop=0, but I want to initialize hSum with a
+ ! nonzero value to avoid a NaN.
+ uhSum = block % state % time_levs(2) % state % h_edge % array(1,iEdge) * uTemp(1)
+ hSum = block % state % time_levs(2) % state % h_edge % array(1,iEdge)
- endif
+ do k=2,block % mesh % maxLevelEdgeTop % array(iEdge)
+ uhSum = uhSum + block % state % time_levs(2) % state % h_edge % array(k,iEdge) * uTemp(k)
+ hSum = hSum + block % state % time_levs(2) % state % h_edge % array(k,iEdge)
+ enddo
+ block % state % time_levs(1) % state % GBtrForcing % array(iEdge) = split*uhSum/hSum/dt
- end do
- ! Implicit solve for barotropic momentum decay
- if ( config_btr_mom_decay) then
- !
- ! Add term to RHS of momentum equation: -1/gamma u
- !
- ! This changes the solve to:
- ! u^{n+1} = u_provis^{n+1}/(1+dt/gamma)
- !
- coef = 1.0/(1.0 + dt/config_n_btr_subcycles/config_btr_mom_decay_time)
- do iEdge=1,block % mesh % nEdges
- block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) &
- = block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) &
- * coef
- end do
+ do k=1,block % mesh % maxLevelEdgeTop % array(iEdge)
+ ! These two steps are together here:
+ !{\bf u}'_{k,n+1} = {\bf u}'_{k,n} - \Delta t {\overline {\bf G}}
+ !{\bf u}'_{k,n+1/2} = \frac{1}{2}\left({\bf u}^{'}_{k,n} +{\bf u}'_{k,n+1}\right)
+ ! so that uBclNew is at time n+1/2
+ block % state % time_levs(2) % state % uBcl % array(k,iEdge) &
+ = 0.5*( &
+ block % state % time_levs(1) % state % uBcl % array(k,iEdge) &
+ + uTemp(k) - dt * block % state % time_levs(1) % state % GBtrForcing % array(iEdge))
+ enddo
+
+ enddo ! iEdge
- endif
+ deallocate(uTemp)
-
block => block % next
- end do ! block
+ end do
-
- ! boundary update on uBtrNew
+ call mpas_timer_start("se halo ubcl", .false., timer_halo_ubcl)
block => domain % blocklist
do while (associated(block))
-
- call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, &
- block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(:), &
- block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
-
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, &
+ block % state % time_levs(2) % state % uBcl % array(:,:), &
+ block % mesh % nVertLevels, block % mesh % nEdges, &
+ block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
+
block => block % next
- end do ! block
+ end do
+ call mpas_timer_stop("se halo ubcl", timer_halo_ubcl)
- endif ! config_btr_gam1_uWt1>1.0e-12
+ end do ! do j=1,config_n_bcl_iter
+ call mpas_timer_stop("se bcl vel", timer_bcl_vel)
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ! END baroclinic iterations on linear Coriolis term
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! Barotropic subcycle: Compute thickness flux and new SSH: PREDICTOR
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- block => domain % blocklist
- do while (associated(block))
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !
+ ! Stage 2: Barotropic velocity (2D) prediction, explicitly subcycled
+ !
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- block % tend % ssh % array(:) = 0.0
+ call mpas_timer_start("se btr vel", .false., timer_btr_vel)
- if (config_btr_solve_SSH2) then
- ! If config_btr_solve_SSH2=.true., then do NOT accumulate FBtr in this SSH predictor
- ! section, because it will be accumulated in the SSH corrector section.
- FBtr_coeff = 0.0
- else
- ! otherwise, DO accumulate FBtr in this SSH predictor section
- FBtr_coeff = 1.0
- endif
+ oldBtrSubcycleTime = 1
+ newBtrSubcycleTime = 2
- ! config_btr_gam1_uWt1 sets the forward weighting of velocity in the SSH computation
- ! config_btr_gam1_uWt1= 1 flux = uBtrNew*H
- ! config_btr_gam1_uWt1=0.5 flux = 1/2*(uBtrNew+uBtrOld)*H
- ! config_btr_gam1_uWt1= 0 flux = uBtrOld*H
- do iEdge=1,block % mesh % nEdges
- cell1 = block % mesh % cellsOnEdge % array(1,iEdge)
- cell2 = block % mesh % cellsOnEdge % array(2,iEdge)
+ if (trim(config_time_integration) == 'unsplit_explicit') then
- sshEdge = 0.5 &
- *( block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(cell1) &
- + block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(cell2) )
- hSum = sum(block % mesh % hZLevel % array (1:block % mesh % maxLevelEdgeTop % array(iEdge)))
-
- flux = ((1.0-config_btr_gam1_uWt1) &
- * block % state % time_levs(oldBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) &
- + config_btr_gam1_uWt1 &
- * block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge)) &
- * (sshEdge + hSum)
-
- block % tend % ssh % array(cell1) = block % tend % ssh % array(cell1) &
- - flux * block % mesh % dvEdge % array(iEdge)
- block % tend % ssh % array(cell2) = block % tend % ssh % array(cell2) &
- + flux * block % mesh % dvEdge % array(iEdge)
-
- block % state % time_levs(1) % state % FBtr % array(iEdge) &
- = block % state % time_levs(1) % state % FBtr % array(iEdge) &
- + FBtr_coeff*flux
- end do
-
- ! SSHnew = SSHold + dt/J*(-div(Flux))
- do iCell=1,block % mesh % nCells
-
- block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle % array(iCell) &
- = block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(iCell) &
- + dt/config_n_btr_subcycles &
- * block % tend % ssh % array(iCell) / block % mesh % areaCell % array (iCell)
-
- end do
-
- block => block % next
- end do ! block
-
- ! boundary update on SSHnew
block => domain % blocklist
do while (associated(block))
-! block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle % array(:), &
+ ! For Split_Explicit unsplit, simply set uBtrNew=0, uBtrSubcycle=0, and uNew=uBclNew
+ block % state % time_levs(2) % state % uBtr % array(:) = 0.0
- call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, &
- block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle % array(:), &
- block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
+ block % state % time_levs(2) % state % u % array(:,:) = block % state % time_levs(2) % state % uBcl % array(:,:)
block => block % next
end do ! block
+ elseif (trim(config_time_integration) == 'split_explicit') then
-! mrp 110801 begin
-! This whole section, bounded by 'mrp 110801', may be deleted later if it is found
-! that barotropic del2 is not useful.
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! Barotropic subcycle: compute btr_divergence and btr_vorticity for del2(u_btr)
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ! Initialize variables for barotropic subcycling
block => domain % blocklist
do while (associated(block))
- block % state % time_levs(1) % state % u_diffusionBtr % array(:) = 0.0
- if ( config_btr_mom_eddy_visc2 > 0.0 ) then
- !
- ! Compute circulation and relative vorticity at each vertex
- !
- block % state % time_levs(1) % state % circulationBtr % array(:) = 0.0
- do iEdge=1,block % mesh % nEdges
- vertex1 = block % mesh % verticesOnEdge % array(1,iEdge)
- vertex2 = block % mesh % verticesOnEdge % array(2,iEdge)
- block % state % time_levs(1) % state % circulationBtr % array(vertex1) &
- = block % state % time_levs(1) % state % circulationBtr % array(vertex1) &
- - block % mesh % dcEdge % array (iEdge) &
- *block % state % time_levs(oldBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge)
- block % state % time_levs(1) % state % circulationBtr % array(vertex2) &
- = block % state % time_levs(1) % state % circulationBtr % array(vertex2) &
- + block % mesh % dcEdge % array (iEdge) &
- *block % state % time_levs(oldBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge)
- end do
- do iVertex=1,block % mesh % nVertices
- block % state % time_levs(1) % state % vorticityBtr % array(iVertex) &
- = block % state % time_levs(1) % state % circulationBtr % array(iVertex) / block % mesh % areaTriangle % array (iVertex)
- end do
+ if (config_filter_btr_mode) then
+ block % state % time_levs(1) % state % GBtrForcing % array(:) = 0.0
+ endif
- !
- ! Compute the divergence at each cell center
- !
- block % state % time_levs(1) % state % divergenceBtr % array(:) = 0.0
- do iEdge=1,block % mesh % nEdges
- cell1 = block % mesh % cellsOnEdge % array(1,iEdge)
- cell2 = block % mesh % cellsOnEdge % array(2,iEdge)
- block % state % time_levs(1) % state % divergenceBtr % array (cell1) &
- = block % state % time_levs(1) % state % divergenceBtr % array (cell1) &
- + block % state % time_levs(oldBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) &
- *block % mesh % dvEdge % array(iEdge)
+ do iCell=1,block % mesh % nCells
+ ! sshSubcycleOld = sshOld
+ block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(iCell) &
+ = block % state % time_levs(1) % state % ssh % array(iCell)
+ end do
- block % state % time_levs(1) % state % divergenceBtr % array (cell2) &
- = block % state % time_levs(1) % state % divergenceBtr % array (cell2) &
- - block % state % time_levs(oldBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) &
- *block % mesh % dvEdge % array(iEdge)
- end do
- do iCell = 1,block % mesh % nCells
- block % state % time_levs(1) % state % divergenceBtr % array(iCell) &
- = block % state % time_levs(1) % state % divergenceBtr % array(iCell) &
- /block % mesh % areaCell % array(iCell)
- enddo
+ do iEdge=1,block % mesh % nEdges
- !
- ! Compute Btr diffusion
- !
- do iEdge=1,block % mesh % nEdgesSolve
- cell1 = block % mesh % cellsOnEdge % array(1,iEdge)
- cell2 = block % mesh % cellsOnEdge % array(2,iEdge)
- vertex1 = block % mesh % verticesOnEdge % array(1,iEdge)
- vertex2 = block % mesh % verticesOnEdge % array(2,iEdge)
+ ! uBtrSubcycleOld = uBtrOld
+ block % state % time_levs(oldBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) &
+ = block % state % time_levs(1) % state % uBtr % array(iEdge)
- ! Here -( vorticityBtr(vertex2) - vorticityBtr(vertex1) ) / dvEdge % array (iEdge)
- ! is - </font>
<font color="red">abla vorticity pointing from vertex 2 to vertex 1, or equivalently
- ! + k \times </font>
<font color="gray">abla vorticity pointing from cell1 to cell2.
+ ! uBtrNew = BtrOld This is the first for the summation
+ block % state % time_levs(2) % state % uBtr % array(iEdge) &
+ = block % state % time_levs(1) % state % uBtr % array(iEdge)
- block % state % time_levs(1) % state % u_diffusionBtr % array(iEdge) = block % mesh % meshScalingDel2 % array (iEdge) * config_btr_mom_eddy_visc2 * &
- (( block % state % time_levs(1) % state % divergenceBtr % array(cell2) - block % state % time_levs(1) % state % divergenceBtr % array(cell1) ) / block % mesh % dcEdge % array (iEdge) &
- -( block % state % time_levs(1) % state % vorticityBtr % array(vertex2) - block % state % time_levs(1) % state % vorticityBtr % array(vertex1) ) / block % mesh % dvEdge % array (iEdge))
+ ! FBtr = 0
+ block % state % time_levs(1) % state % FBtr % array(iEdge) = 0.0
+ end do
- end do
- end if
block => block % next
end do ! block
-! mrp 110801 end
-
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! Barotropic subcycle: Final solve for velocity. Iterate for Coriolis term.
+ ! BEGIN Barotropic subcycle loop
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ do j=1,config_n_btr_subcycles*config_btr_subcycle_loop_factor
- do BtrCorIter=1,config_n_btr_cor_iter
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ! Barotropic subcycle: VELOCITY PREDICTOR STEP
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ if (config_btr_gam1_uWt1>1.0e-12) then ! only do this part if it is needed in next SSH solve
+ uPerpTime = oldBtrSubcycleTime
- uPerpTime = newBtrSubcycleTime
+ block => domain % blocklist
+ do while (associated(block))
- block => domain % blocklist
- do while (associated(block))
+ do iEdge=1,block % mesh % nEdges
- do iEdge=1,block % mesh % nEdges
+ cell1 = block % mesh % cellsOnEdge % array(1,iEdge)
+ cell2 = block % mesh % cellsOnEdge % array(2,iEdge)
- cell1 = block % mesh % cellsOnEdge % array(1,iEdge)
- cell2 = block % mesh % cellsOnEdge % array(2,iEdge)
+ ! Compute the barotropic Coriolis term, -f*uPerp
+ CoriolisTerm = 0.0
+ do i = 1,block % mesh % nEdgesOnEdge % array(iEdge)
+ eoe = block % mesh % edgesOnEdge % array(i,iEdge)
+ CoriolisTerm = CoriolisTerm &
+ + block % mesh % weightsOnEdge % array(i,iEdge) &
+ * block % state % time_levs(uPerpTime) % state % uBtrSubcycle % array(eoe) &
+ * block % mesh % fEdge % array(eoe)
+ end do
+
+ ! uBtrNew = uBtrOld + dt/J*(-f*uBtroldPerp - g*grad(SSH) + G)
+ block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) &
+ = (block % state % time_levs(oldBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) &
+ + dt / config_n_btr_subcycles * (CoriolisTerm - gravity &
+ * (block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(cell2) &
+ - block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(cell1) ) &
+ / block % mesh % dcEdge % array(iEdge) &
+ + block % state % time_levs(1) % state % GBtrForcing % array(iEdge))) * block % mesh % edgeMask % array(1, iEdge)
+ end do
- ! Compute -f*uPerp
- uPerp = 0.0
- do i = 1,block % mesh % nEdgesOnEdge % array(iEdge)
- eoe = block % mesh % edgesOnEdge % array(i,iEdge)
- uPerp = uPerp + block % mesh % weightsOnEdge % array(i,iEdge) &
- * block % state % time_levs(uPerpTime) % state % uBtrSubcycle % array(eoe) &
- * block % mesh % fEdge % array(eoe)
- end do
+ block => block % next
+ end do ! block
- ! mrp 110606 efficiency note: could make this a 1D integer factor instead of an if statement.
- if (block % mesh % boundaryEdge % array(1,iEdge).eq.1) then
- block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) = 0.0
- else
+ ! boundary update on uBtrNew
+ call mpas_timer_start("se halo ubtr", .false., timer_halo_ubtr)
+ block => domain % blocklist
+ do while (associated(block))
- ! uBtrNew = uBtrOld + dt*(-f*uBtroldPerp - g*grad(SSH) + G)
+ call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, &
+ block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(:), &
+ block % mesh % nEdges, block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
- sshCell1 = &
- (1-config_btr_gam2_SSHWt1)*block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(cell1) &
- + config_btr_gam2_SSHWt1 *block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle % array(cell1)
+ block => block % next
+ end do ! block
+ call mpas_timer_stop("se halo ubtr", timer_halo_ubtr)
+ endif ! config_btr_gam1_uWt1>1.0e-12
- sshCell2 = &
- (1-config_btr_gam2_SSHWt1)*block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(cell2) &
- + config_btr_gam2_SSHWt1 *block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle % array(cell2)
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ! Barotropic subcycle: SSH PREDICTOR STEP
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ block => domain % blocklist
+ do while (associated(block))
+
+ block % tend % ssh % array(:) = 0.0
+
+ if (config_btr_solve_SSH2) then
+ ! If config_btr_solve_SSH2=.true., then do NOT accumulate FBtr in this SSH predictor
+ ! section, because it will be accumulated in the SSH corrector section.
+ FBtr_coeff = 0.0
+ else
+ ! otherwise, DO accumulate FBtr in this SSH predictor section
+ FBtr_coeff = 1.0
+ endif
+
+ ! config_btr_gam1_uWt1 sets the forward weighting of velocity in the SSH computation
+ ! config_btr_gam1_uWt1= 1 flux = uBtrNew*H
+ ! config_btr_gam1_uWt1=0.5 flux = 1/2*(uBtrNew+uBtrOld)*H
+ ! config_btr_gam1_uWt1= 0 flux = uBtrOld*H
+ ! mrp 120201 efficiency: could we combine the following edge and cell loops?
+ do iEdge=1,block % mesh % nEdges
+ cell1 = block % mesh % cellsOnEdge % array(1,iEdge)
+ cell2 = block % mesh % cellsOnEdge % array(2,iEdge)
+
+ sshEdge = 0.5 * (block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(cell1) &
+ + block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(cell2) )
+ hSum = sshEdge + block % mesh % referenceBottomDepthTopOfCell % array (block % mesh % maxLevelEdgeTop % array(iEdge)+1)
+
+ flux = ((1.0-config_btr_gam1_uWt1) * block % state % time_levs(oldBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) &
+ + config_btr_gam1_uWt1 * block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge)) &
+ * hSum
+
+ block % tend % ssh % array(cell1) = block % tend % ssh % array(cell1) - flux * block % mesh % dvEdge % array(iEdge)
+ block % tend % ssh % array(cell2) = block % tend % ssh % array(cell2) + flux * block % mesh % dvEdge % array(iEdge)
+
+ block % state % time_levs(1) % state % FBtr % array(iEdge) = block % state % time_levs(1) % state % FBtr % array(iEdge) &
+ + FBtr_coeff*flux
+ end do
+
+ ! SSHnew = SSHold + dt/J*(-div(Flux))
+ do iCell=1,block % mesh % nCells
+
+ block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle % array(iCell) &
+ = block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(iCell) &
+ + dt/config_n_btr_subcycles * block % tend % ssh % array(iCell) / block % mesh % areaCell % array (iCell)
+
+ end do
+
+ block => block % next
+ end do ! block
+
+ ! boundary update on SSHnew
+ call mpas_timer_start("se halo ssh", .false., timer_halo_ssh)
+ block => domain % blocklist
+ do while (associated(block))
+
+ call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, &
+ block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle % array(:), &
+ block % mesh % nCells, block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
+
+ block => block % next
+ end do ! block
+ call mpas_timer_stop("se halo ssh", timer_halo_ssh)
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ! Barotropic subcycle: VELOCITY CORRECTOR STEP
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ do BtrCorIter=1,config_n_btr_cor_iter
+ uPerpTime = newBtrSubcycleTime
+
+ block => domain % blocklist
+ do while (associated(block))
+ do iEdge=1,block % mesh % nEdges
+ cell1 = block % mesh % cellsOnEdge % array(1,iEdge)
+ cell2 = block % mesh % cellsOnEdge % array(2,iEdge)
+
+ ! Compute the barotropic Coriolis term, -f*uPerp
+ CoriolisTerm = 0.0
+ do i = 1,block % mesh % nEdgesOnEdge % array(iEdge)
+ eoe = block % mesh % edgesOnEdge % array(i,iEdge)
+ CoriolisTerm = CoriolisTerm + block % mesh % weightsOnEdge % array(i,iEdge) &
+ * block % state % time_levs(uPerpTime) % state % uBtrSubcycle % array(eoe) &
+ * block % mesh % fEdge % array(eoe)
+ end do
+
+ ! In this final solve for velocity, SSH is a linear
+ ! combination of SSHold and SSHnew.
+ sshCell1 = (1-config_btr_gam2_SSHWt1)*block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(cell1) &
+ + config_btr_gam2_SSHWt1 *block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle % array(cell1)
+ sshCell2 = (1-config_btr_gam2_SSHWt1)*block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(cell2) &
+ + config_btr_gam2_SSHWt1 *block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle % array(cell2)
+
+ ! uBtrNew = uBtrOld + dt/J*(-f*uBtroldPerp - g*grad(SSH) + G)
+ block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) &
+ = (block % state % time_levs(oldBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) &
+ + dt/config_n_btr_subcycles *(CoriolisTerm - gravity *(sshCell2 - sshCell1) /block % mesh % dcEdge % array(iEdge) &
+ + block % state % time_levs(1) % state % GBtrForcing % array(iEdge))) * block % mesh % edgeMask % array(1,iEdge)
+ end do
+
+ block => block % next
+ end do ! block
+
+ ! boundary update on uBtrNew
+ call mpas_timer_start("se halo ubtr", .false., timer_halo_ubtr)
+ block => domain % blocklist
+ do while (associated(block))
+ call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, &
+ block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(:), &
+ block % mesh % nEdges, block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
+
+ block => block % next
+ end do ! block
+ call mpas_timer_stop("se halo ubtr", timer_halo_ubtr)
+ end do !do BtrCorIter=1,config_n_btr_cor_iter
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ! Barotropic subcycle: SSH CORRECTOR STEP
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ if (config_btr_solve_SSH2) then
+
+ block => domain % blocklist
+ do while (associated(block))
+ block % tend % ssh % array(:) = 0.0
+
+ ! config_btr_gam3_uWt2 sets the forward weighting of velocity in the SSH computation
+ ! config_btr_gam3_uWt2= 1 flux = uBtrNew*H
+ ! config_btr_gam3_uWt2=0.5 flux = 1/2*(uBtrNew+uBtrOld)*H
+ ! config_btr_gam3_uWt2= 0 flux = uBtrOld*H
+ ! mrp 120201 efficiency: could we combine the following edge and cell loops?
+ do iEdge=1,block % mesh % nEdges
+ cell1 = block % mesh % cellsOnEdge % array(1,iEdge)
+ cell2 = block % mesh % cellsOnEdge % array(2,iEdge)
+
+ ! SSH is a linear combination of SSHold and SSHnew.
+ sshCell1 = (1-config_btr_gam2_SSHWt1)*block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(cell1) &
+ + config_btr_gam2_SSHWt1 *block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle % array(cell1)
+ sshCell2 = (1-config_btr_gam2_SSHWt1)*block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(cell2) &
+ + config_btr_gam2_SSHWt1 *block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle % array(cell2)
- block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) &
- = block % state % time_levs(oldBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) &
- + dt/config_n_btr_subcycles *( &
- uPerp &
- - gravity &
- *( sshCell2 &
- - sshCell1 )&
- /block % mesh % dcEdge % array(iEdge) &
- + block % state % time_levs(1) % state % GBtrForcing % array(iEdge) &
- + block % state % time_levs(1) % state % u_diffusionBtr % array(iEdge))
- ! added del2 diffusion to btr solve
-
- endif
-
- end do
-
- ! Implicit solve for barotropic momentum decay
- if ( config_btr_mom_decay) then
- ! Add term to RHS of momentum equation: -1/gamma u
- !
- ! This changes the solve to:
- ! u^{n+1} = u_provis^{n+1}/(1+dt/gamma)
- !
- coef = 1.0/(1.0 + dt/config_n_btr_subcycles/config_btr_mom_decay_time)
- do iEdge=1,block % mesh % nEdges
- block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) &
- = block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) &
- * coef
- end do
-
- endif
-
- block => block % next
- end do ! block
-
-
- ! boundary update on uBtrNew
- block => domain % blocklist
- do while (associated(block))
-
- call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, &
- block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(:), &
- block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
-
- block => block % next
- end do ! block
-
- end do !do BtrCorIter=1,config_n_btr_cor_iter
-
+ sshEdge = 0.5 * (sshCell1 + sshCell2)
+ hSum = sshEdge + block % mesh % referenceBottomDepthTopOfCell % array (block % mesh % maxLevelEdgeTop % array(iEdge)+1)
+
+ flux = ((1.0-config_btr_gam3_uWt2) * block % state % time_levs(oldBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) &
+ + config_btr_gam3_uWt2 * block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge)) &
+ * hSum
+
+ block % tend % ssh % array(cell1) = block % tend % ssh % array(cell1) - flux * block % mesh % dvEdge % array(iEdge)
+ block % tend % ssh % array(cell2) = block % tend % ssh % array(cell2) + flux * block % mesh % dvEdge % array(iEdge)
+
+ block % state % time_levs(1) % state % FBtr % array(iEdge) = block % state % time_levs(1) % state % FBtr % array(iEdge) + flux
+ end do
+
+ ! SSHnew = SSHold + dt/J*(-div(Flux))
+ do iCell=1,block % mesh % nCells
+ block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle % array(iCell) &
+ = block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(iCell) &
+ + dt/config_n_btr_subcycles * block % tend % ssh % array(iCell) / block % mesh % areaCell % array (iCell)
+ end do
+
+ block => block % next
+ end do ! block
+
+ ! boundary update on SSHnew
+ call mpas_timer_start("se halo ssh", .false., timer_halo_ssh)
+ block => domain % blocklist
+ do while (associated(block))
+ call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, &
+ block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle % array(:), &
+ block % mesh % nCells, block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
+
+ block => block % next
+ end do ! block
+ call mpas_timer_stop("se halo ssh", timer_halo_ssh)
+ endif ! config_btr_solve_SSH2
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ! Barotropic subcycle: Accumulate running sums, advance timestep pointers
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ block => domain % blocklist
+ do while (associated(block))
+
+ ! uBtrNew = uBtrNew + uBtrSubcycleNEW
+ ! This accumulates the sum.
+ ! If the Barotropic Coriolis iteration is limited to one, this could
+ ! be merged with the above code.
+ do iEdge=1,block % mesh % nEdges
+
+ block % state % time_levs(2) % state % uBtr % array(iEdge) &
+ = block % state % time_levs(2) % state % uBtr % array(iEdge) &
+ + block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge)
+
+ end do ! iEdge
+ block => block % next
+ end do ! block
+
+ ! advance time pointers
+ oldBtrSubcycleTime = mod(oldBtrSubcycleTime,2)+1
+ newBtrSubcycleTime = mod(newBtrSubcycleTime,2)+1
+
+ end do ! j=1,config_n_btr_subcycles
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! Barotropic subcycle: Compute thickness flux and new SSH: CORRECTOR
+ ! END Barotropic subcycle loop
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- if (config_btr_solve_SSH2) then
- block => domain % blocklist
- do while (associated(block))
-
- block % tend % ssh % array(:) = 0.0
-
- ! config_btr_gam3_uWt2 sets the forward weighting of velocity in the SSH computation
- ! config_btr_gam3_uWt2= 1 flux = uBtrNew*H
- ! config_btr_gam3_uWt2=0.5 flux = 1/2*(uBtrNew+uBtrOld)*H
- ! config_btr_gam3_uWt2= 0 flux = uBtrOld*H
-
- do iEdge=1,block % mesh % nEdges
- cell1 = block % mesh % cellsOnEdge % array(1,iEdge)
- cell2 = block % mesh % cellsOnEdge % array(2,iEdge)
-
- sshEdge = 0.5 &
- *( block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(cell1) &
- + block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(cell2) )
- hSum = sum(block % mesh % hZLevel % array (1:block % mesh % maxLevelEdgeTop % array(iEdge)))
-
- flux = ((1.0-config_btr_gam3_uWt2) &
- * block % state % time_levs(oldBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge) &
- + config_btr_gam3_uWt2 &
- * block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge)) &
- * (sshEdge + hSum)
-
- block % tend % ssh % array(cell1) = block % tend % ssh % array(cell1) &
- - flux * block % mesh % dvEdge % array(iEdge)
- block % tend % ssh % array(cell2) = block % tend % ssh % array(cell2) &
- + flux * block % mesh % dvEdge % array(iEdge)
-
- block % state % time_levs(1) % state % FBtr % array(iEdge) &
- = block % state % time_levs(1) % state % FBtr % array(iEdge) &
- + flux
-
-
- end do
-
- ! SSHnew = SSHold + dt/J*(-div(Flux))
- do iCell=1,block % mesh % nCells
-
- block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle % array(iCell) &
- = block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(iCell) &
- + dt/config_n_btr_subcycles &
- * block % tend % ssh % array(iCell) / block % mesh % areaCell % array (iCell)
-
- end do
-
- block => block % next
- end do ! block
-
- ! boundary update on SSHnew
- block => domain % blocklist
- do while (associated(block))
-
- call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, &
- block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle % array(:), &
- block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
-
- block => block % next
- end do ! block
-
- endif ! config_btr_solve_SSH2
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! Barotropic subcycle: Accumulate running sums, advance timestep pointers
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- block => domain % blocklist
- do while (associated(block))
-
- ! Accumulate SSH in running sum over the subcycles.
- do iCell=1,block % mesh % nCells
- block % state % time_levs(2) % state % ssh % array(iCell) &
- = block % state % time_levs(2) % state % ssh % array(iCell) &
- + block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle % array(iCell)
- end do
-
- ! uBtrNew = uBtrNew + uBtrSubcycleNEW
- ! This accumulates the sum.
- ! If the Barotropic Coriolis iteration is limited to one, this could
- ! be merged with the above code.
- do iEdge=1,block % mesh % nEdges
-
- block % state % time_levs(2) % state % uBtr % array(iEdge) &
- = block % state % time_levs(2) % state % uBtr % array(iEdge) &
- + block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge)
-
- end do ! iEdge
- block => block % next
- end do ! block
-
- ! advance time pointers
- oldBtrSubcycleTime = mod(oldBtrSubcycleTime,2)+1
- newBtrSubcycleTime = mod(newBtrSubcycleTime,2)+1
-
- end do ! j=1,config_n_btr_subcycles
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! END Barotropic subcycle loop
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
-
! Normalize Barotropic subcycle sums: ssh, uBtr, and F
block => domain % blocklist
do while (associated(block))
-
- do iEdge=1,block % mesh % nEdges
- block % state % time_levs(1) % state % FBtr % array(iEdge) &
- = block % state % time_levs(1) % state % FBtr % array(iEdge) &
- / (config_n_btr_subcycles*config_btr_subcycle_loop_factor)
-
- block % state % time_levs(2) % state % uBtr % array(iEdge) &
- = block % state % time_levs(2) % state % uBtr % array(iEdge) &
- / (config_n_btr_subcycles*config_btr_subcycle_loop_factor + 1)
- end do
-
- if (config_SSH_from=='avg_of_SSH_subcycles') then
- do iCell=1,block % mesh % nCells
- block % state % time_levs(2) % state % ssh % array(iCell) &
- = block % state % time_levs(2) % state % ssh % array(iCell) &
- / (config_n_btr_subcycles*config_btr_subcycle_loop_factor + 1)
- end do
- elseif (config_SSH_from=='avg_flux') then
- ! see below
- else
- write(0,*) 'Abort: Unknown config_SSH_from option: '&
- //trim(config_SSH_from)
- call mpas_dmpar_abort(dminfo)
- endif
-
+
+ do iEdge=1,block % mesh % nEdges
+ block % state % time_levs(1) % state % FBtr % array(iEdge) = block % state % time_levs(1) % state % FBtr % array(iEdge) &
+ / (config_n_btr_subcycles*config_btr_subcycle_loop_factor)
+
+ block % state % time_levs(2) % state % uBtr % array(iEdge) = block % state % time_levs(2) % state % uBtr % array(iEdge) &
+ / (config_n_btr_subcycles*config_btr_subcycle_loop_factor + 1)
+ end do
+
block => block % next
end do ! block
-
-
+
+
! boundary update on F
+ call mpas_timer_start("se halo F", .false., timer_halo_f)
block => domain % blocklist
do while (associated(block))
-
- call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, &
- block % state % time_levs(1) % state % FBtr % array(:), &
- block % mesh % nEdges, &
- block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
-
- block => block % next
+ call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, &
+ block % state % time_levs(1) % state % FBtr % array(:), &
+ block % mesh % nEdges, block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
+
+ block => block % next
end do ! block
+ call mpas_timer_stop("se halo F", timer_halo_f)
! Check that you can compute SSH using the total sum or the individual increments
@@ -831,363 +649,234 @@
allocate(uTemp(block % mesh % nVertLevels))
- if (config_SSH_from=='avg_flux') then
- ! Accumulate fluxes in the tend % ssh variable
- block % tend % ssh % array(:) = 0.0
- do iEdge=1,block % mesh % nEdges
- cell1 = block % mesh % cellsOnEdge % array(1,iEdge)
- cell2 = block % mesh % cellsOnEdge % array(2,iEdge)
+ ! Correction velocity uCorr = (Flux - Sum(h u*))/H
+ ! or, for the full LaTeX version:
+ !{\bf u}^{corr} = \left( {\overline {\bf F}}
+ ! - \sum_{k=1}^{N^{edge}} h_{k,*}^{edge} {\bf u}_k^{avg} \right)
+ ! \left/ \sum_{k=1}^{N^{edge}} h_{k,*}^{edge} \right.
- block % tend % ssh % array(cell1) &
- = block % tend % ssh % array(cell1) &
- - block % state % time_levs(1) % state % FBtr % array(iEdge) &
- * block % mesh % dvEdge % array(iEdge)
+ if (config_u_correction) then
+ ucorr_coef = 1
+ else
+ ucorr_coef = 0
+ endif
+ do iEdge=1,block % mesh % nEdges
- block % tend % ssh % array(cell2) &
- = block % tend % ssh % array(cell2) &
- + block % state % time_levs(1) % state % FBtr % array(iEdge) &
- * block % mesh % dvEdge % array(iEdge)
+ ! This is u^{avg}
+ uTemp(:) = block % state % time_levs(2) % state % uBtr % array(iEdge) &
+ + block % state % time_levs(2) % state % uBcl % array(:,iEdge)
- end do
+ ! hSum is initialized outside the loop because on land boundaries
+ ! maxLevelEdgeTop=0, but I want to initialize hSum with a
+ ! nonzero value to avoid a NaN.
+ uhSum = block % state % time_levs(2) % state % h_edge % array(1,iEdge) * uTemp(1)
+ hSum = block % state % time_levs(2) % state % h_edge % array(1,iEdge)
- do iCell=1,block % mesh % nCells
-
- ! SSHnew = SSHold + dt*(-div(Flux))
- block % state % time_levs(2) % state % ssh % array(iCell) &
- = block % state % time_levs(1) % state % ssh % array(iCell) &
- + dt &
- * block % tend % ssh % array(iCell) / block % mesh % areaCell % array (iCell)
- end do
- endif
+ do k=2,block % mesh % maxLevelEdgeTop % array(iEdge)
+ uhSum = uhSum + block % state % time_levs(2) % state % h_edge % array(k,iEdge) * uTemp(k)
+ hSum = hSum + block % state % time_levs(2) % state % h_edge % array(k,iEdge)
+ enddo
- ! Correction velocity uCorr = (Flux - Sum(h u*))/H
- ! or, for the full latex version:
- !u^{corr} = \left( {\overline {\bf F}}
- ! - \sum_{k=1}^{N^{edge}} \left(\zeta_{k,n}^{*\;edge}+\Delta z_k\right) u_k^* \right)
- !\left/ \sum_{k=1}^{N^{edge}} \left(\zeta_{k,n}^{*\;edge}+\Delta z_k\right) \right.
+ uCorr = ucorr_coef*(( block % state % time_levs(1) % state % FBtr % array(iEdge) - uhSum)/hSum)
- if (config_u_correction) then
- ucorr_coef = 1
- else
- ucorr_coef = 0
- endif
+ ! put u^{tr}, the velocity for tracer transport, in uNew
+ ! mrp 060611 not sure if boundary enforcement is needed here.
+ if (block % mesh % boundaryEdge % array(1,iEdge).eq.1) then
+ block % state % time_levs(2) % state % u % array(:,iEdge) = 0.0
+ else
+ do k=1,block % mesh % maxLevelEdgeTop % array(iEdge)
+ block % state % time_levs(2) % state % u % array(k,iEdge) = uTemp(k) + uCorr
+ enddo
+ do k=block % mesh % maxLevelEdgeTop % array(iEdge)+1,block % mesh % nVertLevels
+ block % state % time_levs(2) % state % u % array(k,iEdge) = 0.0
+ end do
+ endif
- do iEdge=1,block % mesh % nEdges
- cell1 = block % mesh % cellsOnEdge % array(1,iEdge)
- cell2 = block % mesh % cellsOnEdge % array(2,iEdge)
+ end do ! iEdge
- sshEdge = 0.5 &
- *( block % state % time_levs(2) % state % ssh % array(cell1) &
- + block % state % time_levs(2) % state % ssh % array(cell2) )
+ deallocate(uTemp)
- ! This is u*
- uTemp(:) &
- = block % state % time_levs(2) % state % uBtr % array(iEdge) &
- + block % state % time_levs(2) % state % uBcl % array(:,iEdge)
-
- uhSum = (sshEdge + block % mesh % hZLevel % array(1)) * uTemp(1)
- hSum = sshEdge + block % mesh % hZLevel % array(1)
-
- do k=2,block % mesh % maxLevelEdgeTop % array(iEdge)
- uhSum = uhSum + block % mesh % hZLevel % array(k) * uTemp(k)
- hSum = hSum + block % mesh % hZLevel % array(k)
- enddo
-
- uCorr = ucorr_coef*(( block % state % time_levs(1) % state % FBtr % array(iEdge) &
- - uhSum)/hSum)
-
- ! put u^{tr}, the velocity for tracer transport, in uNew
- ! mrp 060611 not sure if boundary enforcement is needed here.
- if (block % mesh % boundaryEdge % array(1,iEdge).eq.1) then
- block % state % time_levs(2) % state % u % array(:,iEdge) = 0.0
- else
- do k=1,block % mesh % maxLevelEdgeTop % array(iEdge)
- block % state % time_levs(2) % state % u % array(k,iEdge) = uTemp(k) + uCorr
- enddo
- do k=block % mesh % maxLevelEdgeTop % array(iEdge)+1,block % mesh % nVertLevels
- block % state % time_levs(2) % state % u % array(k,iEdge) = 0.0
- enddo
- endif
-
- ! Put new sshEdge values in h_edge array, for the OcnTendScalar call below.
- block % state % time_levs(2) % state % h_edge % array(1,iEdge) &
- = sshEdge + block % mesh % hZLevel % array(1)
-
- do k=2,block % mesh % nVertLevels
- block % state % time_levs(2) % state % h_edge % array(k,iEdge) &
- = block % mesh % hZLevel % array(k)
- enddo
-
- end do ! iEdge
-
- ! Put new SSH values in h array, for the OcnTendScalar call below.
- do iCell=1,block % mesh % nCells
- block % state % time_levs(2) % state % h % array(1,iCell) &
- = block % state % time_levs(2) % state % ssh % array(iCell) &
- + block % mesh % hZLevel % array(1)
-
- ! mrp 110601 efficiency note: Since h just moves back and forth between pointers,
- ! this is not necessary once initialized.
- do k=2,block % mesh % nVertLevels
- block % state % time_levs(2) % state % h % array(k,iCell) &
- = block % mesh % hZLevel % array(k)
- enddo
- enddo ! iCell
-
- deallocate(uTemp)
-
block => block % next
end do ! block
+ endif ! split_explicit
- endif ! split_explicit
+ call mpas_timer_stop("se btr vel", timer_btr_vel)
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !
- ! Stage 3: Tracer, density, pressure, vertical velocity prediction
- !
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !
+ ! Stage 3: Tracer, density, pressure, vertical velocity prediction
+ !
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !TDR: it seems almost trivial to hold off on doing T, S and rho updates until the
+ !TDR: dycore time step is complete. we might want to take this opportunity to clean-up
+ !TDR: Stage3 in order to facilitate the testing of not doing tracer updates after this code is committed to trunk.
+ !TDR: at this point, I am suggesting just pushing some of this code into subroutines.
+ !TDR: see comments farther down
+
block => domain % blocklist
do while (associated(block))
+ call ocn_wtop(block % state % time_levs(1) % state,block % state % time_levs(2) % state, block % mesh)
- call ocn_wtop(block % state % time_levs(2) % state, block % mesh)
+ call ocn_tend_h (block % tend, block % state % time_levs(2) % state , block % diagnostics, block % mesh)
+ call ocn_tend_scalar(block % tend, block % state % time_levs(2) % state , block % diagnostics, block % mesh)
- if (trim(config_time_integration) == 'unsplit_explicit') then
- call ocn_tend_h(block % tend, block % state % time_levs(2) % state , block % diagnostics, block % mesh)
- endif
+ block => block % next
+ end do
- call ocn_tend_scalar(block % tend, block % state % time_levs(2) % state , block % diagnostics, block % mesh)
+ ! update halo for thickness and tracer tendencies
+ call mpas_timer_start("se halo h", .false., timer_halo_h)
+ block => domain % blocklist
+ do while (associated(block))
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % h % array(:,:), &
+ block % mesh % nVertLevels, block % mesh % nCells, &
+ block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- block => block % next
+ block => block % next
end do
+ call mpas_timer_stop("se halo h", timer_halo_h)
- ! update halo for thicknes for unsplit only
- if (trim(config_time_integration) == 'unsplit_explicit') then
- block => domain % blocklist
- do while (associated(block))
- call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % h % array(:,:), &
- block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- block => block % next
- end do
- endif ! unsplit_explicit
+ block => domain % blocklist
+ do while (associated(block))
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !
+ ! If iterating, reset variables for next iteration
+ !
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ if (split_explicit_step < config_n_ts_iter) then
- block => domain % blocklist
- do while (associated(block))
- allocate(hNew(block % mesh % nVertLevels))
+ !TDR: should we move this code into a subroutine called "compute_intermediate_value_at_midtime"
+ !TDR: this could be within a contains statement in this routine
- if (trim(config_new_btr_variables_from) == 'last_subcycle') then
- ! This points to the last barotropic SSH subcycle
- sshNew => block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array
- elseif (trim(config_new_btr_variables_from) == 'btr_avg') then
- ! This points to the tendency variable SSH*
- sshNew => block % state % time_levs(2) % state % ssh % array
- endif
+ ! Only need T & S for earlier iterations,
+ ! then all the tracers needed the last time through.
+ do iCell=1,block % mesh % nCells
+ ! NOTE(review): the old "sshNew is a pointer, defined above" comment is stale — sshNew is not referenced in this loop and its pointer setup was removed; confirm and drop.
+ do k=1,block % mesh % maxLevelCell % array(iCell)
- if (trim(config_time_integration) == 'unsplit_explicit') then
+ ! this is h_{n+1}
+ temp_h &
+ = block % state % time_levs(1) % state % h % array(k,iCell) &
+ + dt* block % tend % h % array(k,iCell)
- do iCell=1,block % mesh % nCells
- ! this is h_{n+1}
- block % state % time_levs(2) % state % h % array(:,iCell) &
- = block % state % time_levs(1) % state % h % array(:,iCell) &
- + dt* block % tend % h % array(:,iCell)
+ ! this is h_{n+1/2}
+ block % state % time_levs(2) % state % h % array(k,iCell) &
+ = 0.5*( &
+ block % state % time_levs(1) % state % h % array(k,iCell) &
+ + temp_h)
- ! this is only for the hNew computation below, so there is the correct
- ! value in the ssh variable for unsplit_explicit case.
- block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(iCell) &
- = block % state % time_levs(2) % state % h % array(1,iCell) &
- - block % mesh % hZLevel % array(1)
- end do ! iCell
+ do i=1,2
+ ! This is Phi at n+1
+ temp = ( &
+ block % state % time_levs(1) % state % tracers % array(i,k,iCell) &
+ * block % state % time_levs(1) % state % h % array(k,iCell) &
+ + dt * block % tend % tracers % array(i,k,iCell)) &
+ / temp_h
+
+ ! This is Phi at n+1/2
+ block % state % time_levs(2) % state % tracers % array(i,k,iCell) &
+ = 0.5*( &
+ block % state % time_levs(1) % state % tracers % array(i,k,iCell) &
+ + temp )
+ end do
+ end do
+ end do ! iCell
- endif ! unsplit_explicit
+ ! uBclNew is u'_{n+1/2}
+ ! uBtrNew is {\bar u}_{avg}
+ ! uNew is u^{tr}
- ! Only need T & S for earlier iterations,
- ! then all the tracers needed the last time through.
- if (split_explicit_step < config_n_ts_iter) then
+ ! mrp 110512 I really only need this to compute h_edge, density, pressure, and SSH
+ ! I can par this down later.
+ call ocn_diagnostic_solve(dt, block % state % time_levs(2) % state, block % mesh)
- hNew(:) = block % mesh % hZLevel % array(:)
- do iCell=1,block % mesh % nCells
- ! sshNew is a pointer, defined above.
- hNew(1) = sshNew(iCell) + block % mesh % hZLevel % array(1)
- do k=1,block % mesh % maxLevelCell % array(iCell)
- do i=1,2
- ! This is Phi at n+1
- tracerTemp &
- = ( block % state % time_levs(1) % state % tracers % array(i,k,iCell) &
- * block % state % time_levs(1) % state % h % array(k,iCell) &
- + dt * block % tend % tracers % array(i,k,iCell) &
- ) / hNew(k)
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !
+ ! If large iteration complete, compute all variables at time n+1
+ !
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ elseif (split_explicit_step == config_n_ts_iter) then
- ! This is Phi at n+1/2
- block % state % time_levs(2) % state % tracers % array(i,k,iCell) &
- = 0.5*( &
- block % state % time_levs(1) % state % tracers % array(i,k,iCell) &
- + tracerTemp )
- enddo
- end do
- end do ! iCell
+ !TDR: should we move this code into a subroutine called "compute_final_values_at_nplus1"?
+ !TDR: this could be within a contains statement in this routine
+ do iCell=1,block % mesh % nCells
+ do k=1,block % mesh % maxLevelCell % array(iCell)
- if (trim(config_time_integration) == 'unsplit_explicit') then
+ ! this is h_{n+1}
+ block % state % time_levs(2) % state % h % array(k,iCell) &
+ = block % state % time_levs(1) % state % h % array(k,iCell) &
+ + dt* block % tend % h % array(k,iCell)
- ! compute h*, which is h at n+1/2 and put into array hNew
- ! on last iteration, hNew remains at n+1
- do iCell=1,block % mesh % nCells
- block % state % time_levs(2) % state % h % array(1,iCell) &
- = 0.5*( &
- block % state % time_levs(2) % state % h % array(1,iCell) &
- + block % state % time_levs(1) % state % h % array(1,iCell) )
+ ! This is Phi at n+1
+ do i=1,block % state % time_levs(1) % state % num_tracers
+ block % state % time_levs(2) % state % tracers % array(i,k,iCell) &
+ = (block % state % time_levs(1) % state % tracers % array(i,k,iCell) &
+ * block % state % time_levs(1) % state % h % array(k,iCell) &
+ + dt * block % tend % tracers % array(i,k,iCell)) &
+ / block % state % time_levs(2) % state % h % array(k,iCell)
- end do ! iCell
- endif ! unsplit_explicit
+ enddo
+ end do
+ end do
- ! compute u*, the velocity for tendency terms. Put in uNew.
- ! uBclNew is at time n+1/2 here.
- ! This overwrites u^{tr}, the tracer transport velocity, which was in uNew.
- ! The following must occur after call OcnTendScalar
- do iEdge=1,block % mesh % nEdges
- block % state % time_levs(2) % state % u % array(:,iEdge) &
- = block % state % time_levs(2) % state % uBtr % array(iEdge) &
- + block % state % time_levs(2) % state % uBcl % array(:,iEdge)
- end do ! iEdge
+ ! Recompute final u to go on to next step.
+ ! u_{n+1} = uBtr_{n+1} + uBcl_{n+1}
+ ! Right now uBclNew is at time n+1/2, so back compute to get uBcl at time n+1
+ ! using uBcl_{n+1/2} = 1/2*(uBcl_n + u_Bcl_{n+1})
+ ! so the following lines are
+ ! u_{n+1} = uBtr_{n+1} + 2*uBcl_{n+1/2} - uBcl_n
+ ! note that uBcl is recomputed at the beginning of the next timestep due to Imp Vert mixing,
+ ! so uBcl does not have to be recomputed here.
+
+ do iEdge=1,block % mesh % nEdges
+ do k=1,block % mesh % maxLevelEdgeTop % array(iEdge)
+ block % state % time_levs(2) % state % u % array(k,iEdge) &
+ = block % state % time_levs(2) % state % uBtr % array( iEdge) &
+ +2*block % state % time_levs(2) % state % uBcl % array(k,iEdge) &
+ - block % state % time_levs(1) % state % uBcl % array(k,iEdge)
+ end do
+ end do ! iEdges
- elseif (split_explicit_step == config_n_ts_iter) then
+ endif ! split_explicit_step
- hNew(:) = block % mesh % hZLevel % array(:)
- do iCell=1,block % mesh % nCells
- ! sshNew is a pointer, defined above.
- hNew(1) = sshNew(iCell) + block % mesh % hZLevel % array(1)
- do k=1,block % mesh % maxLevelCell % array(iCell)
- do i=1,block % state % time_levs(1) % state % num_tracers
- ! This is Phi at n+1
- block % state % time_levs(2) % state % tracers % array(i,k,iCell) &
- = ( block % state % time_levs(1) % state % tracers % array(i,k,iCell) &
- * block % state % time_levs(1) % state % h % array(k,iCell) &
- + dt * block % tend % tracers % array(i,k,iCell) &
- ) / hNew(k)
+ block => block % next
+ end do
- enddo
- end do
- end do
-
- endif ! split_explicit_step
- deallocate(hNew)
-
- block => block % next
- end do
-
! Boundary update on tracers. This is placed here, rather than
! on tend % tracers as in RK4, because I needed to update
! afterwards for the del4 diffusion operator.
- block => domain % blocklist
- do while (associated(block))
- call mpas_dmpar_exch_halo_field3d_real(domain % dminfo, block % state % time_levs(2) % state % tracers % array(:,:,:), &
- block % tend % num_tracers, block % mesh % nVertLevels, block % mesh % nCells, &
- block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- block => block % next
- end do
+ call mpas_timer_start("se halo tracers", .false., timer_halo_tracers)
+ block => domain % blocklist
+ do while (associated(block))
+ call mpas_dmpar_exch_halo_field3d_real(domain % dminfo, block % state % time_levs(2) % state % tracers % array(:,:,:), &
+ block % tend % num_tracers, block % mesh % nVertLevels, block % mesh % nCells, block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- if (split_explicit_step < config_n_ts_iter) then
- ! mrp 110512 I really only need this to compute h_edge, density, pressure.
- ! I can par this down later.
- block => domain % blocklist
- do while (associated(block))
+ block => block % next
+ end do
+ call mpas_timer_stop("se halo tracers", timer_halo_tracers)
- call ocn_diagnostic_solve(dt, block % state % time_levs(2) % state, block % mesh)
- block => block % next
- end do
- endif
end do ! split_explicit_step = 1, config_n_ts_iter
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! END large iteration loop
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !
- ! A little clean up at the end: decouple new scalar fields and compute diagnostics for new state
- !
block => domain % blocklist
do while (associated(block))
- if (trim(config_new_btr_variables_from) == 'last_subcycle') then
- do iEdge=1,block % mesh % nEdges
- ! uBtrNew = uBtrSubcycleNew (old here is because counter already flipped)
- ! This line is not needed if u is resplit at the beginning of the timestep.
- block % state % time_levs(2) % state % uBtr % array(iEdge) &
- = block % state % time_levs(oldBtrSubcycleTime) % state % uBtrSubcycle % array(iEdge)
- enddo ! iEdges
- elseif (trim(config_new_btr_variables_from) == 'btr_avg') then
- ! uBtrNew from u*. this is done above, so u* is already in
- ! block % state % time_levs(2) % state % uBtr % array(iEdge)
- else
- write(0,*) 'Abort: Unknown config_new_btr_variables_from: '&
- //trim(config_time_integration)
- call mpas_dmpar_abort(dminfo)
- endif
- ! Recompute final u to go on to next step.
- ! u_{n+1} = uBtr_{n+1} + uBcl_{n+1}
- ! Right now uBclNew is at time n+1/2, so back compute to get uBcl at time n+1
- ! using uBcl_{n+1/2} = 1/2*(uBcl_n + u_Bcl_{n+1})
- ! so the following lines are
- ! u_{n+1} = uBtr_{n+1} + 2*uBcl_{n+1/2} - uBcl_n
- ! note that uBcl is recomputed at the beginning of the next timestep due to Imp Vert mixing,
- ! so uBcl does not have to be recomputed here.
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !
+ ! Implicit vertical mixing, done after timestep is complete
+ !
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- do iEdge=1,block % mesh % nEdges
- do k=1,block % mesh % maxLevelEdgeTop % array(iEdge)
-
- block % state % time_levs(2) % state % u % array(k,iEdge) &
- = block % state % time_levs(2) % state % uBtr % array(iEdge) &
- +2*block % state % time_levs(2) % state % uBcl % array(k,iEdge) &
- - block % state % time_levs(1) % state % uBcl % array(k,iEdge)
- enddo
- ! mrp 110607 zero out velocity below land edges. efficiency: this may not be required.
- do k=block % mesh % maxLevelEdgeTop % array(iEdge) + 1, block % mesh % nVertLevels
- block % state % time_levs(2) % state % u % array(k,iEdge) = 0.0
- enddo
-
- enddo ! iEdges
-
- if (trim(config_time_integration) == 'split_explicit') then
-
- if (trim(config_new_btr_variables_from) == 'last_subcycle') then
- do iCell=1,block % mesh % nCells
- ! SSH for the next step is from the end of the barotropic subcycle.
- block % state % time_levs(2) % state % ssh % array(iCell) &
- = block % state % time_levs(oldBtrSubcycleTime) % state % sshSubcycle % array(iCell)
- end do ! iCell
- elseif (trim(config_new_btr_variables_from) == 'btr_avg') then
- ! sshNew from ssh*. This is done above, so ssh* is already in
- ! block % state % time_levs(2) % state % ssh % array(iCell)
- endif
-
- do iCell=1,block % mesh % nCells
- ! Put new SSH values in h array, for the OcnTendScalar call below.
- block % state % time_levs(2) % state % h % array(1,iCell) &
- = block % state % time_levs(2) % state % ssh % array(iCell) &
- + block % mesh % hZLevel % array(1)
-
- ! mrp 110601 efficiency note: Since h just moves back and forth between pointers,
- ! this is not necessary once initialized.
- do k=2,block % mesh % nVertLevels
- block % state % time_levs(2) % state % h % array(k,iCell) &
- = block % mesh % hZLevel % array(k)
- end do
- end do ! iCell
- end if ! split_explicit
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !
- ! Implicit vertical mixing, done after timestep is complete
- !
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
u => block % state % time_levs(2) % state % u % array
tracers => block % state % time_levs(2) % state % tracers % array
h => block % state % time_levs(2) % state % h % array
@@ -1200,43 +889,34 @@
maxLevelEdgeTop => block % mesh % maxLevelEdgeTop % array
if (config_implicit_vertical_mix) then
- allocate(A(block % mesh % nVertLevels),C(block % mesh % nVertLevels),uTemp(block % mesh % nVertLevels), &
- tracersTemp(num_tracers,block % mesh % nVertLevels))
-
call ocn_vmix_coefs(block % mesh, block % state % time_levs(2) % state, block % diagnostics, err)
- !
! Implicit vertical solve for momentum
- !
-
call ocn_vel_vmix_tend_implicit(block % mesh, dt, ke_edge, vertvisctopofedge, h, h_edge, u, err)
-
- !
+
! Implicit vertical solve for tracers
- !
call ocn_tracer_vmix_tend_implicit(block % mesh, dt, vertdifftopofcell, h, tracers, err)
end if
if (config_test_case == 1) then ! For case 1, wind field should be fixed
block % state % time_levs(2) % state % u % array(:,:) = block % state % time_levs(1) % state % u % array(:,:)
end if
-
call ocn_diagnostic_solve(dt, block % state % time_levs(2) % state, block % mesh)
-
call mpas_reconstruct(block % mesh, block % state % time_levs(2) % state % u % array, &
- block % state % time_levs(2) % state % uReconstructX % array, &
- block % state % time_levs(2) % state % uReconstructY % array, &
- block % state % time_levs(2) % state % uReconstructZ % array, &
- block % state % time_levs(2) % state % uReconstructZonal % array, &
- block % state % time_levs(2) % state % uReconstructMeridional % array &
- )
+ block % state % time_levs(2) % state % uReconstructX % array, &
+ block % state % time_levs(2) % state % uReconstructY % array, &
+ block % state % time_levs(2) % state % uReconstructZ % array, &
+ block % state % time_levs(2) % state % uReconstructZonal % array, &
+ block % state % time_levs(2) % state % uReconstructMeridional % array)
call ocn_time_average_accumulate(block % state % time_levs(2) % state, block % state % time_levs(1) % state)
+
block => block % next
end do
- call mpas_timer_stop("split_explicit_timestep")
+ call mpas_timer_stop("se timestep", timer_main)
+
end subroutine ocn_time_integrator_split!}}}
subroutine filter_btr_mode_tend_u(tend, s, d, grid)!{{{
@@ -1256,110 +936,49 @@
type (diagnostics_type), intent(in) :: d
type (mesh_type), intent(in) :: grid
-! mrp 110512 I just split compute_tend into compute_tend_u and compute_tend_h.
-! Some of these variables can be removed, but at a later time.
- integer :: iEdge, iCell, iVertex, k, cell1, cell2, &
- vertex1, vertex2, eoe, i, j
+ integer :: iEdge, k
integer :: nCells, nEdges, nVertices, nVertLevels, nEdgesSolve
- real (kind=RKIND) :: vertSum, uhSum, hSum, sshEdge
- real (kind=RKIND), dimension(:), pointer :: &
- h_s, fVertex, fEdge, dvEdge, dcEdge, areaCell, areaTriangle, &
- zMidZLevel, zTopZLevel, meshScalingDel2, meshScalingDel4
+ real (kind=RKIND) :: vertSum, uhSum, hSum
real (kind=RKIND), dimension(:,:), pointer :: &
- weightsOnEdge, kiteAreasOnVertex, h_edge, h, u, v, pressure, &
- tend_u, circulation, vorticity, ke, ke_edge, pv_edge, &
- MontPot, wTop, divergence, vertViscTopOfEdge
+ h_edge, h, u,tend_u
type (dm_info) :: dminfo
- integer, dimension(:), pointer :: nEdgesOnCell, nEdgesOnEdge, &
- maxLevelCell, maxLevelEdgeTop, maxLevelVertexBot
- integer, dimension(:,:), pointer :: &
- cellsOnEdge, cellsOnVertex, verticesOnEdge, edgesOnCell, &
- edgesOnEdge, edgesOnVertex
- real (kind=RKIND) :: u_diffusion
- real (kind=RKIND), dimension(:), allocatable:: fluxVertTop,w_dudzTopEdge
+ integer, dimension(:), pointer :: maxLevelEdgeTop
- real (kind=RKIND), allocatable, dimension(:,:) :: delsq_divergence
- real (kind=RKIND), allocatable, dimension(:,:) :: delsq_u
- real (kind=RKIND), allocatable, dimension(:,:) :: delsq_circulation, delsq_vorticity
-
-
- real (kind=RKIND), dimension(:,:), pointer :: u_src
- real (kind=RKIND), parameter :: rho_ref = 1000.0
-
call mpas_timer_start("filter_btr_mode_tend_u")
h => s % h % array
u => s % u % array
- v => s % v % array
- wTop => s % wTop % array
h_edge => s % h_edge % array
- circulation => s % circulation % array
- vorticity => s % vorticity % array
- divergence => s % divergence % array
- ke => s % ke % array
- ke_edge => s % ke_edge % array
- pv_edge => s % pv_edge % array
- MontPot => s % MontPot % array
- pressure => s % pressure % array
- vertViscTopOfEdge => d % vertViscTopOfEdge % array
- weightsOnEdge => grid % weightsOnEdge % array
- kiteAreasOnVertex => grid % kiteAreasOnVertex % array
- cellsOnEdge => grid % cellsOnEdge % array
- cellsOnVertex => grid % cellsOnVertex % array
- verticesOnEdge => grid % verticesOnEdge % array
- nEdgesOnCell => grid % nEdgesOnCell % array
- edgesOnCell => grid % edgesOnCell % array
- nEdgesOnEdge => grid % nEdgesOnEdge % array
- edgesOnEdge => grid % edgesOnEdge % array
- edgesOnVertex => grid % edgesOnVertex % array
- dcEdge => grid % dcEdge % array
- dvEdge => grid % dvEdge % array
- areaCell => grid % areaCell % array
- areaTriangle => grid % areaTriangle % array
- h_s => grid % h_s % array
-! mrp 110516 cleanup fvertex fedge not used in this subroutine
- fVertex => grid % fVertex % array
- fEdge => grid % fEdge % array
- zMidZLevel => grid % zMidZLevel % array
- zTopZLevel => grid % zTopZLevel % array
- maxLevelCell => grid % maxLevelCell % array
maxLevelEdgeTop => grid % maxLevelEdgeTop % array
- maxLevelVertexBot => grid % maxLevelVertexBot % array
tend_u => tend % u % array
nCells = grid % nCells
nEdges = grid % nEdges
- nEdgesSolve = grid % nEdgesSolve
- nVertices = grid % nVertices
nVertLevels = grid % nVertLevels
- u_src => grid % u_src % array
+ do iEdge=1,nEdges
- do iEdge=1,grid % nEdges
+ ! hSum is initialized outside the loop because on land boundaries
+ ! maxLevelEdgeTop=0, but I want to initialize hSum with a
+ ! nonzero value to avoid a NaN.
+ uhSum = h_edge(1,iEdge) * tend_u(1,iEdge)
+ hSum = h_edge(1,iEdge)
- ! I am using hZLevel here. This assumes that SSH is zero everywhere already,
- ! which should be the case if the barotropic mode is filtered.
- ! The more general case is to use sshEdge or h_edge.
- uhSum = (grid % hZLevel % array(1)) * tend_u(1,iEdge)
- hSum = grid % hZLevel % array(1)
+ do k=2,maxLevelEdgeTop(iEdge)
+ uhSum = uhSum + h_edge(k,iEdge) * tend_u(k,iEdge)
+ hSum = hSum + h_edge(k,iEdge)
+ enddo
- do k=2,grid % maxLevelEdgeTop % array(iEdge)
- uhSum = uhSum + grid % hZLevel % array(k) * tend_u(k,iEdge)
- hSum = hSum + grid % hZLevel % array(k)
- enddo
+ vertSum = uhSum/hSum
+ do k=1,maxLevelEdgeTop(iEdge)
+ tend_u(k,iEdge) = tend_u(k,iEdge) - vertSum
+ enddo
+ enddo ! iEdge
- vertSum = uhSum/hSum
-
- do k=1,grid % maxLevelEdgeTop % array(iEdge)
- tend_u(k,iEdge) = tend_u(k,iEdge) - vertSum
- enddo
-
- enddo ! iEdge
-
call mpas_timer_stop("filter_btr_mode_tend_u")
end subroutine filter_btr_mode_tend_u!}}}
@@ -1379,155 +998,51 @@
type (state_type), intent(inout) :: s
type (mesh_type), intent(in) :: grid
-! mrp 110512 I just split compute_tend into compute_tend_u and compute_tend_h.
-! Some of these variables can be removed, but at a later time.
- integer :: iEdge, iCell, iVertex, k, cell1, cell2, &
- vertex1, vertex2, eoe, i, j
+ integer :: iEdge, k
integer :: nCells, nEdges, nVertices, nVertLevels, nEdgesSolve
- real (kind=RKIND) :: vertSum, uhSum, hSum, sshEdge
- real (kind=RKIND), dimension(:), pointer :: &
- h_s, fVertex, fEdge, dvEdge, dcEdge, areaCell, areaTriangle, &
- zMidZLevel, zTopZLevel, meshScalingDel2, meshScalingDel4
+ real (kind=RKIND) :: vertSum, uhSum, hSum
real (kind=RKIND), dimension(:,:), pointer :: &
- weightsOnEdge, kiteAreasOnVertex, h_edge, h, u, v, pressure, &
- tend_u, circulation, vorticity, ke, ke_edge, pv_edge, &
- MontPot, wTop, divergence, vertViscTopOfEdge
+ h_edge, h, u
type (dm_info) :: dminfo
- integer, dimension(:), pointer :: nEdgesOnCell, nEdgesOnEdge, &
- maxLevelCell, maxLevelEdgeTop, maxLevelVertexBot
- integer, dimension(:,:), pointer :: &
- cellsOnEdge, cellsOnVertex, verticesOnEdge, edgesOnCell, &
- edgesOnEdge, edgesOnVertex
- real (kind=RKIND) :: u_diffusion
- real (kind=RKIND), dimension(:), allocatable:: fluxVertTop,w_dudzTopEdge
+ integer, dimension(:), pointer :: maxLevelEdgeTop
- real (kind=RKIND), allocatable, dimension(:,:) :: delsq_divergence
- real (kind=RKIND), allocatable, dimension(:,:) :: delsq_u
- real (kind=RKIND), allocatable, dimension(:,:) :: delsq_circulation, delsq_vorticity
-
-
- real (kind=RKIND), dimension(:,:), pointer :: u_src
- real (kind=RKIND), parameter :: rho_ref = 1000.0
-
call mpas_timer_start("filter_btr_mode_u")
h => s % h % array
u => s % u % array
- v => s % v % array
- wTop => s % wTop % array
h_edge => s % h_edge % array
- circulation => s % circulation % array
- vorticity => s % vorticity % array
- divergence => s % divergence % array
- ke => s % ke % array
- ke_edge => s % ke_edge % array
- pv_edge => s % pv_edge % array
- MontPot => s % MontPot % array
- pressure => s % pressure % array
- weightsOnEdge => grid % weightsOnEdge % array
- kiteAreasOnVertex => grid % kiteAreasOnVertex % array
- cellsOnEdge => grid % cellsOnEdge % array
- cellsOnVertex => grid % cellsOnVertex % array
- verticesOnEdge => grid % verticesOnEdge % array
- nEdgesOnCell => grid % nEdgesOnCell % array
- edgesOnCell => grid % edgesOnCell % array
- nEdgesOnEdge => grid % nEdgesOnEdge % array
- edgesOnEdge => grid % edgesOnEdge % array
- edgesOnVertex => grid % edgesOnVertex % array
- dcEdge => grid % dcEdge % array
- dvEdge => grid % dvEdge % array
- areaCell => grid % areaCell % array
- areaTriangle => grid % areaTriangle % array
- h_s => grid % h_s % array
-! mrp 110516 cleanup fvertex fedge not used in this subroutine
- fVertex => grid % fVertex % array
- fEdge => grid % fEdge % array
- zMidZLevel => grid % zMidZLevel % array
- zTopZLevel => grid % zTopZLevel % array
- maxLevelCell => grid % maxLevelCell % array
maxLevelEdgeTop => grid % maxLevelEdgeTop % array
- maxLevelVertexBot => grid % maxLevelVertexBot % array
nCells = grid % nCells
nEdges = grid % nEdges
- nEdgesSolve = grid % nEdgesSolve
- nVertices = grid % nVertices
nVertLevels = grid % nVertLevels
- u_src => grid % u_src % array
+ do iEdge=1,nEdges
- do iEdge=1,grid % nEdges
+ ! hSum is initialized outside the loop because on land boundaries
+ ! maxLevelEdgeTop=0, but I want to initialize hSum with a
+ ! nonzero value to avoid a NaN.
+ uhSum = h_edge(1,iEdge) * u(1,iEdge)
+ hSum = h_edge(1,iEdge)
- ! I am using hZLevel here. This assumes that SSH is zero everywhere already,
- ! which should be the case if the barotropic mode is filtered.
- ! The more general case is to use sshedge or h_edge.
- uhSum = (grid % hZLevel % array(1)) * u(1,iEdge)
- hSum = grid % hZLevel % array(1)
+ do k=2,maxLevelEdgeTop(iEdge)
+ uhSum = uhSum + h_edge(k,iEdge) * u(k,iEdge)
+ hSum = hSum + h_edge(k,iEdge)
+ enddo
- do k=2,grid % maxLevelEdgeTop % array(iEdge)
- uhSum = uhSum + grid % hZLevel % array(k) * u(k,iEdge)
- hSum = hSum + grid % hZLevel % array(k)
- enddo
+ vertSum = uhSum/hSum
+ do k=1,maxLevelEdgeTop(iEdge)
+ u(k,iEdge) = u(k,iEdge) - vertSum
+ enddo
+ enddo ! iEdge
- vertSum = uhSum/hSum
- do k=1,grid % maxLevelEdgeTop % array(iEdge)
- u(k,iEdge) = u(k,iEdge) - vertSum
- enddo
-
- enddo ! iEdge
-
call mpas_timer_stop("filter_btr_mode_u")
end subroutine filter_btr_mode_u!}}}
- subroutine enforce_boundaryEdge(tend, grid)!{{{
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! Enforce any boundary conditions on the normal velocity at each edge
- !
- ! Input: grid - grid metadata
- !
- ! Output: tend_u set to zero at boundaryEdge == 1 locations
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
-
- implicit none
-
- type (tend_type), intent(inout) :: tend
- type (mesh_type), intent(in) :: grid
-
- integer, dimension(:,:), pointer :: boundaryEdge
- real (kind=RKIND), dimension(:,:), pointer :: tend_u
- integer :: nCells, nEdges, nVertices, nVertLevels
- integer :: iEdge, k
-
- call mpas_timer_start("enforce_boundaryEdge")
-
- nCells = grid % nCells
- nEdges = grid % nEdges
- nVertices = grid % nVertices
- nVertLevels = grid % nVertLevels
-
- boundaryEdge => grid % boundaryEdge % array
- tend_u => tend % u % array
-
- if(maxval(boundaryEdge).le.0) return
-
- do iEdge = 1,nEdges
- do k = 1,nVertLevels
-
- if(boundaryEdge(k,iEdge).eq.1) then
- tend_u(k,iEdge) = 0.0
- endif
-
- enddo
- enddo
- call mpas_timer_stop("enforce_boundaryEdge")
-
- end subroutine enforce_boundaryEdge!}}}
-
end module ocn_time_integration_split
! vim: foldmethod=marker
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hadv.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hadv.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hadv.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -16,6 +16,7 @@
use mpas_grid_types
use mpas_configure
+ use mpas_timer
use ocn_tracer_hadv2
use ocn_tracer_hadv3
@@ -46,7 +47,9 @@
!
!--------------------------------------------------------------------
+ type (timer_node), pointer :: hadv2Timer, hadv3Timer, hadv4Timer
+
!***********************************************************************
contains
@@ -122,9 +125,15 @@
!
!-----------------------------------------------------------------
+ call mpas_timer_start("hadv2", .false., hadv2Timer);
call ocn_tracer_hadv2_tend(grid, u, h_edge, tracers, tend, err1)
+ call mpas_timer_stop("hadv2", hadv2Timer);
+ call mpas_timer_start("hadv3", .false., hadv3Timer);
call ocn_tracer_hadv3_tend(grid, u, h_edge, tracers, tend, err2)
+ call mpas_timer_stop("hadv3", hadv3Timer);
+ call mpas_timer_start("hadv4", .false., hadv4Timer);
call ocn_tracer_hadv4_tend(grid, u, h_edge, tracers, tend, err3)
+ call mpas_timer_stop("hadv4", hadv4Timer);
err = ior(err1, ior(err2, err3))
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hadv2.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hadv2.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hadv2.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -16,7 +16,6 @@
use mpas_grid_types
use mpas_configure
- use mpas_timer
implicit none
private
@@ -111,7 +110,7 @@
integer, dimension(:), pointer :: maxLevelEdgeTop
integer, dimension(:,:), pointer :: cellsOnEdge
- real (kind=RKIND) :: flux, tracer_edge
+ real (kind=RKIND) :: flux, tracer_edge, invAreaCell1, invAreaCell2, r_tmp
real (kind=RKIND), dimension(:), pointer :: dvEdge, areaCell
@@ -127,8 +126,6 @@
if(.not.hadv2On) return
- call mpas_timer_start("compute_scalar_tend-horiz adv 2")
-
nEdges = grid % nEdges
maxLevelEdgeTop => grid % maxLevelEdgeTop % array
cellsOnEdge => grid % cellsOnEdge % array
@@ -139,17 +136,19 @@
do iEdge=1,nEdges
cell1 = cellsOnEdge(1,iEdge)
cell2 = cellsOnEdge(2,iEdge)
+
+ invAreaCell1 = 1.0 / areaCell(cell1)
+ invAreaCell2 = 1.0 / areaCell(cell2)
do k=1,maxLevelEdgeTop(iEdge)
+ r_tmp = u(k,iEdge) * dvEdge(iEdge) * h_edge(k,iEdge)
do iTracer=1,num_tracers
tracer_edge = 0.5 * (tracers(iTracer,k,cell1) + tracers(iTracer,k,cell2))
- flux = u(k,iEdge) * dvEdge(iEdge) * h_edge(k,iEdge) * tracer_edge
- tend(iTracer,k,cell1) = tend(iTracer,k,cell1) - flux/areaCell(cell1)
- tend(iTracer,k,cell2) = tend(iTracer,k,cell2) + flux/areaCell(cell2)
+ flux = r_tmp * tracer_edge
+ tend(iTracer,k,cell1) = tend(iTracer,k,cell1) - flux * invAreaCell1
+ tend(iTracer,k,cell2) = tend(iTracer,k,cell2) + flux * invAreaCell2
end do
end do
end do
-
- call mpas_timer_stop("compute_scalar_tend-horiz adv 2")
!--------------------------------------------------------------------
end subroutine ocn_tracer_hadv2_tend!}}}
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hadv3.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hadv3.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hadv3.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -16,7 +16,6 @@
use mpas_grid_types
use mpas_configure
- use mpas_timer
implicit none
private
@@ -107,13 +106,15 @@
!
!-----------------------------------------------------------------
- integer :: iEdge, nEdges, cell1, cell2, iTracer, num_tracers, i, k
+ integer :: iEdge, nEdges, cell1, cell2, iTracer, num_tracers, i, k, &
+ boundaryMask, velMask
integer, dimension(:), pointer :: maxLevelEdgeTop, nEdgesOnCell
integer, dimension(:,:), pointer :: cellsOnEdge, cellsOnCell, &
- boundaryCell
+ cellMask, edgeMask
- real (kind=RKIND) :: flux, tracer_edge, d2fdx2_cell1, d2fdx2_cell2
+ real (kind=RKIND) :: flux, tracer_edge, d2fdx2_cell1, d2fdx2_cell2, &
+ invAreaCell1, invAreaCell2
real (kind=RKIND), dimension(:), pointer :: dvEdge, dcEdge, areaCell
real (kind=RKIND), dimension(:,:,:), pointer :: deriv_two
@@ -134,7 +135,7 @@
num_tracers = size(tracers, dim=1)
maxLevelEdgeTop => grid % maxLevelEdgeTop % array
nEdgesOnCell => grid % nEdgesOnCell % array
- boundaryCell => grid % boundaryCell % array
+ cellMask => grid % cellMask % array
cellsOnEdge => grid % cellsOnEdge % array
cellsOnCell => grid % cellsOnCell % array
dvEdge => grid % dvEdge % array
@@ -142,60 +143,51 @@
areaCell => grid % areaCell % array
deriv_two => grid % deriv_two % array
- call mpas_timer_start("compute_scalar_tend-horiz adv 3")
do iEdge=1,nEdges
cell1 = cellsOnEdge(1,iEdge)
cell2 = cellsOnEdge(2,iEdge)
+ invAreaCell1 = 1.0 / areaCell(cell1)
+ invAreaCell2 = 1.0 / areaCell(cell2)
+
do k=1,maxLevelEdgeTop(iEdge)
d2fdx2_cell1 = 0.0
d2fdx2_cell2 = 0.0
+ boundaryMask = abs(transfer(cellMask(k,cell1) == 1 .and. cellMask(k,cell2) == 1,boundaryMask))
+
do iTracer=1,num_tracers
!-- if not a boundary cell
- if(boundaryCell(k,cell1).eq.0.and.boundaryCell(k,cell2).eq.0) then
+ d2fdx2_cell1 = deriv_two(1,1,iEdge) * tracers(iTracer,k,cell1) * boundaryMask
+ d2fdx2_cell2 = deriv_two(1,2,iEdge) * tracers(iTracer,k,cell2) * boundaryMask
- d2fdx2_cell1 = deriv_two(1,1,iEdge) * tracers(iTracer,k,cell1)
- d2fdx2_cell2 = deriv_two(1,2,iEdge) * tracers(iTracer,k,cell2)
+ !-- all edges of cell 1
+ do i=1,nEdgesOnCell(cell1) * boundaryMask
+ d2fdx2_cell1 = d2fdx2_cell1 + &
+ deriv_two(i+1,1,iEdge) * tracers(iTracer,k,cellsOnCell(i,cell1))
+ end do
- !-- all edges of cell 1
- do i=1,nEdgesOnCell(cell1)
- d2fdx2_cell1 = d2fdx2_cell1 + &
- deriv_two(i+1,1,iEdge) * tracers(iTracer,k,cellsOnCell(i,cell1))
- end do
+ !-- all edges of cell 2
+ do i=1,nEdgesOnCell(cell2) * boundaryMask
+ d2fdx2_cell2 = d2fdx2_cell2 + &
+ deriv_two(i+1,2,iEdge) * tracers(iTracer,k,cellsOnCell(i,cell2))
+ end do
- !-- all edges of cell 2
- do i=1,nEdgesOnCell(cell2)
- d2fdx2_cell2 = d2fdx2_cell2 + &
- deriv_two(i+1,2,iEdge) * tracers(iTracer,k,cellsOnCell(i,cell2))
- end do
- endif
+ velMask = 2*(abs(transfer(u(k,iEdge) <= 0, velMask))) - 1
+ flux = dvEdge(iEdge) * u(k,iEdge) * h_edge(k,iEdge) * ( &
+ 0.5*(tracers(iTracer,k,cell1) + tracers(iTracer,k,cell2)) &
+ -(dcEdge(iEdge) **2) * (d2fdx2_cell1 + d2fdx2_cell2) / 12. &
+ +velMask*(dcEdge(iEdge) **2) * coef_3rd_order*(d2fdx2_cell1 - d2fdx2_cell2) / 12. )
- !-- if u > 0:
- if (u(k,iEdge) > 0) then
- flux = dvEdge(iEdge) * u(k,iEdge) * h_edge(k,iEdge) * ( &
- 0.5*(tracers(iTracer,k,cell1) + tracers(iTracer,k,cell2)) &
- -(dcEdge(iEdge) **2) * (d2fdx2_cell1 + d2fdx2_cell2) / 12. &
- -(dcEdge(iEdge) **2) * coef_3rd_order*(d2fdx2_cell1 - d2fdx2_cell2) / 12. )
- !-- else u <= 0:
- else
- flux = dvEdge(iEdge) * u(k,iEdge) * h_edge(k,iEdge) * ( &
- 0.5*(tracers(iTracer,k,cell1) + tracers(iTracer,k,cell2)) &
- -(dcEdge(iEdge) **2) * (d2fdx2_cell1 + d2fdx2_cell2) / 12. &
- +(dcEdge(iEdge) **2) * coef_3rd_order*(d2fdx2_cell1 - d2fdx2_cell2) / 12. )
- end if
-
!-- update tendency
- tend(iTracer,k,cell1) = tend(iTracer,k,cell1) - flux/areaCell(cell1)
- tend(iTracer,k,cell2) = tend(iTracer,k,cell2) + flux/areaCell(cell2)
+ tend(iTracer,k,cell1) = tend(iTracer,k,cell1) - flux*invAreaCell1
+ tend(iTracer,k,cell2) = tend(iTracer,k,cell2) + flux*invAreaCell2
enddo
end do
end do
- call mpas_timer_stop("compute_scalar_tend-horiz adv 3")
-
!--------------------------------------------------------------------
end subroutine ocn_tracer_hadv3_tend!}}}
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hadv4.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hadv4.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hadv4.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -16,7 +16,6 @@
use mpas_grid_types
use mpas_configure
- use mpas_timer
implicit none
private
@@ -106,12 +105,13 @@
!
!-----------------------------------------------------------------
- integer :: iEdge, nEdges, cell1, cell2, iTracer, num_tracers, i, k
+ integer :: iEdge, nEdges, cell1, cell2, iTracer, num_tracers, i, k, &
+ boundaryMask
- integer, dimension(:), pointer :: maxLevelEdgeTop
- integer, dimension(:,:), pointer :: cellsOnEdge, cellsOnCell, boundaryCell
+ integer, dimension(:), pointer :: maxLevelEdgeTop, nEdgesOnCell
+ integer, dimension(:,:), pointer :: cellsOnEdge, cellsOnCell, cellMask
- real (kind=RKIND) :: flux, tracer_edge, d2fdx2_cell1, d2fdx2_cell2
+ real (kind=RKIND) :: flux, tracer_edge, d2fdx2_cell1, d2fdx2_cell2, invAreaCell1, invAreaCell2
real (kind=RKIND), dimension(:), pointer :: dvEdge, dcEdge, areaCell
real (kind=RKIND), dimension(:,:,:), pointer :: deriv_two
@@ -131,7 +131,8 @@
nEdges = grid % nEdges
num_tracers = size(tracers, dim=1)
maxLevelEdgeTop => grid % maxLevelEdgeTop % array
- boundaryCell => grid % boundaryCell % array
+ nEdgesOnCell => grid % nEdgesOnCell % array
+ cellMask => grid % cellMask % array
cellsOnEdge => grid % cellsOnEdge % array
cellsOnCell => grid % cellsOnCell % array
dvEdge => grid % dvEdge % array
@@ -139,51 +140,46 @@
areaCell => grid % areaCell % array
deriv_two => grid % deriv_two % array
- call mpas_timer_start("compute_scalar_tend-horiz adv 4")
-
do iEdge=1,nEdges
cell1 = cellsOnEdge(1,iEdge)
cell2 = cellsOnEdge(2,iEdge)
+ invAreaCell1 = 1.0 / areaCell(cell1)
+ invAreaCell2 = 1.0 / areaCell(cell2)
+
do k=1,maxLevelEdgeTop(iEdge)
d2fdx2_cell1 = 0.0
d2fdx2_cell2 = 0.0
+ boundaryMask = abs(transfer(cellMask(k,cell1) == 1 .and. cellMask(k, cell2) == 1, boundaryMask))
+
do iTracer=1,num_tracers
+ d2fdx2_cell1 = deriv_two(1,1,iEdge) * tracers(iTracer,k,cell1) * boundaryMask
+ d2fdx2_cell2 = deriv_two(1,2,iEdge) * tracers(iTracer,k,cell2) * boundaryMask
- !-- if not a boundary cell
- if(boundaryCell(k,cell1).eq.0.and.boundaryCell(k,cell2).eq.0) then
+ !-- all edges of cell 1
+ do i=1,nEdgesOnCell(cell1) * boundaryMask
+ d2fdx2_cell1 = d2fdx2_cell1 + &
+ deriv_two(i+1,1,iEdge) * tracers(iTracer,k,cellsOnCell(i,cell1))
+ end do
- d2fdx2_cell1 = deriv_two(1,1,iEdge) * tracers(iTracer,k,cell1)
- d2fdx2_cell2 = deriv_two(1,2,iEdge) * tracers(iTracer,k,cell2)
+ !-- all edges of cell 2
+ do i=1,nEdgesOnCell(cell2) * boundaryMask
+ d2fdx2_cell2 = d2fdx2_cell2 + &
+ deriv_two(i+1,2,iEdge) * tracers(iTracer,k,cellsOnCell(i,cell2))
+ end do
- !-- all edges of cell 1
- do i=1, grid % nEdgesOnCell % array (cell1)
- d2fdx2_cell1 = d2fdx2_cell1 + &
- deriv_two(i+1,1,iEdge) * tracers(iTracer,k,cellsOnCell(i,cell1))
- end do
-
- !-- all edges of cell 2
- do i=1, grid % nEdgesOnCell % array (cell2)
- d2fdx2_cell2 = d2fdx2_cell2 + &
- deriv_two(i+1,2,iEdge) * tracers(iTracer,k,cellsOnCell(i,cell2))
- end do
-
- endif
-
flux = dvEdge(iEdge) * u(k,iEdge) * h_edge(k,iEdge) * ( &
0.5*(tracers(iTracer,k,cell1) + tracers(iTracer,k,cell2)) &
-(dcEdge(iEdge) **2) * (d2fdx2_cell1 + d2fdx2_cell2) / 12. )
!-- update tendency
- tend(iTracer,k,cell1) = tend(iTracer,k,cell1) - flux/areaCell(cell1)
- tend(iTracer,k,cell2) = tend(iTracer,k,cell2) + flux/areaCell(cell2)
+ tend(iTracer,k,cell1) = tend(iTracer,k,cell1) - flux * invAreaCell1
+ tend(iTracer,k,cell2) = tend(iTracer,k,cell2) + flux * invAreaCell2
enddo
end do
end do
- call mpas_timer_stop("compute_scalar_tend-horiz adv 4")
-
!--------------------------------------------------------------------
end subroutine ocn_tracer_hadv4_tend!}}}
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hmix.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hmix.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hmix.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -18,6 +18,7 @@
use mpas_grid_types
use mpas_configure
+ use mpas_timer
use ocn_tracer_hmix_del2
use ocn_tracer_hmix_del4
@@ -46,7 +47,9 @@
!
!--------------------------------------------------------------------
+ type (timer_node), pointer :: del2Timer, del4Timer
+
!***********************************************************************
contains
@@ -119,8 +122,12 @@
!
!-----------------------------------------------------------------
+ call mpas_timer_start("del2", .false., del2Timer)
call ocn_tracer_hmix_del2_tend(grid, h_edge, tracers, tend, err1)
+ call mpas_timer_stop("del2", del2Timer)
+ call mpas_timer_start("del4", .false., del4Timer)
call ocn_tracer_hmix_del4_tend(grid, h_edge, tracers, tend, err2)
+ call mpas_timer_stop("del4", del4Timer)
err = ior(err1, err2)
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hmix_del2.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hmix_del2.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hmix_del2.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -18,7 +18,6 @@
use mpas_grid_types
use mpas_configure
- use mpas_timer
implicit none
private
@@ -114,10 +113,10 @@
integer, dimension(:,:), allocatable :: boundaryMask
integer, dimension(:), pointer :: maxLevelEdgeTop
- integer, dimension(:,:), pointer :: cellsOnEdge, boundaryEdge
+ integer, dimension(:,:), pointer :: cellsOnEdge, edgeMask
real (kind=RKIND) :: invAreaCell1, invAreaCell2
- real (kind=RKIND) :: tracer_turb_flux, flux
+ real (kind=RKIND) :: tracer_turb_flux, flux, r_tmp
real (kind=RKIND), dimension(:), pointer :: areaCell, dvEdge, dcEdge
real (kind=RKIND), dimension(:), pointer :: meshScalingDel2
@@ -134,15 +133,13 @@
if (.not.del2On) return
- call mpas_timer_start("compute_scalar_tend-horiz diff 2")
-
nEdges = grid % nEdges
nVertLevels = grid % nVertLevels
num_tracers = size(tracers, dim=1)
maxLevelEdgeTop => grid % maxLevelEdgeTop % array
cellsOnEdge => grid % cellsOnEdge % array
- boundaryEdge => grid % boundaryEdge % array
+ edgeMask => grid % edgeMask % array
areaCell => grid % areaCell % array
dvEdge => grid % dvEdge % array
dcEdge => grid % dcEdge % array
@@ -151,36 +148,28 @@
!
! compute a boundary mask to enforce insulating boundary conditions in the horizontal
!
- allocate(boundaryMask(nVertLevels, nEdges+1))
- boundaryMask = 1.0
- where(boundaryEdge.eq.1) boundaryMask=0.0
-
do iEdge=1,nEdges
cell1 = cellsOnEdge(1,iEdge)
cell2 = cellsOnEdge(2,iEdge)
invAreaCell1 = 1.0/areaCell(cell1)
invAreaCell2 = 1.0/areaCell(cell2)
+ r_tmp = meshScalingDel2(iEdge) * eddyDiff2 * dvEdge(iEdge) / dcEdge(iEdge)
+
do k=1,maxLevelEdgeTop(iEdge)
do iTracer=1,num_tracers
! \kappa_2 \nabla \phi on edge</font>
<font color="red">
- tracer_turb_flux = meshScalingDel2(iEdge) * eddyDiff2 &
- *( tracers(iTracer,k,cell2) &
- - tracers(iTracer,k,cell1))/dcEdge(iEdge)
+ tracer_turb_flux = tracers(iTracer,k,cell2) - tracers(iTracer,k,cell1)
! div(h \kappa_2 \nabla \phi) at cell center</font>
<font color="gray">
- flux = dvEdge (iEdge) * h_edge(k,iEdge) &
- * tracer_turb_flux * boundaryMask(k, iEdge)
+ flux = h_edge(k,iEdge) * tracer_turb_flux * edgeMask(k, iEdge) * r_tmp
+
tend(iTracer,k,cell1) = tend(iTracer,k,cell1) + flux * invAreaCell1
tend(iTracer,k,cell2) = tend(iTracer,k,cell2) - flux * invAreaCell2
end do
end do
end do
-
- deallocate(boundaryMask)
- call mpas_timer_stop("compute_scalar_tend-horiz diff 2")
-
!--------------------------------------------------------------------
end subroutine ocn_tracer_hmix_del2_tend!}}}
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hmix_del4.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hmix_del4.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_hmix_del4.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -18,7 +18,6 @@
use mpas_grid_types
use mpas_configure
- use mpas_timer
implicit none
private
@@ -45,7 +44,7 @@
!
!--------------------------------------------------------------------
- logical :: Del4On
+ logical :: del4On
real (kind=RKIND) :: eddyDiff4
@@ -111,12 +110,10 @@
integer :: iEdge, nEdges, num_tracers, nVertLevels, nCells
integer :: iTracer, k, iCell, cell1, cell2
- integer, dimension(:,:), allocatable :: boundaryMask
-
integer, dimension(:), pointer :: maxLevelEdgeTop, maxLevelCell
- integer, dimension(:,:), pointer :: boundaryEdge, cellsOnEdge
+ integer, dimension(:,:), pointer :: edgeMask, cellsOnEdge
- real (kind=RKIND) :: invAreaCell1, invAreaCell2, r, tracer_turb_flux, flux
+ real (kind=RKIND) :: invAreaCell1, invAreaCell2, tracer_turb_flux, flux, invdcEdge, r_tmp1, r_tmp2
real (kind=RKIND), dimension(:,:,:), allocatable :: delsq_tracer
@@ -133,10 +130,8 @@
err = 0
- if (.not.Del4On) return
+ if (.not.del4On) return
- call mpas_timer_start("compute_scalar_tend-horiz diff 4")
-
nEdges = grid % nEdges
nCells = grid % nCells
num_tracers = size(tracers, dim=1)
@@ -144,7 +139,6 @@
maxLevelEdgeTop => grid % maxLevelEdgeTop % array
maxLevelCell => grid % maxLevelCell % array
- boundaryEdge => grid % boundaryEdge % array
cellsOnEdge => grid % cellsOnEdge % array
dcEdge => grid % dcEdge % array
@@ -152,68 +146,61 @@
areaCell => grid % areaCell % array
meshScalingDel4 => grid % meshScalingDel4 % array
- allocate(boundaryMask(nVertLevels, nEdges+1))
- boundaryMask = 1.0
- where(boundaryEdge.eq.1) boundaryMask=0.0
+ edgeMask => grid % edgeMask % array
allocate(delsq_tracer(num_tracers,nVertLevels, nCells+1))
- delsq_tracer(:,:,:) = 0.
+ delsq_tracer(:,:,:) = 0.0
! first del2: div(h \nabla \phi) at cell center</font>
<font color="red">
do iEdge=1,nEdges
cell1 = cellsOnEdge(1,iEdge)
cell2 = cellsOnEdge(2,iEdge)
+ invdcEdge = 1.0 / dcEdge(iEdge)
+
+ invAreaCell1 = 1.0 / areaCell(cell1)
+ invAreaCell2 = 1.0 / areaCell(cell2)
+
do k=1,maxLevelEdgeTop(iEdge)
- do iTracer=1,num_tracers
- delsq_tracer(iTracer,k,cell1) = delsq_tracer(iTracer,k,cell1) &
- + dvEdge(iEdge)*h_edge(k,iEdge) &
- *(tracers(iTracer,k,cell2) - tracers(iTracer,k,cell1)) &
- /dcEdge(iEdge) * boundaryMask(k,iEdge)
- delsq_tracer(iTracer,k,cell2) = delsq_tracer(iTracer,k,cell2) &
- - dvEdge(iEdge)*h_edge(k,iEdge) &
- *(tracers(iTracer,k,cell2) - tracers(iTracer,k,cell1)) &
- /dcEdge(iEdge) * boundaryMask(k,iEdge)
+ do iTracer=1,num_tracers * edgeMask(k, iEdge)
+
+ r_tmp1 = dvEdge(iEdge) * h_edge(k,iEdge) * invdcEdge
+
+ r_tmp2 = r_tmp1 * tracers(iTracer,k,cell2)
+ r_tmp1 = r_tmp1 * tracers(iTracer,k,cell1)
+
+ delsq_tracer(iTracer,k,cell1) = delsq_tracer(iTracer,k,cell1) + (r_tmp2 - r_tmp1) * invAreaCell1
+ delsq_tracer(iTracer,k,cell2) = delsq_tracer(iTracer,k,cell2) - (r_tmp2 - r_tmp1) * invAreaCell2
end do
end do
end do
- do iCell = 1,nCells
- r = 1.0 / areaCell(iCell)
- do k=1,maxLevelCell(iCell)
- do iTracer=1,num_tracers
- delsq_tracer(iTracer,k,iCell) = delsq_tracer(iTracer,k,iCell) * r
- end do
- end do
- end do
-
! second del2: div(h \nabla [delsq_tracer]) at cell center</font>
<font color="gray">
do iEdge=1,grid % nEdges
cell1 = grid % cellsOnEdge % array(1,iEdge)
cell2 = grid % cellsOnEdge % array(2,iEdge)
+
invAreaCell1 = 1.0 / areaCell(cell1)
invAreaCell2 = 1.0 / areaCell(cell2)
+ invdcEdge = 1.0 / dcEdge(iEdge)
+
do k=1,maxLevelEdgeTop(iEdge)
- do iTracer=1,num_tracers
+ do iTracer=1,num_tracers * edgeMask(k,iEdge)
tracer_turb_flux = meshScalingDel4(iEdge) * eddyDiff4 &
- *( delsq_tracer(iTracer,k,cell2) &
- - delsq_tracer(iTracer,k,cell1))/dcEdge(iEdge)
+ * (delsq_tracer(iTracer,k,cell2) - delsq_tracer(iTracer,k,cell1)) &
+ * invdcEdge
+
flux = dvEdge (iEdge) * tracer_turb_flux
- tend(iTracer,k,cell1) = tend(iTracer,k,cell1) &
- - flux * invAreaCell1 * boundaryMask(k,iEdge)
- tend(iTracer,k,cell2) = tend(iTracer,k,cell2) &
- + flux * invAreaCell2 * boundaryMask(k,iEdge)
-
+ tend(iTracer,k,cell1) = tend(iTracer,k,cell1) - flux * invAreaCell1
+ tend(iTracer,k,cell2) = tend(iTracer,k,cell2) + flux * invAreaCell2
enddo
enddo
end do
deallocate(delsq_tracer)
- deallocate(boundaryMask)
- call mpas_timer_stop("compute_scalar_tend-horiz diff 4")
!--------------------------------------------------------------------
end subroutine ocn_tracer_hmix_del4_tend!}}}
@@ -245,10 +232,10 @@
integer, intent(out) :: err !< Output: error flag
err = 0
- Del4on = .false.
+ del4on = .false.
if ( config_h_tracer_eddy_diff4 > 0.0 ) then
- Del4On = .true.
+ del4On = .true.
eddyDiff4 = config_h_tracer_eddy_diff4
endif
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_vadv.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_vadv.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_vadv.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -70,7 +70,7 @@
!
!-----------------------------------------------------------------------
- subroutine ocn_tracer_vadv_tend(grid, wTop, tracers, tend, err)!{{{
+ subroutine ocn_tracer_vadv_tend(grid, h, wTop, tracers, tend, err)!{{{
!-----------------------------------------------------------------
!
@@ -79,6 +79,7 @@
!-----------------------------------------------------------------
real (kind=RKIND), dimension(:,:), intent(in) :: &
+ h, & !< Input: layer thickness
wTop !< Input: vertical tracer in top layer
real (kind=RKIND), dimension(:,:,:), intent(in) :: &
@@ -122,10 +123,15 @@
err = 0
+ ! mrp 120202 efficiency note:
+ ! The following if statement is not needed, since wTop is set to
+ ! zero for isopycnal coordinates. This if statement saves flops
+ ! for isopycnal coordinates. However, if the loops are pushed
+ ! out, we could get rid of this if statement.
if(.not.vadvOn) return
- call ocn_tracer_vadv_stencil_tend(grid, wTop, tracers, tend, err1)
- call ocn_tracer_vadv_spline_tend(grid, wTop, tracers, tend, err2)
+ call ocn_tracer_vadv_stencil_tend(grid, h, wTop, tracers, tend, err1)
+ call ocn_tracer_vadv_spline_tend(grid, h, wTop, tracers, tend, err2)
err = ior(err1, err2)
@@ -166,7 +172,7 @@
err = 0
vadvOn = .false.
- if (config_vert_grid_type.eq.'zlevel') then
+ if (config_vert_grid_type.ne.'isopycnal') then
vadvOn = .true.
call ocn_tracer_vadv_stencil_init(err1)
call ocn_tracer_vadv_spline_init(err2)
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_vadv_spline.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_vadv_spline.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_vadv_spline.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -16,6 +16,7 @@
use mpas_grid_types
use mpas_configure
+ use mpas_timer
use ocn_tracer_vadv_spline2
use ocn_tracer_vadv_spline3
@@ -45,6 +46,7 @@
!
!--------------------------------------------------------------------
+ type (timer_node), pointer :: spline2_timer, spline3_timer
logical :: splineOn
@@ -70,7 +72,7 @@
!
!-----------------------------------------------------------------------
- subroutine ocn_tracer_vadv_spline_tend(grid, wTop, tracers, tend, err)!{{{
+ subroutine ocn_tracer_vadv_spline_tend(grid, h, wTop, tracers, tend, err)!{{{
!-----------------------------------------------------------------
!
@@ -79,6 +81,7 @@
!-----------------------------------------------------------------
real (kind=RKIND), dimension(:,:), intent(in) :: &
+ h, & !< Input: layer thickness
wTop !< Input: vertical tracer in top layer
real (kind=RKIND), dimension(:,:,:), intent(in) :: &
@@ -124,9 +127,14 @@
if(.not.splineOn) return
- call ocn_tracer_vadv_spline2_tend(grid, wTop, tracers, tend, err1)
- call ocn_tracer_vadv_spline3_tend(grid, wTop, tracers, tend, err2)
+ call mpas_timer_start("spline 2", .false., spline2_timer)
+ call ocn_tracer_vadv_spline2_tend(grid, h, wTop, tracers, tend, err1)
+ call mpas_timer_stop("spline 2", spline2_timer)
+ call mpas_timer_start("spline 3", .false., spline3_timer)
+ call ocn_tracer_vadv_spline3_tend(grid, h, wTop, tracers, tend, err2)
+ call mpas_timer_stop("spline 3", spline3_timer)
+
err = ior(err1, err2)
!--------------------------------------------------------------------
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_vadv_spline2.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_vadv_spline2.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_vadv_spline2.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -16,7 +16,6 @@
use mpas_grid_types
use mpas_configure
- use mpas_timer
implicit none
private
@@ -64,7 +63,7 @@
!
!-----------------------------------------------------------------------
- subroutine ocn_tracer_vadv_spline2_tend(grid, wTop, tracers, tend, err)!{{{
+ subroutine ocn_tracer_vadv_spline2_tend(grid, h, wTop, tracers, tend, err)!{{{
!-----------------------------------------------------------------
!
@@ -73,6 +72,7 @@
!-----------------------------------------------------------------
real (kind=RKIND), dimension(:,:), intent(in) :: &
+ h, & !< Input: layer thickness
wTop !< Input: vertical tracer in top layer
real (kind=RKIND), dimension(:,:,:), intent(in) :: &
@@ -108,8 +108,6 @@
integer, dimension(:), pointer :: maxLevelCell
- real (kind=RKIND), dimension(:), pointer :: hRatioZLevelK, hRatioZLevelKm1
-
real (kind=RKIND), dimension(:,:,:), allocatable :: tracerTop
!-----------------------------------------------------------------
@@ -125,28 +123,24 @@
if(.not.spline2On) return
! Compute tracerTop using linear interpolation.
- call mpas_timer_start("compute_scalar_tend-vert adv spline 2")
-
nCells = grid % nCells
nCellsSolve = grid % nCellsSolve
nVertLevels = grid % nVertLevels
num_tracers = size(tracers, 1)
maxLevelCell => grid % maxLevelCell % array
- hRatioZLevelK => grid % hRatioZLevelK % array
- hRatioZLevelKm1 => grid % hRatioZLevelKm1 % array
-
allocate(tracerTop(num_tracers,nVertLevels+1,nCells))
do iCell=1,nCellsSolve
tracerTop(:,1,iCell) = tracers(:,1,iCell)
do k=2,maxLevelCell(iCell)
do iTracer=1,num_tracers
- ! Note hRatio on the k side is multiplied by tracer at k-1
- ! and hRatio on the Km1 (k-1) side is mult. by tracer at k.
+ ! Note h on the k side is multiplied by tracer at k-1
+ ! and h on the Km1 (k-1) side is mult. by tracer at k.
tracerTop(iTracer,k,iCell) = &
- hRatioZLevelK(k) *tracers(iTracer,k-1,iCell) &
- + hRatioZLevelKm1(k)*tracers(iTracer,k ,iCell)
+ ( h(k ,iCell)*tracers(iTracer,k-1,iCell) &
+ + h(k-1,iCell)*tracers(iTracer,k ,iCell) ) &
+ / (h(k-1,iCell) + h(k,iCell))
end do
end do
tracerTop(:,maxLevelCell(iCell)+1,iCell) = tracers(:,maxLevelCell(iCell),iCell)
@@ -163,8 +157,6 @@
end do
deallocate(tracerTop)
-
- call mpas_timer_stop("compute_scalar_tend-vert adv spline 2")
!--------------------------------------------------------------------
end subroutine ocn_tracer_vadv_spline2_tend!}}}
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_vadv_spline3.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_vadv_spline3.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_vadv_spline3.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -16,7 +16,6 @@
use mpas_grid_types
use mpas_configure
- use mpas_timer
use mpas_spline_interpolation
implicit none
@@ -65,7 +64,7 @@
!
!-----------------------------------------------------------------------
- subroutine ocn_tracer_vadv_spline3_tend(grid, wTop, tracers, tend, err)!{{{
+ subroutine ocn_tracer_vadv_spline3_tend(grid, h, wTop, tracers, tend, err)!{{{
!-----------------------------------------------------------------
!
@@ -74,6 +73,7 @@
!-----------------------------------------------------------------
real (kind=RKIND), dimension(:,:), intent(in) :: &
+ h, & !< Input: layer thickness
wTop !< Input: vertical tracer in top layer
real (kind=RKIND), dimension(:,:,:), intent(in) :: &
@@ -109,11 +109,8 @@
integer, dimension(:), pointer :: maxLevelCell
- real (kind=RKIND), dimension(:), pointer :: hRatioZLevelK, &
- hRatioZLevelKm1, zTopZLevel, zMidZLevel
-
real (kind=RKIND), dimension(:), allocatable :: tracer2ndDer, &
- tracersIn, tracersOut, posZMidZLevel, posZTopZLevel
+ tracersIn, tracersOut, depthTop, depthMid
real (kind=RKIND), dimension(:,:,:), allocatable :: tracerTop
!-----------------------------------------------------------------
@@ -129,34 +126,30 @@
if(.not.spline3On) return
! Compute tracerTop using linear interpolation.
- call mpas_timer_start("compute_scalar_tend-vert adv spline 3")
-
nCells = grid % nCells
nCellsSolve = grid % nCellsSolve
nVertLevels = grid % nVertLevels
num_tracers = size(tracers, 1)
maxLevelCell => grid % maxLevelCell % array
- hRatioZLevelK => grid % hRatioZLevelK % array
- hRatioZLevelKm1 => grid % hRatioZLevelKm1 % array
- zMidZLevel => grid % zMidZLevel % array
- zTopZLevel => grid % zTopZLevel % array
-
allocate(tracerTop(num_tracers,nVertLevels+1,nCells))
! Compute tracerTop using cubic spline interpolation.
allocate(tracer2ndDer(nVertLevels))
allocate(tracersIn(nVertLevels),tracersOut(nVertLevels), &
- posZMidZLevel(nVertLevels), posZTopZLevel(nVertLevels-1))
+ depthMid(nVertLevels), depthTop(nVertLevels+1))
- ! For the ocean, zlevel coordinates are negative and decreasing,
- ! but spline functions assume increasing, so flip to positive.
+ do iCell=1,nCellsSolve
- posZMidZLevel = -zMidZLevel(1:nVertLevels)
- posZTopZLevel = -zTopZLevel(2:nVertLevels)
+ ! Here depth considers SSH to be depth=0. We don't need to
+ ! have true z-coordinate depths because it is just for interpolation.
+ depthTop(1) = 0.0
+ do k=1,maxLevelCell(iCell)
+ depthMid(k ) = depthTop(k) + 0.5*h(k,iCell)
+ depthTop(k+1) = depthTop(k) + h(k,iCell)
+ enddo
- do iCell=1,nCellsSolve
! mrp 110201 efficiency note: push tracer loop down
! into spline subroutines to improve efficiency
do iTracer=1,num_tracers
@@ -165,12 +158,12 @@
! subroutine call.
tracersIn(1:maxLevelCell(iCell))=tracers(iTracer,1:maxLevelCell(iCell),iCell)
- call mpas_cubic_spline_coefficients(posZMidZLevel, &
+ call mpas_cubic_spline_coefficients(depthMid, &
tracersIn, maxLevelCell(iCell), tracer2ndDer)
call mpas_interpolate_cubic_spline( &
- posZMidZLevel, tracersIn, tracer2ndDer, maxLevelCell(iCell), &
- posZTopZLevel, tracersOut, maxLevelCell(iCell)-1 )
+ depthMid, tracersIn, tracer2ndDer, maxLevelCell(iCell), &
+ depthTop(2:maxLevelCell(iCell)), tracersOut, maxLevelCell(iCell)-1 )
tracerTop(itracer,1,iCell) = tracers(iTracer,1,iCell)
tracerTop(iTracer,2:maxLevelCell(iCell),iCell) = tracersOut(1:maxLevelCell(iCell)-1)
@@ -189,10 +182,8 @@
end do
deallocate(tracer2ndDer)
- deallocate(tracersIn,tracersOut, posZMidZLevel, posZTopZLevel)
+ deallocate(tracersIn,tracersOut, depthMid, depthTop)
deallocate(tracerTop)
-
- call mpas_timer_stop("compute_scalar_tend-vert adv spline 3")
!--------------------------------------------------------------------
end subroutine ocn_tracer_vadv_spline3_tend!}}}
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_vadv_stencil.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_vadv_stencil.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_vadv_stencil.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -16,6 +16,7 @@
use mpas_grid_types
use mpas_configure
+ use mpas_timer
use ocn_tracer_vadv_stencil2
use ocn_tracer_vadv_stencil3
@@ -46,6 +47,8 @@
!
!--------------------------------------------------------------------
+ type (timer_node), pointer :: stencil2_timer, stencil3_timer, stencil4_timer
+
logical :: stencilOn
@@ -71,7 +74,7 @@
!
!-----------------------------------------------------------------------
- subroutine ocn_tracer_vadv_stencil_tend(grid, wTop, tracers, tend, err)!{{{
+ subroutine ocn_tracer_vadv_stencil_tend(grid, h, wTop, tracers, tend, err)!{{{
!-----------------------------------------------------------------
!
@@ -80,6 +83,7 @@
!-----------------------------------------------------------------
real (kind=RKIND), dimension(:,:), intent(in) :: &
+ h, & !< Input: layer thickness
wTop !< Input: vertical tracer in top layer
real (kind=RKIND), dimension(:,:,:), intent(in) :: &
@@ -125,9 +129,15 @@
if(.not. stencilOn) return
+ call mpas_timer_start("stencil 2", .false., stencil2_timer)
call ocn_tracer_vadv_stencil2_tend(grid, wTop, tracers, tend, err1)
- call ocn_tracer_vadv_stencil3_tend(grid, wTop, tracers, tend, err2)
- call ocn_tracer_vadv_stencil4_tend(grid, wTop, tracers, tend, err3)
+ call mpas_timer_stop("stencil 2", stencil2_timer)
+ call mpas_timer_start("stencil 3", .false., stencil3_timer)
+ call ocn_tracer_vadv_stencil3_tend(grid, h, wTop, tracers, tend, err2)
+ call mpas_timer_stop("stencil 3", stencil3_timer)
+ call mpas_timer_start("stencil 4", .false., stencil4_timer)
+ call ocn_tracer_vadv_stencil4_tend(grid, h, wTop, tracers, tend, err3)
+ call mpas_timer_stop("stencil 4", stencil4_timer)
err = ior(err1, ior(err2, err3))
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_vadv_stencil2.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_vadv_stencil2.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_vadv_stencil2.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -16,7 +16,6 @@
use mpas_grid_types
use mpas_configure
- use mpas_timer
implicit none
private
@@ -122,11 +121,8 @@
err = 0
- if(.not. stencil2On) return
+ if(.not.stencil2On) return
-
- call mpas_timer_start("compute_scalar_tend-vert adv stencil 2")
-
nCells = grid % nCells
nCellsSolve = grid % nCellsSolve
num_tracers = size(tracers, 1)
@@ -160,8 +156,6 @@
end do
deallocate(tracerTop)
- call mpas_timer_stop("compute_scalar_tend-vert adv stencil 2")
-
!--------------------------------------------------------------------
end subroutine ocn_tracer_vadv_stencil2_tend!}}}
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_vadv_stencil3.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_vadv_stencil3.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_vadv_stencil3.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -16,7 +16,6 @@
use mpas_grid_types
use mpas_configure
- use mpas_timer
implicit none
private
@@ -64,7 +63,7 @@
!
!-----------------------------------------------------------------------
- subroutine ocn_tracer_vadv_stencil3_tend(grid, wTop, tracers, tend, err)!{{{
+ subroutine ocn_tracer_vadv_stencil3_tend(grid, h, wTop, tracers, tend, err)!{{{
!-----------------------------------------------------------------
!
@@ -73,6 +72,7 @@
!-----------------------------------------------------------------
real (kind=RKIND), dimension(:,:), intent(in) :: &
+ h, & !< Input: layer thickness
wTop !< Input: vertical tracer in top layer
real (kind=RKIND), dimension(:,:,:), intent(in) :: &
@@ -110,7 +110,6 @@
integer, dimension(:), pointer :: maxLevelCell
real (kind=RKIND) :: cSignWTop, flux3Coef
- real (kind=RKIND), dimension(:), pointer :: hRatioZLevelK, hRatioZLevelKm1
real (kind=RKIND), dimension(:,:,:), allocatable :: tracerTop
@@ -124,18 +123,14 @@
err = 0
- if(.not. stencil3On) return
+ if(.not.stencil3On) return
nCells = grid % nCells
nCellsSolve = grid % nCellsSolve
num_tracers = size(tracers, 1)
nVertLevels = grid % nVertLevels
maxLevelCell => grid % maxLevelCell % array
- hRatioZLevelK => grid % hRatioZLevelK % array
- hRatioZLevelKm1 => grid % hRatioZLevelKm1 % array
- call mpas_timer_start("compute_scalar_tend-vert adv stencil 3")
-
allocate(tracerTop(num_tracers,nVertLevels+1,nCells))
! Compute tracerTop using 3rd order stencil. This is the same
@@ -148,9 +143,10 @@
tracerTop(:,1,iCell) = tracers(:,1,iCell)
k=2
do iTracer=1,num_tracers
- tracerTop(iTracer,k,iCell) = &
- hRatioZLevelK(k) *tracers(iTracer,k-1,iCell) &
- + hRatioZLevelKm1(k)*tracers(iTracer,k ,iCell)
+ tracerTop(iTracer,k,iCell) = &
+ ( h(k,iCell)*tracers(iTracer,k-1,iCell) &
+ + h(k-1,iCell)*tracers(iTracer,k ,iCell) ) &
+ / (h(k-1,iCell) + h(k,iCell))
end do
do k=3,maxLevelCell(iCell)-1
cSignWTop = sign(flux3Coef,wTop(k,iCell))
@@ -165,9 +161,10 @@
end do
k=maxLevelCell(iCell)
do iTracer=1,num_tracers
- tracerTop(iTracer,k,iCell) = &
- hRatioZLevelK(k) *tracers(iTracer,k-1,iCell) &
- + hRatioZLevelKm1(k)*tracers(iTracer,k ,iCell)
+ tracerTop(iTracer,k,iCell) = &
+ ( h(k,iCell)*tracers(iTracer,k-1,iCell) &
+ + h(k-1,iCell)*tracers(iTracer,k ,iCell) ) &
+ / (h(k-1,iCell) + h(k,iCell))
end do
tracerTop(:,maxLevelCell(iCell)+1,iCell) = tracers(:,maxLevelCell(iCell),iCell)
end do
@@ -183,8 +180,6 @@
end do
deallocate(tracerTop)
- call mpas_timer_stop("compute_scalar_tend-vert adv stencil 3")
-
!--------------------------------------------------------------------
end subroutine ocn_tracer_vadv_stencil3_tend!}}}
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_vadv_stencil4.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_vadv_stencil4.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_tracer_vadv_stencil4.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -16,7 +16,6 @@
use mpas_grid_types
use mpas_configure
- use mpas_timer
implicit none
private
@@ -64,7 +63,7 @@
!
!-----------------------------------------------------------------------
- subroutine ocn_tracer_vadv_stencil4_tend(grid, wTop, tracers, tend, err)!{{{
+ subroutine ocn_tracer_vadv_stencil4_tend(grid, h, wTop, tracers, tend, err)!{{{
!-----------------------------------------------------------------
!
@@ -73,6 +72,7 @@
!-----------------------------------------------------------------
real (kind=RKIND), dimension(:,:), intent(in) :: &
+ h, & !< Input: layer thickness
wTop !< Input: vertical tracer in top layer
real (kind=RKIND), dimension(:,:,:), intent(in) :: &
@@ -110,7 +110,6 @@
integer, dimension(:), pointer :: maxLevelCell
real (kind=RKIND) :: cSingWTop, flux3Coef
- real (kind=RKIND), dimension(:), pointer :: hRatioZLevelK, hRatioZLevelKm1
real (kind=RKIND), dimension(:,:,:), allocatable :: tracerTop
@@ -124,17 +123,13 @@
err = 0
- if(.not. Stencil4On) return
+ if(.not.Stencil4On) return
- call mpas_timer_start("compute_scalar_tend-vert adv stencil 4")
-
nCells = grid % nCells
nCellsSolve = grid % nCellsSolve
num_tracers = size(tracers, 1)
nVertLevels = grid % nVertLevels
maxLevelCell => grid % maxLevelCell % array
- hRatioZLevelK => grid % hRatioZLevelK % array
- hRatioZLevelKm1 => grid % hRatioZLevelKm1 % array
allocate(tracerTop(num_tracers,nVertLevels+1,nCells))
@@ -144,9 +139,10 @@
tracerTop(:,1,iCell) = tracers(:,1,iCell)
k=2
do iTracer=1,num_tracers
- tracerTop(iTracer,k,iCell) = &
- hRatioZLevelK(k) *tracers(iTracer,k-1,iCell) &
- + hRatioZLevelKm1(k)*tracers(iTracer,k ,iCell)
+ tracerTop(iTracer,k,iCell) = &
+ ( h(k ,iCell)*tracers(iTracer,k-1,iCell) &
+ + h(k-1,iCell)*tracers(iTracer,k ,iCell) ) &
+ / (h(k-1,iCell) + h(k,iCell))
end do
do k=3,maxLevelCell(iCell)-1
do iTracer=1,num_tracers
@@ -160,9 +156,10 @@
end do
k=maxLevelCell(iCell)
do iTracer=1,num_tracers
- tracerTop(iTracer,k,iCell) = &
- hRatioZLevelK(k) *tracers(iTracer,k-1,iCell) &
- + hRatioZLevelKm1(k)*tracers(iTracer,k ,iCell)
+ tracerTop(iTracer,k,iCell) = &
+ ( h(k ,iCell)*tracers(iTracer,k-1,iCell) &
+ + h(k-1,iCell)*tracers(iTracer,k ,iCell) ) &
+ / (h(k-1,iCell) + h(k,iCell))
end do
tracerTop(:,maxLevelCell(iCell)+1,iCell) = tracers(:,maxLevelCell(iCell),iCell)
end do
@@ -178,8 +175,6 @@
end do
deallocate(tracerTop)
- call mpas_timer_stop("compute_scalar_tend-vert adv stencil 4")
-
!--------------------------------------------------------------------
end subroutine ocn_tracer_vadv_stencil4_tend!}}}
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_vel_coriolis.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_vel_coriolis.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_vel_coriolis.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -106,13 +106,13 @@
!-----------------------------------------------------------------
integer, dimension(:), pointer :: maxLevelEdgeTop, nEdgesOnEdge
- integer, dimension(:,:), pointer :: cellsOnEdge, edgesOnEdge
+ integer, dimension(:,:), pointer :: cellsOnEdge, edgesOnEdge, edgeMask
real (kind=RKIND), dimension(:,:), pointer :: weightsOnEdge
real (kind=RKIND), dimension(:), pointer :: dcEdge
integer :: j, k
integer :: cell1, cell2, nEdgesSolve, iEdge, eoe
- real (kind=RKIND) :: workpv, q
+ real (kind=RKIND) :: workpv, q, invLength
err = 0
@@ -123,12 +123,16 @@
weightsOnEdge => grid % weightsOnEdge % array
dcEdge => grid % dcEdge % array
+ edgeMask => grid % edgeMask % array
+
nEdgesSolve = grid % nEdgesSolve
do iEdge=1,grid % nEdgesSolve
cell1 = cellsOnEdge(1,iEdge)
cell2 = cellsOnEdge(2,iEdge)
+ invLength = 1.0 / dcEdge(iEdgE)
+
do k=1,maxLevelEdgeTop(iEdge)
q = 0.0
@@ -138,9 +142,7 @@
q = q + weightsOnEdge(j,iEdge) * u(k,eoe) * workpv * h_edge(k,eoe)
end do
- tend(k,iEdge) = tend(k,iEdge) &
- + q &
- - ( ke(k,cell2) - ke(k,cell1) ) / dcEdge(iEdge)
+ tend(k,iEdge) = tend(k,iEdge) + edgeMask(k, iEdge) * (q - ( ke(k,cell2) - ke(k,cell1) ) * invLength )
end do
end do
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_vel_forcing_bottomdrag.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_vel_forcing_bottomdrag.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_vel_forcing_bottomdrag.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -109,6 +109,7 @@
integer :: iEdge, nEdgesSolve, k
integer, dimension(:), pointer :: maxLevelEdgeTop
+ integer, dimension(:,:), pointer :: edgeMask
!-----------------------------------------------------------------
!
@@ -124,27 +125,18 @@
nEdgesSolve = grid % nEdgesSolve
maxLevelEdgeTop => grid % maxLevelEdgeTop % array
+ edgeMask => grid % edgeMask % array
do iEdge=1,grid % nEdgesSolve
- k = maxLevelEdgeTop(iEdge)
+ k = max(maxLevelEdgeTop(iEdge), 1)
- ! efficiency note: it would be nice to avoid this
- ! if within a do. This could be done with
- ! k = max(maxLevelEdgeTop(iEdge),1)
- ! and then tend_u(1,iEdge) is just not used for land cells.
+ ! bottom drag is the same as POP:
+ ! -c |u| u where c is unitless and 1.0e-3.
+ ! see POP Reference guide, section 3.4.4.
- if (k>0) then
- ! bottom drag is the same as POP:
- ! -c |u| u where c is unitless and 1.0e-3.
- ! see POP Reference guide, section 3.4.4.
+ tend(k,iEdge) = tend(k,iEdge)-edgeMask(k,iEdge)*(bottomDragCoef*u(k,iEdge)*sqrt(2.0*ke_edge(k,iEdge))/h_edge(k,iEdge))
- tend(k,iEdge) = tend(k,iEdge) &
- -bottomDragCoef*u(k,iEdge) &
- *sqrt(2.0*ke_edge(k,iEdge))/h_edge(k,iEdge)
-
- endif
-
enddo
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_vel_forcing_windstress.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_vel_forcing_windstress.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_vel_forcing_windstress.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -106,8 +106,8 @@
integer :: iEdge, nEdgesSolve, k
integer, dimension(:), pointer :: maxLevelEdgeTop
+ integer, dimension(:,:), pointer :: edgeMask
-
!-----------------------------------------------------------------
!
! call relevant routines for computing tendencies
@@ -122,21 +122,19 @@
nEdgesSolve = grid % nEdgesSolve
maxLevelEdgeTop => grid % maxLevelEdgeTop % array
+ edgeMask => grid % edgeMask % array
do iEdge=1,nEdgesSolve
- k = maxLevelEdgeTop(iEdge)
-
! efficiency note: it would be nice to avoid this
! if within a do. This could be done with
! k = max(maxLevelEdgeTop(iEdge),1)
! and then tend_u(1,iEdge) is just not used for land cells.
- if (k>0) then
+ do k = 1,min(maxLevelEdgeTop(iEdge),1)
! forcing in top layer only
- tend(1,iEdge) = tend(1,iEdge) &
- + u_src(1,iEdge)/rho_ref/h_edge(1,iEdge)
- endif
+ tend(k,iEdge) = tend(k,iEdge) + edgeMask(k, iEdge) * (u_src(k,iEdge)/rho_ref/h_edge(k,iEdge))
+ enddo
enddo
@@ -171,13 +169,11 @@
integer, intent(out) :: err !< Output: error flag
-
windStressOn = .true.
rho_ref = 1000.0
err = 0
-
!--------------------------------------------------------------------
end subroutine ocn_vel_forcing_windstress_init!}}}
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_vel_hmix.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_vel_hmix.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_vel_hmix.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -18,6 +18,7 @@
use mpas_grid_types
use mpas_configure
+ use mpas_timer
use ocn_vel_hmix_del2
use ocn_vel_hmix_del4
@@ -46,7 +47,9 @@
!
!--------------------------------------------------------------------
+ type (timer_node), pointer :: del2Timer, del4Timer
+
!***********************************************************************
contains
@@ -119,8 +122,12 @@
!
!-----------------------------------------------------------------
+ call mpas_timer_start("del2", .false., del2Timer)
call ocn_vel_hmix_del2_tend(grid, divergence, vorticity, tend, err1)
+ call mpas_timer_stop("del2", del2Timer)
+ call mpas_timer_start("del4", .false., del4Timer)
call ocn_vel_hmix_del4_tend(grid, divergence, vorticity, tend, err2)
+ call mpas_timer_stop("del4", del4Timer)
err = ior(err1, err2)
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_vel_hmix_del2.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_vel_hmix_del2.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_vel_hmix_del2.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -16,7 +16,6 @@
use mpas_grid_types
use mpas_configure
- use mpas_timer
implicit none
private
@@ -43,8 +42,7 @@
!
!--------------------------------------------------------------------
- logical :: &
- hmixDel2On !< local flag to determine whether del2 chosen
+ logical :: hmixDel2On !< integer flag to determine whether del2 chosen
real (kind=RKIND) :: &
eddyVisc2, &!< base eddy diffusivity for Laplacian
@@ -116,9 +114,9 @@
integer :: iEdge, nEdgesSolve, cell1, cell2, vertex1, vertex2
integer :: k
integer, dimension(:), pointer :: maxLevelEdgeTop
- integer, dimension(:,:), pointer :: cellsOnEdge, verticesOnEdge
+ integer, dimension(:,:), pointer :: cellsOnEdge, verticesOnEdge, edgeMask
- real (kind=RKIND) :: u_diffusion
+ real (kind=RKIND) :: u_diffusion, invLength1, invLength2
real (kind=RKIND), dimension(:), pointer :: meshScalingDel2, &
dcEdge, dvEdge
@@ -132,13 +130,12 @@
if(.not.hmixDel2On) return
- call mpas_timer_start("compute_tend_u-horiz mix-del2")
-
nEdgesSolve = grid % nEdgesSolve
maxLevelEdgeTop => grid % maxLevelEdgeTop % array
cellsOnEdge => grid % cellsOnEdge % array
verticesOnEdge => grid % verticesOnEdge % array
meshScalingDel2 => grid % meshScalingDel2 % array
+ edgeMask => grid % edgeMask % array
dcEdge => grid % dcEdge % array
dvEdge => grid % dvEdge % array
@@ -148,25 +145,26 @@
vertex1 = verticesOnEdge(1,iEdge)
vertex2 = verticesOnEdge(2,iEdge)
+ invLength1 = 1.0 / dcEdge(iEdge)
+ invLength2 = 1.0 / dvEdge(iEdge)
+
do k=1,maxLevelEdgeTop(iEdge)
! Here -( vorticity(k,vertex2) - vorticity(k,vertex1) ) / dvEdge(iEdge)
! is - \nabla vorticity pointing from vertex 2 to vertex 1, or equivalently
! + k \times \nabla vorticity pointing from cell1 to cell2.
- u_diffusion = ( divergence(k,cell2) - divergence(k,cell1) ) / dcEdge(iEdge) &
+ u_diffusion = ( divergence(k,cell2) - divergence(k,cell1) ) * invLength1 &
-viscVortCoef &
- *( vorticity(k,vertex2) - vorticity(k,vertex1) ) / dvEdge(iEdge)
+ *( vorticity(k,vertex2) - vorticity(k,vertex1) ) * invLength2
u_diffusion = meshScalingDel2(iEdge) * eddyVisc2 * u_diffusion
- tend(k,iEdge) = tend(k,iEdge) + u_diffusion
+ tend(k,iEdge) = tend(k,iEdge) + edgeMask(k, iEdge) * u_diffusion
end do
end do
- call mpas_timer_stop("compute_tend_u-horiz mix-del2")
-
!--------------------------------------------------------------------
end subroutine ocn_vel_hmix_del2_tend!}}}
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_vel_hmix_del4.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_vel_hmix_del4.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_vel_hmix_del4.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -16,7 +16,6 @@
use mpas_grid_types
use mpas_configure
- use mpas_timer
implicit none
private
@@ -43,8 +42,7 @@
!
!--------------------------------------------------------------------
- logical :: &
- hmixDel4On !< local flag to determine whether del4 chosen
+ logical :: hmixDel4On !< local flag to determine whether del4 chosen
real (kind=RKIND) :: &
eddyVisc4, &!< base eddy diffusivity for biharmonic
@@ -114,30 +112,30 @@
!
!-----------------------------------------------------------------
- integer :: iEdge, nEdges, cell1, cell2, vertex1, vertex2, k
+ integer :: iEdge, cell1, cell2, vertex1, vertex2, k
integer :: iCell, iVertex
- integer :: nVertices, nVertLevels, nCells
+ integer :: nVertices, nVertLevels, nCells, nEdges, nEdgesSolve
integer, dimension(:), pointer :: maxLevelEdgeTop, maxLevelVertexBot, &
maxLevelCell
- integer, dimension(:,:), pointer :: cellsOnEdge, verticesOnEdge
+ integer, dimension(:,:), pointer :: cellsOnEdge, verticesOnEdge, edgeMask
- real (kind=RKIND) :: u_diffusion, r
+ real (kind=RKIND) :: u_diffusion, invAreaCell1, invAreaCell2, invAreaTri1, &
+ invAreaTri2, invDcEdge, invDvEdge, r_tmp, delsq_u
real (kind=RKIND), dimension(:), pointer :: dcEdge, dvEdge, areaTriangle, &
meshScalingDel4, areaCell
real (kind=RKIND), dimension(:,:), allocatable :: delsq_divergence, &
- delsq_u, delsq_circulation, delsq_vorticity
+ delsq_circulation, delsq_vorticity
err = 0
if(.not.hmixDel4On) return
- call mpas_timer_start("compute_tend-horiz mix-del4")
-
nCells = grid % nCells
nEdges = grid % nEdges
+ nEdgesSolve = grid % nEdgessolve
nVertices = grid % nVertices
nVertLevels = grid % nVertLevels
maxLevelEdgeTop => grid % maxLevelEdgeTop % array
@@ -150,102 +148,70 @@
areaTriangle => grid % areaTriangle % array
areaCell => grid % areaCell % array
meshScalingDel4 => grid % meshScalingDel4 % array
+ edgeMask => grid % edgeMask % array
allocate(delsq_divergence(nVertLevels, nCells+1))
- allocate(delsq_u(nVertLevels, nEdges+1))
- allocate(delsq_circulation(nVertLevels, nVertices+1))
allocate(delsq_vorticity(nVertLevels, nVertices+1))
- delsq_u(:,:) = 0.0
- delsq_circulation(:,:) = 0.0
delsq_vorticity(:,:) = 0.0
delsq_divergence(:,:) = 0.0
- ! Compute \nabla^2 u = \nabla divergence + k \times \nabla vorticity
- do iEdge=1,grid % nEdges
+ do iEdge=1,nEdges
cell1 = cellsOnEdge(1,iEdge)
cell2 = cellsOnEdge(2,iEdge)
+
vertex1 = verticesOnEdge(1,iEdge)
vertex2 = verticesOnEdge(2,iEdge)
- do k=1,maxLevelEdgeTop(iEdge)
+ invAreaTri1 = 1.0 / areaTriangle(vertex1)
+ invAreaTri2 = 1.0 / areaTriangle(vertex2)
- delsq_u(k,iEdge) = &
- ( divergence(k,cell2) - divergence(k,cell1) ) / dcEdge(iEdge) &
- -viscVortCoef &
- *( vorticity(k,vertex2) - vorticity(k,vertex1)) / dvEdge(iEdge)
+ invAreaCell1 = 1.0 / areaCell(cell1)
+ invAreaCell2 = 1.0 / areaCell(cell2)
- end do
- end do
+ invDcEdge = 1.0 / dcEdge(iEdge)
+ invDvEdge = 1.0 / dvEdge(iEdge)
- ! vorticity using \nabla^2 u
- do iEdge=1,nEdges
- vertex1 = verticesOnEdge(1,iEdge)
- vertex2 = verticesOnEdge(2,iEdge)
do k=1,maxLevelEdgeTop(iEdge)
- delsq_circulation(k,vertex1) = delsq_circulation(k,vertex1) &
- - dcEdge(iEdge) * delsq_u(k,iEdge)
- delsq_circulation(k,vertex2) = delsq_circulation(k,vertex2) &
- + dcEdge(iEdge) * delsq_u(k,iEdge)
- end do
- end do
- do iVertex=1,nVertices
- r = 1.0 / areaTriangle(iVertex)
- do k=1,maxLevelVertexBot(iVertex)
- delsq_vorticity(k,iVertex) = delsq_circulation(k,iVertex) * r
- end do
- end do
+ ! Compute \nabla^2 u = \nabla divergence + k \times \nabla vorticity
+ delsq_u = ( divergence(k,cell2) - divergence(k,cell1) ) * invDcEdge &
+ -viscVortCoef *( vorticity(k,vertex2) - vorticity(k,vertex1)) * invDvEdge
- ! Divergence using \nabla^2 u
- do iEdge=1,nEdges
- cell1 = cellsOnEdge(1,iEdge)
- cell2 = cellsOnEdge(2,iEdge)
- do k=1,maxLevelEdgeTop(iEdge)
- delsq_divergence(k,cell1) = delsq_divergence(k,cell1) &
- + delsq_u(k,iEdge)*dvEdge(iEdge)
- delsq_divergence(k,cell2) = delsq_divergence(k,cell2) &
- - delsq_u(k,iEdge)*dvEdge(iEdge)
+ ! vorticity using \nabla^2 u
+ r_tmp = dcEdge(iEdge) * delsq_u
+ delsq_vorticity(k,vertex1) = delsq_vorticity(k,vertex1) - r_tmp * invAreaTri1
+ delsq_vorticity(k,vertex2) = delsq_vorticity(k,vertex2) + r_tmp * invAreaTri2
+
+ ! Divergence using \nabla^2 u
+ r_tmp = dvEdge(iEdge) * delsq_u
+ delsq_divergence(k, cell1) = delsq_divergence(k,cell1) + r_tmp * invAreaCell1
+ delsq_divergence(k, cell2) = delsq_divergence(k,cell2) - r_tmp * invAreaCell2
end do
end do
- do iCell = 1,nCells
- r = 1.0 / areaCell(iCell)
- do k = 1,maxLevelCell(iCell)
- delsq_divergence(k,iCell) = delsq_divergence(k,iCell) * r
- end do
- end do
! Compute - \kappa \nabla^4 u
! as \nabla div(\nabla^2 u) + k \times \nabla ( k \cross curl(\nabla^2 u) )
- do iEdge=1,grid % nEdgesSolve
+ do iEdge=1,nEdgesSolve
cell1 = cellsOnEdge(1,iEdge)
cell2 = cellsOnEdge(2,iEdge)
vertex1 = verticesOnEdge(1,iEdge)
vertex2 = verticesOnEdge(2,iEdge)
+ invDcEdge = 1.0 / dcEdge(iEdge)
+ invDvEdge = 1.0 / dvEdge(iEdge)
+ r_tmp = meshScalingDel4(iEdge) * eddyVisc4
+
do k=1,maxLevelEdgeTop(iEdge)
- delsq_u(k,iEdge) = &
- ( divergence(k,cell2) - divergence(k,cell1) ) / dcEdge(iEdge) &
- -( vorticity(k,vertex2) - vorticity(k,vertex1)) / dvEdge(iEdge)
+ u_diffusion = (delsq_divergence(k,cell2) - delsq_divergence(k,cell1)) * invDcEdge &
+ -viscVortCoef * (delsq_vorticity(k,vertex2) - delsq_vorticity(k,vertex1) ) * invDvEdge
- u_diffusion = ( delsq_divergence(k,cell2) &
- - delsq_divergence(k,cell1) ) / dcEdge(iEdge) &
- -viscVortCoef &
- *( delsq_vorticity(k,vertex2) &
- - delsq_vorticity(k,vertex1) ) / dvEdge(iEdge)
-
- u_diffusion = meshScalingDel4(iEdge) * eddyVisc4 * u_diffusion
-
- tend(k,iEdge) = tend(k,iEdge) - u_diffusion
+ tend(k,iEdge) = tend(k,iEdge) - edgeMask(k, iEdge) * u_diffusion * r_tmp
end do
end do
deallocate(delsq_divergence)
- deallocate(delsq_u)
- deallocate(delsq_circulation)
deallocate(delsq_vorticity)
- call mpas_timer_stop("compute_tend-horiz mix-del4")
-
!--------------------------------------------------------------------
end subroutine ocn_vel_hmix_del4_tend!}}}
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_vel_pressure_grad.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_vel_pressure_grad.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_vel_pressure_grad.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -17,6 +17,7 @@
use mpas_grid_types
use mpas_configure
+ use mpas_constants
implicit none
private
@@ -43,7 +44,7 @@
!
!--------------------------------------------------------------------
- real (kind=RKIND) :: rho0Inv
+ real (kind=RKIND) :: rho0Inv, grho0Inv
!***********************************************************************
@@ -64,7 +65,7 @@
!
!-----------------------------------------------------------------------
- subroutine ocn_vel_pressure_grad_tend(grid, pressure, tend, err)!{{{
+ subroutine ocn_vel_pressure_grad_tend(grid, pressure, zMid, rho, tend, err)!{{{
!-----------------------------------------------------------------
!
@@ -73,7 +74,9 @@
!-----------------------------------------------------------------
real (kind=RKIND), dimension(:,:), intent(in) :: &
- pressure !< Input: Pressure field or Mongomery potential
+ pressure, & !< Input: Pressure field or Mongomery potential
+ zMid, & !< Input: z-coordinate at mid-depth of layer
+ rho !< Input: density
type (mesh_type), intent(in) :: &
grid !< Input: grid information
@@ -103,9 +106,10 @@
integer :: nEdgesSolve, iEdge, k, cell1, cell2
integer, dimension(:), pointer :: maxLevelEdgeTop
- integer, dimension(:,:), pointer :: cellsOnEdge
+ integer, dimension(:,:), pointer :: cellsOnEdge, edgeMask
real (kind=RKIND), dimension(:), pointer :: dcEdge
+ real (kind=RKIND) :: invdcEdge
err = 0
@@ -113,31 +117,35 @@
maxLevelEdgeTop => grid % maxLevelEdgeTop % array
cellsOnEdge => grid % cellsOnEdge % array
dcEdge => grid % dcEdge % array
+ edgeMask => grid % edgeMask % array
- if (config_vert_grid_type.eq.'isopycnal') then
- do iEdge=1,nEdgesSolve
- cell1 = cellsOnEdge(1,iEdge)
- cell2 = cellsOnEdge(2,iEdge)
- do k=1,maxLevelEdgeTop(iEdge)
- tend(k,iEdge) = tend(k,iEdge) &
- - (pressure(k,cell2) - pressure(k,cell1))/dcEdge(iEdge)
- end do
- enddo
- elseif (config_vert_grid_type.eq.'zlevel') then
- do iEdge=1,nEdgesSolve
- cell1 = cellsOnEdge(1,iEdge)
- cell2 = cellsOnEdge(2,iEdge)
- do k=1,maxLevelEdgeTop(iEdge)
+ ! pressure for generalized coordinates
+ ! -1/rho_0 (grad p_k + rho g grad z_k^{mid})
+ ! For pure isopycnal coordinates, we are still using
+ ! grad(M), the gradient of Montgomery Potential, because
+ ! we have set rho0Inv=1 and grho0Inv=0 in the init routine,
+ ! and pressure is passed in as MontPot.
+
+ do iEdge=1,nEdgesSolve
+ cell1 = cellsOnEdge(1,iEdge)
+ cell2 = cellsOnEdge(2,iEdge)
+ invdcEdge = 1.0 / dcEdge(iEdge)
+
+ do k=1,maxLevelEdgeTop(iEdge)
tend(k,iEdge) = tend(k,iEdge) &
- - rho0Inv*( pressure(k,cell2) &
- - pressure(k,cell1) )/dcEdge(iEdge)
- end do
+ - edgeMask(k,iEdge) * rho0Inv*( pressure(k,cell2) &
+ - pressure(k,cell1) )* invdcEdge &
+ - edgeMask(k,iEdge) * grho0Inv* 0.5*(rho(k,cell1)+rho(k,cell2)) &
+ *( zMid(k,cell2) &
+ - zMid(k,cell1) )* invdcEdge
+
+ end do
- enddo
- endif
+ end do
+
!--------------------------------------------------------------------
end subroutine ocn_vel_pressure_grad_tend!}}}
@@ -178,12 +186,16 @@
err = 0
- if (config_vert_grid_type.eq.'isopycnal') then
+ if (config_pressure_type.eq.'MontgomeryPotential') then
rho0Inv = 1.0
- elseif (config_vert_grid_type.eq.'zlevel') then
+ grho0Inv = 0.0
+ else
rho0Inv = 1.0/config_rho0
+ grho0Inv = gravity/config_rho0
end if
+
+
!--------------------------------------------------------------------
end subroutine ocn_vel_pressure_grad_init!}}}
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_vel_vadv.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_vel_vadv.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_vel_vadv.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -64,7 +64,7 @@
!
!-----------------------------------------------------------------------
- subroutine ocn_vel_vadv_tend(grid, u, wTop, tend, err)!{{{
+ subroutine ocn_vel_vadv_tend(grid, u, h_edge, wTop, tend, err)!{{{
!-----------------------------------------------------------------
!
@@ -75,6 +75,7 @@
real (kind=RKIND), dimension(:,:), intent(in) :: &
u !< Input: Horizontal velocity
real (kind=RKIND), dimension(:,:), intent(in) :: &
+ h_edge,&!< Input: thickness at edge
wTop !< Input: Vertical velocity on top layer
type (mesh_type), intent(in) :: &
@@ -106,12 +107,16 @@
integer :: iEdge, nEdgesSolve, cell1, cell2, k
integer :: nVertLevels
integer, dimension(:), pointer :: maxLevelEdgeTop
- integer, dimension(:,:), pointer :: cellsOnEdge
+ integer, dimension(:,:), pointer :: cellsOnEdge, edgeMask
real (kind=RKIND) :: wTopEdge
real (kind=RKIND), dimension(:), allocatable :: w_dudzTopEdge
- real (kind=RKIND), dimension(:), pointer :: zMidZLevel
+ ! mrp 120202 efficiency note:
+ ! The following if statement is not needed, since wTop is set to
+ ! zero for isopycnal coordinates. This if statment saves flops
+ ! for isopycnal coordinates. However, if the loops are pushed
+ ! out, we could get rid of this if statement.
if(.not.velVadvOn) return
err = 0
@@ -120,7 +125,7 @@
nEdgesSolve = grid % nEdgesSolve
maxLevelEdgeTop => grid % maxLevelEdgeTop % array
cellsOnEdge => grid % cellsOnEdge % array
- zMidZLevel => grid % zMidZLevel % array
+ edgeMask => grid % edgeMask % array
allocate(w_dudzTopEdge(nVertLevels+1))
w_dudzTopEdge = 0.0
@@ -134,14 +139,13 @@
! compute dudz at vertical interface with first order derivative.
w_dudzTopEdge(k) = wTopEdge * (u(k-1,iEdge)-u(k,iEdge)) &
- / (zMidZLevel(k-1) - zMidZLevel(k))
+ / (0.5*(h_edge(k-1,iEdge) + h_edge(k,iEdge)))
end do
w_dudzTopEdge(maxLevelEdgeTop(iEdge)+1) = 0.0
! Average w*du/dz from vertical interface to vertical middle of cell
do k=1,maxLevelEdgeTop(iEdge)
- tend(k,iEdge) = tend(k,iEdge) &
- - 0.5 * (w_dudzTopEdge(k) + w_dudzTopEdge(k+1))
+ tend(k,iEdge) = tend(k,iEdge) - edgeMask(k, iEdge) * 0.5 * (w_dudzTopEdge(k) + w_dudzTopEdge(k+1))
enddo
enddo
deallocate(w_dudzTopEdge)
@@ -179,7 +183,7 @@
err = 0
velVadvOn = .false.
- if (config_vert_grid_type.eq.'zlevel') then
+ if (config_vert_grid_type.ne.'isopycnal') then
velVadvOn = .true.
end if
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_vmix.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_vmix.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_vmix.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -199,8 +199,6 @@
if(implicitOn) return
- call mpas_timer_start("compute_tend_u-explicit vert mix")
-
nEdgessolve = grid % nEdgesSolve
nVertLevels = grid % nVertLevels
maxLevelEdgeTop => grid % maxLevelEdgeTop % array
@@ -223,9 +221,6 @@
end do
deallocate(fluxVertTop)
-
- call mpas_timer_stop("compute_tend_u-explicit vert mix")
-
!--------------------------------------------------------------------
end subroutine ocn_vel_vmix_tend_explicit!}}}
@@ -319,7 +314,6 @@
! mrp 110315 efficiency note: for z-level, could precompute
! -2.0*dt/(h(k)_h(k+1))/h(k) in setup
! h_edge is computed in compute_solve_diag, and is not available yet.
- ! This could be removed if hZLevel used instead.
cell1 = cellsOnEdge(1,iEdge)
cell2 = cellsOnEdge(2,iEdge)
do k=1,maxLevelEdgeTop(iEdge)
@@ -347,6 +341,8 @@
end if
end do
+ deallocate(A,C,uTemp)
+
!--------------------------------------------------------------------
end subroutine ocn_vel_vmix_tend_implicit!}}}
@@ -418,8 +414,6 @@
if(implicitOn) return
- call mpas_timer_start("compute_scalar_tend-explicit vert diff")
-
nCellsSolve = grid % nCellsSolve
nVertLevels = grid % nVertLevels
num_tracers = size(tracers, dim=1)
@@ -454,9 +448,6 @@
!print '(a,50e12.2)', 'tend_tr ',tend_tr(3,1,1:maxLevelCell(iCell))
enddo ! iCell loop
deallocate(fluxVertTop)
-
- call mpas_timer_stop("compute_scalar_tend-explicit vert diff")
-
!--------------------------------------------------------------------
end subroutine ocn_tracer_vmix_tend_explicit!}}}
@@ -558,9 +549,9 @@
tracers(:,1:maxLevelCell(iCell),iCell) = tracersTemp(:,1:maxLevelCell(iCell))
tracers(:,maxLevelCell(iCell)+1:nVertLevels,iCell) = -1e34
end do
+
deallocate(A,C,tracersTemp)
-
!--------------------------------------------------------------------
end subroutine ocn_tracer_vmix_tend_implicit!}}}
@@ -602,7 +593,7 @@
if(config_implicit_vertical_mix) then
explicitOn = .false.
- implicitOn =.true.
+ implicitOn = .true.
end if
call ocn_vmix_coefs_const_init(err1)
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_vmix_coefs_rich.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_vmix_coefs_rich.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_vmix_coefs_rich.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -26,6 +26,8 @@
private
save
+ type (timer_node), pointer :: richEOSTimer
+
!--------------------------------------------------------------------
!
! Public parameters
@@ -139,8 +141,10 @@
rhoDisplaced => s % rhoDisplaced % array
tracers => s % tracers % array
+ call mpas_timer_start("eos rich", .false., richEOSTimer)
call ocn_equation_of_state_rho(s, grid, 0, 'relative', err)
call ocn_equation_of_state_rho(s, grid, 1, 'relative', err)
+ call mpas_timer_stop("eos rich", richEOSTimer)
call ocn_vmix_get_rich_numbers(grid, indexT, indexS, u, h, h_edge, &
rho, rhoDisplaced, tracers, RiTopOfEdge, RiTopOfCell, err1)
@@ -244,8 +248,6 @@
else
! for Ri<0 and explicit vertical mix,
! use maximum diffusion allowed by CFL criterion
- ! mrp 110324 efficiency note: for z-level, could use fixed
- ! grid array hMeanTopZLevel and compute maxdiff on startup.
vertViscTopOfEdge(k,iEdge) = &
((h_edge(k-1,iEdge)+h_edge(k,iEdge))/2.0)**2/config_dt/4.0
end if
@@ -353,8 +355,6 @@
else
! for Ri<0 and explicit vertical mix,
! use maximum diffusion allowed by CFL criterion
- ! mrp 110324 efficiency note: for z-level, could use fixed
- ! grid array hMeanTopZLevel and compute maxdiff on startup.
vertDiffTopOfCell(k,iCell) = &
((h(k-1,iCell)+h(k,iCell))/2.0)**2/config_dt/4.0
end if
@@ -440,7 +440,7 @@
err = 0
- if(.not.richViscOn .and. .not.richDiffOn) return
+ if((.not.richViscOn) .and. (.not.richDiffOn)) return
nVertLevels = grid % nVertLevels
nCells = grid % nCells
@@ -455,8 +455,8 @@
areaCell => grid % areaCell % array
allocate( &
- drhoTopOfCell(nVertLevels+1,nCells+1), drhoTopOfEdge(nVertLevels+1,nEdges+1), &
- du2TopOfCell(nVertLevels+1,nCells+1), du2TopOfEdge(nVertLevels+1,nEdges+1))
+ drhoTopOfCell(nVertLevels+1,nCells+1), drhoTopOfEdge(nVertLevels+1,nEdges), &
+ du2TopOfCell(nVertLevels+1,nCells+1), du2TopOfEdge(nVertLevels+1,nEdges))
! compute density of parcel displaced to next deeper z-level,
! in state % rhoDisplaced
Modified: branches/atmos_physics/src/core_ocean/mpas_ocn_vmix_coefs_tanh.F
===================================================================
--- branches/atmos_physics/src/core_ocean/mpas_ocn_vmix_coefs_tanh.F        2012-02-27 22:31:30 UTC (rev 1537)
+++ branches/atmos_physics/src/core_ocean/mpas_ocn_vmix_coefs_tanh.F        2012-02-27 22:33:23 UTC (rev 1538)
@@ -177,18 +177,22 @@
integer :: k, nVertLevels
- real (kind=RKIND), dimension(:), pointer :: zTopZLevel
+ real (kind=RKIND), dimension(:), pointer :: referenceBottomDepth
err = 0
if(.not.tanhViscOn) return
nVertLevels = grid % nVertLevels
- zTopZLevel => grid % zTopZLevel % array
+ referenceBottomDepth => grid % referenceBottomDepth % array
- do k=1,nVertLevels+1
- vertViscTopOfEdge(k,:) = -(config_max_visc_tanh-config_min_visc_tanh)/2.0 &
- *tanh(-(zTopZLevel(k)-config_ZMid_tanh) &
+ ! referenceBottomDepth is used here for simplicity. Using zMid and h, which
+ ! vary in time, would give the exact location of the top, but it
+ ! would only change the diffusion value very slightly.
+ vertViscTopOfEdge = 0.0
+ do k=2,nVertLevels
+ vertViscTopOfEdge(k,:) = -(config_max_visc_tanh-config_min_visc_tanh)/2.0 &
+ *tanh((referenceBottomDepth(k-1)+config_ZMid_tanh) &
/config_zWidth_tanh) &
+ (config_max_visc_tanh+config_min_visc_tanh)/2
end do
@@ -246,18 +250,22 @@
integer :: k, nVertLevels
- real (kind=RKIND), dimension(:), pointer :: zTopZLevel
+ real (kind=RKIND), dimension(:), pointer :: referenceBottomDepth
err = 0
if(.not.tanhDiffOn) return
nVertLevels = grid % nVertLevels
- zTopZLevel => grid % zTopZLevel % array
+ referenceBottomDepth => grid % referenceBottomDepth % array
- do k=1,nVertLevels+1
+ ! referenceBottomDepth is used here for simplicity. Using zMid and h, which
+ ! vary in time, would give the exact location of the top, but it
+ ! would only change the diffusion value very slightly.
+ vertDiffTopOfCell = 0.0
+ do k=2,nVertLevels
vertDiffTopOfCell(k,:) = -(config_max_diff_tanh-config_min_diff_tanh)/2.0 &
- *tanh(-(zTopZLevel(k)-config_ZMid_tanh) &
+ *tanh((referenceBottomDepth(k-1)+config_ZMid_tanh) &
/config_zWidth_tanh) &
+ (config_max_diff_tanh+config_min_diff_tanh)/2
end do
@@ -308,14 +316,6 @@
tanhDiffOn = .true.
endif
- if(tanhViscOn .or. tanhDiffOn) then
- if (config_vert_grid_type.ne.'zlevel') then
- write(0,*) 'Abort: config_vert_diff_type.eq.tanh may only', &
- ' use config_vert_grid_type of zlevel at this time'
- err = 1
- endif
- endif
-
!--------------------------------------------------------------------
end subroutine ocn_vmix_coefs_tanh_init!}}}
</font>
</pre>