<p><b>xylar@lanl.gov</b> 2010-02-09 15:05:34 -0700 (Tue, 09 Feb 2010)</p><p>Added the following:<br>
<br>
1. To the global diagnostics: the min, max, sum, average, and column-summed min and max of:<br>
h, u, v, h_edge, circulation, vorticity, ke, pv_edge, pv_vertex, <br>
pv_cell, gradPVn, gradPVt, zmid, zbot, pmid, pbot, MontPot<br>
<br>
These are written out to files in a manner essentially identical to what Mark added in<br>
the last update. CFLNumberGlobal was added as the last column of the time.txt output.<br>
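<br>
For reference, each row of stats/min.txt, stats/max.txt, stats/sum.txt, stats/avg.txt, stats/colmin.txt,<br>
and stats/colmax.txt holds the 17 variables in the order listed above, and stats/time.txt holds the<br>
time step index, xtime, dt, and CFLNumberGlobal. Each file is appended with the same simple pattern<br>
(condensed from the new module; getFreeUnit finds an unused Fortran I/O unit):<br>
<br>
 fileID = getFreeUnit()<br>
 open(fileID, file='stats/min.txt', ACCESS='append')<br>
 write(fileID, '(100es24.16)') mins(1:nVariables)<br>
 close(fileID)<br>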
<br>
in module_global_diagnostics.F<br>
modified:<br>
subroutine computeGlobalDiagnostics<br>
added:<br>
function getFreeUnit<br>
subroutine computeFieldLocalStats<br>
subroutine computeFieldAreaWeightedLocalStats<br>
subroutine computeFieldThicknessWeightedLocalStats<br>
subroutine computeFieldVolumeWeightedLocalStats<br>
subroutine computeGlobalVertSumHorizMin<br>
subroutine computeGlobalVertSumHorizMax<br>
subroutine computeGlobalVertThicknessWeightedSumHorizMin<br>
subroutine computeGlobalVertThicknessWeightedSumHorizMax<br>
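<br>
The weighted variants all follow the same pattern; the area-weighted statistics, for example,<br>
reduce to (condensed from the new module):<br>
<br>
 localSum = 0.0<br>
 do elementIndex = 1, nElements<br>
    localSum = localSum + areas(elementIndex) * sum(field(:,elementIndex))<br>
 end do<br>
 localMin = minval(field)<br>
 localMax = maxval(field)<br>
 localVertSumMin = minval(sum(field,1))<br>
 localVertSumMax = maxval(sum(field,1))<br>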
<br>
in module_dmpar.F<br>
added:<br>
subroutine dmpar_sum_int_array<br>
subroutine dmpar_min_int_array<br>
subroutine dmpar_max_int_array<br>
subroutine dmpar_sum_real_array<br>
subroutine dmpar_min_real_array<br>
subroutine dmpar_max_real_array<br>
These compute global reductions over arrays of reals or ints so that many scalar<br>
reductions can be batched into a single MPI call for efficiency, as sketched below.<br>
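<br>
A minimal sketch of the intended usage (names condensed from computeGlobalDiagnostics):<br>
<br>
 sums(1) = sum(areaCell(1:nCellsSolve))<br>
 sums(2) = sum(areaTriangle(1:nVerticesSolve))<br>
 call dmpar_sum_real_array(dminfo, 2, sums(1:2), reductions(1:2))<br>
 areaCellGlobal = reductions(1)<br>
 areaTriangleGlobal = reductions(2)<br>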
<br>
modified:<br>
subroutine sw_solve in module_sw_solver.F<br>
removed calls to write_stats, added calls to computeGlobalDiagnostics (sketched below)<br>
subroutine timestep in module_time_integration.F<br>
removed calls to computeGlobalDiagnostics<br>
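<br>
The new call site in sw_solve checks the one-block-per-processor assumption before doing the<br>
reduction, roughly:<br>
<br>
 block_ptr => domain % blocklist<br>
 if (associated(block_ptr % next)) then<br>
    write(0,*) 'Error: computeGlobalDiagnostics assumes one block per processor.'<br>
    call dmpar_abort(domain % dminfo)<br>
 end if<br>
 call computeGlobalDiagnostics(domain % dminfo, block_ptr % time_levs(1) % state, &<br>
    block_ptr % mesh, itimestep, dt)<br>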
<br>
2. Removed calls to vector reconstruction initialization and computation because of LAPACK troubles<br>
on coyote. Todd will fix this soon by integrating the appropriate LAPACK routines into the code.<br>
<br>
modified:<br>
subroutine rk4 in module_time_integration.F<br>
subroutine init_reconstruct in module_vector_reconstruction.F<br>
subroutine sw_solve in module_sw_solver.F<br>
<br>
3. Removed some redundant diagnostic variables from the Registry<br>
<br>
4. Modified the Makefile to be similar to the swmodel branch. Different make commands now build<br>
the code for different systems without the need to comment lines in and out.<br>
<br>
Note: to build for coyote, use<br>
make coyote<br>
or<br>
make coyote_serial<br>
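<br>
Other targets follow the same pattern; assuming NETCDF points at a netCDF installation (and,<br>
optionally, ZOLTAN_HOME at a Zoltan installation), a gfortran build is simply<br>
 make gfortran<br>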
</p><hr noshade><pre><font color="gray">Modified: branches/ocean/Makefile
===================================================================
--- branches/ocean/Makefile        2010-02-08 17:44:38 UTC (rev 115)
+++ branches/ocean/Makefile        2010-02-09 22:05:34 UTC (rev 116)
@@ -1,78 +1,138 @@
-#MODEL_FORMULATION = -DNCAR_FORMULATION
-MODEL_FORMULATION = -DLANL_FORMULATION
-
-#EXPAND_LEVELS = -DEXPAND_LEVELS=32
-
-# IBM with Xlf compilers
-#FC = mpxlf90
-#CC = mpcc
-#SFC = xlf90
-#SCC = xlc
-#FFLAGS = -qrealsize=8 -g -C
-#CFLAGS = -g
-#LDFLAGS = -g -C
-
-#FC = mpif90
-#CC = mpicc
-#SFC = pgf90
-#SCC = pgcc
-#FFLAGS = -r8 -O3
-#CFLAGS = -O3
-#LDFLAGS = -O3
-
-#ifort (for Intel Macs and Quad-Core AMD Opteron Clusters)
-FC = mpif90
-CC = gcc
-SFC = ifort
-SCC = gcc
-FFLAGS = -real-size 64 -O3
-CFLAGS = -O3 -m64
-LDFLAGS = -O3
-
-CPPFLAGS = -DRKIND=8 $(MODEL_FORMULATION) $(EXPAND_LEVELS) -DUNDERSCORE -DOFFSET64BIT
-
-CPPINCLUDES = -I../inc -I$(NETCDF)/include
-FCINCLUDES = -I../inc -I$(NETCDF)/include
-LIBS = -L$(NETCDF)/lib -lnetcdf -llapack
-
-RM = rm -f
-CPP = cpp -C -P -traditional
-RANLIB = ranlib
-
-#########################
-# Section for Zoltan TPL
-#########################
-ifdef ZOLTAN_HOME
- ifdef ZOLTAN_INCLUDE
- FCINCLUDES += -I$(ZOLTAN_INCLUDE)
- else
- FCINCLUDES += -I$(ZOLTAN_HOME)/include
- endif
-
- ifdef ZOLTAN_LIBPATH
- LIBS += -L$(ZOLTAN_LIBPATH) -lzoltan
- else
- LIBS += -L$(ZOLTAN_HOME)/lib -lzoltan
- endif
-endif
-#########################
-
-all: reg_includes externals swmodel_main
-
-reg_includes:
-        cd Registry; make CC="$(SCC)"
-        cd Registry; ./parse Registry; mv *.inc ../inc
-
-externals:
-        cd external; make FC="$(FC)" SFC="$(SFC)" CC="$(CC)" SCC="$(SCC)" FFLAGS="$(FFLAGS)" CFLAGS="$(CFLAGS)" CPP="$(CPP)" RANLIB="$(RANLIB)" NETCDF="$(NETCDF)"
-
-swmodel_main:
-        cd src; make FC="$(FC)" CC="$(CC)" CFLAGS="$(CFLAGS)" FFLAGS="$(FFLAGS)" LDFLAGS="$(LDFLAGS)" RM="$(RM)" CPP="$(CPP)" CPPFLAGS="$(CPPFLAGS)" LIBS="$(LIBS)" CPPINCLUDES="$(CPPINCLUDES)" FCINCLUDES="$(FCINCLUDES)"
-        if [ ! -e swmodel ]; then ln -s src/swmodel .; fi
-
-clean:
-        cd Registry; make clean
-        cd external; make clean
-        cd src; make clean RM="$(RM)"
-        $(RM) inc/*.inc
-        $(RM) swmodel
+#MODEL_FORMULATION = -DNCAR_FORMULATION
+MODEL_FORMULATION = -DLANL_FORMULATION
+
+#EXPAND_LEVELS = -DEXPAND_LEVELS=32
+FILE_OFFSET = -DOFFSET64BIT
+
+#########################
+# Section for Zoltan TPL
+#########################
+ifdef ZOLTAN_HOME
+ ZOLTAN_DEFINE = -DHAVE_ZOLTAN
+endif
+#########################
+
+dummy:
+        @( echo "try one of:"; \
+        echo " make xlf"; \
+        echo " make pgi"; \
+        echo " make ifort"; \
+        echo " make gfortran"; \
+        echo " make coyote"; \
+        echo " make coyote_serial"; \
+        )
+
+xlf:
+        ( make all \
+        "FC = mpxlf90" \
+        "CC = mpcc" \
+        "SFC = xlf90" \
+        "SCC = xlc" \
+        "FFLAGS = -qrealsize=8 -g -C " \
+        "CFLAGS = -g" \
+        "LDFLAGS = -g -C" \
+        "CPPFLAGS = -DRKIND=8 $(MODEL_FORMULATION) $(EXPAND_LEVELS) -D_MPI $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
+
+pgi:
+        ( make all \
+        "FC = mpif90" \
+        "CC = mpicc" \
+        "SFC = pgf90" \
+        "SCC = pgcc" \
+        "FFLAGS = -r8 -O3" \
+        "CFLAGS = -O3" \
+        "LDFLAGS = -O3" \
+        "CPPFLAGS = -DRKIND=8 $(MODEL_FORMULATION) $(EXPAND_LEVELS) -D_MPI -DUNDERSCORE $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
+
+ifort:
+        ( make all \
+        "FC = mpif90" \
+        "CC = gcc" \
+        "SFC = ifort" \
+        "SCC = gcc" \
+        "FFLAGS = -real-size 64 -O3" \
+        "CFLAGS = -O3 -m64" \
+        "LDFLAGS = -O3 -L$MKLPATH -I$MKLINCLUDE -I$MKLPATH -lmkl_lapack95_lp64 -lmkl_intel_lp64 -lmkl_sequential -lmkl_core" \
+        "CPPFLAGS = -DRKIND=8 $(MODEL_FORMULATION) $(EXPAND_LEVELS) -DUNDERSCORE -m64 $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
+
+gfortran:
+        ( make all \
+        "FC = mpif90" \
+        "CC = mpicc" \
+        "SFC = gfortran" \
+        "SCC = gcc" \
+        "FFLAGS = -O3 -m64 -ffree-line-length-none" \
+        "CFLAGS = -O3 -m64" \
+        "LDFLAGS = -O3 -m64" \
+        "CPPFLAGS = -DRKIND=8 $(MODEL_FORMULATION) $(EXPAND_LEVELS) -D_MPI -DUNDERSCORE -m64 $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
+
+
+coyote:
+        ( make all \
+        "FC = mpif90" \
+        "CC = gcc" \
+        "SFC = ifort" \
+        "SCC = gcc" \
+        "FFLAGS = -real-size 64 -O3" \
+        "CFLAGS = -O3 -m64" \
+        "LDFLAGS = -O3" \
+        "NETCDF = /usr/projects/climate/bzhao/netcdf-3.6.1" \
+        "CPPFLAGS = -DRKIND=8 $(MODEL_FORMULATION) $(EXPAND_LEVELS) -DUNDERSCORE -m64 $(FILE_OFFSET) $(ZOLTAN_DEFINE) -D_MPI" )
+
+coyote_serial:
+        ( make all \
+        "FC = mpif90" \
+        "CC = gcc" \
+        "SFC = ifort" \
+        "SCC = gcc" \
+        "FFLAGS = -real-size 64 -O3" \
+        "CFLAGS = -O3 -m64" \
+        "LDFLAGS = -O3" \
+        "NETCDF = /usr/projects/climate/bzhao/netcdf-3.6.1" \
+        "CPPFLAGS = -DRKIND=8 $(MODEL_FORMULATION) $(EXPAND_LEVELS) -DUNDERSCORE -m64 $(FILE_OFFSET) $(ZOLTAN_DEFINE)" )
+
+CPPINCLUDES = -I../inc -I$(NETCDF)/include
+FCINCLUDES = -I../inc -I$(NETCDF)/include
+LIBS = -L$(NETCDF)/lib -lnetcdf
+
+RM = rm -f
+CPP = cpp -C -P -traditional
+RANLIB = ranlib
+
+#########################
+# Section for Zoltan TPL
+#########################
+ifdef ZOLTAN_HOME
+ ifdef ZOLTAN_INC_PATH
+ FCINCLUDES += -I$(ZOLTAN_INC_PATH)
+ else
+ FCINCLUDES += -I$(ZOLTAN_HOME)/include
+ endif
+
+ ifdef ZOLTAN_LIB_PATH
+ LIBS += -L$(ZOLTAN_LIB_PATH) -lzoltan
+ else
+ LIBS += -L$(ZOLTAN_HOME)/lib -lzoltan
+ endif
+endif
+#########################
+
+all: reg_includes externals swmodel_main
+
+reg_includes:
+        cd Registry; make CC="$(SCC)"
+        cd Registry; ./parse Registry; mv *.inc ../inc
+
+externals:
+        cd external; make FC="$(FC)" SFC="$(SFC)" CC="$(CC)" SCC="$(SCC)" FFLAGS="$(FFLAGS)" CFLAGS="$(CFLAGS)" CPP="$(CPP)" RANLIB="$(RANLIB)" NETCDF="$(NETCDF)"
+
+swmodel_main:
+        cd src; make FC="$(FC)" CC="$(CC)" CFLAGS="$(CFLAGS)" FFLAGS="$(FFLAGS)" LDFLAGS="$(LDFLAGS)" RM="$(RM)" CPP="$(CPP)" CPPFLAGS="$(CPPFLAGS)" LIBS="$(LIBS)" CPPINCLUDES="$(CPPINCLUDES)" FCINCLUDES="$(FCINCLUDES)"
+        if [ ! -e swmodel ]; then ln -s src/swmodel .; fi
+
+clean:
+        cd Registry; make clean
+        cd external; make clean
+        cd src; make clean RM="$(RM)"
+        $(RM) inc/*.inc
+        $(RM) swmodel
Modified: branches/ocean/Registry/Registry
===================================================================
--- branches/ocean/Registry/Registry        2010-02-08 17:44:38 UTC (rev 115)
+++ branches/ocean/Registry/Registry        2010-02-09 22:05:34 UTC (rev 116)
@@ -129,15 +129,12 @@
# xsad 10-02-05:
# Globally reduced diagnostic variables: only written to output
-var real cellAreaGlobal ( Time ) o cellAreaGlobal - -
-var real edgeAreaGlobal ( Time ) o edgeAreaGlobal - -
-var real triangleAreaGlobal ( Time ) o triangleAreaGlobal - -
+var real areaCellGlobal ( Time ) o areaCellGlobal - -
+var real areaEdgeGlobal ( Time ) o areaEdgeGlobal - -
+var real areaTriangleGlobal ( Time ) o areaTriangleGlobal - -
-var real cellVolumeGlobal ( Time ) o cellVolumeGlobal - -
-var real keGlobal ( Time ) o keGlobal - -
-var real averageLayerThicknessGlobal ( Time ) o averageLayerThicknessGlobal - -
-var real maximumVelocityGlobal ( Time ) o maximumVelocityGlobal - -
-var real minimumLayerThicknessGlobal ( Time ) o minimumLayerThicknessGlobal - -
+var real volumeCellGlobal ( Time ) o volumeCellGlobal - -
+var real volumeEdgeGlobal ( Time ) o volumeEdgeGlobal - -
var real CFLNumberGlobal ( Time ) o CFLNumberGlobal - -
# xsad 10-02-05 end
Modified: branches/ocean/src/module_dmpar.F
===================================================================
--- branches/ocean/src/module_dmpar.F        2010-02-08 17:44:38 UTC (rev 115)
+++ branches/ocean/src/module_dmpar.F        2010-02-09 22:05:34 UTC (rev 116)
@@ -336,7 +336,125 @@
!xsad 10-02-01 end
+ !xsad 10-02-08:
+ subroutine dmpar_sum_int_array(dminfo, nElements, inArray, outArray)
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: nElements
+ integer, dimension(nElements), intent(in) :: inArray
+ integer, dimension(nElements), intent(out) :: outArray
+
+ integer :: mpi_ierr
+
+#ifdef _MPI
+ call MPI_Allreduce(inArray, outArray, nElements, MPI_INTEGER, MPI_SUM, dminfo % comm, mpi_ierr)
+#else
+ outArray = inArray
+#endif
+
+ end subroutine dmpar_sum_int_array
+
+ subroutine dmpar_min_int_array(dminfo, nElements, inArray, outArray)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: nElements
+ integer, dimension(nElements), intent(in) :: inArray
+ integer, dimension(nElements), intent(out) :: outArray
+
+ integer :: mpi_ierr
+
+#ifdef _MPI
+ call MPI_Allreduce(inArray, outArray, nElements, MPI_INTEGER, MPI_MIN, dminfo % comm, mpi_ierr)
+#else
+ outArray = inArray
+#endif
+
+ end subroutine dmpar_min_int_array
+
+ subroutine dmpar_max_int_array(dminfo, nElements, inArray, outArray)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: nElements
+ integer, dimension(nElements), intent(in) :: inArray
+ integer, dimension(nElements), intent(out) :: outArray
+
+ integer :: mpi_ierr
+
+#ifdef _MPI
+ call MPI_Allreduce(inArray, outArray, nElements, MPI_INTEGER, MPI_MAX, dminfo % comm, mpi_ierr)
+#else
+ outArray = inArray
+#endif
+
+ end subroutine dmpar_max_int_array
+
+ subroutine dmpar_sum_real_array(dminfo, nElements, inArray, outArray)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: nElements
+ real(kind=RKIND), dimension(nElements), intent(in) :: inArray
+ real(kind=RKIND), dimension(nElements), intent(out) :: outArray
+
+ integer :: mpi_ierr
+
+#ifdef _MPI
+ call MPI_Allreduce(inArray, outArray, nElements, MPI_REALKIND, MPI_SUM, dminfo % comm, mpi_ierr)
+#else
+ outArray = inArray
+#endif
+
+ end subroutine dmpar_sum_real_array
+
+ subroutine dmpar_min_real_array(dminfo, nElements, inArray, outArray)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: nElements
+ real(kind=RKIND), dimension(nElements), intent(in) :: inArray
+ real(kind=RKIND), dimension(nElements), intent(out) :: outArray
+
+ integer :: mpi_ierr
+
+#ifdef _MPI
+ call MPI_Allreduce(inArray, outArray, nElements, MPI_REALKIND, MPI_MIN, dminfo % comm, mpi_ierr)
+#else
+ outArray = inArray
+#endif
+
+ end subroutine dmpar_min_real_array
+
+ subroutine dmpar_max_real_array(dminfo, nElements, inArray, outArray)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: nElements
+ real(kind=RKIND), dimension(nElements), intent(in) :: inArray
+ real(kind=RKIND), dimension(nElements), intent(out) :: outArray
+
+ integer :: mpi_ierr
+
+#ifdef _MPI
+ call MPI_Allreduce(inArray, outArray, nElements, MPI_REALKIND, MPI_MAX, dminfo % comm, mpi_ierr)
+#else
+ outArray = inArray
+#endif
+
+ end subroutine dmpar_max_real_array
+
+ !xsad 10-02-08 end
+
+
+
subroutine dmpar_scatter_ints(dminfo, nprocs, noutlist, displs, counts, inlist, outlist)
implicit none
Modified: branches/ocean/src/module_global_diagnostics.F
===================================================================
--- branches/ocean/src/module_global_diagnostics.F        2010-02-08 17:44:38 UTC (rev 115)
+++ branches/ocean/src/module_global_diagnostics.F        2010-02-09 22:05:34 UTC (rev 116)
@@ -11,26 +11,43 @@
contains
- subroutine computeGlobalDiagnostics(dminfo, state, grid, dt)
+ subroutine computeGlobalDiagnostics(dminfo, state, grid, timeIndex, dt)
+ ! Note: this routine assumes that there is only one block per processor. No looping
+ ! is performed over blocks.
+ ! dminfo is the domain info needed for global communication
+ ! state contains the state variables needed to compute global diagnostics
+ ! grid contains the metadata about the grid
+ ! timeIndex is the current time step counter
+ ! dt is the duration of each time step
+
+ ! Sums of variables at vertices are not weighted by thickness (since h is not known at
+ ! vertices as it is at cell centers and at edges).
+
implicit none
type (dm_info), intent(in) :: dminfo
type (grid_state), intent(inout) :: state
type (grid_meta), intent(in) :: grid
+ integer, intent(in) :: timeIndex
real (kind=RKIND), intent(in) :: dt
- type (block_type), pointer :: block
- integer :: nVertLevels, nCellsSolve, nEdgesSolve, nVerticesSolve
- real (kind=RKIND) :: cellAreaGlobal, edgeAreaGlobal, triangleAreaGlobal
- real (kind=RKIND), dimension(:), pointer :: areaCell, dcEdge, dvEdge, areaTriangle
- real (kind=RKIND), dimension(:,:), pointer :: h, ke, u
- real (kind=RKIND) :: cellVolumeGlobal, keGlobal, averageLayerThicknessGlobal, maximumVelocityGlobal, &
- minimumLayerThicknessGlobal, CFLNumberGlobal
+ integer :: nVertLevels, nCellsSolve, nEdgesSolve, nVerticesSolve, nCellsGlobal, nEdgesGlobal, nVerticesGlobal
+ real (kind=RKIND) :: areaCellGlobal, areaEdgeGlobal, areaTriangleGlobal
+ real (kind=RKIND), dimension(:), pointer :: areaCell, dcEdge, dvEdge, areaTriangle, areaEdge
+ real (kind=RKIND), dimension(:,:), pointer :: h, u, v, h_edge, circulation, vorticity, ke, pv_edge, pv_vertex, &
+ pv_cell, gradPVn, gradPVt, zmid, zbot, pmid, pbot, MontPot
+ real (kind=RKIND) :: volumeCellGlobal, volumeEdgeGlobal, CFLNumberGlobal
real (kind=RKIND) :: localCFL, localSum
- integer :: elementIndex
+ integer :: elementIndex, variableIndex, nVariables, nSums, nMaxes, nMins
integer :: timeLevel
+ integer, parameter :: kMaxVariables = 1024 ! this must be a little more than double the number of variables to be reduced
+
+ real (kind=RKIND), dimension(kMaxVariables) :: sums, mins, maxes, averages, verticalSumMins, verticalSumMaxes, reductions
+
+ integer :: fileID
+
nVertLevels = grid % nVertLevels
nCellsSolve = grid % nCellsSolve
nEdgesSolve = grid % nEdgesSolve
@@ -40,54 +57,407 @@
dcEdge => grid % dcEdge % array
dvEdge => grid % dvEdge % array
areaTriangle => grid % areaTriangle % array
+ allocate(areaEdge(1:nEdgesSolve))
+ areaEdge = dcEdge(1:nEdgesSolve)*dvEdge(1:nEdgesSolve)
h => state % h % array
+ u => state % u % array
+ v => state % v % array
+ h_edge => state % h_edge % array
+ circulation => state % circulation % array
+ vorticity => state % vorticity % array
ke => state % ke % array
- u => state % u % array
+ pv_edge => state % pv_edge % array
+ pv_vertex => state % pv_vertex % array
+ pv_cell => state % pv_cell % array
+ gradPVn => state % gradPVn % array
+ gradPVt => state % gradPVt % array
+ zmid => state % zmid % array
+ zbot => state % zbot % array
+ pmid => state % pmid % array
+ pbot => state % pbot % array
+ MontPot => state % MontPot % array
- localSum = sum(areaCell(1:nCellsSolve))
- call dmpar_sum_real(dminfo, localSum, cellAreaGlobal)
+ variableIndex = 1
+ ! h
+ call computeFieldAreaWeightedLocalStats(dminfo, nVertLevels, nCellsSolve, areaCell(1:nCellsSolve), h(:,1:nCellsSolve), &
+ sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), verticalSumMaxes(variableIndex))
+ variableIndex = variableIndex + 1
- localSum = sum(dcEdge(1:nEdgesSolve)*dvEdge(1:nEdgesSolve))
- call dmpar_sum_real(dminfo, localSum, edgeAreaGlobal)
+ ! u
+ call computeFieldVolumeWeightedLocalStats(dminfo, nVertLevels, nEdgesSolve, areaEdge(1:nEdgesSolve), h_edge(:,1:nEdgesSolve), &
+ u(:,1:nEdgesSolve), sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), &
+ verticalSumMaxes(variableIndex))
+ variableIndex = variableIndex + 1
- localSum = sum(areaTriangle(1:nVerticesSolve))
- call dmpar_sum_real(dminfo, localSum, triangleAreaGlobal)
+ ! v
+ call computeFieldVolumeWeightedLocalStats(dminfo, nVertLevels, nEdgesSolve, areaEdge(1:nEdgesSolve), h_edge(:,1:nEdgesSolve), &
+ v(:,1:nEdgesSolve), sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), &
+ verticalSumMaxes(variableIndex))
+ variableIndex = variableIndex + 1
- call computeAreaWeightedGlobalSum(dminfo, nVertLevels, nCellsSolve, &
- areaCell(1:nCellsSolve), h(:,1:nCellsSolve), cellVolumeGlobal)
+ ! h_edge
+ call computeFieldAreaWeightedLocalStats(dminfo, nVertLevels, nEdgesSolve, areaEdge(1:nEdgesSolve), h_edge(:,1:nEdgesSolve), &
+ sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), verticalSumMaxes(variableIndex))
+ variableIndex = variableIndex + 1
- averageLayerThicknessGlobal = cellVolumeGlobal/(cellAreaGlobal*nVertLevels)
+ ! circulation
+ call computeFieldLocalStats(dminfo, nVertLevels, nVerticesSolve, circulation(:,1:nVerticesSolve), &
+ sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), verticalSumMaxes(variableIndex))
+ variableIndex = variableIndex + 1
- call computeVolumeWeightedGlobalSum(dminfo, nVertLevels, nCellsSolve, &
- areaCell(1:nCellsSolve), h(:,1:nCellsSolve), ke(:,1:nCellsSolve), keGlobal)
+ ! vorticity
+ call computeFieldAreaWeightedLocalStats(dminfo, nVertLevels, nVerticesSolve, areaTriangle(1:nVerticesSolve), &
+ vorticity(:,1:nVerticesSolve), sums(variableIndex), mins(variableIndex), maxes(variableIndex), &
+ verticalSumMins(variableIndex), verticalSumMaxes(variableIndex))
+ variableIndex = variableIndex + 1
- call computeGlobalMax(dminfo, nVertLevels, nEdgesSolve, &
- u(:,1:nEdgesSolve), maximumVelocityGlobal)
+ ! ke
+ call computeFieldVolumeWeightedLocalStats(dminfo, nVertLevels, nCellsSolve, areaCell(1:nCellsSolve), h(:,1:nCellsSolve), &
+ ke(:,1:nCellsSolve), sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), &
+ verticalSumMaxes(variableIndex))
+ variableIndex = variableIndex + 1
- call computeGlobalMin(dminfo, nVertLevels, nCellsSolve, &
- h(:,1:nEdgesSolve), minimumLayerThicknessGlobal)
+ ! pv_edge
+ call computeFieldVolumeWeightedLocalStats(dminfo, nVertLevels, nEdgesSolve, areaEdge(1:nEdgesSolve), h_edge(:,1:nEdgesSolve), &
+ pv_edge(:,1:nEdgesSolve), sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), &
+ verticalSumMaxes(variableIndex))
+ variableIndex = variableIndex + 1
+ ! pv_vertex
+ call computeFieldAreaWeightedLocalStats(dminfo, nVertLevels, nVerticesSolve, areaTriangle(1:nVerticesSolve), &
+ pv_vertex(:,1:nVerticesSolve), sums(variableIndex), mins(variableIndex), maxes(variableIndex), &
+ verticalSumMins(variableIndex), verticalSumMaxes(variableIndex))
+ variableIndex = variableIndex + 1
+
+ ! pv_cell
+ call computeFieldVolumeWeightedLocalStats(dminfo, nVertLevels, nCellsSolve, areaCell(1:nCellsSolve), h(:,1:nCellsSolve), &
+ pv_cell(:,1:nCellsSolve), sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), &
+ verticalSumMaxes(variableIndex))
+ variableIndex = variableIndex + 1
+
+ ! gradPVn
+ call computeFieldVolumeWeightedLocalStats(dminfo, nVertLevels, nEdgesSolve, areaEdge(1:nEdgesSolve), h_edge(:,1:nEdgesSolve), &
+ gradPVn(:,1:nEdgesSolve), sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), &
+ verticalSumMaxes(variableIndex))
+ variableIndex = variableIndex + 1
+
+ ! gradPVt
+ call computeFieldVolumeWeightedLocalStats(dminfo, nVertLevels, nEdgesSolve, areaEdge(1:nEdgesSolve), h_edge(:,1:nEdgesSolve), &
+ gradPVt(:,1:nEdgesSolve), sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), &
+ verticalSumMaxes(variableIndex))
+ variableIndex = variableIndex + 1
+
+ ! zmid
+ call computeFieldAreaWeightedLocalStats(dminfo, nVertLevels, nCellsSolve, areaCell(1:nCellsSolve), &
+ zmid(:,1:nCellsSolve), sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), &
+ verticalSumMaxes(variableIndex))
+ variableIndex = variableIndex + 1
+
+ ! zbot
+ call computeFieldAreaWeightedLocalStats(dminfo, nVertLevels, nCellsSolve, areaCell(1:nCellsSolve), &
+ zbot(:,1:nCellsSolve), sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), &
+ verticalSumMaxes(variableIndex))
+ variableIndex = variableIndex + 1
+
+ ! pmid
+ call computeFieldVolumeWeightedLocalStats(dminfo, nVertLevels, nCellsSolve, areaCell(1:nCellsSolve), h(:,1:nCellsSolve), &
+ pmid(:,1:nCellsSolve), sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), &
+ verticalSumMaxes(variableIndex))
+ variableIndex = variableIndex + 1
+
+ ! pbot
+ call computeFieldVolumeWeightedLocalStats(dminfo, nVertLevels, nCellsSolve, areaCell(1:nCellsSolve), h(:,1:nCellsSolve), &
+ pbot(:,1:nCellsSolve), sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), &
+ verticalSumMaxes(variableIndex))
+ variableIndex = variableIndex + 1
+
+ ! MontPot
+ call computeFieldVolumeWeightedLocalStats(dminfo, nVertLevels, nCellsSolve, areaCell(1:nCellsSolve), h(:,1:nCellsSolve), &
+ MontPot(:,1:nCellsSolve), sums(variableIndex), mins(variableIndex), maxes(variableIndex), verticalSumMins(variableIndex), &
+ verticalSumMaxes(variableIndex))
+
+ nVariables = variableIndex
+ nSums = nVariables
+ nMins = nVariables
+ nMaxes = nVariables
+
+ nSums = nSums + 1
+ sums(nSums) = sum(areaCell(1:nCellsSolve))
+
+ nSums = nSums + 1
+ sums(nSums) = sum(dcEdge(1:nEdgesSolve)*dvEdge(1:nEdgesSolve))
+
+ nSums = nSums + 1
+ sums(nSums) = sum(areaTriangle(1:nVerticesSolve))
+
+ nSums = nSums + 1
+ sums(nSums) = nCellsSolve
+
+ nSums = nSums + 1
+ sums(nSums) = nEdgesSolve
+
+ nSums = nSums + 1
+ sums(nSums) = nVerticesSolve
+
localCFL = 0.0
do elementIndex = 1,nEdgesSolve
localCFL = max(localCFL, maxval(dt*u(:,elementIndex)/dcEdge(elementIndex)))
end do
- call dmpar_max_real(dminfo, localCFL, CFLNumberGlobal)
+ nMaxes = nMaxes + 1
+ maxes(nMaxes) = localCFL
+ mins(nMins+1:nMins+nVariables) = verticalSumMins(1:nVariables)
+ nMins = nMins + nVariables
+ maxes(nMaxes+1:nMaxes+nVariables) = verticalSumMaxes(1:nVariables)
+ nMaxes = nMaxes + nVariables
- state % cellAreaGlobal % scalar = cellVolumeGlobal
- state % edgeAreaGlobal % scalar = edgeAreaGlobal
- state % triangleAreaGlobal % scalar = triangleAreaGlobal
+ ! global reduction of the 5 arrays (packed into 3 to minimize global communication)
+ call dmpar_sum_real_array(dminfo, nSums, sums(1:nSums), reductions(1:nSums))
+ sums(1:nVariables) = reductions(1:nVariables)
+ areaCellGlobal = reductions(nVariables+1)
+ areaEdgeGlobal = reductions(nVariables+2)
+ areaTriangleGlobal = reductions(nVariables+3)
+ nCellsGlobal = int(reductions(nVariables+4))
+ nEdgesGlobal = int(reductions(nVariables+5))
+ nVerticesGlobal = int(reductions(nVariables+6))
- state % cellVolumeGlobal % scalar = cellVolumeGlobal
- state % keGlobal % scalar = keGlobal
- state % averageLayerThicknessGlobal % scalar = averageLayerThicknessGlobal
- state % maximumVelocityGlobal % scalar = maximumVelocityGlobal
- state % minimumLayerThicknessGlobal % scalar = minimumLayerThicknessGlobal
+ call dmpar_min_real_array(dminfo, nMins, mins(1:nMins), reductions(1:nMins))
+ mins(1:nVariables) = reductions(1:nVariables)
+ verticalSumMins(1:nVariables) = reductions(nMins-nVariables+1:nMins)
+
+ call dmpar_max_real_array(dminfo, nMaxes, maxes(1:nMaxes), reductions(1:nMaxes))
+ maxes(1:nVariables) = reductions(1:nVariables)
+ CFLNumberGlobal = reductions(nVariables+1)
+ verticalSumMaxes(1:nVariables) = reductions(nMaxes-nVariables+1:nMaxes)
+
+ volumeCellGlobal = sums(1)
+ volumeEdgeGlobal = sums(4)
+ ! compute the averages (slightly different depending on how the sum was computed)
+ variableIndex = 1
+ ! h
+ averages(variableIndex) = sums(variableIndex)/(areaCellGlobal*nVertLevels)
+ variableIndex = variableIndex + 1
+
+ ! u
+ averages(variableIndex) = sums(variableIndex)/volumeEdgeGlobal
+ variableIndex = variableIndex + 1
+
+ ! v
+ averages(variableIndex) = sums(variableIndex)/volumeEdgeGlobal
+ variableIndex = variableIndex + 1
+
+ ! h_edge
+ averages(variableIndex) = sums(variableIndex)/(areaEdgeGlobal*nVertLevels)
+ variableIndex = variableIndex + 1
+
+ ! circulation
+ averages(variableIndex) = sums(variableIndex)/(nVerticesGlobal*nVertLevels)
+ variableIndex = variableIndex + 1
+
+ ! vorticity
+ averages(variableIndex) = sums(variableIndex)/(areaTriangleGlobal*nVertLevels)
+ variableIndex = variableIndex + 1
+
+ ! ke
+ averages(variableIndex) = sums(variableIndex)/volumeCellGlobal
+ variableIndex = variableIndex + 1
+
+ ! pv_edge
+ averages(variableIndex) = sums(variableIndex)/volumeEdgeGlobal
+ variableIndex = variableIndex + 1
+
+ ! pv_vertex
+ averages(variableIndex) = sums(variableIndex)/(areaTriangleGlobal*nVertLevels)
+ variableIndex = variableIndex + 1
+
+ ! pv_cell
+ averages(variableIndex) = sums(variableIndex)/volumeCellGlobal
+ variableIndex = variableIndex + 1
+
+ ! gradPVn
+ averages(variableIndex) = sums(variableIndex)/volumeEdgeGlobal
+ variableIndex = variableIndex + 1
+
+ ! gradPVt
+ averages(variableIndex) = sums(variableIndex)/volumeEdgeGlobal
+ variableIndex = variableIndex + 1
+
+ ! zmid
+ averages(variableIndex) = sums(variableIndex)/(areaCellGlobal*nVertLevels)
+ variableIndex = variableIndex + 1
+
+ ! zbot
+ averages(variableIndex) = sums(variableIndex)/(areaCellGlobal*nVertLevels)
+ variableIndex = variableIndex + 1
+
+ ! pmid
+ averages(variableIndex) = sums(variableIndex)/volumeCellGlobal
+ variableIndex = variableIndex + 1
+
+ ! pbot
+ averages(variableIndex) = sums(variableIndex)/volumeCellGlobal
+ variableIndex = variableIndex + 1
+
+ ! MontPot
+ averages(variableIndex) = sums(variableIndex)/volumeCellGlobal
+
+ ! write out the data to files
+ if (dminfo % my_proc_id == IO_NODE) then
+ fileID = getFreeUnit()
+ open(fileID,file='stats/min.txt',ACCESS='append')
+ write (fileID,'(100es24.16)') mins(1:nVariables)
+ close (fileID)
+ open(fileID,file='stats/max.txt',ACCESS='append')
+ write (fileID,'(100es24.16)') maxes(1:nVariables)
+ close (fileID)
+ open(fileID,file='stats/sum.txt',ACCESS='append')
+ write (fileID,'(100es24.16)') sums(1:nVariables)
+ close (fileID)
+ open(fileID,file='stats/avg.txt',ACCESS='append')
+ write (fileID,'(100es24.16)') averages(1:nVariables)
+ close (fileID)
+ open(fileID,file='stats/time.txt',ACCESS='append')
+ write (fileID,'(i5,100es24.16)') timeIndex, &
+ state % xtime % scalar, dt, &
+ CFLNumberGlobal
+ close (fileID)
+ open(fileID,file='stats/colmin.txt',ACCESS='append')
+ write (fileID,'(100es24.16)') verticalSumMins(1:nVariables)
+ close (fileID)
+ open(fileID,file='stats/colmax.txt',ACCESS='append')
+ write (fileID,'(100es24.16)') verticalSumMaxes(1:nVariables)
+ close (fileID)
+ end if
+
+ state % areaCellGlobal % scalar = areaCellGlobal
+ state % areaEdgeGlobal % scalar = areaEdgeGlobal
+ state % areaTriangleGlobal % scalar = areaTriangleGlobal
+
+ state % volumeCellGlobal % scalar = volumeCellGlobal
+ state % volumeEdgeGlobal % scalar = volumeEdgeGlobal
state % CFLNumberGlobal % scalar = CFLNumberGlobal
+ deallocate(areaEdge)
end subroutine computeGlobalDiagnostics
+ integer function getFreeUnit()
+ implicit none
+
+ integer :: index
+ logical :: isOpened
+
+ getFreeUnit = 0
+ do index = 1,99
+ if((index /= 5) .and. (index /= 6)) then
+ inquire(unit = index, opened = isOpened)
+ if( .not. isOpened) then
+ getFreeUnit = index
+ return
+ end if
+ end if
+ end do
+ end function getFreeUnit
+
+ subroutine computeFieldLocalStats(dminfo, nVertLevels, nElements, field, localSum, localMin, localMax, localVertSumMin, &
+ localVertSumMax)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: nVertLevels, nElements
+ real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: field
+ real (kind=RKIND), intent(out) :: localSum, localMin, localMax, localVertSumMin, &
+ localVertSumMax
+
+ localSum = sum(field)
+ localMin = minval(field)
+ localMax = maxval(field)
+ localVertSumMin = minval(sum(field,1))
+ localVertSumMax = maxval(sum(field,1))
+
+ end subroutine computeFieldLocalStats
+
+ subroutine computeFieldAreaWeightedLocalStats(dminfo, nVertLevels, nElements, areas, field, localSum, localMin, &
+ localMax, localVertSumMin, localVertSumMax)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: nVertLevels, nElements
+ real (kind=RKIND), dimension(nElements), intent(in) :: areas
+ real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: field
+ real (kind=RKIND), intent(out) :: localSum, localMin, localMax, localVertSumMin, &
+ localVertSumMax
+
+ integer :: elementIndex
+
+ localSum = 0.0
+ do elementIndex = 1, nElements
+ localSum = localSum + areas(elementIndex) * sum(field(:,elementIndex))
+ end do
+
+ localMin = minval(field)
+ localMax = maxval(field)
+ localVertSumMin = minval(sum(field,1))
+ localVertSumMax = maxval(sum(field,1))
+
+ end subroutine computeFieldAreaWeightedLocalStats
+
+ subroutine computeFieldThicknessWeightedLocalStats(dminfo, nVertLevels, nElements, h, field, &
+ localSum, localMin, localMax, localVertSumMin, localVertSumMax)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: nVertLevels, nElements
+ real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: h
+ real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: field
+ real (kind=RKIND), intent(out) :: localSum, localMin, localMax, localVertSumMin, &
+ localVertSumMax
+
+ real (kind=RKIND), dimension(nVertLevels, nElements) :: hTimesField
+
+ integer :: elementIndex
+
+ localSum = sum(h*field)
+ localMin = minval(field)
+ localMax = maxval(field)
+ localVertSumMin = minval(sum(h*field,1))
+ localVertSumMax = maxval(sum(h*field,1))
+
+ end subroutine computeFieldThicknessWeightedLocalStats
+
+ subroutine computeFieldVolumeWeightedLocalStats(dminfo, nVertLevels, nElements, areas, h, field, &
+ localSum, localMin, localMax, localVertSumMin, localVertSumMax)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: nVertLevels, nElements
+ real (kind=RKIND), dimension(nElements), intent(in) :: areas
+ real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: h
+ real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: field
+ real (kind=RKIND), intent(out) :: localSum, localMin, localMax, localVertSumMin, &
+ localVertSumMax
+
+ real (kind=RKIND), dimension(nVertLevels, nElements) :: hTimesField
+
+ integer :: elementIndex
+
+ localSum = 0.0
+ do elementIndex = 1, nElements
+ localSum = localSum + areas(elementIndex) * sum(h(:,elementIndex)*field(:,elementIndex))
+ end do
+
+ localMin = minval(field)
+ localMax = maxval(field)
+ localVertSumMin = minval(sum(h*field,1))
+ localVertSumMax = maxval(sum(h*field,1))
+
+ end subroutine computeFieldVolumeWeightedLocalStats
+
+
subroutine computeGlobalSum(dminfo, nVertLevels, nElements, field, globalSum)
implicit none
@@ -177,4 +547,68 @@
end subroutine computeGlobalMax
+ subroutine computeGlobalVertSumHorizMin(dminfo, nVertLevels, nElements, field, globalMin)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: nVertLevels, nElements
+ real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: field
+ real (kind=RKIND), intent(out) :: globalMin
+
+ real (kind=RKIND) :: localMin
+
+ localMin = minval(sum(field,1))
+ call dmpar_min_real(dminfo, localMin, globalMin)
+
+ end subroutine computeGlobalVertSumHorizMin
+
+ subroutine computeGlobalVertSumHorizMax(dminfo, nVertLevels, nElements, field, globalMax)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: nVertLevels, nElements
+ real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: field
+ real (kind=RKIND), intent(out) :: globalMax
+
+ real (kind=RKIND) :: localMax
+
+ localMax = maxval(sum(field,1))
+ call dmpar_max_real(dminfo, localMax, globalMax)
+
+ end subroutine computeGlobalVertSumHorizMax
+
+ subroutine computeGlobalVertThicknessWeightedSumHorizMin(dminfo, nVertLevels, nElements, h, field, globalMin)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: nVertLevels, nElements
+ real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: h, field
+ real (kind=RKIND), intent(out) :: globalMin
+
+ real (kind=RKIND) :: localMin
+
+ localMin = minval(sum(h*field,1))
+ call dmpar_min_real(dminfo, localMin, globalMin)
+
+ end subroutine computeGlobalVertThicknessWeightedSumHorizMin
+
+ subroutine computeGlobalVertThicknessWeightedSumHorizMax(dminfo, nVertLevels, nElements, h, field, globalMax)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: nVertLevels, nElements
+ real (kind=RKIND), dimension(nVertLevels, nElements), intent(in) :: h, field
+ real (kind=RKIND), intent(out) :: globalMax
+
+ real (kind=RKIND) :: localMax
+
+ localMax = maxval(sum(h*field,1))
+ call dmpar_max_real(dminfo, localMax, globalMax)
+
+ end subroutine computeGlobalVertThicknessWeightedSumHorizMax
+
end module global_diagnostics
Modified: branches/ocean/src/module_sw_solver.F
===================================================================
--- branches/ocean/src/module_sw_solver.F        2010-02-08 17:44:38 UTC (rev 115)
+++ branches/ocean/src/module_sw_solver.F        2010-02-09 22:05:34 UTC (rev 116)
@@ -6,7 +6,12 @@
use configure
use dmpar
use timer
+ use global_diagnostics
+ ! xsad 10-02-09:
+ use vector_reconstruction
+ ! xsad 10-02-09 end
+
integer :: output_frame
integer :: restart_frame
@@ -44,6 +49,10 @@
block_ptr => domain % blocklist
do while (associated(block_ptr))
call compute_solve_diagnostics(dt,block_ptr % time_levs(1) % state, block_ptr % mesh)
+ ! xsad 10-02-09:
+ ! commenting this out until we incorporate the necessary lapack routines into mpas
+ ! call init_reconstruct(block_ptr % mesh)
+ ! xsad 10-02-09 end
if (.not. config_do_restart) block_ptr % time_levs(1) % state % xtime % scalar = 0.0
block_ptr => block_ptr % next
end do
@@ -52,8 +61,19 @@
output_frame = 1
restart_frame = 1
! mrp 100120:
- call write_stats(domain, 0, dt)
- ! mrp 100120 end
+ !call write_stats(domain, 0, dt)
+ ! mrp 100120 end
+ ! xsad 10-02-08:
+ block_ptr => domain % blocklist
+ if(associated(block_ptr % next)) then
+ write(0,*) 'Error: computeGlobalDiagnostics assumes that there is only one block per processor.'
+ call dmpar_abort(domain % dminfo)
+ end if
+
+ call timer_start("global diagnostics")
+ call computeGlobalDiagnostics(domain % dminfo, block_ptr % time_levs(1) % state, block_ptr % mesh, 0, dt)
+ call timer_stop("global diagnostics")
+ ! xsad 10-02-08 end
call output_state_init(output_obj, domain, "OUTPUT")
call write_output_frame(output_obj, domain)
@@ -71,7 +91,19 @@
! mrp 100120:
if (mod(itimestep, config_stats_interval) == 0) then
- call write_stats(domain, itimestep, dt)
+ ! xsad 10-02-08:
+ !call write_stats(domain, itimestep, dt)
+ block_ptr => domain % blocklist
+ if(associated(block_ptr % next)) then
+ write(0,*) 'Error: computeGlobalDiagnostics assumes that there is only one block per processor.'
+ call dmpar_abort(domain % dminfo)
+ end if
+
+ call timer_start("global diagnostics")
+ call computeGlobalDiagnostics(domain % dminfo, block_ptr % time_levs(1) % state, block_ptr % mesh, &
+ itimestep, dt)
+ call timer_stop("global diagnostics")
+ ! xsad 10-02-08 end
end if
! mrp 100120 end
Modified: branches/ocean/src/module_time_integration.F
===================================================================
--- branches/ocean/src/module_time_integration.F        2010-02-08 17:44:38 UTC (rev 115)
+++ branches/ocean/src/module_time_integration.F        2010-02-09 22:05:34 UTC (rev 116)
@@ -5,7 +5,6 @@
use constants
use dmpar
! xsad 10-02-05:
- use global_diagnostics
use vector_reconstruction
! xsad 10-02-05 end
@@ -217,12 +216,11 @@
call compute_solve_diagnostics(dt, block % time_levs(2) % state, block % mesh)
- call reconstruct(block % time_levs(2) % state, block % mesh)
+ ! xsad 10-02-09:
+ ! commenting this out until we incorporate the necessary lapack routines into mpas
+ !call reconstruct(block % time_levs(2) % state, block % mesh)
+ ! xsad 10-02-09 end
- ! xsad 10-02-05:
- call computeGlobalDiagnostics(domain % dminfo, block % time_levs(2) % state, block % mesh, dt)
- ! xsad 10-02-05 end
-
block => block % next
end do
Modified: branches/ocean/src/module_vector_reconstruction.F
===================================================================
--- branches/ocean/src/module_vector_reconstruction.F        2010-02-08 17:44:38 UTC (rev 115)
+++ branches/ocean/src/module_vector_reconstruction.F        2010-02-09 22:05:34 UTC (rev 116)
@@ -127,21 +127,24 @@
enddo
! invert matrix_reconstruct using lapack routines
- allocate(mwork(npoints,npoints))
- lwork = npoints*npoints
- mwork(:,:) = matrix_reconstruct(1:npoints,1:npoints,iCell)
- call dgetrf(npoints, npoints, mwork, npoints, ipvt, info)
- if(info.ne.0) then
- write(6,*) info, 'dgetrf'
- stop
- endif
- call dgetri(npoints, mwork, npoints, ipvt, work, lwork, info)
- matrix_reconstruct(1:npoints,1:npoints,iCell) = mwork(:,:)
- if(info.ne.0) then
- write(6,*) info, 'dgetri'
- stop
- endif
- deallocate(mwork)
+ ! xsad 10-02-09:
+ !!!!! the lapack stuff isn't working on coyote in parallel !!!!!!!
+ !allocate(mwork(npoints,npoints))
+ !lwork = npoints*npoints
+ !mwork(:,:) = matrix_reconstruct(1:npoints,1:npoints,iCell)
+ !call dgetrf(npoints, npoints, mwork, npoints, ipvt, info)
+ !if(info.ne.0) then
+ !write(6,*) info, 'dgetrf'
+ !stop
+ !endif
+ !call dgetri(npoints, mwork, npoints, ipvt, work, lwork, info)
+ !matrix_reconstruct(1:npoints,1:npoints,iCell) = mwork(:,:)
+ !if(info.ne.0) then
+ !write(6,*) info, 'dgetri'
+ !stop
+ !endif
+ !deallocate(mwork)
+ ! xsad 10-02-09 end
! compute the coefficients for reconstructing uX, uY, uZ at cell centers from u_i normal to edges
! uX = sum_j(coeffs(1,j) * u_j) (similarly for Y,Z)
@@ -217,7 +220,7 @@
! allocate space for temporary arrays
EdgeMax = maxval(nEdgesOnCell)
- allocate(c(nVertLevels,EdgeMax))
+! allocate(c(nVertLevels,EdgeMax))
allocate(rhs(nVertLevels,EdgeMax))
! init the intent(out)
@@ -228,15 +231,6 @@
! loop over cell centers
do iCell=1,nCellsSolve
-! t2(1) = xCell(iCell)
-! t2(2) = yCell(iCell)
-! t2(3) = zCell(iCell)
-! t3(1) = 0
-! t3(2) = 0
-! t3(3) = 1.0
-! call cross_product_in_R3(t3(:), t2(:), east(:))
-! call unit_vector_in_R3(east(:))
-
npoints = nEdgesOnCell(iCell)
! fill the RHS of the matrix equation
@@ -247,19 +241,18 @@
iEdge = edgesOnCell(j,iCell)
do k=1,nVertLevels
rhs(k,j) = u(k,iEdge)
- ! call get_dotproduct(normal(:,j,iCell),east(:),rhs(k,j))
enddo
enddo
! compute c by multiplying matrix_reconstruct * rhs (Eq 8 in Tex document)
- c = 0.0
- do i=1,npoints
- do j=1,npoints
- do k=1,nVertLevels
- c(k,i) = c(k,i) + matrix_reconstruct(i,j,iCell)*rhs(k,j)
- enddo
- enddo
- enddo
+! c = 0.0
+! do i=1,npoints
+! do j=1,npoints
+! do k=1,nVertLevels
+! c(k,i) = c(k,i) + matrix_reconstruct(i,j,iCell)*rhs(k,j)
+! enddo
+! enddo
+! enddo
! reconstruct the velocity field at point X1 (Eq 6 in Tex document)
! do i=1,npoints
@@ -270,44 +263,21 @@
! enddo
! enddo
!
-! t1(1) = uReconstructX(1,iCell)
-! t1(2) = uReconstructY(1,iCell)
-! t1(3) = uReconstructZ(1,iCell)
-! call get_dotproduct(east, t1, p)
-! write(6,10) iCell, p
-! write(6,10) iCell, t1(:)
-! write(6,10) iCell, east(:)
-! write(6,*)
-! 10 format(i6, 3e15.5)
-
- ! reconstruct the velocity field at point X1 (Eq 6 in Tex document)
-! uReconstructX(:,iCell) = 0.0
-! uReconstructY(:,iCell) = 0.0
-! uReconstructZ(:,iCell) = 0.0
+ ! a more efficient reconstruction where rbf_values*matrix_reconstruct has been precomputed
+ ! in coeffs_reconstruct
do i=1,npoints
uReconstructX(:,iCell) = uReconstructX(:,iCell) + coeffs_reconstruct(1,i,iCell) * rhs(:,i)
uReconstructY(:,iCell) = uReconstructY(:,iCell) + coeffs_reconstruct(2,i,iCell) * rhs(:,i)
uReconstructZ(:,iCell) = uReconstructZ(:,iCell) + coeffs_reconstruct(3,i,iCell) * rhs(:,i)
enddo
-! t1(1) = uReconstructX(1,iCell)
-! t1(2) = uReconstructY(1,iCell)
-! t1(3) = uReconstructZ(1,iCell)
-! call get_dotproduct(east, t1, p)
-! write(6,10) iCell, p
-! write(6,10) iCell, t1(:)
-! write(6,10) iCell, east(:)
-! write(6,*)
-
enddo ! iCell
! deallocate
deallocate(rhs)
- deallocate(c)
+! deallocate(c)
-! stop
-
end subroutine reconstruct
subroutine get_distance(x1,x2,xout)
</font>
</pre>