<p><b>duda</b> 2013-03-08 17:48:01 -0700 (Fri, 08 Mar 2013)</p><p>BRANCH COMMIT<br>
<br>
Merge recent developments in the infrastructure from the trunk to the atmos_physics branch.<br>
<br>
<br>
M src/registry/gen_inc.c<br>
M src/framework/mpas_timer.F<br>
M src/framework/mpas_configure.F<br>
M src/framework/mpas_block_decomp.F<br>
M src/framework/mpas_io_input.F<br>
M src/framework/mpas_io_output.F<br>
M src/framework/mpas_framework.F<br>
M src/framework/mpas_timekeeping.F<br>
M src/framework/mpas_dmpar.F<br>
M src/framework/mpas_io_streams.F<br>
M src/framework/mpas_io.F<br>
M src/framework/mpas_grid_types.F<br>
M src/Makefile<br>
</p><hr noshade><pre><font color="gray">Modified: branches/atmos_physics/src/Makefile
===================================================================
--- branches/atmos_physics/src/Makefile        2013-03-08 23:01:58 UTC (rev 2577)
+++ branches/atmos_physics/src/Makefile        2013-03-09 00:48:01 UTC (rev 2578)
@@ -1,34 +1,45 @@
.SUFFIXES: .F .c .o
+ifeq "$(CESM)" "true"
+
+ifeq "$(CORE)" "ocean"
+include Makefile.in.CESM_OCN
+endif
+
+else
+
all: mpas
+
mpas: reg_includes externals frame ops dycore drver
        $(LINKER) $(LDFLAGS) -o $(CORE)_model.exe driver/*.o -L. -ldycore -lops -lframework $(LIBS) -I./external/esmf_time_f90 -L./external/esmf_time_f90 -lesmf_time
+externals: reg_includes
+        ( cd external; $(MAKE) FC="$(FC)" SFC="$(SFC)" CC="$(CC)" SCC="$(SCC)" FFLAGS="$(FFLAGS)" CFLAGS="$(CFLAGS)" CPP="$(CPP)" NETCDF="$(NETCDF)" CORE="$(CORE)" )
+
+drver: reg_includes externals frame ops dycore
+        ( cd driver; $(MAKE) CPPFLAGS="$(CPPFLAGS)" CPPINCLUDES="$(CPPINCLUDES)" all )
+endif
+
reg_includes:
        ( cd registry; $(MAKE) CC="$(SCC)" )
        ( cd inc; $(CPP) ../core_$(CORE)/Registry | ../registry/parse > Registry.processed)
-externals: reg_includes
-        ( cd external; $(MAKE) FC="$(FC)" SFC="$(SFC)" CC="$(CC)" SCC="$(SCC)" FFLAGS="$(FFLAGS)" CFLAGS="$(CFLAGS)" CPP="$(CPP)" NETCDF="$(NETCDF)" CORE="$(CORE)" )
-
frame: reg_includes externals
-        ( cd framework; $(MAKE) all )
+        ( cd framework; $(MAKE) CPPFLAGS="$(CPPFLAGS)" CPPINCLUDES="$(CPPINCLUDES)" all )
        ln -sf framework/libframework.a libframework.a
ops: reg_includes externals frame
-        ( cd operators; $(MAKE) all )
+        ( cd operators; $(MAKE) CPPFLAGS="$(CPPFLAGS)" CPPINCLUDES="$(CPPINCLUDES)" all )
        ln -sf operators/libops.a libops.a
dycore: reg_includes externals frame ops
-        ( cd core_$(CORE); $(MAKE) all )
+        ( cd core_$(CORE); $(MAKE) CPPFLAGS="$(CPPFLAGS)" CPPINCLUDES="$(CPPINCLUDES)" all )
        ln -sf core_$(CORE)/libdycore.a libdycore.a
-drver: reg_includes externals frame ops dycore
-        ( cd driver; $(MAKE) all )
clean:
-        $(RM) $(CORE)_model.exe libframework.a libops.a libdycore.a
+        $(RM) $(CORE)_model.exe libframework.a libops.a libdycore.a lib$(CORE).a *.o
        ( cd registry; $(MAKE) clean )
        ( cd external; $(MAKE) clean )
        ( cd framework; $(MAKE) clean )
Modified: branches/atmos_physics/src/framework/mpas_block_decomp.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_block_decomp.F        2013-03-08 23:01:58 UTC (rev 2577)
+++ branches/atmos_physics/src/framework/mpas_block_decomp.F        2013-03-09 00:48:01 UTC (rev 2578)
@@ -165,7 +165,7 @@
allocate(block_id(blocks_per_proc))
allocate(block_start(blocks_per_proc))
allocate(block_count(blocks_per_proc))
-
+
do i = 1, blocks_per_proc
block_start = 0
block_count = 0
@@ -436,11 +436,11 @@
end if
else
blocks_per_proc = 0
- do i = 1, total_blocks
+ do i = 0, total_blocks-1
call mpas_get_owning_proc(dminfo, i, owning_proc)
if(owning_proc == proc_number) then
call mpas_get_local_block_id(dminfo, i, local_block_id)
- blocks_per_proc = max(blocks_per_proc, local_block_id)
+ blocks_per_proc = max(blocks_per_proc, local_block_id+1)
end if
end do
end if
Modified: branches/atmos_physics/src/framework/mpas_configure.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_configure.F        2013-03-08 23:01:58 UTC (rev 2577)
+++ branches/atmos_physics/src/framework/mpas_configure.F        2013-03-09 00:48:01 UTC (rev 2578)
@@ -7,11 +7,12 @@
contains
- subroutine mpas_read_namelist(dminfo)
+ subroutine mpas_read_namelist(dminfo, nml_filename)
implicit none
type (dm_info), intent(in) :: dminfo
+ character (len=*), optional :: nml_filename
integer :: funit, ierr
@@ -23,8 +24,13 @@
#include "config_set_defaults.inc"
if (dminfo % my_proc_id == IO_NODE) then
- write(0,*) 'Reading namelist.input'
- open(funit,file='namelist.input',status='old',form='formatted')
+ if (present(nml_filename)) then
+ write(0,*) 'Reading ', trim(nml_filename)
+ open(funit,file=trim(nml_filename),status='old',form='formatted')
+ else
+ write(0,*) 'Reading namelist.input'
+ open(funit,file='namelist.input',status='old',form='formatted')
+ end if
#include "config_namelist_reads.inc"
close(funit)
Modified: branches/atmos_physics/src/framework/mpas_dmpar.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_dmpar.F        2013-03-08 23:01:58 UTC (rev 2577)
+++ branches/atmos_physics/src/framework/mpas_dmpar.F        2013-03-09 00:48:01 UTC (rev 2578)
@@ -25,6 +25,8 @@
module procedure mpas_dmpar_alltoall_field1d_real
module procedure mpas_dmpar_alltoall_field2d_real
module procedure mpas_dmpar_alltoall_field3d_real
+ module procedure mpas_dmpar_alltoall_field4d_real
+ module procedure mpas_dmpar_alltoall_field5d_real
end interface
private :: mpas_dmpar_alltoall_field1d_integer
@@ -32,6 +34,8 @@
private :: mpas_dmpar_alltoall_field1d_real
private :: mpas_dmpar_alltoall_field2d_real
private :: mpas_dmpar_alltoall_field3d_real
+ private :: mpas_dmpar_alltoall_field4d_real
+ private :: mpas_dmpar_alltoall_field5d_real
interface mpas_dmpar_exch_halo_field
@@ -41,6 +45,8 @@
module procedure mpas_dmpar_exch_halo_field1d_real
module procedure mpas_dmpar_exch_halo_field2d_real
module procedure mpas_dmpar_exch_halo_field3d_real
+ module procedure mpas_dmpar_exch_halo_field4d_real
+ module procedure mpas_dmpar_exch_halo_field5d_real
end interface
private :: mpas_dmpar_exch_halo_field1d_integer
@@ -49,6 +55,8 @@
private :: mpas_dmpar_exch_halo_field1d_real
private :: mpas_dmpar_exch_halo_field2d_real
private :: mpas_dmpar_exch_halo_field3d_real
+ private :: mpas_dmpar_exch_halo_field4d_real
+ private :: mpas_dmpar_exch_halo_field5d_real
interface mpas_dmpar_copy_field
module procedure mpas_dmpar_copy_field1d_integer
@@ -57,6 +65,8 @@
module procedure mpas_dmpar_copy_field1d_real
module procedure mpas_dmpar_copy_field2d_real
module procedure mpas_dmpar_copy_field3d_real
+ module procedure mpas_dmpar_copy_field4d_real
+ module procedure mpas_dmpar_copy_field5d_real
end interface
private :: mpas_dmpar_copy_field1d_integer
@@ -65,6 +75,8 @@
private :: mpas_dmpar_copy_field1d_real
private :: mpas_dmpar_copy_field2d_real
private :: mpas_dmpar_copy_field3d_real
+ private :: mpas_dmpar_copy_field4d_real
+ private :: mpas_dmpar_copy_field5d_real
contains
@@ -98,7 +110,9 @@
write(0,'(a,i5,a,i5,a)') 'task ', mpi_rank, ' of ', mpi_size, &
' is running'
+#ifndef MPAS_CESM
call open_streams(dminfo % my_proc_id)
+#endif
dminfo % info = MPI_INFO_NULL
#else
@@ -2808,7 +2822,610 @@
end subroutine mpas_dmpar_alltoall_field3d_real!}}}
+ subroutine mpas_dmpar_alltoall_field4d_real(fieldIn, fieldout, haloLayersIn)!{{{
+ implicit none
+
+ type (field4dReal), pointer :: fieldIn
+ type (field4dReal), pointer :: fieldOut
+ integer, dimension(:), pointer, optional :: haloLayersIn
+
+ type (field4dReal), pointer :: fieldInPtr, fieldOutPtr
+ type (mpas_exchange_list), pointer :: exchListPtr, exchListPtr2
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
+ type (dm_info), pointer :: dminfo
+
+ logical :: comm_list_found
+
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: nAdded, bufferOffset
+ integer :: mpi_ierr
+ integer :: iHalo, iBuffer, i, j, k, l
+ integer :: nHaloLayers
+ integer, dimension(:), pointer :: haloLayers
+
+ dminfo => fieldIn % block % domain % dminfo
+
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
+ else
+ nHaloLayers = size(fieldIn % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
+ end if
+
+#ifdef _MPI
+ nullify(sendList)
+ nullify(recvList)
+
+ ! Setup recieve lists.
+ do iHalo = 1, nHaloLayers
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3)
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(recvList)) then
+ allocate(recvList)
+ nullify(recvList % next)
+ commListPtr => recvList
+ else
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3)
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ end do
+
+ ! Determine size of receive list buffers.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = nAdded
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! Allocate buffers for recieves, and initiate mpi_irecv calls.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_realKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
+
+ ! Setup send lists, and determine the size of their buffers.
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldOutPtr % dimSizes(3)
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(sendList)) then
+ allocate(sendList)
+ nullify(sendList % next)
+ commListPtr => sendList
+ else
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldOutPtr % dimSizes(3)
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
+
+ ! Allocate sendLists, copy data into buffer, and initiate mpi_isends
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldInPtr % dimSizes(3)
+ do k = 1, fieldInPtr % dimSizes(2)
+ do l = 1, fieldInPtr % dimSizes(1)
+ iBuffer = (exchListPtr % destList(i)-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldInPtr % dimSizes(3) &
+ + (j-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) &
+ + (k-1) * fieldInPtr % dimSizes(1) + l + bufferOffset
+ commListPtr % rbuffer(iBuffer) = fieldInPtr % array(l, k, j, exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
+ end do
+ end do
+ end do
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+
+ call MPI_Isend(commListPtr % rbuffer, commListPtr % nlist, MPI_realKIND, &
+ commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
+
+ commListPtr => commListPtr % next
+ end do
+
+#endif
+
+ ! Handle Local Copies. Only local copies if no MPI
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldOutPtr % array(:, :, :, exchListPtr % destList(i)) = fieldInPtr % array(:, :, :, exchListPtr % srcList(i))
+ end do
+ end if
+ fieldOutPtr => fieldOutPtr % next
+ end do
+
+ exchListPtr => exchListPtr % next
+ end do
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
+
+#ifdef _MPI
+ ! Wait for MPI_Irecv's to finish, and unpack data.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldOutPtr % dimSizes(3)
+ do k = 1, fieldOutPtr % dimSizes(2)
+ do l = 1, fieldOutPtr % dimSizes(1)
+ iBuffer = (exchListPtr % srcList(i)-1) * fieldOutPtr % dimSizes(3) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(1) &
+ + (j-1) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) &
+ + (k-1) * fieldOutPtr % dimSizes(1) + l + bufferOffset
+ fieldOutPtr % array(l, k, j, exchListPtr % destList(i)) = commListPtr % rbuffer(iBuffer)
+ end do
+ end do
+ end do
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! Wait for MPI_Isend's to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
+
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
+#endif
+
+ deallocate(haloLayers)
+
+ end subroutine mpas_dmpar_alltoall_field4d_real!}}}
+
+ subroutine mpas_dmpar_alltoall_field5d_real(fieldIn, fieldout, haloLayersIn)!{{{
+
+ implicit none
+
+ type (field5dReal), pointer :: fieldIn
+ type (field5dReal), pointer :: fieldOut
+ integer, dimension(:), pointer, optional :: haloLayersIn
+
+ type (field5dReal), pointer :: fieldInPtr, fieldOutPtr
+ type (mpas_exchange_list), pointer :: exchListPtr, exchListPtr2
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
+ type (dm_info), pointer :: dminfo
+
+ logical :: comm_list_found
+
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: nAdded, bufferOffset
+ integer :: mpi_ierr
+ integer :: iHalo, iBuffer, i, j, k, l, m
+ integer :: nHaloLayers
+ integer, dimension(:), pointer :: haloLayers
+
+ dminfo => fieldIn % block % domain % dminfo
+
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
+ else
+ nHaloLayers = size(fieldIn % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
+ end if
+
+#ifdef _MPI
+ nullify(sendList)
+ nullify(recvList)
+
+ ! Setup recieve lists.
+ do iHalo = 1, nHaloLayers
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) * fieldOutPtr % dimSizes(4)
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(recvList)) then
+ allocate(recvList)
+ nullify(recvList % next)
+ commListPtr => recvList
+ else
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) * fieldOutPtr % dimSizes(4)
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ end do
+
+ ! Determine size of receive list buffers.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) * fieldOutPtr % dimSizes(4))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = nAdded
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! Allocate buffers for recieves, and initiate mpi_irecv calls.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_realKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
+
+ ! Setup send lists, and determine the size of their buffers.
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Search for an already created commList to this processor.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointID) then
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) * fieldInPtr % dimSizes(4)
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! If no comm list exists, create a new one.
+ if(.not. comm_list_found) then
+ if(.not.associated(sendList)) then
+ allocate(sendList)
+ nullify(sendList % next)
+ commListPtr => sendList
+ else
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ end if
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) * fieldInPtr % dimSizes(4)
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
+
+ ! Allocate sendLists, copy data into buffer, and initiate mpi_isends
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldInPtr % dimSizes(4)
+ do k = 1, fieldInPtr % dimSizes(3)
+ do l = 1, fieldInPtr % dimSizes(2)
+ do m = 1, fieldInPtr % dimSizes(1)
+ iBuffer = (exchListPtr % destList(i)-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldInPtr % dimSizes(3) * fieldInPtr % dimSizes(4) &
+ + (j-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * fieldInPtr % dimSizes(3) &
+ + (k-1) * fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) &
+ + (l-1) * fieldInPtr % dimSizes(1) + m + bufferOffset
+ commListPtr % rbuffer(iBuffer) = fieldInPtr % array(m, l, k, j, exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
+ end do
+ end do
+ end do
+ end do
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldInPtr => fieldInPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+
+ call MPI_Isend(commListPtr % rbuffer, commListPtr % nlist, MPI_realKIND, &
+ commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
+
+ commListPtr => commListPtr % next
+ end do
+
+#endif
+
+ ! Handle Local Copies. Only local copies if no MPI
+ do iHalo = 1, nHaloLayers
+ fieldInPtr => fieldIn
+ do while(associated(fieldInPtr))
+ exchListPtr => fieldInPtr % copyList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ if(exchListPtr % endPointID == fieldOutPtr % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldOutPtr % array(:, :, :, :, exchListPtr % destList(i)) = fieldInPtr % array(:, :, :, :, exchListPtr % srcList(i))
+ end do
+ end if
+ fieldOutPtr => fieldOutPtr % next
+ end do
+
+ exchListPtr => exchListPtr % next
+ end do
+ fieldInPtr => fieldInPtr % next
+ end do
+ end do
+
+#ifdef _MPI
+ ! Wait for MPI_Irecv's to finish, and unpack data.
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldOutPtr => fieldOut
+ do while(associated(fieldOutPtr))
+ exchListPtr => fieldOutPtr % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldOutPtr % dimSizes(4)
+ do k = 1, fieldOutPtr % dimSizes(3)
+ do l = 1, fieldOutPtr % dimSizes(2)
+ do m = 1, fieldOutPtr % dimSizes(1)
+ iBuffer = (exchListPtr % srcList(i)-1) * fieldOutPtr % dimSizes(4) * fieldOutPtr % dimSizes(3) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(1) &
+ + (j-1) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) &
+ + (k-1) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) &
+ + (l-1) * fieldOutPtr % dimSizes(1) + m + bufferOffset
+ fieldOutPtr % array(m, l, k, j, exchListPtr % destList(i)) = commListPtr % rbuffer(iBuffer)
+ end do
+ end do
+ end do
+ end do
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * fieldOutPtr % dimSizes(3) * fieldOutPtr % dimSizes(4))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! Wait for MPI_Isend's to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
+
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
+#endif
+
+ deallocate(haloLayers)
+
+ end subroutine mpas_dmpar_alltoall_field5d_real!}}}
+
+
+
subroutine mpas_dmpar_exch_halo_field1d_integer(field, haloLayersIn)!{{{
implicit none
@@ -4505,6 +5122,602 @@
end subroutine mpas_dmpar_exch_halo_field3d_real!}}}
+ subroutine mpas_dmpar_exch_halo_field4d_real(field, haloLayersIn)!{{{
+
+ ! Perform a halo exchange for a 4-d real field.
+ ! For each requested halo layer this routine sizes and packs send
+ ! buffers from the per-block send exchange lists, posts non-blocking
+ ! MPI receives and sends, performs intra-task (block-to-block) copies,
+ ! and finally unpacks the receive buffers into each block's halo.
+ !
+ ! field        - head of the linked list of per-block field instances
+ ! haloLayersIn - optional list of halo-layer indices to exchange;
+ ! when absent, every halo layer of the field is exchanged
+
+ implicit none
+
+ type (field4dReal), pointer :: field
+ integer, dimension(:), intent(in), optional :: haloLayersIn
+
+ type (dm_info), pointer :: dminfo
+ type (field4dReal), pointer :: fieldCursor, fieldCursor2
+ type (mpas_exchange_list), pointer :: exchListPtr
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
+ integer :: mpi_ierr
+ integer :: nHaloLayers, iHalo, i, j, k, l
+ integer :: bufferOffset, nAdded
+ integer, dimension(:), pointer :: haloLayers
+
+ logical :: comm_list_found
+
+ ! A non-positive dimension size means there is no data to exchange
+ do i = 1, 4
+ if(field % dimSizes(i) <= 0) then
+ return
+ end if
+ end do
+
+ dminfo => field % block % domain % dminfo
+
+ ! Build the list of halo layers to operate on (caller-supplied or all)
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
+ else
+ nHaloLayers = size(field % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
+ end if
+
+#ifdef _MPI
+ ! Allocate communication lists, and setup dead header nodes
+ allocate(sendList)
+ nullify(sendList % next)
+ sendList % procID = -1
+ sendList % nList = 0
+
+ allocate(recvList)
+ nullify(recvList % next)
+ recvList % procID = -1
+ recvList % nList = 0
+
+ ! NOTE(review): dminfo was pointer-associated with this same target
+ ! above; this intrinsic assignment copies the structure onto itself
+ ! and looks redundant -- confirm before removing.
+ dminfo = field % block % domain % dminfo
+
+ ! Determine size of buffers for communication lists
+ fieldCursor => field
+ do while(associated(fieldCursor))
+
+ ! Need to aggregate across halo layers
+ do iHalo = 1, nHaloLayers
+
+ ! Determine size from send lists
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Accumulate into an existing comm-list node for this destination rank
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3)
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! Otherwise append a new node at the tail for this rank
+ if(.not. comm_list_found) then
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3)
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ ! Setup recv lists
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ if(.not. comm_list_found) then
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+
+ ! Remove the dead head pointer on send and recv list
+ commListPtr => sendList
+ sendList => sendList % next
+ deallocate(commListPtr)
+
+ commListPtr => recvList
+ recvList => recvList % next
+ deallocate(commListPtr)
+
+ ! Determine size of recv lists
+ ! (per halo layer, the largest source index times the slab size)
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = bufferOffset
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! Allocate space in recv lists, and initiate mpi_irecv calls
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ ! Pack column-major over dims 1..3; element destList(i) owns a
+ ! contiguous dimSizes(1)*dimSizes(2)*dimSizes(3) slab, shifted
+ ! by bufferOffset for this halo layer
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldCursor % dimSizes(3)
+ do k = 1, fieldCursor % dimSizes(2)
+ do l = 1, fieldCursor % dimSizes(1)
+ commListPtr % rbuffer((exchListPtr % destList(i)-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) &
+ + (j-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) &
+ + (k-1) * fieldCursor % dimSizes(1) + l + bufferOffset) &
+ = fieldCursor % array(l, k, j, exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
+ end do
+ end do
+ end do
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+
+ call MPI_Isend(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
+#endif
+
+ ! Handle local copy. If MPI is off, then only local copies are performed.
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ do iHalo = 1, nHaloLayers
+ exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList
+
+ do while(associated(exchListPtr))
+ fieldCursor2 => field
+ do while(associated(fieldCursor2))
+ if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldCursor2 % array(:, :, :, exchListPtr % destList(i)) = fieldCursor % array(:, :, :, exchListPtr % srcList(i))
+ end do
+ end if
+
+ fieldCursor2 => fieldCursor2 % next
+ end do
+
+ exchListPtr => exchListPtr % next
+ end do
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+
+#ifdef _MPI
+
+ ! Wait for mpi_irecv to finish, and unpack data from buffer
+ ! (mirror of the packing layout, indexed by srcList on the receive side)
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldCursor % dimSizes(3)
+ do k = 1, fieldCursor % dimSizes(2)
+ do l = 1, fieldCursor % dimSizes(1)
+ fieldCursor % array(l, k, j, exchListPtr % destList(i)) = commListPtr % rbuffer((exchListPtr % srcList(i)-1)&
+ *fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) *fieldCursor % dimSizes(3)&
+ + (j-1)*fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) &
+ + (k-1)*fieldCursor % dimSizes(1) + l + bufferOffset)
+ end do
+ end do
+ end do
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr => commListPtr % next
+ end do
+
+ ! wait for mpi_isend to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
+
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
+#endif
+
+ deallocate(haloLayers)
+
+ end subroutine mpas_dmpar_exch_halo_field4d_real!}}}
+
+ subroutine mpas_dmpar_exch_halo_field5d_real(field, haloLayersIn)!{{{
+
+ ! Perform a halo exchange for a 5-d real field.
+ ! For each requested halo layer this routine sizes and packs send
+ ! buffers from the per-block send exchange lists, posts non-blocking
+ ! MPI receives and sends, performs intra-task (block-to-block) copies,
+ ! and finally unpacks the receive buffers into each block's halo.
+ !
+ ! field        - head of the linked list of per-block field instances
+ ! haloLayersIn - optional list of halo-layer indices to exchange;
+ ! when absent, every halo layer of the field is exchanged
+
+ implicit none
+
+ type (field5dReal), pointer :: field
+ integer, dimension(:), intent(in), optional :: haloLayersIn
+
+ type (dm_info), pointer :: dminfo
+ type (field5dReal), pointer :: fieldCursor, fieldCursor2
+ type (mpas_exchange_list), pointer :: exchListPtr
+ type (mpas_communication_list), pointer :: sendList, recvList, commListPtr, commListPtr2
+ integer :: mpi_ierr
+ integer :: nHaloLayers, iHalo, i, j, k, l, m
+ integer :: bufferOffset, nAdded
+ integer, dimension(:), pointer :: haloLayers
+
+ logical :: comm_list_found
+
+ ! A non-positive dimension size means there is no data to exchange
+ do i = 1, 5
+ if(field % dimSizes(i) <= 0) then
+ return
+ end if
+ end do
+
+ dminfo => field % block % domain % dminfo
+
+ ! Build the list of halo layers to operate on (caller-supplied or all)
+ if(present(haloLayersIn)) then
+ nHaloLayers = size(haloLayersIn)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = haloLayersIn(iHalo)
+ end do
+ else
+ nHaloLayers = size(field % sendList % halos)
+ allocate(haloLayers(nHaloLayers))
+ do iHalo = 1, nHaloLayers
+ haloLayers(iHalo) = iHalo
+ end do
+ end if
+
+#ifdef _MPI
+ ! Allocate communication lists, and setup dead header nodes
+ allocate(sendList)
+ nullify(sendList % next)
+ sendList % procID = -1
+ sendList % nList = 0
+
+ allocate(recvList)
+ nullify(recvList % next)
+ recvList % procID = -1
+ recvList % nList = 0
+
+ ! NOTE(review): dminfo was pointer-associated with this same target
+ ! above; this intrinsic assignment copies the structure onto itself
+ ! and looks redundant -- confirm before removing.
+ dminfo = field % block % domain % dminfo
+
+ ! Determine size of buffers for communication lists
+ fieldCursor => field
+ do while(associated(fieldCursor))
+
+ ! Need to aggregate across halo layers
+ do iHalo = 1, nHaloLayers
+
+ ! Determine size from send lists
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ ! Accumulate into an existing comm-list node for this destination rank
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ commListPtr % nList = commListPtr % nList + exchListPtr % nList * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) * fieldCursor % dimSizes(4)
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! Otherwise append a new node at the tail for this rank
+ if(.not. comm_list_found) then
+ commListPtr => sendList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
+ commListPtr % nList = exchListPtr % nList * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) * fieldCursor % dimSizes(4)
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ ! Setup recv lists
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ comm_list_found = .false.
+
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ if(commListPtr % procID == exchListPtr % endPointId) then
+ comm_list_found = .true.
+ exit
+ end if
+
+ commListPtr => commListPtr % next
+ end do
+
+ if(.not. comm_list_found) then
+ commListPtr => recvList
+ commListPtr2 => commListPtr % next
+ do while(associated(commListPtr2))
+ commListPtr => commListPtr % next
+ commListPtr2 => commListPtr % next
+ end do
+
+ allocate(commListPtr % next)
+ commListPtr => commListPtr % next
+ nullify(commListPtr % next)
+ commListPtr % procID = exchListPtr % endPointID
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+
+ ! Remove the dead head pointer on send and recv list
+ commListPtr => sendList
+ sendList => sendList % next
+ deallocate(commListPtr)
+
+ commListPtr => recvList
+ recvList => recvList % next
+ deallocate(commListPtr)
+
+ ! Determine size of recv lists
+ ! (per halo layer, the largest source index times the slab size)
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) * fieldCursor % dimSizes(4))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr % nList = bufferOffset
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! Allocate space in recv lists, and initiate mpi_irecv calls
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ call MPI_Irecv(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, commListPtr % procID, dminfo % comm, commListPtr % reqID, mpi_ierr)
+
+ commListPtr => commListPtr % next
+ end do
+
+ ! Allocate space in send lists, copy data into buffer, and initiate mpi_isend calls
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ allocate(commListPtr % rbuffer(commListPtr % nList))
+ nullify(commListPtr % ibuffer)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % sendList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ ! Pack column-major over dims 1..4; element destList(i) owns a
+ ! contiguous dimSizes(1)*...*dimSizes(4) slab, shifted by
+ ! bufferOffset for this halo layer
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldCursor % dimSizes(4)
+ do k = 1, fieldCursor % dimSizes(3)
+ do l = 1, fieldCursor % dimSizes(2)
+ do m = 1, fieldCursor % dimSizes(1)
+ commListPtr % rbuffer((exchListPtr % destList(i)-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) * fieldCursor % dimSizes(4) &
+ + (j-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) &
+ + (k-1) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) &
+ + (l-1) * fieldCursor % dimSizes(1) + m + bufferOffset) &
+ = fieldCursor % array(m, l, k, j, exchListPtr % srcList(i))
+ nAdded = nAdded + 1
+ end do
+ end do
+ end do
+ end do
+ end do
+ end if
+
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+
+ call MPI_Isend(commListPtr % rbuffer, commListPtr % nList, MPI_REALKIND, commListPtr % procID, dminfo % my_proc_id, dminfo % comm, commListPtr % reqID, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
+#endif
+
+ ! Handle local copy. If MPI is off, then only local copies are performed.
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ do iHalo = 1, nHaloLayers
+ exchListPtr => fieldCursor % copyList % halos(haloLayers(iHalo)) % exchList
+
+ do while(associated(exchListPtr))
+ fieldCursor2 => field
+ do while(associated(fieldCursor2))
+ if(exchListPtr % endPointID == fieldCursor2 % block % localBlockID) then
+ do i = 1, exchListPtr % nList
+ fieldCursor2 % array(:, :, :, :, exchListPtr % destList(i)) = fieldCursor % array(:, :, :, :, exchListPtr % srcList(i))
+ end do
+ end if
+
+ fieldCursor2 => fieldCursor2 % next
+ end do
+
+ exchListPtr => exchListPtr % next
+ end do
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+
+#ifdef _MPI
+
+ ! Wait for mpi_irecv to finish, and unpack data from buffer
+ ! (mirror of the packing layout, indexed by srcList on the receive side)
+ commListPtr => recvList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ bufferOffset = 0
+ do iHalo = 1, nHaloLayers
+ nAdded = 0
+ fieldCursor => field
+ do while(associated(fieldCursor))
+ exchListPtr => fieldCursor % recvList % halos(haloLayers(iHalo)) % exchList
+ do while(associated(exchListPtr))
+ if(exchListPtr % endPointID == commListPtr % procID) then
+ do i = 1, exchListPtr % nList
+ do j = 1, fieldCursor % dimSizes(4)
+ do k = 1, fieldCursor % dimSizes(3)
+ do l = 1, fieldCursor % dimSizes(2)
+ do m = 1, fieldCursor % dimSizes(1)
+ fieldCursor % array(m, l, k, j, exchListPtr % destList(i)) = commListPtr % rbuffer((exchListPtr % srcList(i)-1)&
+ *fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) *fieldCursor % dimSizes(3) * fieldCursor % dimSizes(4)&
+ + (j-1)*fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) &
+ + (k-1)*fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) &
+ + (l-1)*fieldCursor % dimSizes(1) + m + bufferOffset)
+ end do
+ end do
+ end do
+ end do
+ end do
+ nAdded = max(nAdded, maxval(exchListPtr % srcList) * fieldCursor % dimSizes(1) * fieldCursor % dimSizes(2) * fieldCursor % dimSizes(3) * fieldCursor % dimSizes(4))
+ end if
+ exchListPtr => exchListPtr % next
+ end do
+
+ fieldCursor => fieldCursor % next
+ end do
+ bufferOffset = bufferOffset + nAdded
+ end do
+ commListPtr => commListPtr % next
+ end do
+
+ ! wait for mpi_isend to finish.
+ commListPtr => sendList
+ do while(associated(commListPtr))
+ call MPI_Wait(commListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ commListPtr => commListPtr % next
+ end do
+
+ ! Destroy commLists.
+ call mpas_dmpar_destroy_communication_list(sendList)
+ call mpas_dmpar_destroy_communication_list(recvList)
+#endif
+
+ deallocate(haloLayers)
+
+ end subroutine mpas_dmpar_exch_halo_field5d_real!}}}
+
subroutine mpas_dmpar_init_mulithalo_exchange_list(exchList, nHalos)!{{{
type (mpas_multihalo_exchange_list), pointer :: exchList
integer, intent(in) :: nHalos
@@ -4666,4 +5879,30 @@
end if
end subroutine mpas_dmpar_copy_field3d_real!}}}
+ subroutine mpas_dmpar_copy_field4d_real(field)!{{{
+ ! Copy the head block's 4-d array into every subsequent block in the
+ ! field's linked list, making all blocks on this task hold identical data.
+ ! field - head of the linked list of per-block field instances
+ type (field4dReal), pointer :: field
+ type (field4dReal), pointer :: fieldCursor
+
+ ! Nothing to do when the field has a single block
+ if(associated(field % next)) then
+ fieldCursor => field % next
+ do while(associated(fieldCursor))
+ fieldCursor % array = field % array
+ fieldCursor => fieldCursor % next
+ end do
+ end if
+ end subroutine mpas_dmpar_copy_field4d_real!}}}
+
+ subroutine mpas_dmpar_copy_field5d_real(field)!{{{
+ ! Copy the head block's 5-d array into every subsequent block in the
+ ! field's linked list, making all blocks on this task hold identical data.
+ ! field - head of the linked list of per-block field instances
+ type (field5dReal), pointer :: field
+ type (field5dReal), pointer :: fieldCursor
+
+ ! Nothing to do when the field has a single block
+ if(associated(field % next)) then
+ fieldCursor => field % next
+ do while(associated(fieldCursor))
+ fieldCursor % array = field % array
+ fieldCursor => fieldCursor % next
+ end do
+ end if
+ end subroutine mpas_dmpar_copy_field5d_real!}}}
+
end module mpas_dmpar
Modified: branches/atmos_physics/src/framework/mpas_framework.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_framework.F        2013-03-08 23:01:58 UTC (rev 2577)
+++ branches/atmos_physics/src/framework/mpas_framework.F        2013-03-09 00:48:01 UTC (rev 2578)
@@ -13,7 +13,7 @@
contains
- subroutine mpas_framework_init(dminfo, domain, mpi_comm)
+ subroutine mpas_framework_init(dminfo, domain, mpi_comm, nml_filename, io_system)
implicit none
@@ -21,13 +21,16 @@
type (domain_type), pointer :: domain
integer, intent(in), optional :: mpi_comm
+ character (len=*), optional :: nml_filename
+ type (iosystem_desc_t), optional, pointer :: io_system
+
integer :: pio_num_iotasks
integer :: pio_stride
allocate(dminfo)
call mpas_dmpar_init(dminfo, mpi_comm)
- call mpas_read_namelist(dminfo)
+ call mpas_read_namelist(dminfo, nml_filename)
call mpas_allocate_domain(domain, dminfo)
@@ -38,19 +41,20 @@
if (pio_num_iotasks == 0) then
pio_num_iotasks = domain % dminfo % nprocs
end if
- call MPAS_io_init(dminfo, pio_num_iotasks, pio_stride)
+ call MPAS_io_init(dminfo, pio_num_iotasks, pio_stride, io_system)
end subroutine mpas_framework_init
- subroutine mpas_framework_finalize(dminfo, domain)
+ subroutine mpas_framework_finalize(dminfo, domain, io_system)
implicit none
type (dm_info), pointer :: dminfo
type (domain_type), pointer :: domain
+ type (iosystem_desc_t), optional, pointer :: io_system
- call MPAS_io_finalize()
+ call MPAS_io_finalize(io_system)
call mpas_deallocate_domain(domain)
Modified: branches/atmos_physics/src/framework/mpas_grid_types.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_grid_types.F        2013-03-08 23:01:58 UTC (rev 2577)
+++ branches/atmos_physics/src/framework/mpas_grid_types.F        2013-03-09 00:48:01 UTC (rev 2578)
@@ -20,8 +20,66 @@
logical :: output
end type io_info
+ ! Derived type for storing fields: one node of a per-block linked list
+ ! holding a 5-d real-valued field and its halo-exchange metadata
+ type field5DReal
+
+ ! Back-pointer to the containing block
+ type (block_type), pointer :: block
+ ! Raw array holding field data on this block
+ real (kind=RKIND), dimension(:,:,:,:,:), pointer :: array
+
+ ! Information used by the I/O layer
+ type (io_info), pointer :: ioinfo ! to be removed later
+ character (len=StrKIND) :: fieldName
+ ! Per-constituent names (presumably used when the field is a super-array)
+ character (len=StrKIND), dimension(:), pointer :: constituentNames => null()
+ ! Names and extents of the five dimensions (extents sized via dimSizes
+ ! in the allocation/communication routines)
+ character (len=StrKIND), dimension(5) :: dimNames
+ integer, dimension(5) :: dimSizes
+ logical :: hasTimeDimension
+ logical :: isSuperArray
+ ! Attribute list attached to this field by the I/O layer
+ type (att_list_type), pointer :: attList => null()
+
+ ! Pointers to the prev and next blocks for this field on this task
+ type (field5DReal), pointer :: prev, next
+
+ ! Halo communication lists
+ type (mpas_multihalo_exchange_list), pointer :: sendList
+ type (mpas_multihalo_exchange_list), pointer :: recvList
+ type (mpas_multihalo_exchange_list), pointer :: copyList
+ end type field5DReal
+
+
+ ! Derived type for storing fields: one node of a per-block linked list
+ ! holding a 4-d real-valued field and its halo-exchange metadata
+ type field4DReal
+
+ ! Back-pointer to the containing block
+ type (block_type), pointer :: block
+
+ ! Raw array holding field data on this block
+ real (kind=RKIND), dimension(:,:,:,:), pointer :: array
+
+ ! Information used by the I/O layer
+ type (io_info), pointer :: ioinfo ! to be removed later
+ character (len=StrKIND) :: fieldName
+ ! Per-constituent names (presumably used when the field is a super-array)
+ character (len=StrKIND), dimension(:), pointer :: constituentNames => null()
+ ! Names and extents of the four dimensions (extents sized via dimSizes
+ ! in the allocation/communication routines)
+ character (len=StrKIND), dimension(4) :: dimNames
+ integer, dimension(4) :: dimSizes
+ logical :: hasTimeDimension
+ logical :: isSuperArray
+ ! Attribute list attached to this field by the I/O layer
+ type (att_list_type), pointer :: attList => null()
+
+ ! Pointers to the prev and next blocks for this field on this task
+ type (field4DReal), pointer :: prev, next
+
+ ! Halo communication lists
+ type (mpas_multihalo_exchange_list), pointer :: sendList
+ type (mpas_multihalo_exchange_list), pointer :: recvList
+ type (mpas_multihalo_exchange_list), pointer :: copyList
+ end type field4DReal
+
+
+
+ ! Derived type for storing fields
type field3DReal
! Back-pointer to the containing block
@@ -370,6 +428,8 @@
module procedure mpas_allocate_scratch_field1d_real
module procedure mpas_allocate_scratch_field2d_real
module procedure mpas_allocate_scratch_field3d_real
+ module procedure mpas_allocate_scratch_field4d_real
+ module procedure mpas_allocate_scratch_field5d_real
module procedure mpas_allocate_scratch_field1d_char
end interface
@@ -380,6 +440,8 @@
module procedure mpas_deallocate_scratch_field1d_real
module procedure mpas_deallocate_scratch_field2d_real
module procedure mpas_deallocate_scratch_field3d_real
+ module procedure mpas_deallocate_scratch_field4d_real
+ module procedure mpas_deallocate_scratch_field5d_real
module procedure mpas_deallocate_scratch_field1d_char
end interface
@@ -392,6 +454,8 @@
module procedure mpas_deallocate_field1d_real
module procedure mpas_deallocate_field2d_real
module procedure mpas_deallocate_field3d_real
+ module procedure mpas_deallocate_field4d_real
+ module procedure mpas_deallocate_field5d_real
module procedure mpas_deallocate_field0d_char
module procedure mpas_deallocate_field1d_char
end interface
@@ -632,6 +696,62 @@
end subroutine mpas_allocate_scratch_field3d_real!}}}
+ subroutine mpas_allocate_scratch_field4d_real(f, single_block_in)!{{{
+ ! Allocate the data array of a 4-d real scratch field, sized from the
+ ! field's dimSizes. Arrays that are already associated are left alone.
+ ! f               - head of the field's per-block linked list
+ ! single_block_in - optional; when .true., allocate only the head
+ ! block's array instead of walking the whole list (default .false.)
+ type (field4dReal), pointer :: f
+ logical, intent(in), optional :: single_block_in
+ logical :: single_block
+ type (field4dReal), pointer :: f_cursor
+
+ if(present(single_block_in)) then
+ single_block = single_block_in
+ else
+ single_block = .false.
+ end if
+
+ if(.not. single_block) then
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(.not.associated(f_cursor % array)) then
+ allocate(f_cursor % array(f_cursor % dimSizes(1), f_cursor % dimSizes(2), f_cursor % dimSizes(3), f_cursor % dimSizes(4)))
+ end if
+ f_cursor => f_cursor % next
+ end do
+ else
+ if(.not.associated(f % array)) then
+ allocate(f % array(f % dimSizes(1), f % dimSizes(2), f % dimSizes(3), f % dimSizes(4)))
+ end if
+ end if
+
+ end subroutine mpas_allocate_scratch_field4d_real!}}}
+
+ subroutine mpas_allocate_scratch_field5d_real(f, single_block_in)!{{{
+ ! Allocate the data array of a 5-d real scratch field, sized from the
+ ! field's dimSizes. Arrays that are already associated are left alone.
+ ! f               - head of the field's per-block linked list
+ ! single_block_in - optional; when .true., allocate only the head
+ ! block's array instead of walking the whole list (default .false.)
+ type (field5dReal), pointer :: f
+ logical, intent(in), optional :: single_block_in
+ logical :: single_block
+ type (field5dReal), pointer :: f_cursor
+
+ if(present(single_block_in)) then
+ single_block = single_block_in
+ else
+ single_block = .false.
+ end if
+
+ if(.not. single_block) then
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(.not.associated(f_cursor % array)) then
+ allocate(f_cursor % array(f_cursor % dimSizes(1), f_cursor % dimSizes(2), f_cursor % dimSizes(3), f_cursor % dimSizes(4), f_cursor % dimSizes(5)))
+ end if
+ f_cursor => f_cursor % next
+ end do
+ else
+ if(.not.associated(f % array)) then
+ allocate(f % array(f % dimSizes(1), f % dimSizes(2), f % dimSizes(3), f % dimSizes(4), f % dimSizes(5)))
+ end if
+ end if
+
+ end subroutine mpas_allocate_scratch_field5d_real!}}}
+
subroutine mpas_allocate_scratch_field1d_char(f, single_block_in)!{{{
type (field1dChar), pointer :: f
logical, intent(in), optional :: single_block_in
@@ -834,6 +954,64 @@
end subroutine mpas_deallocate_scratch_field3d_real!}}}
+ subroutine mpas_deallocate_scratch_field4d_real(f, single_block_in)!{{{
+ ! Deallocate the data array of a 4-d real scratch field; the field
+ ! nodes themselves are kept. Unassociated arrays are skipped.
+ ! f               - head of the field's per-block linked list
+ ! single_block_in - optional; when .true., free only the head block's
+ ! array instead of walking the whole list (default .false.)
+ type (field4dReal), pointer :: f
+ logical, intent(in), optional :: single_block_in
+ logical :: single_block
+ type (field4dReal), pointer :: f_cursor
+
+ if(present(single_block_in)) then
+ single_block = single_block_in
+ else
+ single_block = .false.
+ end if
+
+ if(.not.single_block) then
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(associated(f_cursor % array)) then
+ deallocate(f_cursor % array)
+ end if
+
+ f_cursor => f_cursor % next
+ end do
+ else
+ if(associated(f % array)) then
+ deallocate(f % array)
+ end if
+ end if
+
+ end subroutine mpas_deallocate_scratch_field4d_real!}}}
+
+ subroutine mpas_deallocate_scratch_field5d_real(f, single_block_in)!{{{
+ ! Deallocate the data array of a 5-d real scratch field; the field
+ ! nodes themselves are kept. Unassociated arrays are skipped.
+ ! f               - head of the field's per-block linked list
+ ! single_block_in - optional; when .true., free only the head block's
+ ! array instead of walking the whole list (default .false.)
+ type (field5dReal), pointer :: f
+ logical, intent(in), optional :: single_block_in
+ logical :: single_block
+ type (field5dReal), pointer :: f_cursor
+
+ if(present(single_block_in)) then
+ single_block = single_block_in
+ else
+ single_block = .false.
+ end if
+
+ if(.not.single_block) then
+ f_cursor => f
+ do while(associated(f_cursor))
+ if(associated(f_cursor % array)) then
+ deallocate(f_cursor % array)
+ end if
+
+ f_cursor => f_cursor % next
+ end do
+ else
+ if(associated(f % array)) then
+ deallocate(f % array)
+ end if
+ end if
+
+ end subroutine mpas_deallocate_scratch_field5d_real!}}}
+
subroutine mpas_deallocate_scratch_field1d_char(f, single_block_in)!{{{
type (field1dChar), pointer :: f
logical, intent(in), optional :: single_block_in
@@ -1073,6 +1251,60 @@
end subroutine mpas_deallocate_field3d_real!}}}
+ subroutine mpas_deallocate_field4d_real(f)!{{{
+ ! Destroy an entire 4-d real field linked list: free each node's
+ ! ioinfo and data array (when associated) and the node itself.
+ ! On return f is nullified.
+ type (field4dReal), pointer :: f
+ type (field4dReal), pointer :: f_cursor
+
+ f_cursor => f
+ do while(associated(f_cursor))
+ ! Advance the list head before freeing the current node so f stays valid
+ if(associated(f % next)) then
+ f => f % next
+ else
+ nullify(f)
+ end if
+
+ if(associated(f_cursor % ioinfo)) then
+ deallocate(f_cursor % ioinfo)
+ end if
+
+ if(associated(f_cursor % array)) then
+ deallocate(f_cursor % array)
+ end if
+
+ deallocate(f_cursor)
+
+ f_cursor => f
+ end do
+
+ end subroutine mpas_deallocate_field4d_real!}}}
+
+ subroutine mpas_deallocate_field5d_real(f)!{{{
+ ! Destroy an entire 5-d real field linked list: free each node's
+ ! ioinfo and data array (when associated) and the node itself.
+ ! On return f is nullified.
+ type (field5dReal), pointer :: f
+ type (field5dReal), pointer :: f_cursor
+
+ f_cursor => f
+ do while(associated(f_cursor))
+ ! Advance the list head before freeing the current node so f stays valid
+ if(associated(f % next)) then
+ f => f % next
+ else
+ nullify(f)
+ end if
+
+ if(associated(f_cursor % ioinfo)) then
+ deallocate(f_cursor % ioinfo)
+ end if
+
+ if(associated(f_cursor % array)) then
+ deallocate(f_cursor % array)
+ end if
+
+ deallocate(f_cursor)
+
+ f_cursor => f
+ end do
+
+ end subroutine mpas_deallocate_field5d_real!}}}
+
subroutine mpas_deallocate_field0d_char(f)!{{{
type (field0dChar), pointer :: f
type (field0dChar), pointer :: f_cursor
Modified: branches/atmos_physics/src/framework/mpas_io.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_io.F        2013-03-08 23:01:58 UTC (rev 2577)
+++ branches/atmos_physics/src/framework/mpas_io.F        2013-03-09 00:48:01 UTC (rev 2578)
@@ -77,6 +77,7 @@
module procedure MPAS_io_get_var_real2d
module procedure MPAS_io_get_var_real3d
module procedure MPAS_io_get_var_real4d
+ module procedure MPAS_io_get_var_real5d
module procedure MPAS_io_get_var_char0d
end interface MPAS_io_get_var
@@ -91,6 +92,7 @@
module procedure MPAS_io_put_var_real2d
module procedure MPAS_io_put_var_real3d
module procedure MPAS_io_put_var_real4d
+ module procedure MPAS_io_put_var_real5d
module procedure MPAS_io_put_var_char0d
end interface MPAS_io_put_var
@@ -176,21 +178,21 @@
type (fieldlist_type), pointer :: next => null()
end type fieldlist_type
- type (iosystem_desc_t), private, save :: pio_iosystem
+ type (iosystem_desc_t), pointer, private, save :: pio_iosystem
type (decomplist_type), pointer, private :: decomp_list => null()
type (dm_info), private :: local_dminfo
contains
+ subroutine MPAS_io_init(dminfo, io_task_count, io_task_stride, io_system, ierr)
- subroutine MPAS_io_init(dminfo, io_task_count, io_task_stride, ierr)
-
implicit none
type (dm_info), intent(in) :: dminfo
integer, intent(in) :: io_task_count
integer, intent(in) :: io_task_stride
+ type (iosystem_desc_t), optional, pointer :: io_system
integer, intent(out), optional :: ierr
! write(0,*) 'Called MPAS_io_init()'
@@ -198,17 +200,22 @@
local_dminfo = dminfo
+ if(present(io_system)) then
+ pio_iosystem => io_system
+ else
!write(0,*) 'MGD PIO_init'
- call PIO_init(local_dminfo % my_proc_id, & ! comp_rank
- local_dminfo % comm, & ! comp_comm
- io_task_count, & ! num_iotasks
- 0, & ! num_aggregator
- io_task_stride, & ! stride
- PIO_rearr_box, & ! rearr
- pio_iosystem) ! iosystem
+ allocate(pio_iosystem)
+ call PIO_init(local_dminfo % my_proc_id, & ! comp_rank
+ local_dminfo % comm, & ! comp_comm
+ io_task_count, & ! num_iotasks
+ 0, & ! num_aggregator
+ io_task_stride, & ! stride
+ PIO_rearr_box, & ! rearr
+ pio_iosystem) ! iosystem
+
+ call pio_seterrorhandling(pio_iosystem, PIO_BCAST_ERROR)
+ end if
- call pio_seterrorhandling(pio_iosystem, PIO_BCAST_ERROR)
-
end subroutine MPAS_io_init
@@ -552,7 +559,7 @@
if (present(ierr)) ierr = MPAS_IO_ERR_PIO
deallocate(new_fieldlist_node % fieldhandle)
deallocate(new_fieldlist_node)
-! write(0,*) 'WARNING: Variable ', trim(fieldname), ' not in input file.'
+ write(0,*) 'WARNING: Variable ', trim(fieldname), ' not in input file.'
return
end if
!write(0,*) 'Inquired about variable ID', new_fieldlist_node % fieldhandle % fieldid
@@ -931,8 +938,6 @@
end if
! write(0,*) 'Assigning ', size(indices), ' indices for ', trim(fieldname)
-
-
!
! Check whether the field has been defined
!
@@ -1047,7 +1052,7 @@
pio_type = PIO_int
else if (field_cursor % fieldhandle % field_type == MPAS_IO_CHAR) then
pio_type = PIO_char
-!!!!!!!! PIO DOES NOT SUPPORT LOGICAL !!!!!!!!
+ !!!!!!! PIO DOES NOT SUPPORT LOGICAL !!!!!!!!
end if
allocate(dimlist(ndims))
@@ -1143,7 +1148,7 @@
subroutine MPAS_io_get_var_generic(handle, fieldname, intVal, intArray1d, intArray2d, intArray3d, intArray4d, &
- realVal, realArray1d, realArray2d, realArray3d, realArray4d, &
+ realVal, realArray1d, realArray2d, realArray3d, realArray4d, realArray5d, &
charVal, ierr)
implicit none
@@ -1160,6 +1165,7 @@
real (kind=RKIND), dimension(:,:), intent(out), optional :: realArray2d
real (kind=RKIND), dimension(:,:,:), intent(out), optional :: realArray3d
real (kind=RKIND), dimension(:,:,:,:), intent(out), optional :: realArray4d
+ real (kind=RKIND), dimension(:,:,:,:,:), intent(out), optional :: realArray5d
character (len=*), intent(out), optional :: charVal
integer, intent(out), optional :: ierr
@@ -1260,6 +1266,10 @@
! write (0,*) ' value is real4'
call PIO_read_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &
realArray4d, pio_ierr)
+ else if (present(realArray5d)) then
+! write (0,*) ' value is real5'
+ call PIO_read_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &
+ realArray5d, pio_ierr)
else if (present(intArray1d)) then
! write (0,*) ' value is int1'
call PIO_read_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &
@@ -1489,6 +1499,26 @@
end subroutine MPAS_io_get_var_real4d
+ subroutine MPAS_io_get_var_real5d(handle, fieldname, array, ierr)
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ real (kind=RKIND), dimension(:,:,:,:,:), intent(out) :: array
+ integer, intent(out), optional :: ierr
+
+ integer :: pio_ierr
+ type (fieldlist_type), pointer :: field_cursor
+
+! write(0,*) 'Called MPAS_io_get_var_real5d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ call MPAS_io_get_var_generic(handle, fieldname, realArray5d=array, ierr=ierr)
+
+ end subroutine MPAS_io_get_var_real5d
+
+
subroutine MPAS_io_get_var_char0d(handle, fieldname, val, ierr)
implicit none
@@ -1510,7 +1540,7 @@
subroutine MPAS_io_put_var_generic(handle, fieldname, intVal, intArray1d, intArray2d, intArray3d, intArray4d, &
- realVal, realArray1d, realArray2d, realArray3d, realArray4d, &
+ realVal, realArray1d, realArray2d, realArray3d, realArray4d, realArray5d, &
charVal, ierr)
implicit none
@@ -1527,6 +1557,7 @@
real (kind=RKIND), dimension(:,:), intent(in), optional :: realArray2d
real (kind=RKIND), dimension(:,:,:), intent(in), optional :: realArray3d
real (kind=RKIND), dimension(:,:,:,:), intent(in), optional :: realArray4d
+ real (kind=RKIND), dimension(:,:,:,:,:), intent(in), optional :: realArray5d
character (len=*), intent(in), optional :: charVal
integer, intent(out), optional :: ierr
@@ -1626,6 +1657,9 @@
else if (present(realArray4d)) then
call PIO_write_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &
realArray4d, pio_ierr)
+ else if (present(realArray5d)) then
+ call PIO_write_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &
+ realArray5d, pio_ierr)
else if (present(intArray1d)) then
call PIO_write_darray(handle % pio_file, field_cursor % fieldhandle % field_desc, field_cursor % fieldhandle % decomp % pio_iodesc, &
intArray1d, pio_ierr)
@@ -1849,6 +1883,26 @@
end subroutine MPAS_io_put_var_real4d
+ subroutine MPAS_io_put_var_real5d(handle, fieldname, array, ierr)
+
+ implicit none
+
+ type (MPAS_IO_Handle_type), intent(inout) :: handle
+ character (len=*), intent(in) :: fieldname
+ real (kind=RKIND), dimension(:,:,:,:,:), intent(in) :: array
+ integer, intent(out), optional :: ierr
+
+ integer :: pio_ierr
+ type (fieldlist_type), pointer :: field_cursor
+
+! write(0,*) 'Called MPAS_io_put_var_real5d()'
+ if (present(ierr)) ierr = MPAS_IO_NOERR
+
+ call MPAS_io_put_var_generic(handle, fieldname, realArray5d=array, ierr=ierr)
+
+ end subroutine MPAS_io_put_var_real5d
+
+
subroutine MPAS_io_put_var_char0d(handle, fieldname, val, ierr)
implicit none
@@ -3388,10 +3442,11 @@
end subroutine MPAS_io_close
- subroutine MPAS_io_finalize(ierr)
+ subroutine MPAS_io_finalize(io_system, ierr)
implicit none
+ type (iosystem_desc_t), optional, pointer :: io_system
integer, intent(out), optional :: ierr
integer :: pio_ierr
@@ -3414,10 +3469,13 @@
end do
!write(0,*) 'MGD PIO_finalize'
- call PIO_finalize(pio_iosystem, pio_ierr)
- if (pio_ierr /= PIO_noerr) then
- if (present(ierr)) ierr = MPAS_IO_ERR_PIO
- return
+ if(.not.present(io_system)) then
+ call PIO_finalize(pio_iosystem, pio_ierr)
+ if (pio_ierr /= PIO_noerr) then
+ if (present(ierr)) ierr = MPAS_IO_ERR_PIO
+ return
+ end if
+ deallocate(pio_iosystem)
end if
end subroutine MPAS_io_finalize
Modified: branches/atmos_physics/src/framework/mpas_io_input.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_io_input.F        2013-03-08 23:01:58 UTC (rev 2577)
+++ branches/atmos_physics/src/framework/mpas_io_input.F        2013-03-09 00:48:01 UTC (rev 2578)
@@ -94,7 +94,7 @@
type (graph) :: partial_global_graph_info
type (MPAS_Time_type) :: startTime
- character(len=StrKIND) :: timeStamp
+ character(len=StrKIND) :: timeStamp, restartTimeStamp
character(len=StrKIND) :: filename
integer :: nHalos
@@ -103,9 +103,18 @@
if (config_do_restart) then
! this get followed by set is to ensure that the time is in standard format
- call mpas_set_time(curr_time=startTime, dateTimeString=config_start_time)
+ if(trim(config_start_time) == 'file') then
+ open(22,file='restart_timestamp',form='formatted',status='old')
+ read(22,*) restartTimeStamp
+ close(22)
+
+ else
+ restartTimeStamp = config_start_time
+ end if
+
+ write(0,*) 'RestartTimeStamp ', trim(restartTimeStamp)
+ call mpas_set_time(curr_time=startTime, dateTimeString=restartTimeStamp)
call mpas_get_time(curr_time=startTime, dateTimeString=timeStamp)
-
call mpas_insert_string_suffix(trim(config_restart_name), timeStamp, filename)
input_obj % filename = trim(filename)
@@ -256,11 +265,17 @@
! If doing a restart, we need to decide which time slice to read from the
! restart file
!
- input_obj % time = MPAS_seekStream(input_obj % io_stream, config_start_time, MPAS_STREAM_EXACT_TIME, timeStamp, ierr)
+ input_obj % time = MPAS_seekStream(input_obj % io_stream, restartTimeStamp, MPAS_STREAM_EXACT_TIME, timeStamp, ierr)
if (ierr == MPAS_IO_ERR) then
- write(0,*) 'Error: restart file '//trim(filename)//' did not contain time '//trim(config_start_time)
+ write(0,*) 'Error: restart file '//trim(filename)//' did not contain time '//trim(restartTimeStamp)
call mpas_dmpar_abort(domain % dminfo)
end if
+
+! input_obj % time = MPAS_seekStream(input_obj % io_stream, config_start_time, MPAS_STREAM_EXACT_TIME, timeStamp, ierr)
+! if (ierr == MPAS_IO_ERR) then
+! write(0,*) 'Error: restart file '//trim(filename)//' did not contain time '//trim(config_start_time)
+! call mpas_dmpar_abort(domain % dminfo)
+! end if
!write(0,*) 'MGD DEBUGGING time = ', input_obj % time
write(0,*) 'Restarting model from time ', trim(timeStamp)
end if
Modified: branches/atmos_physics/src/framework/mpas_io_output.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_io_output.F        2013-03-08 23:01:58 UTC (rev 2577)
+++ branches/atmos_physics/src/framework/mpas_io_output.F        2013-03-09 00:48:01 UTC (rev 2578)
@@ -48,6 +48,9 @@
else if (trim(stream) == 'RESTART') then
if(present(outputSuffix)) then
call mpas_insert_string_suffix(config_restart_name, outputSuffix, tempfilename)
+ open(22,file='restart_timestamp',form='formatted',status='replace')
+ write(22,*) outputSuffix
+ close(22)
else
tempfilename = config_restart_name
end if
Modified: branches/atmos_physics/src/framework/mpas_io_streams.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_io_streams.F        2013-03-08 23:01:58 UTC (rev 2577)
+++ branches/atmos_physics/src/framework/mpas_io_streams.F        2013-03-09 00:48:01 UTC (rev 2578)
@@ -19,6 +19,8 @@
type (field1dReal), pointer :: real1dField => null()
type (field2dReal), pointer :: real2dField => null()
type (field3dReal), pointer :: real3dField => null()
+ type (field4dReal), pointer :: real4dField => null()
+ type (field5dReal), pointer :: real5dField => null()
type (field0dChar), pointer :: char0dField => null()
type (field1dChar), pointer :: char1dField => null()
type (field_list_type), pointer :: next => null()
@@ -44,6 +46,8 @@
module procedure MPAS_streamAddField_1dReal
module procedure MPAS_streamAddField_2dReal
module procedure MPAS_streamAddField_3dReal
+ module procedure MPAS_streamAddField_4dReal
+ module procedure MPAS_streamAddField_5dReal
module procedure MPAS_streamAddField_0dChar
end interface MPAS_streamAddField
@@ -82,8 +86,10 @@
FIELD_1D_REAL = 6, &
FIELD_2D_REAL = 7, &
FIELD_3D_REAL = 8, &
- FIELD_0D_CHAR = 9, &
- FIELD_1D_CHAR = 10
+ FIELD_4D_REAL = 9, &
+ FIELD_5D_REAL = 10, &
+ FIELD_0D_CHAR = 11, &
+ FIELD_1D_CHAR = 12
private mergeArrays
@@ -996,6 +1002,208 @@
end subroutine MPAS_streamAddField_3dReal
+ subroutine MPAS_streamAddField_4dReal(stream, field, ierr)
+
+ implicit none
+
+ type (MPAS_Stream_type), intent(inout) :: stream
+ type (field4DReal), intent(in), target :: field
+ integer, intent(out), optional :: ierr
+
+ integer :: io_err
+ integer :: i
+ integer :: idim
+ integer :: totalDimSize, globalDimSize
+ logical :: isDecomposed
+ integer :: ndims
+ type (field4dReal), pointer :: field_ptr
+ character (len=StrKIND), dimension(5) :: dimNames
+ character (len=StrKIND), dimension(:), pointer :: dimNamesInq
+ integer, dimension(:), pointer :: indices
+ type (field_list_type), pointer :: field_list_cursor
+ type (field_list_type), pointer :: new_field_list_node
+ logical :: any_success
+ logical, dimension(:), pointer :: isAvailable
+
+ if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+ !
+ ! Sanity checks
+ !
+ if (.not. stream % isInitialized) then
+ if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED
+ return
+ end if
+
+!write(0,*) '... Adding field '//trim(field % fieldName)//' to stream'
+
+ ndims = size(field % dimSizes)
+
+!write(0,*) '... field has ', ndims, ' dimensions'
+
+ !
+ ! Determine whether the field is decomposed, the indices that are owned by this task's blocks,
+ ! and the total number of outer-indices owned by this task
+ !
+#include "add_field_indices.inc"
+
+
+ any_success = .false.
+ if (field % isSuperArray) then
+!write(0,*) '^^^^^^^^^^^^^^^^^^^^^^^^^^^ we are adding a super-array'
+ allocate(isAvailable(size(field % constituentNames)))
+ isAvailable(:) = .false.
+ do i=1,size(field % constituentNames)
+ call MPAS_streamAddField_generic(stream, trim(field % constituentNames(i)), MPAS_IO_DOUBLE, field % dimNames(2:ndims), &
+ field % dimSizes(2:ndims), field % hasTimeDimension, isDecomposed, totalDimSize, globalDimSize, &
+ indices, io_err)
+ if (io_err == MPAS_STREAM_NOERR) then
+ isAvailable(i) = .true.
+ any_success = .true.
+ end if
+ end do
+ else
+ nullify(isAvailable)
+ call MPAS_streamAddField_generic(stream, trim(field % fieldName), MPAS_IO_DOUBLE, field % dimNames, field % dimSizes, &
+ field % hasTimeDimension, isDecomposed, totalDimSize, globalDimSize, indices, io_err)
+ if (io_err == MPAS_STREAM_NOERR) then
+ any_success = .true.
+ end if
+ end if
+
+ deallocate(indices)
+ if (.not. any_success) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ return
+ end if
+
+ if (field % isSuperArray) then
+ do i=1,size(field % constituentNames)
+ call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attList)
+ end do
+ else
+ call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attList)
+ end if
+
+
+ !
+ ! Set field pointer and type in fieldList
+ !
+ new_field_list_node => stream % fieldList
+ do while (associated(new_field_list_node % next))
+ new_field_list_node => new_field_list_node % next
+ end do
+ new_field_list_node % field_type = FIELD_4D_REAL
+ new_field_list_node % real4dField => field
+ new_field_list_node % isAvailable => isAvailable
+
+!write(0,*) '... done adding field'
+!write(0,*) 'DEBUGGING : Finished adding 4d real field '//trim(field % fieldName)
+
+ end subroutine MPAS_streamAddField_4dReal
+
+
+ subroutine MPAS_streamAddField_5dReal(stream, field, ierr)
+
+ implicit none
+
+ type (MPAS_Stream_type), intent(inout) :: stream
+ type (field5DReal), intent(in), target :: field
+ integer, intent(out), optional :: ierr
+
+ integer :: io_err
+ integer :: i
+ integer :: idim
+ integer :: totalDimSize, globalDimSize
+ logical :: isDecomposed
+ integer :: ndims
+ type (field5dReal), pointer :: field_ptr
+ character (len=StrKIND), dimension(5) :: dimNames
+ character (len=StrKIND), dimension(:), pointer :: dimNamesInq
+ integer, dimension(:), pointer :: indices
+ type (field_list_type), pointer :: field_list_cursor
+ type (field_list_type), pointer :: new_field_list_node
+ logical :: any_success
+ logical, dimension(:), pointer :: isAvailable
+
+ if (present(ierr)) ierr = MPAS_STREAM_NOERR
+
+ !
+ ! Sanity checks
+ !
+ if (.not. stream % isInitialized) then
+ if (present(ierr)) ierr = MPAS_STREAM_NOT_INITIALIZED
+ return
+ end if
+
+!write(0,*) '... Adding field '//trim(field % fieldName)//' to stream'
+
+ ndims = size(field % dimSizes)
+
+!write(0,*) '... field has ', ndims, ' dimensions'
+
+ !
+ ! Determine whether the field is decomposed, the indices that are owned by this task's blocks,
+ ! and the total number of outer-indices owned by this task
+ !
+#include "add_field_indices.inc"
+
+
+ any_success = .false.
+ if (field % isSuperArray) then
+!write(0,*) '^^^^^^^^^^^^^^^^^^^^^^^^^^^ we are adding a super-array'
+ allocate(isAvailable(size(field % constituentNames)))
+ isAvailable(:) = .false.
+ do i=1,size(field % constituentNames)
+ call MPAS_streamAddField_generic(stream, trim(field % constituentNames(i)), MPAS_IO_DOUBLE, field % dimNames(2:ndims), &
+ field % dimSizes(2:ndims), field % hasTimeDimension, isDecomposed, totalDimSize, globalDimSize, &
+ indices, io_err)
+ if (io_err == MPAS_STREAM_NOERR) then
+ isAvailable(i) = .true.
+ any_success = .true.
+ end if
+ end do
+ else
+ nullify(isAvailable)
+ call MPAS_streamAddField_generic(stream, trim(field % fieldName), MPAS_IO_DOUBLE, field % dimNames, field % dimSizes, &
+ field % hasTimeDimension, isDecomposed, totalDimSize, globalDimSize, indices, io_err)
+ if (io_err == MPAS_STREAM_NOERR) then
+ any_success = .true.
+ end if
+ end if
+
+ deallocate(indices)
+ if (.not. any_success) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ return
+ end if
+
+ if (field % isSuperArray) then
+ do i=1,size(field % constituentNames)
+ call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % constituentNames(i)), field % attList)
+ end do
+ else
+ call put_get_field_atts(stream % fileHandle, stream % ioDirection, trim(field % fieldname), field % attList)
+ end if
+
+
+ !
+ ! Set field pointer and type in fieldList
+ !
+ new_field_list_node => stream % fieldList
+ do while (associated(new_field_list_node % next))
+ new_field_list_node => new_field_list_node % next
+ end do
+ new_field_list_node % field_type = FIELD_5D_REAL
+ new_field_list_node % real5dField => field
+ new_field_list_node % isAvailable => isAvailable
+
+!write(0,*) '... done adding field'
+!write(0,*) 'DEBUGGING : Finished adding 5d real field '//trim(field % fieldName)
+
+ end subroutine MPAS_streamAddField_5dReal
+
+
subroutine MPAS_streamAddField_0dChar(stream, field, ierr)
implicit none
@@ -1313,6 +1521,8 @@
type (field1dReal), pointer :: field_1dreal_ptr
type (field2dReal), pointer :: field_2dreal_ptr
type (field3dReal), pointer :: field_3dreal_ptr
+ type (field4dReal), pointer :: field_4dreal_ptr
+ type (field5dReal), pointer :: field_5dreal_ptr
type (field0dChar), pointer :: field_0dchar_ptr
type (field1dChar), pointer :: field_1dchar_ptr
type (field_list_type), pointer :: field_cursor
@@ -1324,6 +1534,8 @@
real (kind=RKIND), dimension(:), pointer :: real1d_temp
real (kind=RKIND), dimension(:,:), pointer :: real2d_temp
real (kind=RKIND), dimension(:,:,:), pointer :: real3d_temp
+ real (kind=RKIND), dimension(:,:,:,:), pointer :: real4d_temp
+ real (kind=RKIND), dimension(:,:,:,:,:), pointer :: real5d_temp
if (present(ierr)) ierr = MPAS_STREAM_NOERR
@@ -1876,7 +2088,185 @@
else
deallocate(real3d_temp)
end if
+ else if (field_cursor % field_type == FIELD_4D_REAL) then
+!write(0,*) 'DEBUGGING : *************** '//trim(field_cursor % real4dField % fieldName)
+!write(0,*) 'DEBUGGING : reading a 4d real array'
+ if (field_cursor % real4dField % isSuperArray) then
+!write(0,*) 'DEBUGGING : reading a 4d real super-array'
+ ncons = size(field_cursor % real4dField % constituentNames)
+ allocate(real3d_temp(field_cursor % real4dField % dimSizes(2), &
+ field_cursor % real4dField % dimSizes(3), &
+ field_cursor % totalDimSize))
+ else
+ ncons = 1
+ allocate(real4d_temp(field_cursor % real4dField % dimSizes(1), &
+ field_cursor % real4dField % dimSizes(2), &
+ field_cursor % real4dField % dimSizes(3), &
+ field_cursor % totalDimSize))
+ end if
+
+ do j=1,ncons
+ if (field_cursor % real4dField % isSuperArray) then
+ if (.not. field_cursor % isAvailable(j)) cycle
+!write(0,*) 'DEBUGGING : calling get_var for a constituent'
+ call MPAS_io_get_var(stream % fileHandle, field_cursor % real4dField % constituentNames(j), real3d_temp, io_err)
+ else
+ call MPAS_io_get_var(stream % fileHandle, field_cursor % real4dField % fieldName, real4d_temp, io_err)
+ end if
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ if (field_cursor % real4dField % isSuperArray) then
+ deallocate(real3d_temp)
+ else
+ deallocate(real4d_temp)
+ end if
+ return
+ end if
+
+ if (field_cursor % isDecomposed) then
+ ! Distribute field to multiple blocks
+ field_4dreal_ptr => field_cursor % real4dField
+ i = 1
+ do while (associated(field_4dreal_ptr))
+ if (trim(field_4dreal_ptr % dimNames(4)) == 'nCells') then
+ ownedSize = field_4dreal_ptr % block % mesh % nCellsSolve
+ else if (trim(field_4dreal_ptr % dimNames(4)) == 'nEdges') then
+ ownedSize = field_4dreal_ptr % block % mesh % nEdgesSolve
+ else if (trim(field_4dreal_ptr % dimNames(4)) == 'nVertices') then
+ ownedSize = field_4dreal_ptr % block % mesh % nVerticesSolve
+ else
+ ownedSize = field_4dreal_ptr % dimSizes(4)
+ end if
+
+ if (field_cursor % real4dField % isSuperArray) then
+!write(0,*) 'DEBUGGING : copying the temporary array'
+ field_4dreal_ptr % array(j, :,:,1:ownedSize) = real3d_temp(:,:,i:i+ownedSize-1)
+ else
+ field_4dreal_ptr % array(:,:,:,1:ownedSize) = real4d_temp(:,:,:,i:i+ownedSize-1)
+ end if
+ i = i + ownedSize
+ field_4dreal_ptr => field_4dreal_ptr % next
+ end do
+
+ else
+
+ if (field_cursor % real4dField % isSuperArray) then
+ call mpas_dmpar_bcast_reals(field_cursor % real4dField % block % domain % dminfo, size(real3d_temp), real3d_temp(:,1,1))
+ field_4dreal_ptr => field_cursor % real4dField
+ do while (associated(field_4dreal_ptr))
+ field_4dreal_ptr % array(j,:,:,:) = real3d_temp(:,:,:)
+ field_4dreal_ptr => field_4dreal_ptr % next
+ end do
+ else
+ call mpas_dmpar_bcast_reals(field_cursor % real4dField % block % domain % dminfo, size(real4d_temp), real4d_temp(:,1,1,1))
+ field_4dreal_ptr => field_cursor % real4dField
+ do while (associated(field_4dreal_ptr))
+ field_4dreal_ptr % array(:,:,:,:) = real4d_temp(:,:,:,:)
+ field_4dreal_ptr => field_4dreal_ptr % next
+ end do
+ end if
+ end if
+ end do
+
+ if (field_cursor % real4dField % isSuperArray) then
+ deallocate(real3d_temp)
+ else
+ deallocate(real4d_temp)
+ end if
+
+ else if (field_cursor % field_type == FIELD_5D_REAL) then
+
+!write(0,*) 'DEBUGGING : *************** '//trim(field_cursor % real5dField % fieldName)
+!write(0,*) 'DEBUGGING : reading a 5d real array'
+ if (field_cursor % real5dField % isSuperArray) then
+!write(0,*) 'DEBUGGING : reading a 5d real super-array'
+ ncons = size(field_cursor % real5dField % constituentNames)
+ allocate(real4d_temp(field_cursor % real5dField % dimSizes(2), &
+ field_cursor % real5dField % dimSizes(3), &
+ field_cursor % real5dField % dimSizes(4), &
+ field_cursor % totalDimSize))
+ else
+ ncons = 1
+ allocate(real5d_temp(field_cursor % real5dField % dimSizes(1), &
+ field_cursor % real5dField % dimSizes(2), &
+ field_cursor % real5dField % dimSizes(3), &
+ field_cursor % real5dField % dimSizes(4), &
+ field_cursor % totalDimSize))
+ end if
+
+ do j=1,ncons
+ if (field_cursor % real5dField % isSuperArray) then
+ if (.not. field_cursor % isAvailable(j)) cycle
+!write(0,*) 'DEBUGGING : calling get_var for a constituent'
+ call MPAS_io_get_var(stream % fileHandle, field_cursor % real5dField % constituentNames(j), real4d_temp, io_err)
+ else
+ call MPAS_io_get_var(stream % fileHandle, field_cursor % real5dField % fieldName, real5d_temp, io_err)
+ end if
+ call MPAS_io_err_mesg(io_err, .false.)
+ if (io_err /= MPAS_IO_NOERR) then
+ if (present(ierr)) ierr = MPAS_IO_ERR
+ if (field_cursor % real5dField % isSuperArray) then
+ deallocate(real4d_temp)
+ else
+ deallocate(real5d_temp)
+ end if
+ return
+ end if
+
+ if (field_cursor % isDecomposed) then
+ ! Distribute field to multiple blocks
+ field_5dreal_ptr => field_cursor % real5dField
+ i = 1
+ do while (associated(field_5dreal_ptr))
+ if (trim(field_5dreal_ptr % dimNames(5)) == 'nCells') then
+ ownedSize = field_5dreal_ptr % block % mesh % nCellsSolve
+ else if (trim(field_5dreal_ptr % dimNames(5)) == 'nEdges') then
+ ownedSize = field_5dreal_ptr % block % mesh % nEdgesSolve
+ else if (trim(field_5dreal_ptr % dimNames(5)) == 'nVertices') then
+ ownedSize = field_5dreal_ptr % block % mesh % nVerticesSolve
+ else
+ ownedSize = field_5dreal_ptr % dimSizes(5)
+ end if
+
+ if (field_cursor % real5dField % isSuperArray) then
+!write(0,*) 'DEBUGGING : copying the temporary array'
+ field_5dreal_ptr % array(j,:,:,:,1:ownedSize) = real4d_temp(:,:,:,i:i+ownedSize-1)
+ else
+ field_5dreal_ptr % array(:,:,:,:,1:ownedSize) = real5d_temp(:,:,:,:,i:i+ownedSize-1)
+ end if
+ i = i + ownedSize
+ field_5dreal_ptr => field_5dreal_ptr % next
+ end do
+
+ else
+
+ if (field_cursor % real5dField % isSuperArray) then
+ call mpas_dmpar_bcast_reals(field_cursor % real5dField % block % domain % dminfo, size(real4d_temp), real4d_temp(:,1,1,1))
+ field_5dreal_ptr => field_cursor % real5dField
+ do while (associated(field_5dreal_ptr))
+ field_5dreal_ptr % array(j,:,:,:,:) = real4d_temp(:,:,:,:)
+ field_5dreal_ptr => field_5dreal_ptr % next
+ end do
+ else
+ call mpas_dmpar_bcast_reals(field_cursor % real5dField % block % domain % dminfo, size(real5d_temp), real5d_temp(:,1,1,1,1))
+ field_5dreal_ptr => field_cursor % real5dField
+ do while (associated(field_5dreal_ptr))
+ field_5dreal_ptr % array(:,:,:,:,:) = real5d_temp(:,:,:,:,:)
+ field_5dreal_ptr => field_5dreal_ptr % next
+ end do
+ end if
+ end if
+ end do
+
+ if (field_cursor % real5dField % isSuperArray) then
+ deallocate(real4d_temp)
+ else
+ deallocate(real5d_temp)
+ end if
+
+
else if (field_cursor % field_type == FIELD_0D_CHAR) then
!write(0,*) 'Reading in field '//trim(field_cursor % char0dField % fieldName)
Modified: branches/atmos_physics/src/framework/mpas_timekeeping.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_timekeeping.F        2013-03-08 23:01:58 UTC (rev 2577)
+++ branches/atmos_physics/src/framework/mpas_timekeeping.F        2013-03-09 00:48:01 UTC (rev 2578)
@@ -116,6 +116,9 @@
character (len=*), intent(in) :: calendar
+#ifdef MPAS_CESM
+ TheCalendar = defaultCal % Type % caltype - 1
+#else
if (trim(calendar) == 'gregorian') then
TheCalendar = MPAS_GREGORIAN
call ESMF_Initialize(defaultCalendar=ESMF_CAL_GREGORIAN)
@@ -128,6 +131,7 @@
else
write(0,*) 'ERROR: mpas_timekeeping_init: Invalid calendar type'
end if
+#endif
end subroutine mpas_timekeeping_init
@@ -136,7 +140,9 @@
implicit none
+#ifndef MPAS_CESM
call ESMF_Finalize()
+#endif
end subroutine mpas_timekeeping_finalize
Modified: branches/atmos_physics/src/framework/mpas_timer.F
===================================================================
--- branches/atmos_physics/src/framework/mpas_timer.F        2013-03-08 23:01:58 UTC (rev 2577)
+++ branches/atmos_physics/src/framework/mpas_timer.F        2013-03-09 00:48:01 UTC (rev 2578)
@@ -269,7 +269,7 @@
timer_ptr%avg_time = 0.0d0
percent = 0.0d0
else
- timer_ptr%avg_time = timer_ptr%avg_time/timer_ptr%calls
+ timer_ptr%avg_time = timer_ptr%total_time/timer_ptr%calls
percent = timer_ptr%total_time/total_ptr%total_time
endif
Modified: branches/atmos_physics/src/registry/gen_inc.c
===================================================================
--- branches/atmos_physics/src/registry/gen_inc.c        2013-03-08 23:01:58 UTC (rev 2577)
+++ branches/atmos_physics/src/registry/gen_inc.c        2013-03-09 00:48:01 UTC (rev 2578)
@@ -718,35 +718,37 @@
var_list_ptr3 = var_list_ptr3->next;
}
- fortprintf(fd, " allocate(%s %% %s %% array(%i, ", group_ptr->name, var_ptr2->super_array, i);
- dimlist_ptr = var_ptr2->dimlist;
- if (!strncmp(dimlist_ptr->dim->name_in_file, "nCells", 1024) ||
- !strncmp(dimlist_ptr->dim->name_in_file, "nEdges", 1024) ||
- !strncmp(dimlist_ptr->dim->name_in_file, "nVertices", 1024))
- if (!dimlist_ptr->dim->namelist_defined) fortprintf(fd, "%s + 1", dimlist_ptr->dim->name_in_code);
- else fortprintf(fd, "%s + 1", dimlist_ptr->dim->name_in_file);
- else
- if (dimlist_ptr->dim->namelist_defined) fortprintf(fd, "%s", dimlist_ptr->dim->name_in_file);
- else fortprintf(fd, "%s", dimlist_ptr->dim->name_in_code);
- dimlist_ptr = dimlist_ptr->next;
- while (dimlist_ptr) {
+                        if(var_ptr2->persistence == PERSISTENT){
+ fortprintf(fd, " allocate(%s %% %s %% array(%i, ", group_ptr->name, var_ptr2->super_array, i);
+ dimlist_ptr = var_ptr2->dimlist;
if (!strncmp(dimlist_ptr->dim->name_in_file, "nCells", 1024) ||
!strncmp(dimlist_ptr->dim->name_in_file, "nEdges", 1024) ||
!strncmp(dimlist_ptr->dim->name_in_file, "nVertices", 1024))
- if (!dimlist_ptr->dim->namelist_defined) fortprintf(fd, ", %s + 1", dimlist_ptr->dim->name_in_code);
- else fortprintf(fd, ", %s + 1", dimlist_ptr->dim->name_in_file);
+ if (!dimlist_ptr->dim->namelist_defined) fortprintf(fd, "%s + 1", dimlist_ptr->dim->name_in_code);
+ else fortprintf(fd, "%s + 1", dimlist_ptr->dim->name_in_file);
else
- if (dimlist_ptr->dim->namelist_defined) fortprintf(fd, ", %s", dimlist_ptr->dim->name_in_file);
- else fortprintf(fd, ", %s", dimlist_ptr->dim->name_in_code);
+ if (dimlist_ptr->dim->namelist_defined) fortprintf(fd, "%s", dimlist_ptr->dim->name_in_file);
+ else fortprintf(fd, "%s", dimlist_ptr->dim->name_in_code);
dimlist_ptr = dimlist_ptr->next;
- }
- fortprintf(fd, "))</font>
<font color="red">");
- if (var_ptr->vtype == INTEGER)
- fortprintf(fd, " %s %% %s %% array = 0</font>
<font color="red">", group_ptr->name, var_ptr2->super_array ); /* initialize field to zero */
- else if (var_ptr->vtype == REAL)
- fortprintf(fd, " %s %% %s %% array = 0.0</font>
<font color="red">", group_ptr->name, var_ptr2->super_array ); /* initialize field to zero */
- else if (var_ptr->vtype == CHARACTER)
- fortprintf(fd, " %s %% %s %% array = \'\'</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array ); /* initialize field to zero */
+ while (dimlist_ptr) {
+ if (!strncmp(dimlist_ptr->dim->name_in_file, "nCells", 1024) ||
+ !strncmp(dimlist_ptr->dim->name_in_file, "nEdges", 1024) ||
+ !strncmp(dimlist_ptr->dim->name_in_file, "nVertices", 1024))
+ if (!dimlist_ptr->dim->namelist_defined) fortprintf(fd, ", %s + 1", dimlist_ptr->dim->name_in_code);
+ else fortprintf(fd, ", %s + 1", dimlist_ptr->dim->name_in_file);
+ else
+ if (dimlist_ptr->dim->namelist_defined) fortprintf(fd, ", %s", dimlist_ptr->dim->name_in_file);
+ else fortprintf(fd, ", %s", dimlist_ptr->dim->name_in_code);
+ dimlist_ptr = dimlist_ptr->next;
+ }
+ fortprintf(fd, "))</font>
<font color="blue">");
+ if (var_ptr->vtype == INTEGER)
+ fortprintf(fd, " %s %% %s %% array = 0</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array ); /* initialize field to zero */
+ else if (var_ptr->vtype == REAL)
+ fortprintf(fd, " %s %% %s %% array = 0.0</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array ); /* initialize field to zero */
+ else if (var_ptr->vtype == CHARACTER)
+ fortprintf(fd, " %s %% %s %% array = \'\'</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array ); /* initialize field to zero */
+                        }
fortprintf(fd, " %s %% %s %% dimSizes(1) = %i</font>
<font color="black">", group_ptr->name, var_ptr2->super_array, i);
fortprintf(fd, " %s %% %s %% dimNames(1) = \'num_%s\'</font>
<font color="gray">", group_ptr->name, var_ptr2->super_array, var_ptr2->super_array);
@@ -757,8 +759,14 @@
!strncmp(dimlist_ptr->dim->name_in_file, "nEdges", 1024) ||
!strncmp(dimlist_ptr->dim->name_in_file, "nVertices", 1024))
if (!dimlist_ptr->dim->namelist_defined) {
- fortprintf(fd, " %s %% %s %% dimSizes(%i) = %s</font>
<font color="red">", group_ptr->name, var_ptr2->super_array, i, dimlist_ptr->dim->name_in_code);
- fortprintf(fd, " %s %% %s %% dimNames(%i) = \'%s\'</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, i, dimlist_ptr->dim->name_in_file);
+                                         if (var_ptr2->persistence == PERSISTENT){
+ fortprintf(fd, " %s %% %s %% dimSizes(%i) = %s</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, i, dimlist_ptr->dim->name_in_code);
+ fortprintf(fd, " %s %% %s %% dimNames(%i) = \'%s\'</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, i, dimlist_ptr->dim->name_in_file);
+                                         }
+                                         else {
+ fortprintf(fd, " %s %% %s %% dimSizes(%i) = %s+1</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, i, dimlist_ptr->dim->name_in_code);
+ fortprintf(fd, " %s %% %s %% dimNames(%i) = \'%s\'</font>
<font color="blue">", group_ptr->name, var_ptr2->super_array, i, dimlist_ptr->dim->name_in_file);
+                                         }
}
else {
fortprintf(fd, " %s %% %s %% dimSizes(%i) = %s</font>
<font color="gray">", group_ptr->name, var_ptr2->super_array, i, dimlist_ptr->dim->name_in_file);
@@ -814,7 +822,6 @@
fortprintf(fd, " %s %% %s %% isSuperArray = .false.</font>
<font color="red">", group_ptr->name, var_ptr->name_in_code);
if (var_ptr->ndims > 0) {
                          if(var_ptr->persistence == SCRATCH){
-                                 fortprintf(fd, " ! SCRATCH VARIABLE</font>
<font color="black">");
                                 fortprintf(fd, " nullify(%s %% %s %% array)</font>
<font color="gray">", group_ptr->name, var_ptr->name_in_code);
                         } else if(var_ptr->persistence == PERSISTENT){
fortprintf(fd, " allocate(%s %% %s %% array(", group_ptr->name, var_ptr->name_in_code);
@@ -855,8 +862,14 @@
!strncmp(dimlist_ptr->dim->name_in_file, "nEdges", 1024) ||
!strncmp(dimlist_ptr->dim->name_in_file, "nVertices", 1024))
if (!dimlist_ptr->dim->namelist_defined) {
- fortprintf(fd, " %s %% %s %% dimSizes(%i) = %s</font>
<font color="red">", group_ptr->name, var_ptr->name_in_code, i, dimlist_ptr->dim->name_in_code);
- fortprintf(fd, " %s %% %s %% dimNames(%i) = \'%s\'</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, i, dimlist_ptr->dim->name_in_file);
+                                                if(var_ptr->persistence == PERSISTENT){
+ fortprintf(fd, " %s %% %s %% dimSizes(%i) = %s</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, i, dimlist_ptr->dim->name_in_code);
+ fortprintf(fd, " %s %% %s %% dimNames(%i) = \'%s\'</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, i, dimlist_ptr->dim->name_in_file);
+                                                }
+                                                else {
+ fortprintf(fd, " %s %% %s %% dimSizes(%i) = %s+1</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, i, dimlist_ptr->dim->name_in_code);
+ fortprintf(fd, " %s %% %s %% dimNames(%i) = \'%s\'</font>
<font color="blue">", group_ptr->name, var_ptr->name_in_code, i, dimlist_ptr->dim->name_in_file);
+                                                }
}
else {
fortprintf(fd, " %s %% %s %% dimSizes(%i) = %s</font>
<font color="gray">", group_ptr->name, var_ptr->name_in_code, i, dimlist_ptr->dim->name_in_file);
@@ -999,10 +1012,12 @@
fortprintf(fd, " dest %% %s %% scalar = src %% %s %% scalar</font>
<font color="blue">", var_ptr2->super_array, var_ptr2->super_array);
}
else {
+                        if (var_ptr->persistence == PERSISTENT){
if (var_ptr->ndims > 0)
fortprintf(fd, " dest %% %s %% array = src %% %s %% array</font>
<font color="black">", var_ptr->name_in_code, var_ptr->name_in_code);
else
fortprintf(fd, " dest %% %s %% scalar = src %% %s %% scalar</font>
<font color="blue">", var_ptr->name_in_code, var_ptr->name_in_code);
+                        }
var_list_ptr = var_list_ptr->next;
}
}
</font>
</pre>