<p><b>dwj07@fsu.edu</b> 2012-04-27 15:56:20 -0600 (Fri, 27 Apr 2012)</p><p><br>
        -- BRANCH COMMIT --<br>
<br>
        Committing new routines for building the exchange lists.<br>
        Routines for linking exchange lists from one field to another.<br>
        Routines for all-to-all communication, including shared memory copies.<br>
<br>
        mpas_io_input.F only tests these routines right now.<br>
<br>
        ** NOTE **<br>
        The model is not expected to run, but so far it compiles and passes a basic test.<br>
        This is required groundwork prior to supporting multiple blocks.<br>
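<br>
        A rough sketch of the intended calling sequence follows (the variable names ownedIndexField, neededIndexField, ownedDataField, and neededDataField are illustrative only; mpas_io_input.F contains the actual test of these routines):<br>
</p><pre>
   ! Hypothetical usage sketch, not part of this commit.
   type (field1dInteger), pointer :: ownedIndexField, neededIndexField
   type (field1dInteger), pointer :: ownedDataField, neededDataField

   ! 1. Build sendList/recvList/copyList on the index fields.
   call mpas_get_exchange_lists(dminfo, ownedIndexField, .true., neededIndexField, .true.)

   ! 2. Link those exchange lists onto the data fields to be communicated.
   call mpas_link_exchange_list_field(ownedIndexField, neededIndexField, &
                                      ownedDataField, neededDataField)

   ! 3. All-to-all communication: MPI sends/receives plus shared memory copies.
   call mpas_dmpar_alltoall_field1d_integer(dminfo, ownedDataField, neededDataField)
</pre><p>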
</p><hr noshade><pre>Modified: branches/omp_blocks/multiple_blocks/src/framework/mpas_block_decomp.F
===================================================================
--- branches/omp_blocks/multiple_blocks/src/framework/mpas_block_decomp.F        2012-04-27 21:53:55 UTC (rev 1834)
+++ branches/omp_blocks/multiple_blocks/src/framework/mpas_block_decomp.F        2012-04-27 21:56:20 UTC (rev 1835)
@@ -6,6 +6,20 @@
use mpas_sort
use mpas_grid_types
+ interface mpas_link_exchange_list_field
+ module procedure mpas_link_exchange_list_field1d_integer
+ module procedure mpas_link_exchange_list_field2d_integer
+ module procedure mpas_link_exchange_list_field3d_integer
+ module procedure mpas_link_exchange_list_field2d_real
+ module procedure mpas_link_exchange_list_field3d_real
+ end interface
+
+ private :: mpas_link_exchange_list_field1d_integer
+ private :: mpas_link_exchange_list_field2d_integer
+ private :: mpas_link_exchange_list_field3d_integer
+ private :: mpas_link_exchange_list_field1d_real
+ private :: mpas_link_exchange_list_field2d_real
+
type graph
integer :: nVerticesTotal
integer :: nVertices, maxDegree
@@ -529,47 +543,63 @@
deallocate(block_local_id_list)
end subroutine mpas_finish_block_proc_list!}}}
- subroutine mpas_get_exchange_lists(dminfo, ownedListField, ownedBlockListField, ownedDecomposed, neededListField, neededBlockListField, neededDecomposed, sendList, recvList)!{{{
+ subroutine mpas_get_exchange_lists(dminfo, ownedListField, ownedDecomposed, neededListField, neededDecomposed)!{{{
type (dm_info), intent(in) :: dminfo !< Input: Domain information
- type (field1dInteger), pointer :: ownedListField !< Input: pointer to the field which contains owned elements for exchange list.
- type (field1dInteger), pointer :: ownedBlockListField !< Input: pointer to a field which contains block id's for the elements in ownedList.
+ type (field1dInteger), pointer :: ownedListField !< Input/Output: pointer to the field which contains owned elements for exchange list.
logical, intent(in) :: ownedDecomposed !< Input: logical flag determining if the ownedList is decomposed using block_decomp or not.
- type (field1dInteger), pointer :: neededListField !< Input: pointer to a field which contains needed elements for exchange list.
- type (field1dInteger), pointer :: neededBlockListField !< Input: pointer to a field which contains block id's for elements in neededList
+ type (field1dInteger), pointer :: neededListField !< Input/Output: pointer to a field which contains needed elements for exchange list.
logical, intent(in) :: neededDecomposed !< Input: logical flag determining if the neededList is decomposed using block_decomp or not.
- type (exchange_list), pointer :: sendList !< Output: exchange list containing the information to send from the owned elements.
- type (exchange_list), pointer :: recvList !< Output: exchange list containing the information to recieve for the needed elements.
- type (field1dInteger), pointer :: field_ptr, field_block_ptr
- type (exchange_list), pointer :: sendListPtr, recvListPtr
+ type (field2dInteger), pointer :: ownedListSorted, sorted_field_ptr
- integer :: i, j, k, n, iBlock
+ type (field1dInteger), pointer :: field_ptr
+ type (exchange_list), pointer :: exchListPtr, exchListPtr2
+
+ integer :: i, j, k, n, iBlock, iElement, elementShift
integer :: nBlocksNeeded, nBlocksOwned
integer :: nBlocksNeededMax, nBlocksOwnedMax
integer :: totalBlocksOwned, totalBlocksNeeded
- integer :: nOwnedElelemts, nNeededElements, nNeededElementsMax
+ integer :: nNeededElements, nNeededElementsMax
+ integer :: nOwnedElementsMax, nOwnedElements
integer :: recvNeighbor, sendNeighbor
integer :: current_proc, nMesgRecv, nMesgSend
integer :: localBlockID, globalBlockID, owningProc
- integer :: mpi_ierr, mpi_rreq1, mpi_rreq2, mpi_sreq1, mpi_sreq2
+ integer :: mpi_ierr, mpi_rreq, mpi_sreq
- integer, dimension(:), pointer :: ownedList, ownedBlockList
- integer, dimension(:), pointer :: neededList, neededBlockList
- integer, dimension(:), pointer :: numToSend, numToRecv
+ integer, dimension(:), pointer :: numToSend, numToRecv, numToCopy
integer, dimension(:), pointer :: ownerListIn, ownerBlockListIn, ownerListOut, ownerBlockListOut
+ integer, dimension(:), pointer :: elementRecipients
- integer, dimension(:,:), pointer :: ownedListSorted, elementRecipients
+ ! Setup exchange lists on ownedList to be built later.
+ ! Really only sendList will be setup
+ field_ptr => ownedListField
+ do while(associated(field_ptr))
+ allocate(field_ptr % sendList(1))
+ allocate(field_ptr % recvList(1))
+ allocate(field_ptr % copyList(1))
+ nullify(field_ptr % sendList(1) % next)
+ nullify(field_ptr % recvList(1) % next)
+ nullify(field_ptr % copyList(1) % next)
- allocate(sendList)
- sendListPtr => sendList
- nullify(sendListPtr % next)
+ field_ptr => field_ptr % next
+ end do
- allocate(recvList)
- recvListPtr => recvList
- nullify(recvListPtr % next)
+ ! Setup exchange lists on neededList to be built later.
+ ! Really only recvList and copyList will be setup
+ field_ptr => neededListField
+ do while(associated(field_ptr))
+ allocate(field_ptr % sendList(1))
+ allocate(field_ptr % recvList(1))
+ allocate(field_ptr % copyList(1))
+ nullify(field_ptr % sendList(1) % next)
+ nullify(field_ptr % recvList(1) % next)
+ nullify(field_ptr % copyList(1) % next)
+ field_ptr => field_ptr % next
+ end do
+
if(ownedDecomposed) then
call mpas_get_blocks_per_proc(dminfo, dminfo % my_proc_id, nBlocksOwned)
totalBlocksOwned = total_blocks
@@ -587,227 +617,465 @@
end if
! Determine number of blocks on current processor, and maximum number of blocks on any processor
-#ifdef _MPI
- call MPI_Allreduce(nBlocksNeeded, nBlocksNeededMax, 1, MPI_INTEGERKIND, MPI_MAX, dminfo % comm, mpi_ierr)
- call MPI_Allreduce(nBlocksOwned, nBlocksOwnedMax, 1, MPI_INTEGERKIND, MPI_MAX, dminfo % comm, mpi_ierr)
-#else
- nBlocksNeededMax = nBlocksNeeded
- nBlocksOwnedMax = nBlocksNeeded
-#endif
+ call mpas_dmpar_max_int(dminfo, nBlocksNeeded, nBlocksNeededMax)
+ call mpas_dmpar_max_int(dminfo, nBlocksOwned, nBlocksOwnedMax)
- ! Determine number of owned and needed elements by looping over linked list
+ allocate(numToSend(totalBlocksNeeded))
+ allocate(numToRecv(totalBlocksOwned))
+ allocate(numToCopy(totalBlocksOwned))
+
+ allocate(ownedListSorted)
field_ptr => ownedListField
- nOwnedElements = 0
- do while(associated(field_ptr))
- nOwnedElements = nOwnedElements + size(field_ptr % array)
- field_ptr => field_ptr % next
- end do
+ sorted_field_ptr => ownedListSorted
- field_ptr => neededListField
- nNeededElements = 0
- do while(associated(field_ptr))
- nNeededElements = nNeededElements + size(field_ptr % array)
- field_ptr => field_ptr % next
- end do
+ ! sort owned elements, and link to block structure.
+ nOwnedElementsMax = 0
+ do while (associated(field_ptr))
+ allocate(sorted_field_ptr % array(2, field_ptr % dimSizes(1)))
- ! Allocate ownedList, ownedBlockList, neededList, and neededBlockList to hold all elements.
- allocate(ownedList(nOwnedElements))
- allocate(ownedListSorted(2, nOwnedElements))
- allocate(ownedBlockList(nOwnedElements))
- allocate(elementRecipients(2, nOwnedElements))
- allocate(neededList(nNeededElements))
- allocate(neededBlockList(nNeededElements))
+ nOwnedElementsMax = max(nOwnedElementsMax, field_ptr % dimSizes(1))
- ! Transfer ownedList and neededList fields into 1D arrays
- field_ptr => ownedListField
- field_block_ptr => ownedBlockListField
- j = 0
- do while(associated(field_ptr))
- do i = 1, size(field_ptr % array)
- j = j + 1
- ownedList(j) = field_ptr % array(i)
- ownedBlockList(j) = field_block_ptr % array(i)
- ownedListSorted(1, j) = ownedList(j)
- ownedListSorted(2, j) = j
+ sorted_field_ptr % block => field_ptr % block
+ sorted_field_ptr % dimSizes(1) = 2
+ sorted_field_ptr % dimSizes(2) = field_ptr % dimSizes(1)
+ write(6,*) 'Setting sorted_field_ptr dimensions = ', 2, field_ptr % dimSizes(1)
+ write(6,*) 'Set sorted_field_ptr dimensions = ', sorted_field_ptr % dimSizes(1), sorted_field_ptr % dimSizes(2)
+ sorted_field_ptr % sendList => field_ptr % sendList
+ sorted_field_ptr % recvList => field_ptr % recvList
+ sorted_field_ptr % copyList => field_ptr % copyList
+
+ do i = 1, field_ptr % dimSizes(1)
+ sorted_field_ptr % array(1, i) = field_ptr % array(i)
+ sorted_field_ptr % array(2, i) = i
end do
+ call quicksort(field_ptr % dimSizes(1), sorted_field_ptr % array)
+
field_ptr => field_ptr % next
- field_block_ptr => field_block_ptr % next
+ if(associated(field_ptr)) then
+ allocate(sorted_field_ptr % next)
+ sorted_field_ptr => sorted_field_ptr % next
+ else
+ nullify(sorted_field_ptr % next)
+ end if
end do
+
+ ! Determine number of local needed elements.
field_ptr => neededListField
- field_block_ptr => neededBlockListField
- j = 0
+ nNeededElements = 0
+
do while(associated(field_ptr))
- do i = 1, size(field_ptr % array)
- j = j + 1
- neededList(j) = field_ptr % array(i)
- neededBlockList(j) = field_block_ptr % array(i)
- end do
-
+ nNeededElements = nNeededElements + field_ptr % dimSizes(1)
field_ptr => field_ptr % next
- field_block_ptr => field_block_ptr % next
end do
- ! Sort ownedList to enable binary search
- call quicksort(nOwnedElements, ownedListSorted)
+ ! Determine maximum number of needed elements across all processors
+ call mpas_dmpar_max_int(dminfo, nNeededElements, nNeededElementsMax)
- ! Find maximum number of needed elements on any processor
-#ifdef _MPI
- call MPI_Allreduce(nNeededElements, nNeededElementsMax, 1, MPI_INTEGERKIND, MPI_MAX, dminfo % comm, mpi_ierr)
-#else
- nNeededElementsMax = nNeededElements
-#endif
-
- ! Allocate owner arrays
allocate(ownerListIn(nNeededElementsMax))
allocate(ownerBlockListIn(nNeededElementsMax))
allocate(ownerListOut(nNeededElementsMax))
allocate(ownerBlockListOut(nNeededElementsMax))
+ allocate(elementRecipients(nOwnedElementsMax))
- ownerListIn(1:nNeededElements) = neededList(1:nNeededElements)
- ownerBlockListIn(1:nNeededElements) = neededBlockList(1:nNeededElements)
- ownerBlockListOut(1:nNeededElements) = neededBlockList(1:nNeededElements)
- nMesgRecv = nNeededElements
+ field_ptr => neededListField
+ iElement = 1
+ do while(associated(field_ptr))
+ do i = 1, field_ptr % dimSizes(1)
+ ownerListIn(iElement) = field_ptr % array(i)
+ ownerBlockListIn(iElement) = field_ptr % block % blockID
+ iElement = iElement + 1
+ end do
+ field_ptr => field_ptr % next
+ end do
- ! recieve from the left, send to the right. Assume circularly connected
+ write(6,*) 'iElement = ', iElement
+
recvNeighbor = mod(dminfo % my_proc_id + dminfo % nprocs - 1, dminfo % nprocs)
sendNeighbor = mod(dminfo % my_proc_id + 1, dminfo % nprocs)
+ nMesgRecv = nNeededElements
- ! Allocate numToSend array. Representing the number of elements to send from owned blocks.
- allocate(numToSend(totalBlocksOwned))
+ write(6,*) 'cp 2'
+ ! Build send lists, and determine owning block id's for needed elements
do i = 1, dminfo % nProcs
- ! current_proc is the index to the processor that is currently being worked on by this MPI task
- current_proc = mod(dminfo % my_proc_id + dminfo % nprocs - i + 1, dminfo % nprocs)
- numToSend(:) = 0
- elementRecipients(:,:) = -1
+ ownerBlockListOut = ownerBlockListIn
+ currentProc = mod(dminfo % my_proc_id + dminfo % nprocs - i + 1, dminfo % nprocs)
- ! Check to see if this MPI task owns any of the elements in the current ownerListIn.
- do j = 1, nMesgRecv
+ field_ptr => ownedListField
+ sorted_field_ptr => ownedListSorted
+ do while (associated(field_ptr))
+ elementRecipients = -1
+ numToSend = 0
+ nOwnedElements = field_ptr % dimSizes(1)
- if(ownerListIn(j) > 0) then
- k = mpas_binary_search(ownedListSorted, 2, 1, nOwnedElements, ownerListIn(j))
- if(k <= nOwnedElements) then
- globalBlockID = ownedBlockList(ownedListSorted(2,k))
-
-
- ownerListOut(j) = -1 * globalBlockID
- numToSend(globalBlockID + 1) = numToSend(globalBlockID + 1) + 1
- elementRecipients(1, ownedListSorted(2,k)) = globalBlockID
- elementRecipients(2, ownedListSorted(2,k)) = numToSend(globalBlockID+1)
+ do j = 1, nMesgRecv
+ if(ownerListIn(j) > 0) then
+ k = mpas_binary_search(sorted_field_ptr % array, 2, 1, nOwnedElements, ownerListIn(j))
+ if(k <= nOwnedElements) then
+ ownerListOut(j) = -1 * sorted_field_ptr % block % blockID
+ ownerListIn(j) = -1 * sorted_field_ptr % block % blockID
+ numToSend(ownerBlockListIn(j) + 1) = numToSend(ownerBlockListIn(j) + 1) + 1
+ elementRecipients(sorted_field_ptr % array(2,k)) = numToSend(ownerblockListIn(j)+1)
+ else
+ ownerListOut(j) = ownerListIn(j)
+ end if
else
ownerListOut(j) = ownerListIn(j)
end if
- else
- ownerListOut(j) = ownerListIn(j)
- end if
- end do ! j loop over nMsgRecv
+ end do ! j loop over nMesgRecv
- ! Check to see if this MPI task has to send any messages to current_proc
- ! If it does, build a new sendList pointer for the messages.
- do j = 0, totalBlocksOwned-1
- if(numToSend(j+1) > 0) then
- allocate(sendListPtr % next)
- sendListPtr => sendListPtr % next
- nullify(sendListPtr % next)
+ ! Find end of send list
+ exchListPtr => field_ptr % sendList(1)
+ exchListPtr2 => field_ptr % sendList(1) % next
+ do while(associated(exchListPtr2))
+ exchListPtr => exchListPtr2
+ exchListPtr2 => exchListPtr2 % next
+ end do
- sendListPtr % procID = current_proc
- sendListPtr % blockID = j
- sendListPtr % nList = numToSend(j+1)
- allocate(sendListPtr % list(numToSend(j+1)))
+ do j = 1, totalBlocksNeeded
+ if(numToSend(j) > 0) then
+ allocate(exchListPtr % next)
+ exchListPtr => exchListPtr % next
+ nullify(exchListPtr % next)
- n = 0
- do k = 1, nOwnedElements
- if(j == elementRecipients(1, k)) then
- n = n + 1
- sendListPtr % list(elementRecipients(2,k)) = k
- end if
- end do ! k loop over nOwnedElements
- end if
- end do ! j loop over nBlocksMax
+ exchListPtr % procID = currentProc
+ exchListPtr % blockID = j - 1
+ exchListPtr % nlist = numToSend(j)
+ write(6,*) 'New send list with proc and block = ', currentProc, j-1, numToSend(j)
+ allocate(exchListPtr % list(numToSend(j)))
+
+ do iElement = 1, nOwnedElements
+ exchListPtr % list(elementRecipients(iElement)) = iElement
+ end do
+ end if
+ end do ! j loop over totalBlocksNeeded
+
+ field_ptr => field_ptr % next
+ sorted_field_ptr => sorted_field_ptr % next
+ end do ! associated loop over sorted_field_ptr
+
+ ! Send messages to next processor, and receive next batch of elements to process
nMesgSend = nMesgRecv
-#ifdef _MPI
- call MPI_Irecv(nMesgRecv, 1, MPI_INTEGERKIND, recvNeighbor, i, dminfo % comm, mpi_rreq1, mpi_ierr)
- call MPI_Isend(nMesgSend, 1, MPI_INTEGERKIND, sendNeighbor, i, dminfo % comm, mpi_sreq1, mpi_ierr)
- call MPI_Wait(mpi_rreq1, MPI_STATUS_IGNORE, mpi_ierr)
- call MPI_Wait(mpi_sreq1, MPI_STATUS_IGNORE, mpi_ierr)
- call MPI_Irecv(ownerListIn, nMesgRecv, MPI_INTEGERKIND, recvNeighbor, i, dminfo % comm, mpi_rreq1, mpi_ierr)
- call MPI_Isend(ownerListOut, nMesgSend, MPI_INTEGERKIND, sendNeighbor, i, dminfo % comm, mpi_sreq1, mpi_ierr)
- call MPI_Wait(mpi_rreq1, MPI_STATUS_IGNORE, mpi_ierr)
- call MPI_Wait(mpi_sreq1, MPI_STATUS_IGNORE, mpi_ierr)
- call MPI_Irecv(ownerBlockListIn, nMesgRecv, MPI_INTEGERKIND, recvNeighbor, i, dminfo % comm, mpi_rreq1, mpi_ierr)
- call MPI_Isend(ownerBlockListOut, nMesgSend, MPI_INTEGERKIND, sendNeighbor, i, dminfo % comm, mpi_sreq1, mpi_ierr)
- call MPI_Wait(mpi_rreq1, MPI_STATUS_IGNORE, mpi_ierr)
- call MPI_Wait(mpi_sreq1, MPI_STATUS_IGNORE, mpi_ierr)
-#else
- ownerListIn = ownerListOut
-#endif
- ownerBlockListOut(1:nMesgRecv) = ownerBlockListIn(1:nMesgRecv)
+ call MPI_Irecv(nMesgRecv, 1, MPI_INTEGERKIND, recvNeighbor, i, dminfo % comm, mpi_rreq, mpi_ierr)
+ call MPI_Isend(nMesgSend, 1, MPI_INTEGERKIND, sendNeighbor, i, dminfo % comm, mpi_sreq, mpi_ierr)
+ call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr)
+ call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr)
+ call MPI_Irecv(ownerListIn, nMesgRecv, MPI_INTEGERKIND, recvNeighbor, i, dminfo % comm, mpi_rreq, mpi_ierr)
+ call MPI_Isend(ownerListOut, nMesgSend, MPI_INTEGERKIND, sendNeighbor, i, dminfo % comm, mpi_sreq, mpi_ierr)
+ call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr)
+ call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr)
+ call MPI_Irecv(ownerBlockListIn, nMesgRecv, MPI_INTEGERKIND, recvNeighbor, i, dminfo % comm, mpi_rreq, mpi_ierr)
+ call MPI_Isend(ownerBlockListOut, nMesgSend, MPI_INTEGERKIND, sendNeighbor, i, dminfo % comm, mpi_sreq, mpi_ierr)
+ call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr)
+ call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr)
end do ! i loop over nProcs
- ! sendLists are build. ownerListIn should be the same as the original ownerListIn
- ! time to build recvLists
+ write(6,*) 'cp 3'
- ! Allocate numToRecv. Representing the number of elements to recieve from owned blocks.
- allocate(numToRecv(totalBlocksOwned))
- do iBlock = 0, nBlocksNeeded-1
- numToRecv(:) = 0
+ field_ptr => neededListField
+ elementShift = 0
+ do while(associated(field_ptr))
+ numToRecv = 0
+ do i = 1, field_ptr % dimSizes(1)
+ if(ownerBlockListIn(elementShift + i) == field_ptr % block % blockID) then
+ numToRecv(abs(ownerListIn(elementShift+i))+1) = numToRecv(abs(ownerListIn(elementShift+i))+1) + 1
+ end if
+ end do
- do i = 0, totalBlocksOwned-1
- do j = 1, nNeededElements
- if(ownerListIn(j) == -i) then
- numToRecv(i+1) = numToRecv(i+1) + 1
+ write(6,*) 'numToRecv'
+ write(6,*) numToRecv
+
+ do iBlock = 1, totalBlocksNeeded
+ if(numToRecv(iBlock) > 0) then
+ if(ownedDecomposed) then
+ owningProc = iBlock - 1
+ else
+ call mpas_get_owning_proc(dminfo, iBlock -1, owningProc)
end if
- end do ! j loop over nNeededElements
- end do ! i loop over totalBlocksOwned
- do i = 0, totalBlocksOwned-1
- if(numToRecv(i+1) > 0) then
- call mpas_get_owning_proc(dminfo, i, owningProc)
+ ! Determine if copyList or recvList should be used
+ if(owningProc == dminfo % my_proc_id) then
+ exchListPtr => field_ptr % copyList(1)
+ else
+ exchListPtr => field_ptr % recvList(1)
+ end if
- allocate(recvListPtr % next)
- recvListPtr => recvListPtr % next
- nullify(recvListPtr % next)
+ ! Find end of exchange list
+ exchListPtr2 => exchListPtr % next
+ do while(associated(exchListPtr2))
+ exchListPtr => exchListPtr2
+ exchListPtr2 => exchListPtr2 % next
+ end do
- recvListPtr % procID = owningProc
- recvListPtr % blockID = i
- recvListPtr % nList = numToRecv(i+1)
+ allocate(exchListPtr % next)
+ exchListPtr => exchListPtr % next
+ nullify(exchListPtr % next)
- allocate(recvListPtr % list(numToRecv(i+1)))
- n = 0
- do j = 1, nNeededElements
- if(ownerListIn(j) == -i) then
- n = n + 1
- recvListPtr % list(n) = j
+ exchListPtr % procID = owningProc
+ exchListPtr % blockID = iBlock - 1
+ exchListPtr % nList = numToRecv(iBlock)
+ allocate(exchListPtr % list(numToRecv(iBlock)))
+
+ iElement = 0
+ do i = 1, field_ptr % dimSizes(1)
+ if(ownerBlockListIn(elementShift + i) == field_ptr % block % blockID) then
+ iElement = iElement + 1
+ exchListPtr % list(iElement) = i
end if
end do
end if
- end do ! i loop over totalBlocksOwned
- end do ! iBlock loop over nBlocksNeeded (local)
+ end do
+
+ elementShift = elementShift + field_ptr % dimSizes(1)
+ field_ptr => field_ptr % next
+ end do
- sendListPtr => sendList
- sendList => sendList % next
- deallocate(sendListPtr)
+ write(6,*) 'cp 4'
- recvListPtr => recvList
- recvList => recvList % next
- deallocate(recvListPtr)
-
- ! Deallocate all allocated memory
- deallocate(numToSend)
- deallocate(ownedList)
- deallocate(ownedBlockList)
- deallocate(elementRecipients)
- deallocate(neededList)
- deallocate(neededBlockList)
deallocate(ownerListIn)
deallocate(ownerBlockListIn)
deallocate(ownerListOut)
deallocate(ownerBlockListOut)
-
+ deallocate(elementRecipients)
+
end subroutine mpas_get_exchange_lists!}}}
+ subroutine mpas_link_exchange_list_field1d_integer(sendListField, recvListField, ownedListField, neededListField)!{{{
+ type (field1dInteger), pointer :: sendListField
+ type (field1dInteger), pointer :: recvListField
+ type (field1dInteger), pointer :: ownedListField
+ type (field1dInteger), pointer :: neededListField
+
+ type (field1dInteger), pointer :: exchFieldPtr
+ type (field1dInteger), pointer :: dataFieldPtr
+
+ exchFieldPtr => sendListField
+ dataFieldPtr => ownedListField
+
+ do while(associated(exchFieldPtr))
+ allocate(dataFieldPtr % sendList(1))
+ nullify(dataFieldPtr % sendList(1) % next)
+ dataFieldPtr % sendList(1) % next => exchFieldPtr % sendList(1) % next
+
+ exchFieldPtr => exchFieldPtr % next
+ dataFieldPtr => dataFieldPtr % next
+ end do
+
+ exchFieldPtr => recvListField
+ dataFieldPtr => neededListField
+
+ do while(associated(exchFieldPtr))
+ allocate(dataFieldPtr % recvList(1))
+ allocate(dataFieldPtr % copyList(1))
+ nullify(dataFieldPtr % recvList(1) % next)
+ nullify(dataFieldPtr % copyList(1) % next)
+ dataFieldPtr % recvList(1) % next => exchFieldPtr % recvList(1) % next
+ dataFieldPtr % copyList(1) % next => exchFieldPtr % copyList(1) % next
+
+ exchFieldPtr => exchFieldPtr % next
+ dataFieldPtr => dataFieldPtr % next
+ end do
+
+
+ end subroutine mpas_link_exchange_list_field1d_integer!}}}
+
+ subroutine mpas_link_exchange_list_field2d_integer(sendListField, recvListField, ownedListField, neededListField)!{{{
+ type (field1dInteger), pointer :: sendListField
+ type (field1dInteger), pointer :: recvListField
+ type (field2dInteger), pointer :: ownedListField
+ type (field2dInteger), pointer :: neededListField
+
+ type (field1dInteger), pointer :: exchFieldPtr
+ type (field2dInteger), pointer :: dataFieldPtr
+
+ exchFieldPtr => sendListField
+ dataFieldPtr => ownedListField
+
+ do while(associated(exchFieldPtr))
+ allocate(dataFieldPtr % sendList(1))
+ nullify(dataFieldPtr % sendList(1) % next)
+ dataFieldPtr % sendList(1) % next => exchFieldPtr % sendList(1) % next
+
+ exchFieldPtr => exchFieldPtr % next
+ dataFieldPtr => dataFieldPtr % next
+ end do
+
+ exchFieldPtr => recvListField
+ dataFieldPtr => neededListField
+
+ do while(associated(exchFieldPtr))
+ allocate(dataFieldPtr % recvList(1))
+ allocate(dataFieldPtr % copyList(1))
+ nullify(dataFieldPtr % recvList(1) % next)
+ nullify(dataFieldPtr % copyList(1) % next)
+ dataFieldPtr % recvList(1) % next => exchFieldPtr % recvList(1) % next
+ dataFieldPtr % copyList(1) % next => exchFieldPtr % copyList(1) % next
+
+ exchFieldPtr => exchFieldPtr % next
+ dataFieldPtr => dataFieldPtr % next
+ end do
+
+
+ end subroutine mpas_link_exchange_list_field2d_integer!}}}
+
+ subroutine mpas_link_exchange_list_field3d_integer(sendListField, recvListField, ownedListField, neededListField)!{{{
+ type (field1dInteger), pointer :: sendListField
+ type (field1dInteger), pointer :: recvListField
+ type (field3dInteger), pointer :: ownedListField
+ type (field3dInteger), pointer :: neededListField
+
+ type (field1dInteger), pointer :: exchFieldPtr
+ type (field3dInteger), pointer :: dataFieldPtr
+
+ exchFieldPtr => sendListField
+ dataFieldPtr => ownedListField
+
+ do while(associated(exchFieldPtr))
+ allocate(dataFieldPtr % sendList(1))
+ nullify(dataFieldPtr % sendList(1) % next)
+ dataFieldPtr % sendList(1) % next => exchFieldPtr % sendList(1) % next
+
+ exchFieldPtr => exchFieldPtr % next
+ dataFieldPtr => dataFieldPtr % next
+ end do
+
+ exchFieldPtr => recvListField
+ dataFieldPtr => neededListField
+
+ do while(associated(exchFieldPtr))
+ allocate(dataFieldPtr % recvList(1))
+ allocate(dataFieldPtr % copyList(1))
+ nullify(dataFieldPtr % recvList(1) % next)
+ nullify(dataFieldPtr % copyList(1) % next)
+ dataFieldPtr % recvList(1) % next => exchFieldPtr % recvList(1) % next
+ dataFieldPtr % copyList(1) % next => exchFieldPtr % copyList(1) % next
+
+ exchFieldPtr => exchFieldPtr % next
+ dataFieldPtr => dataFieldPtr % next
+ end do
+
+
+ end subroutine mpas_link_exchange_list_field3d_integer!}}}
+
+ subroutine mpas_link_exchange_list_field1d_real(sendListField, recvListField, ownedListField, neededListField)!{{{
+ type (field1dInteger), pointer :: sendListField
+ type (field1dInteger), pointer :: recvListField
+ type (field1dReal), pointer :: ownedListField
+ type (field1dReal), pointer :: neededListField
+
+ type (field1dInteger), pointer :: exchFieldPtr
+ type (field1dReal), pointer :: dataFieldPtr
+
+ exchFieldPtr => sendListField
+ dataFieldPtr => ownedListField
+
+ do while(associated(exchFieldPtr))
+ allocate(dataFieldPtr % sendList(1))
+ nullify(dataFieldPtr % sendList(1) % next)
+ dataFieldPtr % sendList(1) % next => exchFieldPtr % sendList(1) % next
+
+ exchFieldPtr => exchFieldPtr % next
+ dataFieldPtr => dataFieldPtr % next
+ end do
+
+ exchFieldPtr => recvListField
+ dataFieldPtr => neededListField
+
+ do while(associated(exchFieldPtr))
+ allocate(dataFieldPtr % recvList(1))
+ allocate(dataFieldPtr % copyList(1))
+ nullify(dataFieldPtr % recvList(1) % next)
+ nullify(dataFieldPtr % copyList(1) % next)
+ dataFieldPtr % recvList(1) % next => exchFieldPtr % recvList(1) % next
+ dataFieldPtr % copyList(1) % next => exchFieldPtr % copyList(1) % next
+
+ exchFieldPtr => exchFieldPtr % next
+ dataFieldPtr => dataFieldPtr % next
+ end do
+
+
+ end subroutine mpas_link_exchange_list_field1d_real!}}}
+
+ subroutine mpas_link_exchange_list_field2d_real(sendListField, recvListField, ownedListField, neededListField)!{{{
+ type (field1dInteger), pointer :: sendListField
+ type (field1dInteger), pointer :: recvListField
+ type (field2dReal), pointer :: ownedListField
+ type (field2dReal), pointer :: neededListField
+
+ type (field1dInteger), pointer :: exchFieldPtr
+ type (field2dReal), pointer :: dataFieldPtr
+
+ exchFieldPtr => sendListField
+ dataFieldPtr => ownedListField
+
+ do while(associated(exchFieldPtr))
+ allocate(dataFieldPtr % sendList(1))
+ nullify(dataFieldPtr % sendList(1) % next)
+ dataFieldPtr % sendList(1) % next => exchFieldPtr % sendList(1) % next
+
+ exchFieldPtr => exchFieldPtr % next
+ dataFieldPtr => dataFieldPtr % next
+ end do
+
+ exchFieldPtr => recvListField
+ dataFieldPtr => neededListField
+
+ do while(associated(exchFieldPtr))
+ allocate(dataFieldPtr % recvList(1))
+ allocate(dataFieldPtr % copyList(1))
+ nullify(dataFieldPtr % recvList(1) % next)
+ nullify(dataFieldPtr % copyList(1) % next)
+ dataFieldPtr % recvList(1) % next => exchFieldPtr % recvList(1) % next
+ dataFieldPtr % copyList(1) % next => exchFieldPtr % copyList(1) % next
+
+ exchFieldPtr => exchFieldPtr % next
+ dataFieldPtr => dataFieldPtr % next
+ end do
+
+
+ end subroutine mpas_link_exchange_list_field2d_real!}}}
+
+ subroutine mpas_link_exchange_list_field3d_real(sendListField, recvListField, ownedListField, neededListField)!{{{
+ type (field1dInteger), pointer :: sendListField
+ type (field1dInteger), pointer :: recvListField
+ type (field3dReal), pointer :: ownedListField
+ type (field3dReal), pointer :: neededListField
+
+ type (field1dInteger), pointer :: exchFieldPtr
+ type (field3dReal), pointer :: dataFieldPtr
+
+ exchFieldPtr => sendListField
+ dataFieldPtr => ownedListField
+
+ do while(associated(exchFieldPtr))
+ allocate(dataFieldPtr % sendList(1))
+ nullify(dataFieldPtr % sendList(1) % next)
+ dataFieldPtr % sendList(1) % next => exchFieldPtr % sendList(1) % next
+
+ exchFieldPtr => exchFieldPtr % next
+ dataFieldPtr => dataFieldPtr % next
+ end do
+
+ exchFieldPtr => recvListField
+ dataFieldPtr => neededListField
+
+ do while(associated(exchFieldPtr))
+ allocate(dataFieldPtr % recvList(1))
+ allocate(dataFieldPtr % copyList(1))
+ nullify(dataFieldPtr % recvList(1) % next)
+ nullify(dataFieldPtr % copyList(1) % next)
+ dataFieldPtr % recvList(1) % next => exchFieldPtr % recvList(1) % next
+ dataFieldPtr % copyList(1) % next => exchFieldPtr % copyList(1) % next
+
+ exchFieldPtr => exchFieldPtr % next
+ dataFieldPtr => dataFieldPtr % next
+ end do
+
+
+ end subroutine mpas_link_exchange_list_field3d_real!}}}
+
end module mpas_block_decomp
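! ----------------------------------------------------------------------------
! Sketch (not part of this commit) of what each mpas_link_exchange_list_field*
! variant above does: for every block in the linked list of fields, the data
! field gets freshly allocated head nodes whose % next pointers alias the
! exchange lists already built on the index fields, e.g.
!
!     allocate(dataFieldPtr % sendList(1))
!     dataFieldPtr % sendList(1) % next => exchFieldPtr % sendList(1) % next
!
! No list entries are copied; the two field lists are assumed to hold the
! same blocks in the same order.
! ----------------------------------------------------------------------------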
Modified: branches/omp_blocks/multiple_blocks/src/framework/mpas_dmpar.F
===================================================================
--- branches/omp_blocks/multiple_blocks/src/framework/mpas_dmpar.F        2012-04-27 21:53:55 UTC (rev 1834)
+++ branches/omp_blocks/multiple_blocks/src/framework/mpas_dmpar.F        2012-04-27 21:56:20 UTC (rev 1835)
@@ -50,12 +50,10 @@
private :: mpas_dmpar_exch_halo_field2d_real
private :: mpas_dmpar_exch_halo_field3d_real
-
contains
+ subroutine mpas_dmpar_init(dminfo, mpi_comm)!{{{
- subroutine mpas_dmpar_init(dminfo, mpi_comm)
-
implicit none
type (dm_info), intent(inout) :: dminfo
@@ -94,11 +92,10 @@
dminfo % using_external_comm = .false.
#endif
- end subroutine mpas_dmpar_init
+ end subroutine mpas_dmpar_init!}}}
+ subroutine mpas_dmpar_finalize(dminfo)!{{{
- subroutine mpas_dmpar_finalize(dminfo)
-
implicit none
type (dm_info), intent(inout) :: dminfo
@@ -111,11 +108,10 @@
end if
#endif
- end subroutine mpas_dmpar_finalize
+ end subroutine mpas_dmpar_finalize!}}}
+ subroutine mpas_dmpar_abort(dminfo)!{{{
- subroutine mpas_dmpar_abort(dminfo)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -128,11 +124,10 @@
stop
- end subroutine mpas_dmpar_abort
+ end subroutine mpas_dmpar_abort!}}}
+ subroutine mpas_dmpar_global_abort(mesg)!{{{
- subroutine mpas_dmpar_global_abort(mesg)
-
implicit none
character (len=*), intent(in) :: mesg
@@ -147,11 +142,10 @@
write(0,*) trim(mesg)
stop
- end subroutine mpas_dmpar_global_abort
+ end subroutine mpas_dmpar_global_abort!}}}
+ subroutine mpas_dmpar_bcast_int(dminfo, i)!{{{
- subroutine mpas_dmpar_bcast_int(dminfo, i)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -163,11 +157,10 @@
call MPI_Bcast(i, 1, MPI_INTEGERKIND, IO_NODE, dminfo % comm, mpi_ierr)
#endif
- end subroutine mpas_dmpar_bcast_int
+ end subroutine mpas_dmpar_bcast_int!}}}
+ subroutine mpas_dmpar_bcast_ints(dminfo, n, iarray)!{{{
- subroutine mpas_dmpar_bcast_ints(dminfo, n, iarray)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -180,11 +173,10 @@
call MPI_Bcast(iarray, n, MPI_INTEGERKIND, IO_NODE, dminfo % comm, mpi_ierr)
#endif
- end subroutine mpas_dmpar_bcast_ints
+ end subroutine mpas_dmpar_bcast_ints!}}}
+ subroutine mpas_dmpar_bcast_real(dminfo, r)!{{{
- subroutine mpas_dmpar_bcast_real(dminfo, r)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -196,11 +188,10 @@
call MPI_Bcast(r, 1, MPI_REALKIND, IO_NODE, dminfo % comm, mpi_ierr)
#endif
- end subroutine mpas_dmpar_bcast_real
+ end subroutine mpas_dmpar_bcast_real!}}}
+ subroutine mpas_dmpar_bcast_reals(dminfo, n, rarray)!{{{
- subroutine mpas_dmpar_bcast_reals(dminfo, n, rarray)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -213,11 +204,10 @@
call MPI_Bcast(rarray, n, MPI_REALKIND, IO_NODE, dminfo % comm, mpi_ierr)
#endif
- end subroutine mpas_dmpar_bcast_reals
+ end subroutine mpas_dmpar_bcast_reals!}}}
+ subroutine mpas_dmpar_bcast_logical(dminfo, l)!{{{
- subroutine mpas_dmpar_bcast_logical(dminfo, l)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -244,11 +234,10 @@
end if
#endif
- end subroutine mpas_dmpar_bcast_logical
+ end subroutine mpas_dmpar_bcast_logical!}}}
+ subroutine mpas_dmpar_bcast_char(dminfo, c)!{{{
- subroutine mpas_dmpar_bcast_char(dminfo, c)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -260,11 +249,10 @@
call MPI_Bcast(c, len(c), MPI_CHARACTER, IO_NODE, dminfo % comm, mpi_ierr)
#endif
- end subroutine mpas_dmpar_bcast_char
+ end subroutine mpas_dmpar_bcast_char!}}}
+ subroutine mpas_dmpar_sum_int(dminfo, i, isum)!{{{
- subroutine mpas_dmpar_sum_int(dminfo, i, isum)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -279,11 +267,10 @@
isum = i
#endif
- end subroutine mpas_dmpar_sum_int
+ end subroutine mpas_dmpar_sum_int!}}}
+ subroutine mpas_dmpar_sum_real(dminfo, r, rsum)!{{{
- subroutine mpas_dmpar_sum_real(dminfo, r, rsum)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -298,11 +285,10 @@
rsum = r
#endif
- end subroutine mpas_dmpar_sum_real
+ end subroutine mpas_dmpar_sum_real!}}}
+ subroutine mpas_dmpar_min_int(dminfo, i, imin)!{{{
- subroutine mpas_dmpar_min_int(dminfo, i, imin)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -317,11 +303,10 @@
imin = i
#endif
- end subroutine mpas_dmpar_min_int
+ end subroutine mpas_dmpar_min_int!}}}
+ subroutine mpas_dmpar_min_real(dminfo, r, rmin)!{{{
- subroutine mpas_dmpar_min_real(dminfo, r, rmin)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -336,11 +321,10 @@
rmin = r
#endif
- end subroutine mpas_dmpar_min_real
+ end subroutine mpas_dmpar_min_real!}}}
+ subroutine mpas_dmpar_max_int(dminfo, i, imax)!{{{
- subroutine mpas_dmpar_max_int(dminfo, i, imax)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -355,11 +339,10 @@
imax = i
#endif
- end subroutine mpas_dmpar_max_int
+ end subroutine mpas_dmpar_max_int!}}}
+ subroutine mpas_dmpar_max_real(dminfo, r, rmax)!{{{
- subroutine mpas_dmpar_max_real(dminfo, r, rmax)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -374,11 +357,10 @@
rmax = r
#endif
- end subroutine mpas_dmpar_max_real
+ end subroutine mpas_dmpar_max_real!}}}
+ subroutine mpas_dmpar_sum_int_array(dminfo, nElements, inArray, outArray)!{{{
- subroutine mpas_dmpar_sum_int_array(dminfo, nElements, inArray, outArray)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -394,10 +376,9 @@
outArray = inArray
#endif
- end subroutine mpas_dmpar_sum_int_array
+ end subroutine mpas_dmpar_sum_int_array!}}}
-
- subroutine mpas_dmpar_min_int_array(dminfo, nElements, inArray, outArray)
+ subroutine mpas_dmpar_min_int_array(dminfo, nElements, inArray, outArray)!{{{
implicit none
@@ -414,11 +395,10 @@
outArray = inArray
#endif
- end subroutine mpas_dmpar_min_int_array
+ end subroutine mpas_dmpar_min_int_array!}}}
+ subroutine mpas_dmpar_max_int_array(dminfo, nElements, inArray, outArray)!{{{
- subroutine mpas_dmpar_max_int_array(dminfo, nElements, inArray, outArray)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -434,11 +414,10 @@
outArray = inArray
#endif
- end subroutine mpas_dmpar_max_int_array
+ end subroutine mpas_dmpar_max_int_array!}}}
+ subroutine mpas_dmpar_sum_real_array(dminfo, nElements, inArray, outArray)!{{{
- subroutine mpas_dmpar_sum_real_array(dminfo, nElements, inArray, outArray)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -454,11 +433,10 @@
outArray = inArray
#endif
- end subroutine mpas_dmpar_sum_real_array
+ end subroutine mpas_dmpar_sum_real_array!}}}
+ subroutine mpas_dmpar_min_real_array(dminfo, nElements, inArray, outArray)!{{{
- subroutine mpas_dmpar_min_real_array(dminfo, nElements, inArray, outArray)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -474,11 +452,10 @@
outArray = inArray
#endif
- end subroutine mpas_dmpar_min_real_array
+ end subroutine mpas_dmpar_min_real_array!}}}
+ subroutine mpas_dmpar_max_real_array(dminfo, nElements, inArray, outArray)!{{{
- subroutine mpas_dmpar_max_real_array(dminfo, nElements, inArray, outArray)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -494,11 +471,10 @@
outArray = inArray
#endif
- end subroutine mpas_dmpar_max_real_array
+ end subroutine mpas_dmpar_max_real_array!}}}
+ subroutine mpas_dmpar_scatter_ints(dminfo, nprocs, noutlist, displs, counts, inlist, outlist)!{{{
- subroutine mpas_dmpar_scatter_ints(dminfo, nprocs, noutlist, displs, counts, inlist, outlist)
-
implicit none
type (dm_info), intent(in) :: dminfo
@@ -513,10 +489,9 @@
call MPI_Scatterv(inlist, counts, displs, MPI_INTEGERKIND, outlist, noutlist, MPI_INTEGERKIND, IO_NODE, dminfo % comm, mpi_ierr)
#endif
- end subroutine mpas_dmpar_scatter_ints
+ end subroutine mpas_dmpar_scatter_ints!}}}
-
- subroutine mpas_dmpar_get_index_range(dminfo, &
+ subroutine mpas_dmpar_get_index_range(dminfo, &!{{{
global_start, global_end, &
local_start, local_end)
@@ -529,10 +504,9 @@
local_start = nint(real(dminfo % my_proc_id) * real(global_end - global_start + 1) / real(dminfo % nprocs)) + 1
local_end = nint(real(dminfo % my_proc_id + 1) * real(global_end - global_start + 1) / real(dminfo % nprocs))
- end subroutine mpas_dmpar_get_index_range
+ end subroutine mpas_dmpar_get_index_range!}}}
-
- subroutine mpas_dmpar_compute_index_range(dminfo, &
+ subroutine mpas_dmpar_compute_index_range(dminfo, &!{{{
local_start, local_end, &
global_start, global_end)
@@ -566,10 +540,9 @@
end if
- end subroutine mpas_dmpar_compute_index_range
+ end subroutine mpas_dmpar_compute_index_range!}}}
-
- subroutine mpas_dmpar_get_owner_list(dminfo, &
+ subroutine mpas_dmpar_get_owner_list(dminfo, &!{{{
nOwnedList, nNeededList, &
ownedList, neededList, &
sendList, recvList, inOffset)
@@ -731,469 +704,765 @@
end do
#endif
- end subroutine mpas_dmpar_get_owner_list
+ end subroutine mpas_dmpar_get_owner_list!}}}
+ subroutine mpas_dmpar_alltoall_field1d_integer(dminfo, fieldIn, fieldout)!{{{
- subroutine mpas_dmpar_alltoall_field1d_integer(dminfo, arrayIn, arrayOut, nOwnedList, nNeededList, sendList, recvList)
+ implicit none
- implicit none
+ type (dm_info), intent(in) :: dminfo
+ type (field1dInteger), pointer :: fieldIn
+ type (field1dInteger), pointer :: fieldOut
- type (dm_info), intent(in) :: dminfo
- integer, dimension(*), intent(in) :: arrayIn
- integer, dimension(*), intent(inout) :: arrayOut
- integer, intent(in) :: nOwnedList, nNeededList
- type (exchange_list), pointer :: sendList, recvList
+ type (field1dInteger), pointer :: fieldInPtr, fieldOutPtr
+ type (exchange_list), pointer :: sendListPtr, recvListPtr, copyListPtr
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer :: i
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: mpi_ierr
+ integer :: i
#ifdef _MPI
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID == dminfo % my_proc_id) exit
- sendListPtr => sendListPtr % next
- end do
+ ! Initiate mpi_irecv calls
+ fieldOutPtr => fieldOut
+ do while (associated(fieldOutPtr))
+ recvListPtr => fieldOutPtr % recvList(1) % next
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID == dminfo % my_proc_id) exit
+ do while(associated(recvListPtr))
+ allocate(recvListPtr % ibuffer(recvListPtr % nlist))
+ call MPI_Irecv(recvListPtr % ibuffer, recvListPtr % nlist, MPI_INTEGERKIND, &
+ recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
+
recvListPtr => recvListPtr % next
- end do
+ end do
+ fieldOutPtr => fieldOutPtr % next
+ end do
- if (associated(recvListPtr) .and. associated(sendListPtr)) then
- do i=1,recvListPtr % nlist
- arrayOut(recvListPtr % list(i)) = arrayIn(sendListPtr % list(i))
- end do
- end if
+ ! Initiate mpi_isend calls, and handle local copies
+ fieldInPtr => fieldIn
+ do while (associated(fieldInPtr))
+ sendListPtr => fieldInPtr % sendList(1) % next
+ do while (associated(sendListPtr))
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- allocate(recvListPtr % ibuffer(recvListPtr % nlist))
- call MPI_Irecv(recvListPtr % ibuffer, recvListPtr % nlist, MPI_INTEGERKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
+ if (sendListPtr % procID == dminfo % my_proc_id) then ! Local Copy
+ fieldOutPtr => fieldOut
+ do while (associated(fieldOutPtr))
+ if(fieldOutPtr % block % blockID == sendListPtr % blockID) then
+ copyListPtr => fieldOutPtr % copyList(1) % next
+ do while(associated(copyListPtr))
+ if(copyListPtr % blockID == fieldInPtr % block % blockID) then
+ do i = 1, copyListPtr % nList
+ fieldOutPtr % array(copyListPtr % list(i)) = fieldInPtr % array(sendListPtr % list(i))
+ end do
+ end if
+ copyListPtr => copyListPtr % next
+ end do
+ end if
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ else ! mpi_isend
+ allocate(sendListPtr % ibuffer(sendListPtr % nlist))
+ call mpas_pack_send_buf1d_integer(fieldInPtr % dimSizes(1), fieldInPtr % array, sendListPtr, 1, sendListPtr % nlist, &
+ sendListPtr % ibuffer, nPacked, lastPackedIdx)
+ call MPI_Isend(sendListPtr % ibuffer, sendListPtr % nlist, MPI_INTEGERKIND, &
+ sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
end if
- recvListPtr => recvListPtr % next
- end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- allocate(sendListPtr % ibuffer(sendListPtr % nlist))
- call mpas_pack_send_buf1d_integer(nOwnedList, arrayIn, sendListPtr, 1, sendListPtr % nlist, &
- sendListPtr % ibuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % ibuffer, sendListPtr % nlist, MPI_INTEGERKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
sendListPtr => sendListPtr % next
- end do
+ end do
+ fieldInPtr => fieldInPtr % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- call mpas_unpack_recv_buf1d_integer(nNeededList, arrayOut, recvListPtr, 1, recvListPtr % nlist, &
- recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % ibuffer)
- end if
+ ! Complete all mpi_irecv requests, and unpack data into array
+ ! Deallocate receive buffers
+ fieldOutPtr => fieldOut
+ do while (associated(fieldOutPtr))
+ recvListPtr => fieldOutPtr % recvList(1) % next
+ do while (associated(recvListPtr))
+ call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ call mpas_unpack_recv_buf1d_integer(fieldOutPtr % dimSizes(1), fieldOutPtr % array, recvListPtr, 1, recvListPtr % nlist, &
+ recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
+ deallocate(recvListPtr % ibuffer)
recvListPtr => recvListPtr % next
- end do
+ end do
+ fieldOutPtr => fieldOutPtr % next
+ end do
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % ibuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
+ ! Deallocate send buffers
+ fieldInPtr => fieldIn
+ do while (associated(fieldInPtr))
+ sendListPtr => fieldInPtr % sendList(1) % next
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ deallocate(sendListPtr % ibuffer)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+ fieldInPtr => fieldInPtr % next
+ end do
#else
- if (nOwnedList /= nNeededList) then
- write(0,*) 'Error in dmpar_alltoall_field: For non-dmpar, ', &
- 'arrayIn and arrayOut dims must match.'
- call mpas_dmpar_abort(dminfo)
- else
- arrayOut(1:nNeededList) = arrayIn(1:nOwnedList)
- end if
+ ! Only local copies if no mpi
+ fieldOutPtr => fieldOut
+ do while (associated(fieldOutPtr))
+ copyListPtr => fieldOutPtr % copyList(1) % next
+ do while (associated(copyListPtr))
+ fieldInPtr => fieldIn
+ do while (associated(fieldInPtr))
+ if(copyListPtr % blockID == fieldInPtr % block % blockID) then
+ sendListPtr => fieldInPtr % sendList(1) % next
+ do while (associated(sendListPtr))
+ if(sendListPtr % blockID == fieldOutPtr % block % blockID) then
+ do i = 1, copyListPtr % nList
+ fieldOutPtr % array(copyListPtr % list(i)) = fieldInPtr % array(sendListPtr % list(i))
+ end do
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+ end if
+ fieldInPtr => fieldInPtr % next
+ end do
+ copyListPtr => copyListPtr % next
+ end do
+ fieldOutPtr => fieldOutPtr % next
+ end do
#endif
- end subroutine mpas_dmpar_alltoall_field1d_integer
+ end subroutine mpas_dmpar_alltoall_field1d_integer!}}}
+ subroutine mpas_dmpar_alltoall_field2d_integer(dminfo, fieldIn, fieldout)!{{{
- subroutine mpas_dmpar_alltoall_field2d_integer(dminfo, arrayIn, arrayOut, dim1, nOwnedList, nNeededList, sendList, recvList)
+ implicit none
- implicit none
+ type (dm_info), intent(in) :: dminfo
+ type (field2dInteger), pointer :: fieldIn
+ type (field2dInteger), pointer :: fieldOut
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1, nOwnedList, nNeededList
- integer, dimension(dim1,*), intent(in) :: arrayIn
- integer, dimension(dim1,*), intent(inout) :: arrayOut
- type (exchange_list), pointer :: sendList, recvList
+ type (field2dInteger), pointer :: fieldInPtr, fieldOutPtr
+ type (exchange_list), pointer :: sendListPtr, recvListPtr, copyListPtr
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer :: i, d2
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: mpi_ierr
+ integer :: i
#ifdef _MPI
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID == dminfo % my_proc_id) exit
- sendListPtr => sendListPtr % next
- end do
+ ! Initiate mpi_irecv calls
+ fieldOutPtr => fieldOut
+ do while (associated(fieldOutPtr))
+ recvListPtr => fieldOutPtr % recvList(1) % next
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID == dminfo % my_proc_id) exit
+ do while(associated(recvListPtr))
+ allocate(recvListPtr % ibuffer(recvListPtr % nlist))
+ call MPI_Irecv(recvListPtr % ibuffer, recvListPtr % nlist, MPI_INTEGERKIND, &
+ recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
+
recvListPtr => recvListPtr % next
- end do
+ end do
+ fieldOutPtr => fieldOutPtr % next
+ end do
- if (associated(recvListPtr) .and. associated(sendListPtr)) then
- do i=1,recvListPtr % nlist
- arrayOut(:,recvListPtr % list(i)) = arrayIn(:,sendListPtr % list(i))
- end do
- end if
+ ! Initiate mpi_isend calls, and handle local copies
+ fieldInPtr => fieldIn
+ do while (associated(fieldInPtr))
+ sendListPtr => fieldInPtr % sendList(1) % next
+ do while (associated(sendListPtr))
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * recvListPtr % nlist
- allocate(recvListPtr % ibuffer(d2))
- call MPI_Irecv(recvListPtr % ibuffer, d2, MPI_INTEGERKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
- end do
+ if (sendListPtr % procID == dminfo % my_proc_id) then ! Local Copy
+ fieldOutPtr => fieldOut
+ do while (associated(fieldOutPtr))
+ if(fieldOutPtr % block % blockID == sendListPtr % blockID) then
+ copyListPtr => fieldOutPtr % copyList(1) % next
+ do while(associated(copyListPtr))
+ if(copyListPtr % blockID == fieldInPtr % block % blockID) then
+ do i = 1, copyListPtr % nList
+ fieldOutPtr % array(:,copyListPtr % list(i)) = fieldInPtr % array(:,sendListPtr % list(i))
+ end do
+ end if
+ copyListPtr => copyListPtr % next
+ end do
+ end if
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ else ! mpi_isend
+ allocate(sendListPtr % ibuffer(sendListPtr % nlist))
+ call mpas_pack_send_buf2d_integer(1, fieldInPtr % dimSizes(1), fieldInPtr % dimSizes(2), fieldInPtr % array, sendListPtr, 1, sendListPtr % nList, &
+ sendListPtr % ibuffer, nPacked, lastPackedIdx)
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * sendListPtr % nlist
- allocate(sendListPtr % ibuffer(d2))
- call mpas_pack_send_buf2d_integer(1, dim1, nOwnedList, arrayIn, sendListPtr, 1, d2, &
- sendListPtr % ibuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % ibuffer, d2, MPI_INTEGERKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
+ call MPI_Isend(sendListPtr % ibuffer, sendListPtr % nlist, MPI_INTEGERKIND, &
+ sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
end if
sendListPtr => sendListPtr % next
- end do
+ end do
+ fieldInPtr => fieldInPtr % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d2 = dim1 * recvListPtr % nlist
- call mpas_unpack_recv_buf2d_integer(1, dim1, nNeededList, arrayOut, recvListPtr, 1, d2, &
- recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % ibuffer)
- end if
+ ! Complete all mpi_irecv requests, and unpack data into array
+ ! Deallocate receive buffers
+ fieldOutPtr => fieldOut
+ do while (associated(fieldOutPtr))
+ recvListPtr => fieldOutPtr % recvList(1) % next
+ do while (associated(recvListPtr))
+ call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+
+ call mpas_unpack_recv_buf2d_integer(1, fieldOutPtr % dimSizes(1), fieldOutPtr % dimSizes(2), fieldOutPtr % array, recvListPtr, 1, recvListPtr % nlist, &
+ recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
+ deallocate(recvListPtr % ibuffer)
recvListPtr => recvListPtr % next
- end do
+ end do
+ fieldOutPtr => fieldOutPtr % next
+ end do
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % ibuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
+ ! Deallocate send buffers
+ fieldInPtr => fieldIn
+ do while (associated(fieldInPtr))
+ sendListPtr => fieldInPtr % sendList(1) % next
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ deallocate(sendListPtr % ibuffer)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+ fieldInPtr => fieldInPtr % next
+ end do
#else
- if (nOwnedList /= nNeededList) then
- write(0,*) 'Error in dmpar_alltoall_field: For non-dmpar, ', &
- 'arrayIn and arrayOut dims must match.'
- call mpas_dmpar_abort(dminfo)
- else
- arrayOut(:,1:nNeededList) = arrayIn(:,1:nOwnedList)
- end if
+ ! Only local copies if no mpi
+ fieldOutPtr => fieldOut
+ do while (associated(fieldOutPtr))
+ copyListPtr => fieldOutPtr % copyList(1) % next
+ do while (associated(copyListPtr))
+ fieldInPtr => fieldIn
+ do while (associated(fieldInPtr))
+ if(copyListPtr % blockID == fieldInPtr % block % blockID) then
+ sendListPtr => fieldInPtr % sendList(1) % next
+ do while (associated(sendListPtr))
+ if(sendListPtr % blockID == fieldOutPtr % block % blockID) then
+ do i = 1, copyListPtr % nList
+ fieldOutPtr % array(:, copyListPtr % list(i)) = fieldInPtr % array(:, sendListPtr % list(i))
+ end do
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+ end if
+ fieldInPtr => fieldInPtr % next
+ end do
+ copyListPtr => copyListPtr % next
+ end do
+ fieldOutPtr => fieldOutPtr % next
+ end do
#endif
- end subroutine mpas_dmpar_alltoall_field2d_integer
+ end subroutine mpas_dmpar_alltoall_field2d_integer!}}}
+ subroutine mpas_dmpar_alltoall_field3d_integer(dminfo, fieldIn, fieldout)!{{{
- subroutine mpas_dmpar_alltoall_field1d_real(dminfo, arrayIn, arrayOut, nOwnedList, nNeededList, sendList, recvList)
+ implicit none
- implicit none
+ type (dm_info), intent(in) :: dminfo
+ type (field3dInteger), pointer :: fieldIn
+ type (field3dInteger), pointer :: fieldOut
- type (dm_info), intent(in) :: dminfo
- real (kind=RKIND), dimension(*), intent(in) :: arrayIn
- real (kind=RKIND), dimension(*), intent(inout) :: arrayOut
- integer, intent(in) :: nOwnedList, nNeededList
- type (exchange_list), pointer :: sendList, recvList
+ type (field3dInteger), pointer :: fieldInPtr, fieldOutPtr
+ type (exchange_list), pointer :: sendListPtr, recvListPtr, copyListPtr
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer :: i
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: mpi_ierr
+ integer :: i
#ifdef _MPI
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID == dminfo % my_proc_id) exit
- sendListPtr => sendListPtr % next
- end do
+ ! Initiate mpi_irecv calls
+ fieldOutPtr => fieldOut
+ do while (associated(fieldOutPtr))
+ recvListPtr => fieldOutPtr % recvList(1) % next
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID == dminfo % my_proc_id) exit
+ do while(associated(recvListPtr))
+ allocate(recvListPtr % ibuffer(recvListPtr % nlist))
+ call MPI_Irecv(recvListPtr % ibuffer, recvListPtr % nlist, MPI_INTEGERKIND, &
+ recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
+
recvListPtr => recvListPtr % next
- end do
+ end do
+ fieldOutPtr => fieldOutPtr % next
+ end do
- if (associated(recvListPtr) .and. associated(sendListPtr)) then
- do i=1,recvListPtr % nlist
- arrayOut(recvListPtr % list(i)) = arrayIn(sendListPtr % list(i))
- end do
- end if
+ ! Initiate mpi_isend calls, and handle local copies
+ fieldInPtr => fieldIn
+ do while (associated(fieldInPtr))
+ sendListPtr => fieldInPtr % sendList(1) % next
+ do while (associated(sendListPtr))
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- allocate(recvListPtr % rbuffer(recvListPtr % nlist))
- call MPI_Irecv(recvListPtr % rbuffer, recvListPtr % nlist, MPI_REALKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
- end do
+ if (sendListPtr % procID == dminfo % my_proc_id) then ! Local Copy
+ fieldOutPtr => fieldOut
+ do while (associated(fieldOutPtr))
+ if(fieldOutPtr % block % blockID == sendListPtr % blockID) then
+ copyListPtr => fieldOutPtr % copyList(1) % next
+ do while(associated(copyListPtr))
+ if(copyListPtr % blockID == fieldInPtr % block % blockID) then
+ do i = 1, copyListPtr % nList
+ fieldOutPtr % array(:, :, copyListPtr % list(i)) = fieldInPtr % array(:, :, sendListPtr % list(i))
+ end do
+ end if
+ copyListPtr => copyListPtr % next
+ end do
+ end if
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ else ! mpi_isend
+ allocate(sendListPtr % ibuffer(sendListPtr % nlist))
+ call mpas_pack_send_buf3d_integer(1, fieldInPtr % dimSizes(1), 1, fieldInPtr % dimSizes(2), fieldInPtr % dimSizes(3), fieldInPtr % array, &
+ sendListPtr, 1, sendListPtr % nList, &
+ sendListPtr % ibuffer, nPacked, lastPackedIdx)
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- allocate(sendListPtr % rbuffer(sendListPtr % nlist))
- call mpas_pack_send_buf1d_real(nOwnedList, arrayIn, sendListPtr, 1, sendListPtr % nlist, &
- sendListPtr % rbuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % rbuffer, sendListPtr % nlist, MPI_REALKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
+ call MPI_Isend(sendListPtr % ibuffer, sendListPtr % nlist, MPI_INTEGERKIND, &
+ sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
end if
sendListPtr => sendListPtr % next
- end do
+ end do
+ fieldInPtr => fieldInPtr % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- call mpas_unpack_recv_buf1d_real(nNeededList, arrayOut, recvListPtr, 1, recvListPtr % nlist, &
- recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % rbuffer)
- end if
+ ! Complete all mpi_irecv requests, and unpack data into array
+ ! Deallocate receive buffers
+ fieldOutPtr => fieldOut
+ do while (associated(fieldOutPtr))
+ recvListPtr => fieldOutPtr % recvList(1) % next
+ do while (associated(recvListPtr))
+ call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+
+ call mpas_unpack_recv_buf3d_integer(1, fieldOutPtr % dimSizes(1), 1, fieldOutPtr % dimSizes(2), fieldOutPtr % dimSizes(3), &
+ fieldOutPtr % array, recvListPtr, 1, recvListPtr % nlist, &
+ recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
+ deallocate(recvListPtr % ibuffer)
recvListPtr => recvListPtr % next
- end do
+ end do
+ fieldOutPtr => fieldOutPtr % next
+ end do
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % rbuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
+ ! Deallocate send buffers
+ fieldInPtr => fieldIn
+ do while (associated(fieldInPtr))
+ sendListPtr => fieldInPtr % sendList(1) % next
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ deallocate(sendListPtr % ibuffer)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+ fieldInPtr => fieldInPtr % next
+ end do
#else
- if (nOwnedList /= nNeededList) then
- write(0,*) 'Error in dmpar_alltoall_field: For non-dmpar, ', &
- 'arrayIn and arrayOut dims must match.'
- call mpas_dmpar_abort(dminfo)
- else
- arrayOut(1:nNeededList) = arrayIn(1:nOwnedList)
- end if
+ ! Only local copies if no mpi
+ fieldOutPtr => fieldOut
+ do while (associated(fieldOutPtr))
+ copyListPtr => fieldOutPtr % copyList(1) % next
+ do while (associated(copyListPtr))
+ fieldInPtr => fieldIn
+ do while (associated(fieldInPtr))
+ if(copyListPtr % blockID == fieldInPtr % block % blockID) then
+ sendListPtr => fieldInPtr % sendList(1) % next
+ do while (associated(sendListPtr))
+ if(sendListPtr % blockID == fieldOutPtr % block % blockID) then
+ do i = 1, copyListPtr % nList
+ fieldOutPtr % array(:, :, copyListPtr % list(i)) = fieldInPtr % array(:, :, sendListPtr % list(i))
+ end do
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+ end if
+ fieldInPtr => fieldInPtr % next
+ end do
+ copyListPtr => copyListPtr % next
+ end do
+ fieldOutPtr => fieldOutPtr % next
+ end do
#endif
- end subroutine mpas_dmpar_alltoall_field1d_real
+ end subroutine mpas_dmpar_alltoall_field3d_integer!}}}
+ subroutine mpas_dmpar_alltoall_field1d_real(dminfo, fieldIn, fieldout)!{{{
- subroutine mpas_dmpar_alltoall_field2d_real(dminfo, arrayIn, arrayOut, dim1, nOwnedList, nNeededList, sendList, recvList)
+ implicit none
- implicit none
+ type (dm_info), intent(in) :: dminfo
+ type (field1dReal), pointer :: fieldIn
+ type (field1dReal), pointer :: fieldOut
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1, nOwnedList, nNeededList
- real (kind=RKIND), dimension(dim1,*), intent(in) :: arrayIn
- real (kind=RKIND), dimension(dim1,*), intent(inout) :: arrayOut
- type (exchange_list), pointer :: sendList, recvList
+ type (field1dReal), pointer :: fieldInPtr, fieldOutPtr
+ type (exchange_list), pointer :: sendListPtr, recvListPtr, copyListPtr
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer :: i, d2
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: mpi_ierr
+ integer :: i
#ifdef _MPI
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID == dminfo % my_proc_id) exit
+ ! Initiate mpi_irecv calls
+ fieldOutPtr => fieldOut
+ do while (associated(fieldOutPtr))
+ recvListPtr => fieldOutPtr % recvList(1) % next
+
+ do while(associated(recvListPtr))
+ allocate(recvListPtr % rbuffer(recvListPtr % nlist))
+ call MPI_Irecv(recvListPtr % rbuffer, recvListPtr % nlist, MPI_REALKIND, &
+ recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
+
+ recvListPtr => recvListPtr % next
+ end do
+ fieldOutPtr => fieldOutPtr % next
+ end do
+
+ ! Initiate mpi_isend calls, and handle local copies
+ fieldInPtr => fieldIn
+ do while (associated(fieldInPtr))
+ sendListPtr => fieldInPtr % sendList(1) % next
+ do while (associated(sendListPtr))
+
+ if (sendListPtr % procID == dminfo % my_proc_id) then ! Local Copy
+ fieldOutPtr => fieldOut
+ do while (associated(fieldOutPtr))
+ if(fieldOutPtr % block % blockID == sendListPtr % blockID) then
+ copyListPtr => fieldOutPtr % copyList(1) % next
+ do while(associated(copyListPtr))
+ if(copyListPtr % blockID == fieldInPtr % block % blockID) then
+ do i = 1, copyListPtr % nList
+ fieldOutPtr % array(copyListPtr % list(i)) = fieldInPtr % array(sendListPtr % list(i))
+ end do
+ end if
+ copyListPtr => copyListPtr % next
+ end do
+ end if
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ else ! mpi_isend
+ allocate(sendListPtr % rbuffer(sendListPtr % nlist))
+ call mpas_pack_send_buf1d_real(fieldInPtr % dimSizes(1), fieldInPtr % array, sendListPtr, 1, sendListPtr % nlist, &
+ sendListPtr % rbuffer, nPacked, lastPackedIdx)
+ call MPI_Isend(sendListPtr % rbuffer, sendListPtr % nlist, MPI_REALKIND, &
+ sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
+ end if
sendListPtr => sendListPtr % next
- end do
+ end do
+ fieldInPtr => fieldInPtr % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID == dminfo % my_proc_id) exit
+ ! Wait on all mpi_irecv requests, and unpack data into the array
+ ! Deallocate receive buffers
+ fieldOutPtr => fieldOut
+ do while (associated(fieldOutPtr))
+ recvListPtr => fieldOutPtr % recvList(1) % next
+ do while (associated(recvListPtr))
+ call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ call mpas_unpack_recv_buf1d_real(fieldOutPtr % dimSizes(1), fieldOutPtr % array, recvListPtr, 1, recvListPtr % nlist, &
+ recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
+ deallocate(recvListPtr % rbuffer)
recvListPtr => recvListPtr % next
- end do
+ end do
+ fieldOutPtr => fieldOutPtr % next
+ end do
- if (associated(recvListPtr) .and. associated(sendListPtr)) then
- do i=1,recvListPtr % nlist
- arrayOut(:,recvListPtr % list(i)) = arrayIn(:,sendListPtr % list(i))
+ ! Deallocate send buffers
+ fieldInPtr => fieldIn
+ do while (associated(fieldInPtr))
+ sendListPtr => fieldInPtr % sendList(1) % next
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ deallocate(sendListPtr % rbuffer)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+ fieldInPtr => fieldInPtr % next
+ end do
+
+#else
+ ! Only local copies if no mpi
+ fieldOutPtr => fieldOut
+ do while (associated(fieldOutPtr))
+ copyListPtr => fieldOutPtr % copyList(1) % next
+ do while (associated(copyListPtr))
+ fieldInPtr => fieldIn
+ do while (associated(fieldInPtr))
+ if(copyListPtr % blockID == fieldInPtr % block % blockID) then
+ sendListPtr => fieldInPtr % sendList(1) % next
+ do while (associated(sendListPtr))
+ if(sendListPtr % blockID == fieldOutPtr % block % blockID) then
+ do i = 1, copyListPtr % nList
+ fieldOutPtr % array(copyListPtr % list(i)) = fieldInPtr % array(sendListPtr % list(i))
+ end do
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+ end if
+ fieldInPtr => fieldInPtr % next
end do
- end if
+ copyListPtr => copyListPtr % next
+ end do
+ fieldOutPtr => fieldOutPtr % next
+ end do
+#endif
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * recvListPtr % nlist
- allocate(recvListPtr % rbuffer(d2))
- call MPI_Irecv(recvListPtr % rbuffer, d2, MPI_REALKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
+ end subroutine mpas_dmpar_alltoall_field1d_real!}}}
+
+ subroutine mpas_dmpar_alltoall_field2d_real(dminfo, fieldIn, fieldout)!{{{
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ type (field2dReal), pointer :: fieldIn
+ type (field2dReal), pointer :: fieldOut
+
+ type (field2dReal), pointer :: fieldInPtr, fieldOutPtr
+ type (exchange_list), pointer :: sendListPtr, recvListPtr, copyListPtr
+
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: mpi_ierr
+ integer :: i
+
+#ifdef _MPI
+
+ ! Initiate mpi_irecv calls
+ fieldOutPtr => fieldOut
+ do while (associated(fieldOutPtr))
+ recvListPtr => fieldOutPtr % recvList(1) % next
+
+ do while(associated(recvListPtr))
+ ! Buffer must hold dimSizes(1) values for every element in the receive list
+ allocate(recvListPtr % rbuffer(fieldOutPtr % dimSizes(1) * recvListPtr % nlist))
+ call MPI_Irecv(recvListPtr % rbuffer, fieldOutPtr % dimSizes(1) * recvListPtr % nlist, MPI_REALKIND, &
+ recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
+
recvListPtr => recvListPtr % next
- end do
+ end do
+ fieldOutPtr => fieldOutPtr % next
+ end do
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * sendListPtr % nlist
- allocate(sendListPtr % rbuffer(d2))
- call mpas_pack_send_buf2d_real(1, dim1, nOwnedList, arrayIn, sendListPtr, 1, d2, &
- sendListPtr % rbuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % rbuffer, d2, MPI_REALKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
+ ! Initiate mpi_isend calls, and handle local copies
+ fieldInPtr => fieldIn
+ do while (associated(fieldInPtr))
+ sendListPtr => fieldInPtr % sendList(1) % next
+ do while (associated(sendListPtr))
+
+ if (sendListPtr % procID == dminfo % my_proc_id) then ! Local Copy
+ fieldOutPtr => fieldOut
+ do while (associated(fieldOutPtr))
+ if(fieldOutPtr % block % blockID == sendListPtr % blockID) then
+ copyListPtr => fieldOutPtr % copyList(1) % next
+ do while(associated(copyListPtr))
+ if(copyListPtr % blockID == fieldInPtr % block % blockID) then
+ do i = 1, copyListPtr % nList
+ fieldOutPtr % array(:,copyListPtr % list(i)) = fieldInPtr % array(:,sendListPtr % list(i))
+ end do
+ end if
+ copyListPtr => copyListPtr % next
+ end do
+ end if
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ else ! mpi_isend
+ ! Buffer must hold dimSizes(1) values for every element in the send list
+ allocate(sendListPtr % rbuffer(fieldInPtr % dimSizes(1) * sendListPtr % nlist))
+ call mpas_pack_send_buf2d_real(1, fieldInPtr % dimSizes(1), fieldInPtr % dimSizes(2), fieldInPtr % array, sendListPtr, 1, &
+ fieldInPtr % dimSizes(1) * sendListPtr % nList, sendListPtr % rbuffer, nPacked, lastPackedIdx)
+
+ call MPI_Isend(sendListPtr % rbuffer, fieldInPtr % dimSizes(1) * sendListPtr % nlist, MPI_REALKIND, &
+ sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
end if
sendListPtr => sendListPtr % next
- end do
+ end do
+ fieldInPtr => fieldInPtr % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d2 = dim1 * recvListPtr % nlist
- call mpas_unpack_recv_buf2d_real(1, dim1, nNeededList, arrayOut, recvListPtr, 1, d2, &
- recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % rbuffer)
- end if
+ ! Wait on all mpi_irecv requests, and unpack data into the array
+ ! Deallocate receive buffers
+ fieldOutPtr => fieldOut
+ do while (associated(fieldOutPtr))
+ recvListPtr => fieldOutPtr % recvList(1) % next
+ do while (associated(recvListPtr))
+ call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+
+ call mpas_unpack_recv_buf2d_real(1, fieldOutPtr % dimSizes(1), fieldOutPtr % dimSizes(2), fieldOutPtr % array, recvListPtr, 1, &
+ fieldOutPtr % dimSizes(1) * recvListPtr % nlist, recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
+ deallocate(recvListPtr % rbuffer)
recvListPtr => recvListPtr % next
- end do
+ end do
+ fieldOutPtr => fieldOutPtr % next
+ end do
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % rbuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
+ ! Deallocate send buffers
+ fieldInPtr => fieldIn
+ do while (associated(fieldInPtr))
+ sendListPtr => fieldInPtr % sendList(1) % next
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ deallocate(sendListPtr % rbuffer)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+ fieldInPtr => fieldInPtr % next
+ end do
#else
- if (nOwnedList /= nNeededList) then
- write(0,*) 'Error in dmpar_alltoall_field: For non-dmpar, ', &
- 'arrayIn and arrayOut dims must match.'
- call mpas_dmpar_abort(dminfo)
- else
- arrayOut(:,1:nNeededList) = arrayIn(:,1:nOwnedList)
- end if
+ ! Only local copies if no mpi
+ fieldOutPtr => fieldOut
+ do while (associated(fieldOutPtr))
+ copyListPtr => fieldOutPtr % copyList(1) % next
+ do while (associated(copyListPtr))
+ fieldInPtr => fieldIn
+ do while (associated(fieldInPtr))
+ if(copyListPtr % blockID == fieldInPtr % block % blockID) then
+ sendListPtr => fieldInPtr % sendList(1) % next
+ do while (associated(sendListPtr))
+ if(sendListPtr % blockID == fieldOutPtr % block % blockID) then
+ do i = 1, copyListPtr % nList
+ fieldOutPtr % array(:, copyListPtr % list(i)) = fieldInPtr % array(:, sendListPtr % list(i))
+ end do
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+ end if
+ fieldInPtr => fieldInPtr % next
+ end do
+ copyListPtr => copyListPtr % next
+ end do
+ fieldOutPtr => fieldOutPtr % next
+ end do
#endif
- end subroutine mpas_dmpar_alltoall_field2d_real
+ end subroutine mpas_dmpar_alltoall_field2d_real!}}}
-
- subroutine mpas_dmpar_alltoall_field3d_real(dminfo, arrayIn, arrayOut, dim1, dim2, nOwnedList, nNeededList, sendList, recvList)
+ subroutine mpas_dmpar_alltoall_field3d_real(dminfo, fieldIn, fieldout)!{{{
- implicit none
+ implicit none
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1, dim2, nOwnedList, nNeededList
- real (kind=RKIND), dimension(dim1,dim2,*), intent(in) :: arrayIn
- real (kind=RKIND), dimension(dim1,dim2,*), intent(inout) :: arrayOut
- type (exchange_list), pointer :: sendList, recvList
+ type (dm_info), intent(in) :: dminfo
+ type (field3dReal), pointer :: fieldIn
+ type (field3dReal), pointer :: fieldOut
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer :: i, d3
+ type (field3dReal), pointer :: fieldInPtr, fieldOutPtr
+ type (exchange_list), pointer :: sendListPtr, recvListPtr, copyListPtr
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: mpi_ierr
+ integer :: i
+
#ifdef _MPI
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID == dminfo % my_proc_id) exit
- sendListPtr => sendListPtr % next
- end do
+ ! Initiate mpi_irecv calls
+ fieldOutPtr => fieldOut
+ do while (associated(fieldOutPtr))
+ recvListPtr => fieldOutPtr % recvList(1) % next
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID == dminfo % my_proc_id) exit
+ do while(associated(recvListPtr))
+ ! Buffer must hold dimSizes(1)*dimSizes(2) values for every element in the receive list
+ allocate(recvListPtr % rbuffer(fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * recvListPtr % nlist))
+ call MPI_Irecv(recvListPtr % rbuffer, fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * recvListPtr % nlist, MPI_REALKIND, &
+ recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
+
recvListPtr => recvListPtr % next
- end do
+ end do
+ fieldOutPtr => fieldOutPtr % next
+ end do
- if (associated(recvListPtr) .and. associated(sendListPtr)) then
- do i=1,recvListPtr % nlist
- arrayOut(:,:,recvListPtr % list(i)) = arrayIn(:,:,sendListPtr % list(i))
- end do
- end if
+ ! Initiate mpi_isend calls, and handle local copies
+ fieldInPtr => fieldIn
+ do while (associated(fieldInPtr))
+ sendListPtr => fieldInPtr % sendList(1) % next
+ do while (associated(sendListPtr))
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- d3 = dim1 * dim2 * recvListPtr % nlist
- allocate(recvListPtr % rbuffer(d3))
- call MPI_Irecv(recvListPtr % rbuffer, d3, MPI_REALKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
- end do
+ if (sendListPtr % procID == dminfo % my_proc_id) then ! Local Copy
+ fieldOutPtr => fieldOut
+ do while (associated(fieldOutPtr))
+ if(fieldOutPtr % block % blockID == sendListPtr % blockID) then
+ copyListPtr => fieldOutPtr % copyList(1) % next
+ do while(associated(copyListPtr))
+ if(copyListPtr % blockID == fieldInPtr % block % blockID) then
+ do i = 1, copyListPtr % nList
+ fieldOutPtr % array(:, :, copyListPtr % list(i)) = fieldInPtr % array(:, :, sendListPtr % list(i))
+ end do
+ end if
+ copyListPtr => copyListPtr % next
+ end do
+ end if
+ fieldOutPtr => fieldOutPtr % next
+ end do
+ else ! mpi_isend
+ ! Buffer must hold dimSizes(1)*dimSizes(2) values for every element in the send list
+ allocate(sendListPtr % rbuffer(fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * sendListPtr % nlist))
+ call mpas_pack_send_buf3d_real(1, fieldInPtr % dimSizes(1), 1, fieldInPtr % dimSizes(2), fieldInPtr % dimSizes(3), fieldInPtr % array, &
+ sendListPtr, 1, fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * sendListPtr % nList, &
+ sendListPtr % rbuffer, nPacked, lastPackedIdx)
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- d3 = dim1 * dim2 * sendListPtr % nlist
- allocate(sendListPtr % rbuffer(d3))
- call mpas_pack_send_buf3d_real(1, dim1, 1, dim2, nOwnedList, arrayIn, sendListPtr, 1, d3, &
- sendListPtr % rbuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % rbuffer, d3, MPI_REALKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
+ call MPI_Isend(sendListPtr % rbuffer, fieldInPtr % dimSizes(1) * fieldInPtr % dimSizes(2) * sendListPtr % nlist, MPI_REALKIND, &
+ sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
end if
sendListPtr => sendListPtr % next
- end do
+ end do
+ fieldInPtr => fieldInPtr % next
+ end do
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d3 = dim1 * dim2 * recvListPtr % nlist
- call mpas_unpack_recv_buf3d_real(1, dim1, 1, dim2, nNeededList, arrayOut, recvListPtr, 1, d3, &
- recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % rbuffer)
- end if
+ ! Wait on all mpi_irecv requests, and unpack data into the array
+ ! Deallocate receive buffers
+ fieldOutPtr => fieldOut
+ do while (associated(fieldOutPtr))
+ recvListPtr => fieldOutPtr % recvList(1) % next
+ do while (associated(recvListPtr))
+ call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+
+ call mpas_unpack_recv_buf3d_real(1, fieldOutPtr % dimSizes(1), 1, fieldOutPtr % dimSizes(2), fieldOutPtr % dimSizes(3), &
+ fieldOutPtr % array, recvListPtr, 1, fieldOutPtr % dimSizes(1) * fieldOutPtr % dimSizes(2) * recvListPtr % nlist, &
+ recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
+ deallocate(recvListPtr % rbuffer)
recvListPtr => recvListPtr % next
- end do
+ end do
+ fieldOutPtr => fieldOutPtr % next
+ end do
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % rbuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
+ ! Deallocate send buffers
+ fieldInPtr => fieldIn
+ do while (associated(fieldInPtr))
+ sendListPtr => fieldInPtr % sendList(1) % next
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ deallocate(sendListPtr % rbuffer)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+ fieldInPtr => fieldInPtr % next
+ end do
#else
- if (nOwnedList /= nNeededList) then
- write(0,*) 'Error in dmpar_alltoall_field: For non-dmpar, ', &
- 'arrayIn and arrayOut dims must match.'
- call mpas_dmpar_abort(dminfo)
- else
- arrayOut(:,:,1:nNeededList) = arrayIn(:,:,1:nOwnedList)
- end if
+ ! Only local copies if no mpi
+ fieldOutPtr => fieldOut
+ do while (associated(fieldOutPtr))
+ copyListPtr => fieldOutPtr % copyList(1) % next
+ do while (associated(copyListPtr))
+ fieldInPtr => fieldIn
+ do while (associated(fieldInPtr))
+ if(copyListPtr % blockID == fieldInPtr % block % blockID) then
+ sendListPtr => fieldInPtr % sendList(1) % next
+ do while (associated(sendListPtr))
+ if(sendListPtr % blockID == fieldOutPtr % block % blockID) then
+ do i = 1, copyListPtr % nList
+ fieldOutPtr % array(:, :, copyListPtr % list(i)) = fieldInPtr % array(:, :, sendListPtr % list(i))
+ end do
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+ end if
+ fieldInPtr => fieldInPtr % next
+ end do
+ copyListPtr => copyListPtr % next
+ end do
+ fieldOutPtr => fieldOutPtr % next
+ end do
#endif
- end subroutine mpas_dmpar_alltoall_field3d_real
-
+ end subroutine mpas_dmpar_alltoall_field3d_real!}}}
- subroutine mpas_pack_send_buf1d_integer(nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
+ subroutine mpas_pack_send_buf1d_integer(nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)!{{{
implicit none
@@ -1217,11 +1486,10 @@
end do
lastPackedIdx = sendList % nlist
- end subroutine mpas_pack_send_buf1d_integer
+ end subroutine mpas_pack_send_buf1d_integer!}}}
+ subroutine mpas_pack_send_buf2d_integer(ds, de, nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)!{{{
- subroutine mpas_pack_send_buf2d_integer(ds, de, nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
-
implicit none
integer, intent(in) :: ds, de, nField, nBuffer, startPackIdx
@@ -1252,11 +1520,10 @@
end do
lastPackedIdx = sendList % nlist
- end subroutine mpas_pack_send_buf2d_integer
+ end subroutine mpas_pack_send_buf2d_integer!}}}
+ subroutine mpas_pack_send_buf3d_integer(d1s, d1e, d2s, d2e, nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)!{{{
- subroutine mpas_pack_send_buf3d_integer(d1s, d1e, d2s, d2e, nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
-
implicit none
integer, intent(in) :: d1s, d1e, d2s, d2e, nField, nBuffer, startPackIdx
@@ -1291,11 +1558,10 @@
end do
lastPackedIdx = sendList % nlist
- end subroutine mpas_pack_send_buf3d_integer
+ end subroutine mpas_pack_send_buf3d_integer!}}}
+ subroutine mpas_pack_send_buf1d_real(nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)!{{{
- subroutine mpas_pack_send_buf1d_real(nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
-
implicit none
integer, intent(in) :: nField, nBuffer, startPackIdx
@@ -1318,11 +1584,10 @@
end do
lastPackedIdx = sendList % nlist
- end subroutine mpas_pack_send_buf1d_real
+ end subroutine mpas_pack_send_buf1d_real!}}}
+ subroutine mpas_pack_send_buf2d_real(ds, de, nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)!{{{
- subroutine mpas_pack_send_buf2d_real(ds, de, nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
-
implicit none
integer, intent(in) :: ds, de, nField, nBuffer, startPackIdx
@@ -1353,11 +1618,10 @@
end do
lastPackedIdx = sendList % nlist
- end subroutine mpas_pack_send_buf2d_real
+ end subroutine mpas_pack_send_buf2d_real!}}}
+ subroutine mpas_pack_send_buf3d_real(d1s, d1e, d2s, d2e, nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)!{{{
- subroutine mpas_pack_send_buf3d_real(d1s, d1e, d2s, d2e, nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
-
implicit none
integer, intent(in) :: d1s, d1e, d2s, d2e, nField, nBuffer, startPackIdx
@@ -1392,11 +1656,10 @@
end do
lastPackedIdx = sendList % nlist
- end subroutine mpas_pack_send_buf3d_real
+ end subroutine mpas_pack_send_buf3d_real!}}}
+ subroutine mpas_unpack_recv_buf1d_integer(nField, field, recvList, startUnpackIdx, nBuffer, buffer, nUnpacked, lastUnpackedIdx)!{{{
- subroutine mpas_unpack_recv_buf1d_integer(nField, field, recvList, startUnpackIdx, nBuffer, buffer, nUnpacked, lastUnpackedIdx)
-
implicit none
integer, intent(in) :: nField, nBuffer, startUnpackIdx
@@ -1419,11 +1682,10 @@
end do
lastUnpackedIdx = recvList % nlist
- end subroutine mpas_unpack_recv_buf1d_integer
+ end subroutine mpas_unpack_recv_buf1d_integer!}}}
+ subroutine mpas_unpack_recv_buf2d_integer(ds, de, nField, field, recvList, startUnpackIdx, nBuffer, buffer, nUnpacked, lastUnpackedIdx)!{{{
- subroutine mpas_unpack_recv_buf2d_integer(ds, de, nField, field, recvList, startUnpackIdx, nBuffer, buffer, nUnpacked, lastUnpackedIdx)
-
implicit none
integer, intent(in) :: ds, de, nField, nBuffer, startUnpackIdx
@@ -1448,10 +1710,9 @@
end do
lastUnpackedIdx = recvList % nlist
- end subroutine mpas_unpack_recv_buf2d_integer
+ end subroutine mpas_unpack_recv_buf2d_integer!}}}
-
- subroutine mpas_unpack_recv_buf3d_integer(d1s, d1e, d2s, d2e, nField, field, recvList, startUnpackIdx, nBuffer, buffer, &
+ subroutine mpas_unpack_recv_buf3d_integer(d1s, d1e, d2s, d2e, nField, field, recvList, startUnpackIdx, nBuffer, buffer, &!{{{
nUnpacked, lastUnpackedIdx)
implicit none
@@ -1482,11 +1743,10 @@
end do
lastUnpackedIdx = recvList % nlist
- end subroutine mpas_unpack_recv_buf3d_integer
+ end subroutine mpas_unpack_recv_buf3d_integer!}}}
+ subroutine mpas_dmpar_exch_halo_field1d_integer(field, haloLayers)!{{{
- subroutine mpas_dmpar_exch_halo_field1d_integer(field, haloLayers)
-
implicit none
type (field1DInteger), intent(inout) :: field
@@ -1551,11 +1811,10 @@
#endif
- end subroutine mpas_dmpar_exch_halo_field1d_integer
+ end subroutine mpas_dmpar_exch_halo_field1d_integer!}}}
+ subroutine mpas_dmpar_exch_halo_field2d_integer(field, haloLayers)!{{{
- subroutine mpas_dmpar_exch_halo_field2d_integer(field, haloLayers)
-
implicit none
type (field2DInteger), intent(inout) :: field
@@ -1624,11 +1883,10 @@
#endif
- end subroutine mpas_dmpar_exch_halo_field2d_integer
+ end subroutine mpas_dmpar_exch_halo_field2d_integer!}}}
+ subroutine mpas_dmpar_exch_halo_field3d_integer(field, haloLayers)!{{{
- subroutine mpas_dmpar_exch_halo_field3d_integer(field, haloLayers)
-
implicit none
type (field3DInteger), intent(inout) :: field
@@ -1699,10 +1957,9 @@
#endif
- end subroutine mpas_dmpar_exch_halo_field3d_integer
-
+ end subroutine mpas_dmpar_exch_halo_field3d_integer!}}}
- subroutine mpas_unpack_recv_buf1d_real(nField, field, recvList, startUnpackIdx, nBuffer, buffer, nUnpacked, lastUnpackedIdx)
+ subroutine mpas_unpack_recv_buf1d_real(nField, field, recvList, startUnpackIdx, nBuffer, buffer, nUnpacked, lastUnpackedIdx)!{{{
implicit none
@@ -1726,11 +1983,10 @@
end do
lastUnpackedIdx = recvList % nlist
- end subroutine mpas_unpack_recv_buf1d_real
+ end subroutine mpas_unpack_recv_buf1d_real!}}}
+ subroutine mpas_unpack_recv_buf2d_real(ds, de, nField, field, recvList, startUnpackIdx, nBuffer, buffer, nUnpacked, lastUnpackedIdx)!{{{
- subroutine mpas_unpack_recv_buf2d_real(ds, de, nField, field, recvList, startUnpackIdx, nBuffer, buffer, nUnpacked, lastUnpackedIdx)
-
implicit none
integer, intent(in) :: ds, de, nField, nBuffer, startUnpackIdx
@@ -1755,10 +2011,9 @@
end do
lastUnpackedIdx = recvList % nlist
- end subroutine mpas_unpack_recv_buf2d_real
+ end subroutine mpas_unpack_recv_buf2d_real!}}}
-
- subroutine mpas_unpack_recv_buf3d_real(d1s, d1e, d2s, d2e, nField, field, recvList, startUnpackIdx, nBuffer, buffer, &
+ subroutine mpas_unpack_recv_buf3d_real(d1s, d1e, d2s, d2e, nField, field, recvList, startUnpackIdx, nBuffer, buffer, &!{{{
nUnpacked, lastUnpackedIdx)
implicit none
@@ -1789,11 +2044,10 @@
end do
lastUnpackedIdx = recvList % nlist
- end subroutine mpas_unpack_recv_buf3d_real
+ end subroutine mpas_unpack_recv_buf3d_real!}}}
+ subroutine mpas_dmpar_exch_halo_field1d_real(field, haloLayers)!{{{
- subroutine mpas_dmpar_exch_halo_field1d_real(field, haloLayers)
-
implicit none
type (field1DReal), intent(inout) :: field
@@ -1858,11 +2112,10 @@
#endif
- end subroutine mpas_dmpar_exch_halo_field1d_real
+ end subroutine mpas_dmpar_exch_halo_field1d_real!}}}
+ subroutine mpas_dmpar_exch_halo_field2d_real(field, haloLayers)!{{{
- subroutine mpas_dmpar_exch_halo_field2d_real(field, haloLayers)
-
implicit none
type (field2DReal), intent(inout) :: field
@@ -1932,11 +2185,10 @@
#endif
- end subroutine mpas_dmpar_exch_halo_field2d_real
+ end subroutine mpas_dmpar_exch_halo_field2d_real!}}}
+ subroutine mpas_dmpar_exch_halo_field3d_real(field, haloLayers)!{{{
- subroutine mpas_dmpar_exch_halo_field3d_real(field, haloLayers)
-
implicit none
type (field3DReal), intent(inout) :: field
@@ -2007,11 +2259,10 @@
#endif
- end subroutine mpas_dmpar_exch_halo_field3d_real
+ end subroutine mpas_dmpar_exch_halo_field3d_real!}}}
+ subroutine mpas_aggregate_exchange_lists(myProcID, haloLayersIn, sendListArray, recvListArray, aggregateSendList, aggregateRecvList)!{{{
- subroutine mpas_aggregate_exchange_lists(myProcID, haloLayersIn, sendListArray, recvListArray, aggregateSendList, aggregateRecvList)
-
implicit none
!--- in variables ---!
@@ -2140,11 +2391,10 @@
deallocate(haloLayers)
end if
- end subroutine mpas_aggregate_exchange_lists
+ end subroutine mpas_aggregate_exchange_lists!}}}
+ subroutine mpas_destroy_exchange_list(exchangeList)!{{{
- subroutine mpas_destroy_exchange_list(exchangeList)
-
implicit none
!--- in variables ---!
@@ -2161,11 +2411,10 @@
exchangeList => exchangeListPtr
end do
- end subroutine mpas_destroy_exchange_list
+ end subroutine mpas_destroy_exchange_list!}}}
+ subroutine mpas_merge_integer_arrays(mergeArray, nMergeArray, dataToAppend)!{{{
- subroutine mpas_merge_integer_arrays(mergeArray, nMergeArray, dataToAppend)
-
implicit none
!--- inout variables ---!
@@ -2189,7 +2438,6 @@
mergeArray(nMergeArray+1:newSize) = dataToAppend
nMergeArray = newSize
- end subroutine mpas_merge_integer_arrays
+ end subroutine mpas_merge_integer_arrays!}}}
-
end module mpas_dmpar
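
For reference, the calling sequence the new routines are meant to support (and which the mpas_io_input.F test below exercises) is: build send/recv/copy lists from a pair of owned/needed global-ID fields, link those lists onto other fields with the same decomposition, then run the field-based all-to-all. The following is a minimal sketch only, not part of the commit; the subroutine name, argument names, and use statements are placeholders, and it assumes the per-block field linked lists have already been allocated and populated.

      subroutine example_exchange(dminfo, ownedIDs, neededIDs, ownedData, neededData)

         use mpas_grid_types      ! assumed home of the field derived types
         use mpas_dmpar           ! provides mpas_dmpar_alltoall_field
         use mpas_block_decomp    ! assumed home of the exchange-list builders

         implicit none

         type (dm_info), intent(in) :: dminfo
         type (field1dInteger), pointer :: ownedIDs    ! global IDs as read on this process
         type (field1dInteger), pointer :: neededIDs   ! global IDs needed by each local block
         type (field2dInteger), pointer :: ownedData   ! a field read alongside ownedIDs
         type (field2dInteger), pointer :: neededData  ! the same field, redistributed per block

         ! Build send, recv, and copy lists from the owned and needed ID lists;
         ! here the needed side is block-decomposed and the owned side is not.
         call mpas_get_exchange_lists(dminfo, ownedIDs, .false., neededIDs, .true.)

         ! Reuse the ID exchange lists for another field pair with the same decomposition.
         call mpas_link_exchange_list_field(ownedIDs, neededIDs, ownedData, neededData)

         ! Move the data: MPI messages between processes, shared-memory copies
         ! between blocks that live on the same process.
         call mpas_dmpar_alltoall_field(dminfo, ownedIDs, neededIDs)
         call mpas_dmpar_alltoall_field(dminfo, ownedData, neededData)

      end subroutine example_exchange
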
Modified: branches/omp_blocks/multiple_blocks/src/framework/mpas_io_input.F
===================================================================
--- branches/omp_blocks/multiple_blocks/src/framework/mpas_io_input.F        2012-04-27 21:53:55 UTC (rev 1834)
+++ branches/omp_blocks/multiple_blocks/src/framework/mpas_io_input.F        2012-04-27 21:56:20 UTC (rev 1835)
@@ -27,6 +27,7 @@
end type io_input_object
+ type (exchange_list), pointer :: sendListPtr, recvListPtr, copyListPtr
type (exchange_list), pointer :: sendCellList, recvCellList
type (exchange_list), pointer :: sendEdgeList, recvEdgeList
type (exchange_list), pointer :: sendVertexList, recvVertexList
@@ -40,12 +41,14 @@
contains
-
- subroutine mpas_input_state_for_domain(domain)
+ subroutine mpas_input_state_for_domain(domain)!{{{
implicit none
type (domain_type), pointer :: domain
+
+ type (block_type), pointer :: block_ptr
+ type (block_type), pointer :: readingBlock
integer :: i, j, k
type (io_input_object) :: input_obj
@@ -58,29 +61,36 @@
integer, dimension(:), pointer :: readIndices
type (MPAS_IO_Handle_type) :: inputHandle
- type (field1dInteger) :: indexToCellIDField
- type (field1dInteger) :: indexToEdgeIDField
- type (field1dInteger) :: indexToVertexIDField
- type (field1dInteger) :: nEdgesOnCellField
- type (field2dInteger) :: cellsOnCellField
- type (field2dInteger) :: edgesOnCellField
- type (field2dInteger) :: verticesOnCellField
- type (field2dInteger) :: cellsOnEdgeField
- type (field2dInteger) :: cellsOnVertexField
+ type (field1dInteger), pointer :: indexToCellIDField
+ type (field1dInteger), pointer :: indexToEdgeIDField
+ type (field1dInteger), pointer :: indexToVertexIDField
+ type (field1dInteger), pointer :: nEdgesOnCellField
+ type (field2dInteger), pointer :: cellsOnCellField
+ type (field2dInteger), pointer :: edgesOnCellField
+ type (field2dInteger), pointer :: verticesOnCellField
+ type (field2dInteger), pointer :: cellsOnEdgeField
+ type (field2dInteger), pointer :: cellsOnVertexField
#ifdef HAVE_ZOLTAN
#ifdef _MPI
- type (field1dReal) :: xCellField, yCellField, zCellField
- type (field1dReal) :: xEdgeField, yEdgeField, zEdgeField
- type (field1dReal) :: xVertexField, yVertexField, zVertexField
+ type (field1dReal), pointer :: xCellField, yCellField, zCellField
+ type (field1dReal), pointer :: xEdgeField, yEdgeField, zEdgeField
+ type (field1dReal), pointer :: xVertexField, yVertexField, zVertexField
#endif
#endif
type (field1DChar) :: xtime
+
+ type (field1DInteger), pointer :: int1d_ptr
+ type (field2DInteger), pointer :: int2d_ptr
+
+ type (field1DInteger), pointer :: indexToCellID_0Halo
+ type (field1DInteger), pointer :: nEdgesOnCell_0Halo
+ type (field2DInteger), pointer :: cellsOnCell_0Halo
- integer, dimension(:), pointer :: indexToCellID_0Halo
- integer, dimension(:), pointer :: nEdgesOnCell_0Halo
- integer, dimension(:,:), pointer :: cellsOnCell_0Halo
+! integer, dimension(:), pointer :: indexToCellID_0Halo
+! integer, dimension(:), pointer :: nEdgesOnCell_0Halo
+! integer, dimension(:,:), pointer :: cellsOnCell_0Halo
integer, dimension(:), pointer :: nEdgesOnCell_2Halo
@@ -112,6 +122,8 @@
type (graph) :: block_graph_0Halo, block_graph_1Halo, block_graph_2Halo
integer :: ghostEdgeStart, ghostVertexStart
+ integer :: nBlocksLocal, nBlocksMax, iBlock, nCellsInBlock
+
type (MPAS_Time_type) :: startTime
type (MPAS_Time_type) :: sliceTime
type (MPAS_TimeInterval_type) :: timeDiff
@@ -185,6 +197,10 @@
readVertLevelStart = 1
readVertLevelEnd = nVertLevels
nReadVertLevels = nVertLevels
+
+ allocate(readingBlock)
+ readingBlock % blockID = domain % dminfo % my_proc_id
+ readingBlock % localBlockID = 0
!
@@ -193,6 +209,7 @@
!
! Global cell indices
+ allocate(indexToCellIDField)
allocate(indexToCellIDField % ioinfo)
indexToCellIDField % ioinfo % fieldName = 'indexToCellID'
indexToCellIDField % ioinfo % start(1) = readCellStart
@@ -205,10 +222,14 @@
call MPAS_io_inq_var(inputHandle, 'indexToCellID', ierr=ierr)
call MPAS_io_set_var_indices(inputHandle, 'indexToCellID', readIndices, ierr=ierr)
call mpas_io_get_var(inputHandle, 'indexToCellID', indexToCellIDField % array, ierr)
+ indexToCellIDField % dimSizes(1) = nReadCells
+ indexToCellIDField % block => readingBlock
+ nullify(indexToCellIDField % next)
#ifdef HAVE_ZOLTAN
#ifdef _MPI
! Cell x-coordinates (in 3d Cartesian space)
+ allocate(xCellField)
allocate(xCellField % ioinfo)
xCellField % ioinfo % fieldName = 'xCell'
xCellField % ioinfo % start(1) = readCellStart
@@ -217,8 +238,12 @@
call MPAS_io_inq_var(inputHandle, 'xCell', ierr=ierr)
call MPAS_io_set_var_indices(inputHandle, 'xCell', readIndices, ierr=ierr)
call mpas_io_get_var(inputHandle, 'xCell', xCellField % array, ierr)
+ xCellField % dimSizes(1) = nReadCells
+ xCellField % block => readingBlock
+ nullify(xCellField % next)
! Cell y-coordinates (in 3d Cartesian space)
+ allocate(yCellField)
allocate(yCellField % ioinfo)
yCellField % ioinfo % fieldName = 'yCell'
yCellField % ioinfo % start(1) = readCellStart
@@ -227,8 +252,12 @@
call MPAS_io_inq_var(inputHandle, 'yCell', ierr=ierr)
call MPAS_io_set_var_indices(inputHandle, 'yCell', readIndices, ierr=ierr)
call mpas_io_get_var(inputHandle, 'yCell', yCellField % array, ierr)
+ yCellField % dimSizes(1) = nReadCells
+ yCellField % block => readingBlock
+ nullify(yCellField % next)
! Cell z-coordinates (in 3d Cartesian space)
+ allocate(zCellField)
allocate(zCellField % ioinfo)
zCellField % ioinfo % fieldName = 'zCell'
zCellField % ioinfo % start(1) = readCellStart
@@ -237,12 +266,16 @@
call MPAS_io_inq_var(inputHandle, 'zCell', ierr=ierr)
call MPAS_io_set_var_indices(inputHandle, 'zCell', readIndices, ierr=ierr)
call mpas_io_get_var(inputHandle, 'zCell', zCellField % array, ierr)
+ zCellField % dimSizes(1) = nReadCells
+ zCellField % block => readingBlock
+ nullify(zCellField % next)
#endif
#endif
deallocate(readIndices)
! Global edge indices
+ allocate(indexToEdgeIDField)
allocate(indexToEdgeIDField % ioinfo)
indexToEdgeIDField % ioinfo % fieldName = 'indexToEdgeID'
indexToEdgeIDField % ioinfo % start(1) = readEdgeStart
@@ -256,10 +289,14 @@
call MPAS_io_inq_var(inputHandle, 'indexToEdgeID', ierr=ierr)
call MPAS_io_set_var_indices(inputHandle, 'indexToEdgeID', readIndices, ierr=ierr)
call mpas_io_get_var(inputHandle, 'indexToEdgeID', indexToEdgeIDField % array, ierr)
+ indexToEdgeIDField % dimSizes(1) = nReadEdges
+ indexToEdgeIDField % block => readingBlock
+ nullify(indexToEdgeIDField % next)
#ifdef HAVE_ZOLTAN
#ifdef _MPI
! Edge x-coordinates (in 3d Cartesian space)
+ allocate(xEdgeField)
allocate(xEdgeField % ioinfo)
xEdgeField % ioinfo % fieldName = 'xEdge'
xEdgeField % ioinfo % start(1) = readEdgeStart
@@ -268,8 +305,12 @@
call MPAS_io_inq_var(inputHandle, 'xEdge', ierr=ierr)
call MPAS_io_set_var_indices(inputHandle, 'xEdge', readIndices, ierr=ierr)
call mpas_io_get_var(inputHandle, 'xEdge', xEdgeField % array, ierr)
+ xEdgeField % dimSizes(1) = nReadEdges
+ xEdgeField % block => readingBlock
+ nullify(xEdgeField % next)
! Edge y-coordinates (in 3d Cartesian space)
+ allocate(yEdgeField)
allocate(yEdgeField % ioinfo)
yEdgeField % ioinfo % fieldName = 'yEdge'
yEdgeField % ioinfo % start(1) = readEdgeStart
@@ -278,8 +319,12 @@
call MPAS_io_inq_var(inputHandle, 'yEdge', ierr=ierr)
call MPAS_io_set_var_indices(inputHandle, 'yEdge', readIndices, ierr=ierr)
call mpas_io_get_var(inputHandle, 'yEdge', yEdgeField % array, ierr)
+ yEdgeField % dimSizes(1) = nReadEdges
+ yEdgeField % block => readingBlock
+ nullify(yEdgeField % next)
! Edge z-coordinates (in 3d Cartesian space)
+ allocate(zEdgeField)
allocate(zEdgeField % ioinfo)
zEdgeField % ioinfo % fieldName = 'zEdge'
zEdgeField % ioinfo % start(1) = readEdgeStart
@@ -288,12 +333,16 @@
call MPAS_io_inq_var(inputHandle, 'zEdge', ierr=ierr)
call MPAS_io_set_var_indices(inputHandle, 'zEdge', readIndices, ierr=ierr)
call mpas_io_get_var(inputHandle, 'zEdge', zEdgeField % array, ierr)
+ zEdgeField % dimSizes(1) = nReadEdges
+ zEdgeField % block => readingBlock
+ nullify(zEdgeField % next)
#endif
#endif
deallocate(readIndices)
! Global vertex indices
+ allocate(indexToVertexIDField)
allocate(indexToVertexIDField % ioinfo)
indexToVertexIDField % ioinfo % fieldName = 'indexToVertexID'
indexToVertexIDField % ioinfo % start(1) = readVertexStart
@@ -306,10 +355,14 @@
call MPAS_io_inq_var(inputHandle, 'indexToVertexID', ierr=ierr)
call MPAS_io_set_var_indices(inputHandle, 'indexToVertexID', readIndices, ierr=ierr)
call mpas_io_get_var(inputHandle, 'indexToVertexID', indexToVertexIDField % array, ierr)
+ indexToVertexIDField % dimSizes(1) = nReadVertices
+ indexToVertexIDField % block => readingBlock
+ nullify(indexToVertexIDField % next)
#ifdef HAVE_ZOLTAN
#ifdef _MPI
! Vertex x-coordinates (in 3d Cartesian space)
+ allocate(xVertexField)
allocate(xVertexField % ioinfo)
xVertexField % ioinfo % fieldName = 'xVertex'
xVertexField % ioinfo % start(1) = readVertexStart
@@ -318,8 +371,12 @@
call MPAS_io_inq_var(inputHandle, 'xVertex', ierr=ierr)
call MPAS_io_set_var_indices(inputHandle, 'xVertex', readIndices, ierr=ierr)
call mpas_io_get_var(inputHandle, 'xVertex', xVertexField % array, ierr)
+ xVertexField % dimSizes(1) = nReadVertices
+ xVertexField % block => readingBlock
+ nullify(xVertexField % next)
! Vertex y-coordinates (in 3d Cartesian space)
+ allocate(yVertexField)
allocate(yVertexField % ioinfo)
yVertexField % ioinfo % fieldName = 'yVertex'
yVertexField % ioinfo % start(1) = readVertexStart
@@ -328,8 +385,12 @@
call MPAS_io_inq_var(inputHandle, 'yVertex', ierr=ierr)
call MPAS_io_set_var_indices(inputHandle, 'yVertex', readIndices, ierr=ierr)
call mpas_io_get_var(inputHandle, 'yVertex', yVertexField % array, ierr)
+ yVertexField % dimSizes(1) = nReadVertices
+ yVertexField % block => readingBlock
+ nullify(yVertexField % next)
! Vertex z-coordinates (in 3d Cartesian space)
+ allocate(zVertexField)
allocate(zVertexField % ioinfo)
zVertexField % ioinfo % fieldName = 'zVertex'
zVertexField % ioinfo % start(1) = readVertexStart
@@ -338,11 +399,15 @@
call MPAS_io_inq_var(inputHandle, 'zVertex', ierr=ierr)
call MPAS_io_set_var_indices(inputHandle, 'zVertex', readIndices, ierr=ierr)
call mpas_io_get_var(inputHandle, 'zVertex', zVertexField % array, ierr)
+ zVertexField % dimSizes(1) = nReadVertices
+ zVertexField % block => readingBlock
+ nullify(zVertexField % next)
#endif
#endif
deallocate(readIndices)
! Number of cell/edges/vertices adjacent to each cell
+ allocate(nEdgesOnCellField)
allocate(nEdgesOnCellField % ioinfo)
nEdgesOnCellField % ioinfo % fieldName = 'nEdgesOnCell'
nEdgesOnCellField % ioinfo % start(1) = readCellStart
@@ -355,8 +420,12 @@
call MPAS_io_inq_var(inputHandle, 'nEdgesOnCell', ierr=ierr)
call MPAS_io_set_var_indices(inputHandle, 'nEdgesOnCell', readIndices, ierr=ierr)
call mpas_io_get_var(inputHandle, 'nEdgesOnCell', nEdgesOnCellField % array, ierr)
+ nEdgesOnCellField % dimSizes(1) = nReadCells
+ nEdgesOnCellField % block => readingBlock
+ nullify(nEdgesOnCellField % next)
! Global indices of cells adjacent to each cell
+ allocate(cellsOnCellField)
allocate(cellsOnCellField % ioinfo)
cellsOnCellField % ioinfo % fieldName = 'cellsOnCell'
cellsOnCellField % ioinfo % start(1) = 1
@@ -367,8 +436,13 @@
call MPAS_io_inq_var(inputHandle, 'cellsOnCell', ierr=ierr)
call MPAS_io_set_var_indices(inputHandle, 'cellsOnCell', readIndices, ierr=ierr)
call mpas_io_get_var(inputHandle, 'cellsOnCell', cellsOnCellField % array, ierr)
+ cellsOnCellField % dimSizes(1) = maxEdges
+ cellsOnCellField % dimSizes(2) = nReadCells
+ cellsOnCellField % block => readingBlock
+ nullify(cellsOnCellField % next)
! Global indices of edges adjacent to each cell
+ allocate(edgesOnCellField)
allocate(edgesOnCellField % ioinfo)
edgesOnCellField % ioinfo % fieldName = 'edgesOnCell'
edgesOnCellField % ioinfo % start(1) = 1
@@ -379,8 +453,13 @@
call MPAS_io_inq_var(inputHandle, 'edgesOnCell', ierr=ierr)
call MPAS_io_set_var_indices(inputHandle, 'edgesOnCell', readIndices, ierr=ierr)
call mpas_io_get_var(inputHandle, 'edgesOnCell', edgesOnCellField % array, ierr)
+ edgesOnCellField % dimSizes(1) = maxEdges
+ edgesOnCellField % dimSizes(2) = nReadCells
+ edgesOnCellField % block => readingBlock
+ nullify(edgesOnCellField % next)
! Global indices of vertices adjacent to each cell
+ allocate(verticesOnCellField)
allocate(verticesOnCellField % ioinfo)
verticesOnCellField % ioinfo % fieldName = 'verticesOnCell'
verticesOnCellField % ioinfo % start(1) = 1
@@ -392,10 +471,17 @@
call MPAS_io_set_var_indices(inputHandle, 'verticesOnCell', readIndices, ierr=ierr)
call mpas_io_get_var(inputHandle, 'verticesOnCell', verticesOnCellField % array, ierr)
deallocate(readIndices)
+ verticesOnCellField % dimSizes(1) = maxEdges
+ verticesOnCellField % dimSizes(2) = nReadCells
+ verticesOnCellField % block => readingBlock
+ nullify(verticesOnCellField % next)
+
+
! Global indices of cells adjacent to each edge
! used for determining which edges are owned by a block, where
! iEdge is owned iff cellsOnEdge(1,iEdge) is an owned cell
+ allocate(cellsOnEdgeField)
allocate(cellsOnEdgeField % ioinfo)
cellsOnEdgeField % ioinfo % fieldName = 'cellsOnEdge'
cellsOnEdgeField % ioinfo % start(1) = 1
@@ -411,10 +497,16 @@
call MPAS_io_set_var_indices(inputHandle, 'cellsOnEdge', readIndices, ierr=ierr)
call mpas_io_get_var(inputHandle, 'cellsOnEdge', cellsOnEdgeField % array, ierr)
deallocate(readIndices)
+ cellsOnEdgeField % dimSizes(1) = 2
+ cellsOnEdgeField % dimSizes(2) = nReadEdges
+ cellsOnEdgeField % block => readingBlock
+ nullify(cellsOnEdgeField % next)
+
! Global indices of cells adjacent to each vertex
! used for determining which vertices are owned by a block, where
! iVtx is owned iff cellsOnVertex(1,iVtx) is an owned cell
+ allocate(cellsOnVertexField)
allocate(cellsOnVertexField % ioinfo)
cellsOnVertexField % ioinfo % fieldName = 'cellsOnVertex'
cellsOnVertexField % ioinfo % start(1) = 1
@@ -429,6 +521,10 @@
call MPAS_io_inq_var(inputHandle, 'cellsOnVertex', ierr=ierr)
call MPAS_io_set_var_indices(inputHandle, 'cellsOnVertex', readIndices, ierr=ierr)
call mpas_io_get_var(inputHandle, 'cellsOnVertex', cellsOnVertexField % array, ierr)
+ cellsOnVertexField % dimSizes(1) = vertexDegree
+ cellsOnVertexField % dimSizes(2) = nReadVertices
+ cellsOnVertexField % block => readingBlock
+ nullify(cellsOnVertexField % next)
deallocate(readIndices)
@@ -458,758 +554,864 @@
! Determine which cells are owned by this process
+ write(6,*) 'Getting decomp'
call mpas_block_decomp_cells_for_proc(domain % dminfo, partial_global_graph_info, local_cell_list, block_id, block_start, block_count)
deallocate(partial_global_graph_info % vertexID)
deallocate(partial_global_graph_info % nAdjacent)
deallocate(partial_global_graph_info % adjacencyList)
-
-
- allocate(indexToCellID_0Halo(size(local_cell_list)))
- allocate(nEdgesOnCell_0Halo(size(local_cell_list)))
- allocate(cellsOnCell_0Halo(maxEdges, size(local_cell_list)))
-
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- allocate(xCell(size(local_cell_list)))
- allocate(yCell(size(local_cell_list)))
- allocate(zCell(size(local_cell_list)))
-#endif
-#endif
-
- !
- ! Now that each process has a list of cells that it owns, exchange cell connectivity
- ! information between the processes that read info for a cell and those that own that cell
- !
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(indexToCellIDField % array), size(local_cell_list), &
- indexToCellIDField % array, local_cell_list, &
- sendCellList, recvCellList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, indexToCellIDField % array, indexToCellID_0Halo, &
- size(indexToCellIDField % array), size(local_cell_list), &
- sendCellList, recvCellList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, nEdgesOnCellField % array, nEdgesOnCell_0Halo, &
- size(indexToCellIDField % array), size(local_cell_list), &
- sendCellList, recvCellList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, cellsOnCellField % array, cellsOnCell_0Halo, &
- size(cellsOnCellField % array, 1), size(indexToCellIDField % array), size(local_cell_list), &
- sendCellList, recvCellList)
-
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- call mpas_dmpar_alltoall_field(domain % dminfo, xCellField % array, xCell, &
- size(xCellField % array), size(local_cell_list), &
- sendCellList, recvCellList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, yCellField % array, yCell, &
- size(yCellField % array), size(local_cell_list), &
- sendCellList, recvCellList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, zCellField % array, zCell, &
- size(zCellField % array), size(local_cell_list), &
- sendCellList, recvCellList)
-#endif
-#endif
+ write(6,*) 'Setting up blocks'
+ nBlocksLocal = size(block_id)
+ call mpas_dmpar_max_int(domain % dminfo, nBlocksLocal, nBlocksMax)
- deallocate(sendCellList % list)
- deallocate(sendCellList)
- deallocate(recvCellList % list)
- deallocate(recvCellList)
+ allocate(indexToCellID_0Halo)
+ allocate(domain % blocklist)
+ block_ptr => domain % blocklist
+ int1d_ptr => indexToCellID_0Halo
+ do iBlock = 1, nBlocksLocal
+ nCellsInBlock = block_count(iBlock)
+ block_ptr % blockID = block_id(iBlock)
+ block_ptr % localBlockID = iBlock-1
- !
- ! Build a graph of cell connectivity based on cells owned by this process
- !
- block_graph_0Halo % nVerticesTotal = size(local_cell_list)
- block_graph_0Halo % nVertices = size(local_cell_list)
- block_graph_0Halo % maxDegree = maxEdges
- block_graph_0Halo % ghostStart = size(local_cell_list) + 1
- allocate(block_graph_0Halo % vertexID(size(local_cell_list)))
- allocate(block_graph_0Halo % nAdjacent(size(local_cell_list)))
- allocate(block_graph_0Halo % adjacencyList(maxEdges, size(local_cell_list)))
-
- block_graph_0Halo % vertexID(:) = indexToCellID_0Halo(:)
- block_graph_0Halo % nAdjacent(:) = nEdgesOnCell_0Halo(:)
- block_graph_0Halo % adjacencyList(:,:) = cellsOnCell_0Halo(:,:)
-
- ! Get back a graph describing the owned cells plus the cells in the 1-halo
- call mpas_block_decomp_add_halo(domain % dminfo, block_graph_0Halo, block_graph_1Halo)
-
-
- !
- ! Work out exchange lists for 1-halo and exchange cell information for 1-halo
- !
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- block_graph_0Halo % nVertices, block_graph_1Halo % nVerticesTotal, &
- block_graph_0Halo % vertexID, block_graph_1Halo % vertexID, &
- send1Halo, recv1Halo)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % vertexID, block_graph_1Halo % vertexID, &
- block_graph_0Halo % nVertices, block_graph_1Halo % nVerticesTotal, &
- send1Halo, recv1Halo)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % nAdjacent, block_graph_1Halo % nAdjacent, &
- block_graph_0Halo % nVertices, block_graph_1Halo % nVerticesTotal, &
- send1Halo, recv1Halo)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % adjacencyList, block_graph_1Halo % adjacencyList, &
- block_graph_0Halo % maxDegree, block_graph_0Halo % nVertices, block_graph_1Halo % nVerticesTotal, &
- send1Halo, recv1Halo)
-
-
- !
- ! Work out exchange lists for 2-halo and exchange cell information for 2-halo
- !
- block_graph_1Halo % nVertices = block_graph_1Halo % nVerticesTotal
- block_graph_1Halo % ghostStart = block_graph_1Halo % nVerticesTotal + 1
-
- ! Get back a graph describing the owned and 1-halo cells plus the cells in the 2-halo
- call mpas_block_decomp_add_halo(domain % dminfo, block_graph_1Halo, block_graph_2Halo)
-
- block_graph_2Halo % nVertices = block_graph_0Halo % nVertices
- block_graph_2Halo % ghostStart = block_graph_2Halo % nVertices + 1
+ int1d_ptr % block => block_ptr
+ int1d_ptr % dimSizes(1) = nCellsInBlock
+ write(6,*) 'indexToCellID_0Halo dimSizes(1) = ', int1d_ptr % dimSizes(1)
+ allocate(int1d_ptr % array(nCellsInBlock))
+ do i = 1, nCellsInBlock
+ int1d_ptr % array(i) = local_cell_list(block_start(iBlock) + i)
+ end do
- nOwnCells = block_graph_2Halo % nVertices
+ if(iBlock < nBlocksLocal) then
+ allocate(block_ptr % next)
+ allocate(int1d_ptr % next)
+ block_ptr % next % prev => block_ptr
+ block_ptr => block_ptr % next
+ int1d_ptr => int1d_ptr % next
+ else
+ nullify(block_ptr % next)
+ nullify(int1d_ptr % next)
+ end if
+ end do
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- !! For now, only use Zoltan with MPI
- !! Zoltan initialization
- call mpas_zoltan_start()
+ write(6,*) 'Setting up other arrays'
- !! Zoltan hook for cells
- call mpas_zoltan_order_loc_hsfc_cells(block_graph_2Halo%nVertices,block_graph_2Halo%VertexID,3,xCell,yCell,zCell)
-#endif
-#endif
+ allocate(nEdgesOnCell_0Halo)
+ allocate(cellsOnCell_0Halo)
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- block_graph_0Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
- block_graph_0Halo % vertexID, block_graph_2Halo % vertexID, &
- send2Halo, recv2Halo)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % vertexID, block_graph_2Halo % vertexID, &
- block_graph_0Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
- send2Halo, recv2Halo)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % nAdjacent, block_graph_2Halo % nAdjacent, &
- block_graph_0Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
- send2Halo, recv2Halo)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % adjacencyList, block_graph_2Halo % adjacencyList, &
- block_graph_0Halo % maxDegree, block_graph_0Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
- send2Halo, recv2Halo)
+ block_ptr => domain % blocklist
+ int1d_ptr => nEdgesOnCell_0Halo
+ int2d_ptr => cellsOnCell_0Halo
+ do while(associated(block_ptr))
+ nullify(int1d_ptr % next)
+ nullify(int2d_ptr % next)
-
- !
- ! Knowing which cells are in block and the 2-halo, we can exchange lists of which edges are
- ! on each cell and which vertices are on each cell from the processes that read these
- ! fields for each cell to the processes that own the cells
- !
- allocate(nEdgesOnCell_2Halo(block_graph_2Halo % nVerticesTotal))
- allocate(edgesOnCell_2Halo(maxEdges, block_graph_2Halo % nVerticesTotal))
- allocate(verticesOnCell_2Halo(maxEdges, block_graph_2Halo % nVerticesTotal))
-
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(indexToCellIDField % array), block_graph_2Halo % nVerticesTotal, &
- indexToCellIDField % array, block_graph_2Halo % vertexID, &
- sendCellList, recvCellList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, nEdgesOnCellField % array, nEdgesOnCell_2Halo, &
- size(indexToCellIDField % array), size(local_cell_list), &
- sendCellList, recvCellList)
+ nCellsInBlock = block_count(block_ptr % localBlockID + 1)
- call mpas_dmpar_alltoall_field(domain % dminfo, edgesOnCellField % array, edgesOnCell_2Halo, &
- maxEdges, nReadCells, block_graph_2Halo % nVerticesTotal, &
- sendCellList, recvCellList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, verticesOnCellField % array, verticesOnCell_2Halo, &
- maxEdges, nReadCells, block_graph_2Halo % nVerticesTotal, &
- sendCellList, recvCellList)
+ allocate(int1d_ptr % array(nCellsInBlock))
+ allocate(int2d_ptr % array(maxEdges, nCellsInBlock))
-
- !
- ! Get a list of which edges and vertices are adjacent to cells (including halo cells) in block
- !
- call mpas_block_decomp_all_edges_in_block(maxEdges, block_graph_2Halo % nVerticesTotal, block_graph_2Halo % nAdjacent, &
- edgesOnCell_2Halo, nlocal_edges, local_edge_list)
- call mpas_block_decomp_all_edges_in_block(maxEdges, block_graph_2Halo % nVerticesTotal, block_graph_2Halo % nAdjacent, &
- verticesOnCell_2Halo, nlocal_vertices, local_vertex_list)
-
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(indexToEdgeIDField % array), nlocal_edges, &
- indexToEdgeIDField % array, local_edge_list, &
- sendEdgeList, recvEdgeList)
-
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(indexToVertexIDField % array), nlocal_vertices, &
- indexToVertexIDField % array, local_vertex_list, &
- sendVertexList, recvVertexList)
-
-
-
- !
- ! Work out which edges and vertices are owned by this process, and which are ghost
- !
- allocate(cellsOnEdge_2Halo(2,nlocal_edges))
- allocate(cellsOnVertex_2Halo(vertexDegree,nlocal_vertices))
-
- call mpas_dmpar_alltoall_field(domain % dminfo, cellsOnEdgeField % array, cellsOnEdge_2Halo, &
- 2, size(cellsOnEdgeField % array, 2), nlocal_edges, &
- sendEdgeList, recvEdgeList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, cellsOnVertexField % array, cellsOnVertex_2Halo, &
- vertexDegree, size(cellsOnVertexField % array, 2), nlocal_vertices, &
- sendVertexList, recvVertexList)
-
-
- call mpas_block_decomp_partitioned_edge_list(nOwnCells, &
- block_graph_2Halo % vertexID(1:nOwnCells), &
- 2, nlocal_edges, cellsOnEdge_2Halo, local_edge_list, ghostEdgeStart)
+ int1d_ptr % block => block_ptr
+ int2d_ptr % block => block_ptr
+ int1d_ptr % dimSizes(1) = nCellsInBlock
+ int2d_ptr % dimSizes(1) = maxEdges
+ int2d_ptr % dimSizes(2) = nCellsInBlock
- call mpas_block_decomp_partitioned_edge_list(nOwnCells, &
- block_graph_2Halo % vertexID(1:nOwnCells), &
- vertexDegree, nlocal_vertices, cellsOnVertex_2Halo, local_vertex_list, ghostVertexStart)
-
- !------- set owned and halo cell indices -------!
-
- nCellsCumulative(1) = nOwnCells
- nCellsCumulative(2) = block_graph_1Halo % nVerticesTotal
- nCellsCumulative(3) = block_graph_2Halo % nVerticesTotal
-
- !------- determin the perimeter and owned edges of own cells and halos -------!
-
- nOwnEdges = ghostEdgeStart-1
- nOwnVertices = ghostVertexStart-1
-
- ! skip the own edges found at the beginning of local_edge_list
- call mpas_hash_init(edgeHash)
- do i=1,nOwnEdges
- call mpas_hash_insert(edgeHash, local_edge_list(i))
+ block_ptr => block_ptr % next
+ if(associated(block_ptr)) then
+ allocate(int1d_ptr % next)
+ allocate(int2d_ptr % next)
+ int1d_ptr => int1d_ptr % next
+ int2d_ptr => int2d_ptr % next
+ end if
end do
- ! skip the own vertices found at the beginning of local_vertex_list
- call mpas_hash_init(vertexHash)
- do i=1,nOwnVertices
- call mpas_hash_insert(vertexHash, local_vertex_list(i))
- end do
+ write(6,*) 'Getting exchange lists'
+ call mpas_get_exchange_lists(domain % dminfo, indexToCellIDField, .false., indexToCellID_0Halo, .true.)
- cellCount = 1 !tracks the index of the local cell array
- edgeCount = nOwnEdges !tracks where to insert the next local edge
- vertexCount = nOwnVertices !tracks where to insert the next local vertex
+ write(6,*) 'Linking exchange lists 1'
+ call mpas_link_exchange_list_field(indexToCellIDField, indexToCellID_0Halo, nEdgesOnCellField, nEdgesOnCell_0Halo)
+ write(6,*) 'Linking exchange lists 2'
+ call mpas_link_exchange_list_field(indexToCellIDField, indexToCellID_0Halo, cellsOnCellField, cellsOnCell_0Halo)
- nEdgesCumulative(1) = nOwnEdges
- nVerticesCumulative(1) = nOwnVertices
+
+ write(6,*) 'All to all 1'
+ call mpas_dmpar_alltoall_field(domain % dminfo, indexToCellIDField, indexToCellID_0Halo)
+ write(6,*) 'All to all 2'
+ call mpas_dmpar_alltoall_field(domain % dminfo, nEdgesOnCellField, nEdgesOnCell_0Halo)
+ write(6,*) 'All to all 3'
+ call mpas_dmpar_alltoall_field(domain % dminfo, cellsOnCellField, cellsOnCell_0Halo)
- !Order the local_edge_list and local_vertex_list accordingly and set the bounds of each perimeter ----
- do i = 1, nHalos + 1 ! for the own cells and each halo...
- do j = cellCount, nCellsCumulative(i)
-
- ! the number of edges on a cell is same to the number of vertices, and therefore
- ! nEdgesOnCell_2Halo(j) will be the correct upper bound for both both edges and vertices on cell
- do k = 1, nEdgesOnCell_2Halo(j)
- iEdge = edgesOnCell_2Halo(k,j)
- if (.not. mpas_hash_search(edgeHash, iEdge)) then
- edgeCount = edgeCount + 1
- local_edge_list(edgeCount) = iEdge
- call mpas_hash_insert(edgeHash, iEdge)
- end if
-
- iVertex = verticesOnCell_2Halo(k,j)
- if (.not. mpas_hash_search(vertexHash, iVertex)) then
- vertexCount = vertexCount + 1
- local_vertex_list(vertexCount) = iVertex
- call mpas_hash_insert(vertexHash, iVertex)
- end if
- end do
-
- end do
-
- cellCount = nCellsCumulative(i) + 1
- nEdgesCumulative(i+1) = edgeCount
- nVerticesCumulative(i+1) = vertexCount
+ write(6,*) 'coc field'
+ int2d_ptr => cellsOnCellField
+ do while(associated(int2d_ptr))
+ do i = 1, int2d_ptr % dimSizes(2)
+ write(6,*) int2d_ptr % array(:,i)
+ end do
+ int2d_ptr => int2d_ptr % next
end do
- do i = 1, nHalos
- nCellsHalo(i) = nCellsCumulative(i+1) - nCellsCumulative(i)
+ write(6,*) 'coc 0halo'
+ int2d_ptr => cellsOnCell_0Halo
+ do while(associated(int2d_ptr))
+ do i = 1, int2d_ptr % dimSizes(2)
+ write(6,*) int2d_ptr % array(:,i)
+ end do
+ int2d_ptr => int2d_ptr % next
end do
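
! Illustrative sketch, not part of this revision: with multiple blocks per
! task, a field is now a linked list with one link per block (built in the
! loop above via the % next pointers), so any per-task operation walks the
! chain -- for example, counting the total number of cells covered by a 2-d
! integer field (helper name illustrative):

      integer function total_cells_sketch(field)
         use mpas_grid_types
         implicit none
         type (field2dInteger), pointer :: field   ! head of the per-block chain
         type (field2dInteger), pointer :: cur

         total_cells_sketch = 0
         cur => field
         do while (associated(cur))
            total_cells_sketch = total_cells_sketch + cur % dimSizes(2)  ! cells in this block
            cur => cur % next
         end do
      end function total_cells_sketch
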
- do i = 1, nHalos + 1
- nEdgesHalo(i) = nEdgesCumulative(i+1) - nEdgesCumulative(i)
- end do
+ write(6,*) 'Stopping'
- do i = 1, nHalos + 1
- nVerticesHalo(i) = nVerticesCumulative(i+1) - nVerticesCumulative(i)
- end do
-
- call mpas_hash_destroy(edgeHash)
- call mpas_hash_destroy(vertexHash)
-
-
- ! At this point, local_edge_list(1:nOwnEdges) contains all of the owned edges for this block
- ! and local_edge_list(ghostEdgeStart:nlocal_edges) contains all of the ghost edges
-
- ! At this point, local_vertex_list(1:ghostVertexStart-1) contains all of the owned vertices for this block
- ! and local_vertex_list(ghostVertexStart:nlocal_vertices) contains all of the ghost vertices
-
- ! Also, at this point, block_graph_2Halo % vertexID(1:block_graph_2Halo%nVertices) contains all of the owned
- ! cells for this block, and block_graph_2Halo % vertexID(block_graph_2Halo%nVertices+1:block_graph_2Halo%nVerticesTotal)
- ! contains all of the ghost cells
-
-
- deallocate(sendEdgeList % list)
- deallocate(sendEdgeList)
- deallocate(recvEdgeList % list)
- deallocate(recvEdgeList)
+ stop
- deallocate(sendVertexList % list)
- deallocate(sendVertexList)
- deallocate(recvVertexList % list)
- deallocate(recvVertexList)
+! allocate(indexToCellID_0Halo(size(local_cell_list)))
+! allocate(nEdgesOnCell_0Halo(size(local_cell_list)))
+! allocate(cellsOnCell_0Halo(maxEdges, size(local_cell_list)))
#ifdef HAVE_ZOLTAN
#ifdef _MPI
- allocate(xEdge(nlocal_edges))
- allocate(yEdge(nlocal_edges))
- allocate(zEdge(nlocal_edges))
- allocate(xVertex(nlocal_vertices))
- allocate(yVertex(nlocal_vertices))
- allocate(zVertex(nlocal_vertices))
+ allocate(xCell(size(local_cell_list)))
+ allocate(yCell(size(local_cell_list)))
+ allocate(zCell(size(local_cell_list)))
#endif
#endif
-
- !
- ! Knowing which edges/vertices are owned by this block and which are actually read
- ! from the input or restart file, we can build exchange lists to perform
- ! all-to-all field exchanges from the process that reads a field to the processes that
- ! need them
- !
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(indexToEdgeIDField % array), nlocal_edges, &
- indexToEdgeIDField % array, local_edge_list, &
- sendEdgeList, recvEdgeList)
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(indexToVertexIDField % array), nlocal_vertices, &
- indexToVertexIDField % array, local_vertex_list, &
- sendVertexList, recvVertexList)
-
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- call mpas_dmpar_alltoall_field(domain % dminfo, xEdgeField % array, xEdge, &
- size(xEdgeField % array), nlocal_edges, &
- sendEdgeList, recvEdgeList)
- call mpas_dmpar_alltoall_field(domain % dminfo, yEdgeField % array, yEdge, &
- size(yEdgeField % array), nlocal_edges, &
- sendEdgeList, recvEdgeList)
- call mpas_dmpar_alltoall_field(domain % dminfo, zEdgeField % array, zEdge, &
- size(zEdgeField % array), nlocal_edges, &
- sendEdgeList, recvEdgeList)
-
- call mpas_dmpar_alltoall_field(domain % dminfo, xVertexField % array, xVertex, &
- size(xVertexField % array), nlocal_vertices, &
- sendVertexList, recvVertexList)
- call mpas_dmpar_alltoall_field(domain % dminfo, yVertexField % array, yVertex, &
- size(yVertexField % array), nlocal_vertices, &
- sendVertexList, recvVertexList)
- call mpas_dmpar_alltoall_field(domain % dminfo, zVertexField % array, zVertex, &
- size(zVertexField % array), nlocal_vertices, &
- sendVertexList, recvVertexList)
- !!!!!!!!!!!!!!!!!!
- !! Reorder edges
- !!!!!!!!!!!!!!!!!!
- call mpas_zoltan_order_loc_hsfc_edges(nOwnEdges,local_edge_list,3,xEdge,yEdge,zEdge)
- !!!!!!!!!!!!!!!!!!
-
- !!!!!!!!!!!!!!!!!!
- !! Reorder vertices
- !!!!!!!!!!!!!!!!!!
- call mpas_zoltan_order_loc_hsfc_verts(nOwnVertices,local_vertex_list,3,xVertex,yVertex,zVertex)
- !!!!!!!!!!!!!!!!!!
-
- deallocate(sendEdgeList % list)
- deallocate(sendEdgeList)
- deallocate(recvEdgeList % list)
- deallocate(recvEdgeList)
-
- deallocate(sendVertexList % list)
- deallocate(sendVertexList)
- deallocate(recvVertexList % list)
- deallocate(recvVertexList)
-
!
- ! Knowing which edges/vertices are owned by this block and which are actually read
- ! from the input or restart file, we can build exchange lists to perform
- ! all-to-all field exchanges from the process that reads a field to the processes that
- ! need them
- !
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(indexToEdgeIDField % array), nlocal_edges, &
- indexToEdgeIDField % array, local_edge_list, &
- sendEdgeList, recvEdgeList)
-
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(indexToVertexIDField % array), nlocal_vertices, &
- indexToVertexIDField % array, local_vertex_list, &
- sendVertexList, recvVertexList)
+! ! Now that each process has a list of cells that it owns, exchange cell connectivity
+! ! information between the processes that read info for a cell and those that own that cell
+! !
+! call mpas_dmpar_get_owner_list(domain % dminfo, &
+! size(indexToCellIDField % array), size(local_cell_list), &
+! indexToCellIDField % array, local_cell_list, &
+! sendCellList, recvCellList)
+!
+! call mpas_dmpar_alltoall_field(domain % dminfo, indexToCellIDField % array, indexToCellID_0Halo, &
+! size(indexToCellIDField % array), size(local_cell_list), &
+! sendCellList, recvCellList)
+!
+! call mpas_dmpar_alltoall_field(domain % dminfo, nEdgesOnCellField % array, nEdgesOnCell_0Halo, &
+! size(indexToCellIDField % array), size(local_cell_list), &
+! sendCellList, recvCellList)
+!
+! call mpas_dmpar_alltoall_field(domain % dminfo, cellsOnCellField % array, cellsOnCell_0Halo, &
+! size(cellsOnCellField % array, 1), size(indexToCellIDField % array), size(local_cell_list), &
+! sendCellList, recvCellList)
+!
+!#ifdef HAVE_ZOLTAN
+!#ifdef _MPI
+! call mpas_dmpar_alltoall_field(domain % dminfo, xCellField % array, xCell, &
+! size(xCellField % array), size(local_cell_list), &
+! sendCellList, recvCellList)
+!
+! call mpas_dmpar_alltoall_field(domain % dminfo, yCellField % array, yCell, &
+! size(yCellField % array), size(local_cell_list), &
+! sendCellList, recvCellList)
+!
+! call mpas_dmpar_alltoall_field(domain % dminfo, zCellField % array, zCell, &
+! size(zCellField % array), size(local_cell_list), &
+! sendCellList, recvCellList)
+!#endif
+!#endif
+!
+!
+! deallocate(sendCellList % list)
+! deallocate(sendCellList)
+! deallocate(recvCellList % list)
+! deallocate(recvCellList)
+!
+!
+!
+! !
+! ! Build a graph of cell connectivity based on cells owned by this process
+! !
+! block_graph_0Halo % nVerticesTotal = size(local_cell_list)
+! block_graph_0Halo % nVertices = size(local_cell_list)
+! block_graph_0Halo % maxDegree = maxEdges
+! block_graph_0Halo % ghostStart = size(local_cell_list) + 1
+! allocate(block_graph_0Halo % vertexID(size(local_cell_list)))
+! allocate(block_graph_0Halo % nAdjacent(size(local_cell_list)))
+! allocate(block_graph_0Halo % adjacencyList(maxEdges, size(local_cell_list)))
+!
+! block_graph_0Halo % vertexID(:) = indexToCellID_0Halo(:)
+! block_graph_0Halo % nAdjacent(:) = nEdgesOnCell_0Halo(:)
+! block_graph_0Halo % adjacencyList(:,:) = cellsOnCell_0Halo(:,:)
+!
+! ! Get back a graph describing the owned cells plus the cells in the 1-halo
+! call mpas_block_decomp_add_halo(domain % dminfo, block_graph_0Halo, block_graph_1Halo)
+!
+!
+! !
+! ! Work out exchange lists for 1-halo and exchange cell information for 1-halo
+! !
+! call mpas_dmpar_get_owner_list(domain % dminfo, &
+! block_graph_0Halo % nVertices, block_graph_1Halo % nVerticesTotal, &
+! block_graph_0Halo % vertexID, block_graph_1Halo % vertexID, &
+! send1Halo, recv1Halo)
+!
+! call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % vertexID, block_graph_1Halo % vertexID, &
+! block_graph_0Halo % nVertices, block_graph_1Halo % nVerticesTotal, &
+! send1Halo, recv1Halo)
+!
+! call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % nAdjacent, block_graph_1Halo % nAdjacent, &
+! block_graph_0Halo % nVertices, block_graph_1Halo % nVerticesTotal, &
+! send1Halo, recv1Halo)
+!
+! call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % adjacencyList, block_graph_1Halo % adjacencyList, &
+! block_graph_0Halo % maxDegree, block_graph_0Halo % nVertices, block_graph_1Halo % nVerticesTotal, &
+! send1Halo, recv1Halo)
+!
+!
+! !
+! ! Work out exchange lists for 2-halo and exchange cell information for 2-halo
+! !
+! block_graph_1Halo % nVertices = block_graph_1Halo % nVerticesTotal
+! block_graph_1Halo % ghostStart = block_graph_1Halo % nVerticesTotal + 1
+!
+! ! Get back a graph describing the owned and 1-halo cells plus the cells in the 2-halo
+! call mpas_block_decomp_add_halo(domain % dminfo, block_graph_1Halo, block_graph_2Halo)
+!
+! block_graph_2Halo % nVertices = block_graph_0Halo % nVertices
+! block_graph_2Halo % ghostStart = block_graph_2Halo % nVertices + 1
+!
+! nOwnCells = block_graph_2Halo % nVertices
+!
+!#ifdef HAVE_ZOLTAN
+!#ifdef _MPI
+! !! For now, only use Zoltan with MPI
+! !! Zoltan initialization
+! call mpas_zoltan_start()
+!
+! !! Zoltan hook for cells
+! call mpas_zoltan_order_loc_hsfc_cells(block_graph_2Halo%nVertices,block_graph_2Halo%VertexID,3,xCell,yCell,zCell)
+!#endif
+!#endif
+!
+! call mpas_dmpar_get_owner_list(domain % dminfo, &
+! block_graph_0Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
+! block_graph_0Halo % vertexID, block_graph_2Halo % vertexID, &
+! send2Halo, recv2Halo)
+!
+! call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % vertexID, block_graph_2Halo % vertexID, &
+! block_graph_0Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
+! send2Halo, recv2Halo)
+!
+! call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % nAdjacent, block_graph_2Halo % nAdjacent, &
+! block_graph_0Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
+! send2Halo, recv2Halo)
+!
+! call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % adjacencyList, block_graph_2Halo % adjacencyList, &
+! block_graph_0Halo % maxDegree, block_graph_0Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
+! send2Halo, recv2Halo)
+!
+!
+!
+! !
+! ! Knowing which cells are in block and the 2-halo, we can exchange lists of which edges are
+! ! on each cell and which vertices are on each cell from the processes that read these
+! ! fields for each cell to the processes that own the cells
+! !
+! allocate(nEdgesOnCell_2Halo(block_graph_2Halo % nVerticesTotal))
+! allocate(edgesOnCell_2Halo(maxEdges, block_graph_2Halo % nVerticesTotal))
+! allocate(verticesOnCell_2Halo(maxEdges, block_graph_2Halo % nVerticesTotal))
+!
+! call mpas_dmpar_get_owner_list(domain % dminfo, &
+! size(indexToCellIDField % array), block_graph_2Halo % nVerticesTotal, &
+! indexToCellIDField % array, block_graph_2Halo % vertexID, &
+! sendCellList, recvCellList)
+!
+! call mpas_dmpar_alltoall_field(domain % dminfo, nEdgesOnCellField % array, nEdgesOnCell_2Halo, &
+! size(indexToCellIDField % array), size(local_cell_list), &
+! sendCellList, recvCellList)
+!
+! call mpas_dmpar_alltoall_field(domain % dminfo, edgesOnCellField % array, edgesOnCell_2Halo, &
+! maxEdges, nReadCells, block_graph_2Halo % nVerticesTotal, &
+! sendCellList, recvCellList)
+!
+! call mpas_dmpar_alltoall_field(domain % dminfo, verticesOnCellField % array, verticesOnCell_2Halo, &
+! maxEdges, nReadCells, block_graph_2Halo % nVerticesTotal, &
+! sendCellList, recvCellList)
+!
+!
+! !
+! ! Get a list of which edges and vertices are adjacent to cells (including halo cells) in block
+! !
+! call mpas_block_decomp_all_edges_in_block(maxEdges, block_graph_2Halo % nVerticesTotal, block_graph_2Halo % nAdjacent, &
+! edgesOnCell_2Halo, nlocal_edges, local_edge_list)
+! call mpas_block_decomp_all_edges_in_block(maxEdges, block_graph_2Halo % nVerticesTotal, block_graph_2Halo % nAdjacent, &
+! verticesOnCell_2Halo, nlocal_vertices, local_vertex_list)
+!
+! call mpas_dmpar_get_owner_list(domain % dminfo, &
+! size(indexToEdgeIDField % array), nlocal_edges, &
+! indexToEdgeIDField % array, local_edge_list, &
+! sendEdgeList, recvEdgeList)
+!
+! call mpas_dmpar_get_owner_list(domain % dminfo, &
+! size(indexToVertexIDField % array), nlocal_vertices, &
+! indexToVertexIDField % array, local_vertex_list, &
+! sendVertexList, recvVertexList)
+!
+!
+!
+! !
+! ! Work out which edges and vertices are owned by this process, and which are ghost
+! !
+! allocate(cellsOnEdge_2Halo(2,nlocal_edges))
+! allocate(cellsOnVertex_2Halo(vertexDegree,nlocal_vertices))
+!
+! call mpas_dmpar_alltoall_field(domain % dminfo, cellsOnEdgeField % array, cellsOnEdge_2Halo, &
+! 2, size(cellsOnEdgeField % array, 2), nlocal_edges, &
+! sendEdgeList, recvEdgeList)
+!
+! call mpas_dmpar_alltoall_field(domain % dminfo, cellsOnVertexField % array, cellsOnVertex_2Halo, &
+! vertexDegree, size(cellsOnVertexField % array, 2), nlocal_vertices, &
+! sendVertexList, recvVertexList)
+!
+!
+! call mpas_block_decomp_partitioned_edge_list(nOwnCells, &
+! block_graph_2Halo % vertexID(1:nOwnCells), &
+! 2, nlocal_edges, cellsOnEdge_2Halo, local_edge_list, ghostEdgeStart)
+!
+! call mpas_block_decomp_partitioned_edge_list(nOwnCells, &
+! block_graph_2Halo % vertexID(1:nOwnCells), &
+! vertexDegree, nlocal_vertices, cellsOnVertex_2Halo, local_vertex_list, ghostVertexStart)
+!
+! !------- set owned and halo cell indices -------!
+!
+! nCellsCumulative(1) = nOwnCells
+! nCellsCumulative(2) = block_graph_1Halo % nVerticesTotal
+! nCellsCumulative(3) = block_graph_2Halo % nVerticesTotal
+!
+! !------- determine the perimeter and owned edges of own cells and halos -------!
+!
+! nOwnEdges = ghostEdgeStart-1
+! nOwnVertices = ghostVertexStart-1
+!
+! ! skip the own edges found at the beginning of local_edge_list
+! call mpas_hash_init(edgeHash)
+! do i=1,nOwnEdges
+! call mpas_hash_insert(edgeHash, local_edge_list(i))
+! end do
+!
+! ! skip the own vertices found at the beginning of local_vertex_list
+! call mpas_hash_init(vertexHash)
+! do i=1,nOwnVertices
+! call mpas_hash_insert(vertexHash, local_vertex_list(i))
+! end do
+!
+! cellCount = 1 !tracks the index of the local cell array
+! edgeCount = nOwnEdges !tracks where to insert the next local edge
+! vertexCount = nOwnVertices !tracks where to insert the next local vertex
+!
+! nEdgesCumulative(1) = nOwnEdges
+! nVerticesCumulative(1) = nOwnVertices
+!
+! !Order the local_edge_list and local_vertex_list accordingly and set the bounds of each perimeter ----
+! do i = 1, nHalos + 1 ! for the own cells and each halo...
+! do j = cellCount, nCellsCumulative(i)
+!
+! ! the number of edges on a cell is the same as the number of vertices, and therefore
+! ! nEdgesOnCell_2Halo(j) will be the correct upper bound for both edges and vertices on a cell
+! do k = 1, nEdgesOnCell_2Halo(j)
+! iEdge = edgesOnCell_2Halo(k,j)
+! if (.not. mpas_hash_search(edgeHash, iEdge)) then
+! edgeCount = edgeCount + 1
+! local_edge_list(edgeCount) = iEdge
+! call mpas_hash_insert(edgeHash, iEdge)
+! end if
+!
+! iVertex = verticesOnCell_2Halo(k,j)
+! if (.not. mpas_hash_search(vertexHash, iVertex)) then
+! vertexCount = vertexCount + 1
+! local_vertex_list(vertexCount) = iVertex
+! call mpas_hash_insert(vertexHash, iVertex)
+! end if
+! end do
+!
+! end do
+!
+! cellCount = nCellsCumulative(i) + 1
+! nEdgesCumulative(i+1) = edgeCount
+! nVerticesCumulative(i+1) = vertexCount
+! end do
+!
+! do i = 1, nHalos
+! nCellsHalo(i) = nCellsCumulative(i+1) - nCellsCumulative(i)
+! end do
+!
+! do i = 1, nHalos + 1
+! nEdgesHalo(i) = nEdgesCumulative(i+1) - nEdgesCumulative(i)
+! end do
+!
+! do i = 1, nHalos + 1
+! nVerticesHalo(i) = nVerticesCumulative(i+1) - nVerticesCumulative(i)
+! end do
+!
+! call mpas_hash_destroy(edgeHash)
+! call mpas_hash_destroy(vertexHash)
+!
+!
+! ! At this point, local_edge_list(1:nOwnEdges) contains all of the owned edges for this block
+! ! and local_edge_list(ghostEdgeStart:nlocal_edges) contains all of the ghost edges
+!
+! ! At this point, local_vertex_list(1:ghostVertexStart-1) contains all of the owned vertices for this block
+! ! and local_vertex_list(ghostVertexStart:nlocal_vertices) contains all of the ghost vertices
+!
+! ! Also, at this point, block_graph_2Halo % vertexID(1:block_graph_2Halo%nVertices) contains all of the owned
+! ! cells for this block, and block_graph_2Halo % vertexID(block_graph_2Halo%nVertices+1:block_graph_2Halo%nVerticesTotal)
+! ! contains all of the ghost cells
+!
+!
+! deallocate(sendEdgeList % list)
+! deallocate(sendEdgeList)
+! deallocate(recvEdgeList % list)
+! deallocate(recvEdgeList)
+!
+! deallocate(sendVertexList % list)
+! deallocate(sendVertexList)
+! deallocate(recvVertexList % list)
+! deallocate(recvVertexList)
+!
+!#ifdef HAVE_ZOLTAN
+!#ifdef _MPI
+! allocate(xEdge(nlocal_edges))
+! allocate(yEdge(nlocal_edges))
+! allocate(zEdge(nlocal_edges))
+! allocate(xVertex(nlocal_vertices))
+! allocate(yVertex(nlocal_vertices))
+! allocate(zVertex(nlocal_vertices))
+!#endif
+!#endif
+!
+! !
+! ! Knowing which edges/vertices are owned by this block and which are actually read
+! ! from the input or restart file, we can build exchange lists to perform
+! ! all-to-all field exchanges from the process that reads a field to the processes that
+! ! need them
+! !
+! call mpas_dmpar_get_owner_list(domain % dminfo, &
+! size(indexToEdgeIDField % array), nlocal_edges, &
+! indexToEdgeIDField % array, local_edge_list, &
+! sendEdgeList, recvEdgeList)
+!
+! call mpas_dmpar_get_owner_list(domain % dminfo, &
+! size(indexToVertexIDField % array), nlocal_vertices, &
+! indexToVertexIDField % array, local_vertex_list, &
+! sendVertexList, recvVertexList)
+!
+!#ifdef HAVE_ZOLTAN
+!#ifdef _MPI
+! call mpas_dmpar_alltoall_field(domain % dminfo, xEdgeField % array, xEdge, &
+! size(xEdgeField % array), nlocal_edges, &
+! sendEdgeList, recvEdgeList)
+! call mpas_dmpar_alltoall_field(domain % dminfo, yEdgeField % array, yEdge, &
+! size(yEdgeField % array), nlocal_edges, &
+! sendEdgeList, recvEdgeList)
+! call mpas_dmpar_alltoall_field(domain % dminfo, zEdgeField % array, zEdge, &
+! size(zEdgeField % array), nlocal_edges, &
+! sendEdgeList, recvEdgeList)
+!
+! call mpas_dmpar_alltoall_field(domain % dminfo, xVertexField % array, xVertex, &
+! size(xVertexField % array), nlocal_vertices, &
+! sendVertexList, recvVertexList)
+! call mpas_dmpar_alltoall_field(domain % dminfo, yVertexField % array, yVertex, &
+! size(yVertexField % array), nlocal_vertices, &
+! sendVertexList, recvVertexList)
+! call mpas_dmpar_alltoall_field(domain % dminfo, zVertexField % array, zVertex, &
+! size(zVertexField % array), nlocal_vertices, &
+! sendVertexList, recvVertexList)
+! !!!!!!!!!!!!!!!!!!
+! !! Reorder edges
+! !!!!!!!!!!!!!!!!!!
+! call mpas_zoltan_order_loc_hsfc_edges(nOwnEdges,local_edge_list,3,xEdge,yEdge,zEdge)
+! !!!!!!!!!!!!!!!!!!
+!
+! !!!!!!!!!!!!!!!!!!
+! !! Reorder vertices
+! !!!!!!!!!!!!!!!!!!
+! call mpas_zoltan_order_loc_hsfc_verts(nOwnVertices,local_vertex_list,3,xVertex,yVertex,zVertex)
+! !!!!!!!!!!!!!!!!!!
+!
+! deallocate(sendEdgeList % list)
+! deallocate(sendEdgeList)
+! deallocate(recvEdgeList % list)
+! deallocate(recvEdgeList)
+!
+! deallocate(sendVertexList % list)
+! deallocate(sendVertexList)
+! deallocate(recvVertexList % list)
+! deallocate(recvVertexList)
+!
+! !
+! ! Knowing which edges/vertices are owned by this block and which are actually read
+! ! from the input or restart file, we can build exchange lists to perform
+! ! all-to-all field exchanges from the process that reads a field to the processes that
+! ! need them
+! !
+! call mpas_dmpar_get_owner_list(domain % dminfo, &
+! size(indexToEdgeIDField % array), nlocal_edges, &
+! indexToEdgeIDField % array, local_edge_list, &
+! sendEdgeList, recvEdgeList)
+!
+! call mpas_dmpar_get_owner_list(domain % dminfo, &
+! size(indexToVertexIDField % array), nlocal_vertices, &
+! indexToVertexIDField % array, local_vertex_list, &
+! sendVertexList, recvVertexList)
+!
+!#endif
+!#endif
+!
+! !
+! ! Build ownership and exchange lists for vertical levels
+! ! Essentially, process 0 owns all vertical levels when reading and writing,
+! ! and it distributes them or gathers them to/from all other processes
+! !
+! if (domain % dminfo % my_proc_id == 0) then
+! allocate(local_vertlevel_list(nVertLevels))
+! do i=1,nVertLevels
+! local_vertlevel_list(i) = i
+! end do
+! else
+! allocate(local_vertlevel_list(0))
+! end if
+! allocate(needed_vertlevel_list(nVertLevels))
+! do i=1,nVertLevels
+! needed_vertlevel_list(i) = i
+! end do
+!
+! call mpas_dmpar_get_owner_list(domain % dminfo, &
+! size(local_vertlevel_list), size(needed_vertlevel_list), &
+! local_vertlevel_list, needed_vertlevel_list, &
+! sendVertLevelList, recvVertLevelList)
+!
+! deallocate(local_vertlevel_list)
+! deallocate(needed_vertlevel_list)
+!
+!
+! !
+! ! Read and distribute all fields given ownership lists and exchange lists (maybe already in block?)
+! !
+! allocate(domain % blocklist)
+!
+! nCells = block_graph_2Halo % nVerticesTotal
+! nEdges = nlocal_edges
+! nVertices = nlocal_vertices
+!
+! call mpas_allocate_block(domain % blocklist, domain, domain%dminfo%my_proc_id, &
+!#include "dim_dummy_args.inc"
+! )
+!
+!!!!!!!!!!!MGD HERE WE NEED TO READ IN indexTo*ID fields !!!!!!!!!!!!!!!!!
+! call MPAS_io_inq_var(inputHandle, 'indexToCellID', ierr=ierr)
+! call MPAS_io_set_var_indices(inputHandle, 'indexToCellID', local_cell_list(1:nOwnCells), ierr=ierr)
+! call mpas_io_get_var(inputHandle, 'indexToCellID', domain % blocklist % mesh % indexToCellID % array, ierr)
+!
+! call MPAS_io_inq_var(inputHandle, 'indexToEdgeID', ierr=ierr)
+! call MPAS_io_set_var_indices(inputHandle, 'indexToEdgeID', local_edge_list(1:nOwnEdges), ierr=ierr)
+! call mpas_io_get_var(inputHandle, 'indexToEdgeID', domain % blocklist % mesh % indexToEdgeID % array, ierr)
+!
+! call MPAS_io_inq_var(inputHandle, 'indexToVertexID', ierr=ierr)
+! call MPAS_io_set_var_indices(inputHandle, 'indexToVertexID', local_vertex_list(1:nOwnVertices), ierr=ierr)
+! call mpas_io_get_var(inputHandle, 'indexToVertexID', domain % blocklist % mesh % indexToVertexID % array, ierr)
+!
+! domain % blocklist % mesh % nCellsSolve = nOwnCells
+! domain % blocklist % mesh % nEdgesSolve = nOwnEdges
+! domain % blocklist % mesh % nVerticesSolve = nOwnVertices
+! domain % blocklist % mesh % nVertLevelsSolve = domain % blocklist % mesh % nVertLevels ! No vertical decomp yet...
+!
+! call mpas_io_input_init(input_obj, domain % blocklist, domain % dminfo)
+!
+!
+! !
+! ! Read attributes
+! !
+! call MPAS_readStreamAtt(input_obj % io_stream, 'sphere_radius', r_sphere_radius, ierr)
+! if (ierr /= MPAS_STREAM_NOERR) then
+! write(0,*) 'Warning: Attribute sphere_radius not found in '//trim(input_obj % filename)
+! write(0,*) ' Setting sphere_radius to 1.0'
+! domain % blocklist % mesh % sphere_radius = 1.0
+! else
+! domain % blocklist % mesh % sphere_radius = r_sphere_radius
+! end if
+!
+! call MPAS_readStreamAtt(input_obj % io_stream, 'on_a_sphere', c_on_a_sphere, ierr)
+! if (ierr /= MPAS_STREAM_NOERR) then
+! write(0,*) 'Warning: Attribute on_a_sphere not found in '//trim(input_obj % filename)
+! write(0,*) ' Setting on_a_sphere to ''YES'''
+! domain % blocklist % mesh % on_a_sphere = .true.
+! else
+! if (index(c_on_a_sphere, 'YES') /= 0) then
+! domain % blocklist % mesh % on_a_sphere = .true.
+! else
+! domain % blocklist % mesh % on_a_sphere = .false.
+! end if
+! end if
+!
+! if (.not. config_do_restart) then
+! input_obj % time = 1
+! else
+! !
+! ! If doing a restart, we need to decide which time slice to read from the
+! ! restart file
+! !
+! input_obj % time = MPAS_seekStream(input_obj % io_stream, config_start_time, MPAS_STREAM_EXACT_TIME, timeStamp, ierr)
+! if (ierr == MPAS_IO_ERR) then
+! write(0,*) 'Error: restart file '//trim(filename)//' did not contain time '//trim(config_start_time)
+! call mpas_dmpar_abort(domain % dminfo)
+! end if
+!write(0,*) 'MGD DEBUGGING time = ', input_obj % time
+! write(0,*) 'Restarting model from time ', timeStamp
+!
+! end if
+!
+!
+! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+! ! Do the actual work of reading all fields in from the input or restart file
+! ! For each field:
+! ! 1) Each process reads a contiguous range of cell/edge/vertex indices, which
+! ! may not correspond with the cells/edges/vertices that are owned by the
+! ! process
+! ! 2) All processes then send the global indices that were read to the
+! ! processes that own those indices based on
+! ! {send,recv}{Cell,Edge,Vertex,VertLevel}List
+! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+! call mpas_read_and_distribute_fields(input_obj)
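
! Illustrative sketch, not part of this revision: step (1) above has each MPI
! task read a contiguous slice of the global cell/edge/vertex index space.  A
! simple block decomposition of nGlobal items over nProcs tasks looks roughly
! like the helper below (names are illustrative); step (2) is then the
! all-to-all against the owner lists shown earlier.

      subroutine contiguous_read_range(nGlobal, nProcs, myRank, readStart, readCount)
         implicit none
         integer, intent(in)  :: nGlobal, nProcs, myRank   ! myRank is 0-based
         integer, intent(out) :: readStart, readCount      ! 1-based start and count

         integer :: base, extra

         base  = nGlobal / nProcs
         extra = mod(nGlobal, nProcs)

         ! the first 'extra' tasks each read one additional element
         if (myRank < extra) then
            readCount = base + 1
            readStart = myRank * readCount + 1
         else
            readCount = base
            readStart = extra * (base + 1) + (myRank - extra) * base + 1
         end if
      end subroutine contiguous_read_range
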
+!
+! call mpas_io_input_finalize(input_obj, domain % dminfo)
+!
+! call MPAS_io_close(inputHandle, ierr)
+!
+!
+! !
+! ! Work out halo exchange lists for cells, edges, and vertices
+! ! NB: The next pointer in each element of, e.g., cellsToSend, acts as the head pointer of
+! ! the list, since Fortran does not allow arrays of pointers
+! !
+!
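
! Illustrative sketch, not part of this revision: the NB above refers to the
! usual Fortran workaround for the missing "array of pointers" -- declare an
! array of a derived type and let each element's pointer component (here,
! next) act as the head of one list, one per halo layer.  Self-contained toy
! example with illustrative names:

      program list_heads_sketch
         implicit none

         type :: int_list
            integer :: val
            type (int_list), pointer :: next => null()
         end type int_list

         ! an array of list *heads*; element i's next component starts halo i's list
         type (int_list), dimension(3) :: haloHeads
         integer :: i

         do i = 1, 3
            allocate(haloHeads(i) % next)          ! first real element of list i
            haloHeads(i) % next % val = i
            haloHeads(i) % next % next => null()
         end do
      end program list_heads_sketch
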
+! !--------- Create Cell Exchange Lists ---------!
+!
+! ! pass in neededList of ownedCells and halo layer 1 cells
+! call mpas_dmpar_get_owner_list(domain % dminfo, &
+! nOwnCells, nCellsCumulative(2), &
+! block_graph_2Halo % vertexID(1:nOwnCells), block_graph_2Halo % vertexID(1 : nCellsCumulative(2)), &
+! domain % blocklist % parinfo % cellsToSend(1) % next, domain % blocklist % parinfo % cellsToRecv(1) % next)
+!
+! ! pass in neededList of ownedCells and halo layer 2 cells; offset of number of halo 1 cells is required
+! offset = nCellsHalo(1)
+! nTempIDs = nOwnCells + nCellsHalo(2)
+! allocate(tempIDs(nTempIDs))
+! tempIDs(1:nOwnCells) = block_graph_2Halo % vertexID(1:nOwnCells)
+! tempIDs(nOwnCells+1:nTempIDs) = block_graph_2Halo % vertexID(nCellsCumulative(2)+1 : nCellsCumulative(3))
+! call mpas_dmpar_get_owner_list(domain % dminfo, &
+! nOwnCells, nTempIDs, &
+! block_graph_2Halo % vertexID(1:nOwnCells), tempIDs, &
+! domain % blocklist % parinfo % cellsToSend(2) % next, domain % blocklist % parinfo % cellsToRecv(2) % next, &
+! offset)
+! deallocate(tempIDs)
+!
+!
+! !--------- Create Edge Exchange Lists ---------!
+!
+! ! pass in neededList of ownedEdges and ownedCell perimeter edges
+! call mpas_dmpar_get_owner_list(domain % dminfo, &
+! nOwnEdges, nEdgesCumulative(2), &
+! local_edge_list(1:nOwnEdges), local_edge_list(1 : nEdgesCumulative(2)), &
+! domain % blocklist % parinfo % edgesToSend(1) % next, domain % blocklist % parinfo % edgesToRecv(1) % next)
+!
+! ! pass in neededList of owned edges and yet-to-be-included edges from halo 1 cells; offset of number of ownedCell perimeter edges is required
+! offset = nEdgesHalo(1)
+! nTempIDs = nOwnEdges + nEdgesHalo(2)
+! allocate(tempIDs(nTempIDs))
+! tempIDs(1:nOwnEdges) = local_edge_list(1:nOwnEdges)
+! tempIDs(nOwnEdges+1:nTempIDs) = local_edge_list(nEdgesCumulative(2)+1 : nEdgesCumulative(3))
+! call mpas_dmpar_get_owner_list(domain % dminfo, &
+! nOwnEdges, nTempIDs, &
+! local_edge_list(1:nOwnEdges), tempIDs, &
+! domain % blocklist % parinfo % edgesToSend(2) % next, domain % blocklist % parinfo % edgesToRecv(2) % next, &
+! offset)
+! deallocate(tempIDs)
+!
+! ! pass in neededList of owned edges and yet-to-be-included edges from halo 2 cells; offset of number of ownedCell perimeter edges and halo 1 edges is required
+! offset = nEdgesHalo(1) + nEdgesHalo(2)
+! nTempIDs = nOwnEdges + nEdgesHalo(3)
+! allocate(tempIDs(nTempIDs))
+! tempIDs(1:nOwnEdges) = local_edge_list(1:nOwnEdges)
+! tempIDs(nOwnEdges+1:nTempIDs) = local_edge_list(nEdgesCumulative(3)+1 : nEdgesCumulative(4))
+! call mpas_dmpar_get_owner_list(domain % dminfo, &
+! nOwnEdges, nTempIDs, &
+! local_edge_list(1:nOwnEdges), tempIDs, &
+! domain % blocklist % parinfo % edgesToSend(3) % next, domain % blocklist % parinfo % edgesToRecv(3) % next, &
+! offset)
+! deallocate(tempIDs)
+!
+!
+! !--------- Create Vertex Exchange Lists ---------!
+!
+!
+! ! pass in neededList of ownedVertices and ownedCell perimeter vertices
+! call mpas_dmpar_get_owner_list(domain % dminfo, &
+! nOwnVertices, nVerticesCumulative(2), &
+! local_vertex_list(1:nOwnVertices), local_vertex_list(1 : nVerticesCumulative(2)), &
+! domain % blocklist % parinfo % verticesToSend(1) % next, domain % blocklist % parinfo % verticesToRecv(1) % next)
+!
+! ! pass in neededList of owned vertices and yet-to-be-included vertices from halo 1 cells; offset of number of ownedCell perimeter vertices is required
+! offset = nVerticesHalo(1)
+! nTempIDs = nOwnVertices + nVerticesHalo(2)
+! allocate(tempIDs(nTempIDs))
+! tempIDs(1:nOwnVertices) = local_vertex_list(1:nOwnVertices)
+! tempIDs(nOwnVertices+1:nTempIDs) = local_vertex_list(nVerticesCumulative(2)+1 : nVerticesCumulative(3))
+! call mpas_dmpar_get_owner_list(domain % dminfo, &
+! nOwnVertices, nTempIDs, &
+! local_vertex_list(1:nOwnVertices), tempIDs, &
+! domain % blocklist % parinfo % verticesToSend(2) % next, domain % blocklist % parinfo % verticesToRecv(2) % next, &
+! offset)
+! deallocate(tempIDs)
+!
+! ! pass in neededList of owned vertices and yet-to-be-included vertices from halo 2 cells; offset of number of ownedCell perimeter vertices and halo 1 vertices is required
+! offset = nVerticesHalo(1) + nVerticesHalo(2)
+! nTempIDs = nOwnVertices + nVerticesHalo(3)
+! allocate(tempIDs(nTempIDs))
+! tempIDs(1:nOwnVertices) = local_vertex_list(1:nOwnVertices)
+! tempIDs(nOwnVertices+1:nTempIDs) = local_vertex_list(nVerticesCumulative(3)+1 : nVerticesCumulative(4))
+! call mpas_dmpar_get_owner_list(domain % dminfo, &
+! nOwnVertices, nTempIDs, &
+! local_vertex_list(1:nOwnVertices), tempIDs, &
+! domain % blocklist % parinfo % verticesToSend(3) % next, domain % blocklist % parinfo % verticesToRecv(3) % next, &
+! offset)
+! deallocate(tempIDs)
+!
+!
+! domain % blocklist % mesh % nCellsSolve = nOwnCells
+! domain % blocklist % mesh % nEdgesSolve = nOwnEdges
+! domain % blocklist % mesh % nVerticesSolve = ghostVertexStart-1
+! domain % blocklist % mesh % nVertLevelsSolve = domain % blocklist % mesh % nVertLevels ! No vertical decomp yet...
+!
+! ! Link the sendList and recvList pointers in each field type to the appropriate lists
+! ! in parinfo, e.g., cellsToSend and cellsToRecv; in future, it can also be extended to
+! ! link blocks of fields to each other
+! call mpas_create_field_links(domain % blocklist)
+!
+!
+! !
+! ! Exchange halos for all of the fields that were read from the input file
+! !
+! call mpas_exch_input_field_halos(domain, input_obj)
+!
+!
+! !
+! ! Rename vertices in cellsOnCell, edgesOnCell, etc. to local indices
+! !
+! allocate(cellIDSorted(2,domain % blocklist % mesh % nCells))
+! allocate(edgeIDSorted(2,domain % blocklist % mesh % nEdges))
+! allocate(vertexIDSorted(2,domain % blocklist % mesh % nVertices))
+!
+! do i=1,domain % blocklist % mesh % nCells
+! cellIDSorted(1,i) = domain % blocklist % mesh % indexToCellID % array(i)
+! cellIDSorted(2,i) = i
+! end do
+! call quicksort(block_graph_2Halo % nVerticesTotal, cellIDSorted)
+!
+! do i=1,domain % blocklist % mesh % nEdges
+! edgeIDSorted(1,i) = domain % blocklist % mesh % indexToEdgeID % array(i)
+! edgeIDSorted(2,i) = i
+! end do
+! call quicksort(nlocal_edges, edgeIDSorted)
+!
+! do i=1,domain % blocklist % mesh % nVertices
+! vertexIDSorted(1,i) = domain % blocklist % mesh % indexToVertexID % array(i)
+! vertexIDSorted(2,i) = i
+! end do
+! call quicksort(nlocal_vertices, vertexIDSorted)
+!
+!
+! do i=1,domain % blocklist % mesh % nCells
+! do j=1,domain % blocklist % mesh % nEdgesOnCell % array(i)
+!
+! k = mpas_binary_search(cellIDSorted, 2, 1, domain % blocklist % mesh % nCells, &
+! domain % blocklist % mesh % cellsOnCell % array(j,i))
+! if (k <= domain % blocklist % mesh % nCells) then
+! domain % blocklist % mesh % cellsOnCell % array(j,i) = cellIDSorted(2,k)
+! else
+! domain % blocklist % mesh % cellsOnCell % array(j,i) = domain % blocklist % mesh % nCells + 1
+! end if
+!
+! k = mpas_binary_search(edgeIDSorted, 2, 1, domain % blocklist % mesh % nEdges, &
+! domain % blocklist % mesh % edgesOnCell % array(j,i))
+! if (k <= domain % blocklist % mesh % nEdges) then
+! domain % blocklist % mesh % edgesOnCell % array(j,i) = edgeIDSorted(2,k)
+! else
+! domain % blocklist % mesh % edgesOnCell % array(j,i) = domain % blocklist % mesh % nEdges + 1
+! end if
+!
+! k = mpas_binary_search(vertexIDSorted, 2, 1, domain % blocklist % mesh % nVertices, &
+! domain % blocklist % mesh % verticesOnCell % array(j,i))
+! if (k <= domain % blocklist % mesh % nVertices) then
+! domain % blocklist % mesh % verticesOnCell % array(j,i) = vertexIDSorted(2,k)
+! else
+! domain % blocklist % mesh % verticesOnCell % array(j,i) = domain % blocklist % mesh % nVertices + 1
+! end if
+!
+! end do
+! end do
+!
+! do i=1,domain % blocklist % mesh % nEdges
+! do j=1,2
+!
+! k = mpas_binary_search(cellIDSorted, 2, 1, domain % blocklist % mesh % nCells, &
+! domain % blocklist % mesh % cellsOnEdge % array(j,i))
+! if (k <= domain % blocklist % mesh % nCells) then
+! domain % blocklist % mesh % cellsOnEdge % array(j,i) = cellIDSorted(2,k)
+! else
+! domain % blocklist % mesh % cellsOnEdge % array(j,i) = domain % blocklist % mesh % nCells + 1
+! end if
+!
+! k = mpas_binary_search(vertexIDSorted, 2, 1, domain % blocklist % mesh % nVertices, &
+! domain % blocklist % mesh % verticesOnEdge % array(j,i))
+! if (k <= domain % blocklist % mesh % nVertices) then
+! domain % blocklist % mesh % verticesOnEdge % array(j,i) = vertexIDSorted(2,k)
+! else
+! domain % blocklist % mesh % verticesOnEdge % array(j,i) = domain % blocklist % mesh % nVertices + 1
+! end if
+!
+! end do
+!
+! do j=1,domain % blocklist % mesh % nEdgesOnEdge % array(i)
+!
+! k = mpas_binary_search(edgeIDSorted, 2, 1, domain % blocklist % mesh % nEdges, &
+! domain % blocklist % mesh % edgesOnEdge % array(j,i))
+! if (k <= domain % blocklist % mesh % nEdges) then
+! domain % blocklist % mesh % edgesOnEdge % array(j,i) = edgeIDSorted(2,k)
+! else
+! domain % blocklist % mesh % edgesOnEdge % array(j,i) = domain % blocklist % mesh % nEdges + 1
+! end if
+!
+! end do
+! end do
+!
+! do i=1,domain % blocklist % mesh % nVertices
+! do j=1,vertexDegree
+!
+! k = mpas_binary_search(cellIDSorted, 2, 1, domain % blocklist % mesh % nCells, &
+! domain % blocklist % mesh % cellsOnVertex % array(j,i))
+! if (k <= domain % blocklist % mesh % nCells) then
+! domain % blocklist % mesh % cellsOnVertex % array(j,i) = cellIDSorted(2,k)
+! else
+! domain % blocklist % mesh % cellsOnVertex % array(j,i) = domain % blocklist % mesh % nCells + 1
+! end if
+!
+! k = mpas_binary_search(edgeIDSorted, 2, 1, domain % blocklist % mesh % nEdges, &
+! domain % blocklist % mesh % edgesOnVertex % array(j,i))
+! if (k <= domain % blocklist % mesh % nEdges) then
+! domain % blocklist % mesh % edgesOnVertex % array(j,i) = edgeIDSorted(2,k)
+! else
+! domain % blocklist % mesh % edgesOnVertex % array(j,i) = domain % blocklist % mesh % nEdges + 1
+! end if
+!
+! end do
+! end do
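
! Illustrative sketch, not part of this revision: the renaming loops above all
! use the same idiom -- sort (globalID, localIndex) pairs once with quicksort,
! then binary-search each connectivity entry and fall back to the extra
! "garbage" element n+1 when the global ID is not local.  Condensed into a
! helper (name illustrative; mpas_binary_search is the routine already called
! above):

      integer function global_to_local_sketch(sortedIDs, n, globalID)
         use mpas_sort   ! assumed home of the quicksort/mpas_binary_search used above
         implicit none
         integer, intent(in) :: n
         integer, dimension(2,n), intent(in) :: sortedIDs  ! row 1: sorted global IDs, row 2: local indices
         integer, intent(in) :: globalID

         integer :: k

         k = mpas_binary_search(sortedIDs, 2, 1, n, globalID)
         if (k <= n) then
            global_to_local_sketch = sortedIDs(2,k)   ! found: local index
         else
            global_to_local_sketch = n + 1            ! not found: ghost/garbage slot
         end if
      end function global_to_local_sketch
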
-#endif
-#endif
-
- !
- ! Build ownership and exchange lists for vertical levels
- ! Essentially, process 0 owns all vertical levels when reading and writing,
- ! and it distributes them or gathers them to/from all other processes
- !
- if (domain % dminfo % my_proc_id == 0) then
- allocate(local_vertlevel_list(nVertLevels))
- do i=1,nVertLevels
- local_vertlevel_list(i) = i
- end do
- else
- allocate(local_vertlevel_list(0))
- end if
- allocate(needed_vertlevel_list(nVertLevels))
- do i=1,nVertLevels
- needed_vertlevel_list(i) = i
- end do
-
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- size(local_vertlevel_list), size(needed_vertlevel_list), &
- local_vertlevel_list, needed_vertlevel_list, &
- sendVertLevelList, recvVertLevelList)
-
- deallocate(local_vertlevel_list)
- deallocate(needed_vertlevel_list)
-
-
- !
- ! Read and distribute all fields given ownership lists and exchange lists (maybe already in block?)
- !
- allocate(domain % blocklist)
-
- nCells = block_graph_2Halo % nVerticesTotal
- nEdges = nlocal_edges
- nVertices = nlocal_vertices
-
- call mpas_allocate_block(domain % blocklist, domain, domain%dminfo%my_proc_id, &
-#include "dim_dummy_args.inc"
- )
-
-!!!!!!!!!!MGD HERE WE NEED TO READ IN indexTo*ID fields !!!!!!!!!!!!!!!!!
- call MPAS_io_inq_var(inputHandle, 'indexToCellID', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'indexToCellID', local_cell_list(1:nOwnCells), ierr=ierr)
- call mpas_io_get_var(inputHandle, 'indexToCellID', domain % blocklist % mesh % indexToCellID % array, ierr)
-
- call MPAS_io_inq_var(inputHandle, 'indexToEdgeID', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'indexToEdgeID', local_edge_list(1:nOwnEdges), ierr=ierr)
- call mpas_io_get_var(inputHandle, 'indexToEdgeID', domain % blocklist % mesh % indexToEdgeID % array, ierr)
-
- call MPAS_io_inq_var(inputHandle, 'indexToVertexID', ierr=ierr)
- call MPAS_io_set_var_indices(inputHandle, 'indexToVertexID', local_vertex_list(1:nOwnVertices), ierr=ierr)
- call mpas_io_get_var(inputHandle, 'indexToVertexID', domain % blocklist % mesh % indexToVertexID % array, ierr)
-
- domain % blocklist % mesh % nCellsSolve = nOwnCells
- domain % blocklist % mesh % nEdgesSolve = nOwnEdges
- domain % blocklist % mesh % nVerticesSolve = nOwnVertices
- domain % blocklist % mesh % nVertLevelsSolve = domain % blocklist % mesh % nVertLevels ! No vertical decomp yet...
-
- call mpas_io_input_init(input_obj, domain % blocklist, domain % dminfo)
-
-
- !
- ! Read attributes
- !
- call MPAS_readStreamAtt(input_obj % io_stream, 'sphere_radius', r_sphere_radius, ierr)
- if (ierr /= MPAS_STREAM_NOERR) then
- write(0,*) 'Warning: Attribute sphere_radius not found in '//trim(input_obj % filename)
- write(0,*) ' Setting sphere_radius to 1.0'
- domain % blocklist % mesh % sphere_radius = 1.0
- else
- domain % blocklist % mesh % sphere_radius = r_sphere_radius
- end if
-
- call MPAS_readStreamAtt(input_obj % io_stream, 'on_a_sphere', c_on_a_sphere, ierr)
- if (ierr /= MPAS_STREAM_NOERR) then
- write(0,*) 'Warning: Attribute on_a_sphere not found in '//trim(input_obj % filename)
- write(0,*) ' Setting on_a_sphere to ''YES'''
- domain % blocklist % mesh % on_a_sphere = .true.
- else
- if (index(c_on_a_sphere, 'YES') /= 0) then
- domain % blocklist % mesh % on_a_sphere = .true.
- else
- domain % blocklist % mesh % on_a_sphere = .false.
- end if
- end if
-
- if (.not. config_do_restart) then
- input_obj % time = 1
- else
- !
- ! If doing a restart, we need to decide which time slice to read from the
- ! restart file
- !
- input_obj % time = MPAS_seekStream(input_obj % io_stream, config_start_time, MPAS_STREAM_EXACT_TIME, timeStamp, ierr)
- if (ierr == MPAS_IO_ERR) then
- write(0,*) 'Error: restart file '//trim(filename)//' did not contain time '//trim(config_start_time)
- call mpas_dmpar_abort(domain % dminfo)
- end if
-write(0,*) 'MGD DEBUGGING time = ', input_obj % time
- write(0,*) 'Restarting model from time ', timeStamp
-
- end if
-
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! Do the actual work of reading all fields in from the input or restart file
- ! For each field:
- ! 1) Each process reads a contiguous range of cell/edge/vertex indices, which
- ! may not correspond with the cells/edges/vertices that are owned by the
- ! process
- ! 2) All processes then send the global indices that were read to the
- ! processes that own those indices based on
- ! {send,recv}{Cell,Edge,Vertex,VertLevel}List
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- call mpas_read_and_distribute_fields(input_obj)
-
- call mpas_io_input_finalize(input_obj, domain % dminfo)
-
- call MPAS_io_close(inputHandle, ierr)
-
-
- !
- ! Work out halo exchange lists for cells, edges, and vertices
- ! NB: The next pointer in each element of, e.g., cellsToSend, acts as the head pointer of
- ! the list, since Fortran does not allow arrays of pointers
- !
-
- !--------- Create Cell Exchange Lists ---------!
-
- ! pass in neededList of ownedCells and halo layer 1 cells
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- nOwnCells, nCellsCumulative(2), &
- block_graph_2Halo % vertexID(1:nOwnCells), block_graph_2Halo % vertexID(1 : nCellsCumulative(2)), &
- domain % blocklist % parinfo % cellsToSend(1) % next, domain % blocklist % parinfo % cellsToRecv(1) % next)
-
- ! pass in neededList of ownedCells and halo layer 2 cells; offset of number of halo 1 cells is required
- offset = nCellsHalo(1)
- nTempIDs = nOwnCells + nCellsHalo(2)
- allocate(tempIDs(nTempIDs))
- tempIDs(1:nOwnCells) = block_graph_2Halo % vertexID(1:nOwnCells)
- tempIDs(nOwnCells+1:nTempIDs) = block_graph_2Halo % vertexID(nCellsCumulative(2)+1 : nCellsCumulative(3))
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- nOwnCells, nTempIDs, &
- block_graph_2Halo % vertexID(1:nOwnCells), tempIDs, &
- domain % blocklist % parinfo % cellsToSend(2) % next, domain % blocklist % parinfo % cellsToRecv(2) % next, &
- offset)
- deallocate(tempIDs)
-
-
- !--------- Create Edge Exchange Lists ---------!
-
- ! pass in neededList of ownedEdges and ownedCell perimeter edges
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- nOwnEdges, nEdgesCumulative(2), &
- local_edge_list(1:nOwnEdges), local_edge_list(1 : nEdgesCumulative(2)), &
- domain % blocklist % parinfo % edgesToSend(1) % next, domain % blocklist % parinfo % edgesToRecv(1) % next)
-
- ! pass in neededList of owned edges and yet-to-be-included edges from halo 1 cells; offset of number of ownedCell perimeter edges is required
- offset = nEdgesHalo(1)
- nTempIDs = nOwnEdges + nEdgesHalo(2)
- allocate(tempIDs(nTempIDs))
- tempIDs(1:nOwnEdges) = local_edge_list(1:nOwnEdges)
- tempIDs(nOwnEdges+1:nTempIDs) = local_edge_list(nEdgesCumulative(2)+1 : nEdgesCumulative(3))
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- nOwnEdges, nTempIDs, &
- local_edge_list(1:nOwnEdges), tempIDs, &
- domain % blocklist % parinfo % edgesToSend(2) % next, domain % blocklist % parinfo % edgesToRecv(2) % next, &
- offset)
- deallocate(tempIDs)
-
- ! pass in neededList of owned edges and yet-to-be-included edges from halo 2 cells; offset of number of ownedCell perimeter edges and halo 1 edges is required
- offset = nEdgesHalo(1) + nEdgesHalo(2)
- nTempIDs = nOwnEdges + nEdgesHalo(3)
- allocate(tempIDs(nTempIDs))
- tempIDs(1:nOwnEdges) = local_edge_list(1:nOwnEdges)
- tempIDs(nOwnEdges+1:nTempIDs) = local_edge_list(nEdgesCumulative(3)+1 : nEdgesCumulative(4))
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- nOwnEdges, nTempIDs, &
- local_edge_list(1:nOwnEdges), tempIDs, &
- domain % blocklist % parinfo % edgesToSend(3) % next, domain % blocklist % parinfo % edgesToRecv(3) % next, &
- offset)
- deallocate(tempIDs)
-
-
- !--------- Create Vertex Exchange Lists ---------!
-
-
- ! pass in neededList of ownedVertices and ownedCell perimeter vertices
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- nOwnVertices, nVerticesCumulative(2), &
- local_vertex_list(1:nOwnVertices), local_vertex_list(1 : nVerticesCumulative(2)), &
- domain % blocklist % parinfo % verticesToSend(1) % next, domain % blocklist % parinfo % verticesToRecv(1) % next)
-
- ! pass in neededList of owned vertices and yet-to-be-included vertices from halo 1 cells; offset of number of ownedCell perimeter vertices is required
- offset = nVerticesHalo(1)
- nTempIDs = nOwnVertices + nVerticesHalo(2)
- allocate(tempIDs(nTempIDs))
- tempIDs(1:nOwnVertices) = local_vertex_list(1:nOwnVertices)
- tempIDs(nOwnVertices+1:nTempIDs) = local_vertex_list(nVerticesCumulative(2)+1 : nVerticesCumulative(3))
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- nOwnVertices, nTempIDs, &
- local_vertex_list(1:nOwnVertices), tempIDs, &
- domain % blocklist % parinfo % verticesToSend(2) % next, domain % blocklist % parinfo % verticesToRecv(2) % next, &
- offset)
- deallocate(tempIDs)
-
- ! pass in neededList of owned vertices and yet-to-be-included vertices from halo 2 cells; offset of number of ownedCell perimeter vertices and halo 1 vertices is required
- offset = nVerticesHalo(1) + nVerticesHalo(2)
- nTempIDs = nOwnVertices + nVerticesHalo(3)
- allocate(tempIDs(nTempIDs))
- tempIDs(1:nOwnVertices) = local_vertex_list(1:nOwnVertices)
- tempIDs(nOwnVertices+1:nTempIDs) = local_vertex_list(nVerticesCumulative(3)+1 : nVerticesCumulative(4))
- call mpas_dmpar_get_owner_list(domain % dminfo, &
- nOwnVertices, nTempIDs, &
- local_vertex_list(1:nOwnVertices), tempIDs, &
- domain % blocklist % parinfo % verticesToSend(3) % next, domain % blocklist % parinfo % verticesToRecv(3) % next, &
- offset)
- deallocate(tempIDs)
-
-
- domain % blocklist % mesh % nCellsSolve = nOwnCells
- domain % blocklist % mesh % nEdgesSolve = nOwnEdges
- domain % blocklist % mesh % nVerticesSolve = ghostVertexStart-1
- domain % blocklist % mesh % nVertLevelsSolve = domain % blocklist % mesh % nVertLevels ! No vertical decomp yet...
-
- ! Link the sendList and recvList pointers in each field type to the appropriate lists
- ! in parinfo, e.g., cellsToSend and cellsToRecv; in future, it can also be extended to
- ! link blocks of fields to each other
- call mpas_create_field_links(domain % blocklist)
-
-
- !
- ! Exchange halos for all of the fields that were read from the input file
- !
- call mpas_exch_input_field_halos(domain, input_obj)
-
-
- !
- ! Rename vertices in cellsOnCell, edgesOnCell, etc. to local indices
- !
- allocate(cellIDSorted(2,domain % blocklist % mesh % nCells))
- allocate(edgeIDSorted(2,domain % blocklist % mesh % nEdges))
- allocate(vertexIDSorted(2,domain % blocklist % mesh % nVertices))
-
- do i=1,domain % blocklist % mesh % nCells
- cellIDSorted(1,i) = domain % blocklist % mesh % indexToCellID % array(i)
- cellIDSorted(2,i) = i
- end do
- call quicksort(block_graph_2Halo % nVerticesTotal, cellIDSorted)
-
- do i=1,domain % blocklist % mesh % nEdges
- edgeIDSorted(1,i) = domain % blocklist % mesh % indexToEdgeID % array(i)
- edgeIDSorted(2,i) = i
- end do
- call quicksort(nlocal_edges, edgeIDSorted)
-
- do i=1,domain % blocklist % mesh % nVertices
- vertexIDSorted(1,i) = domain % blocklist % mesh % indexToVertexID % array(i)
- vertexIDSorted(2,i) = i
- end do
- call quicksort(nlocal_vertices, vertexIDSorted)
-
-
- do i=1,domain % blocklist % mesh % nCells
- do j=1,domain % blocklist % mesh % nEdgesOnCell % array(i)
-
- k = mpas_binary_search(cellIDSorted, 2, 1, domain % blocklist % mesh % nCells, &
- domain % blocklist % mesh % cellsOnCell % array(j,i))
- if (k <= domain % blocklist % mesh % nCells) then
- domain % blocklist % mesh % cellsOnCell % array(j,i) = cellIDSorted(2,k)
- else
- domain % blocklist % mesh % cellsOnCell % array(j,i) = domain % blocklist % mesh % nCells + 1
- end if
-
- k = mpas_binary_search(edgeIDSorted, 2, 1, domain % blocklist % mesh % nEdges, &
- domain % blocklist % mesh % edgesOnCell % array(j,i))
- if (k <= domain % blocklist % mesh % nEdges) then
- domain % blocklist % mesh % edgesOnCell % array(j,i) = edgeIDSorted(2,k)
- else
- domain % blocklist % mesh % edgesOnCell % array(j,i) = domain % blocklist % mesh % nEdges + 1
- end if
-
- k = mpas_binary_search(vertexIDSorted, 2, 1, domain % blocklist % mesh % nVertices, &
- domain % blocklist % mesh % verticesOnCell % array(j,i))
- if (k <= domain % blocklist % mesh % nVertices) then
- domain % blocklist % mesh % verticesOnCell % array(j,i) = vertexIDSorted(2,k)
- else
- domain % blocklist % mesh % verticesOnCell % array(j,i) = domain % blocklist % mesh % nVertices + 1
- end if
-
- end do
- end do
-
- do i=1,domain % blocklist % mesh % nEdges
- do j=1,2
-
- k = mpas_binary_search(cellIDSorted, 2, 1, domain % blocklist % mesh % nCells, &
- domain % blocklist % mesh % cellsOnEdge % array(j,i))
- if (k <= domain % blocklist % mesh % nCells) then
- domain % blocklist % mesh % cellsOnEdge % array(j,i) = cellIDSorted(2,k)
- else
- domain % blocklist % mesh % cellsOnEdge % array(j,i) = domain % blocklist % mesh % nCells + 1
- end if
-
- k = mpas_binary_search(vertexIDSorted, 2, 1, domain % blocklist % mesh % nVertices, &
- domain % blocklist % mesh % verticesOnEdge % array(j,i))
- if (k <= domain % blocklist % mesh % nVertices) then
- domain % blocklist % mesh % verticesOnEdge % array(j,i) = vertexIDSorted(2,k)
- else
- domain % blocklist % mesh % verticesOnEdge % array(j,i) = domain % blocklist % mesh % nVertices + 1
- end if
-
- end do
-
- do j=1,domain % blocklist % mesh % nEdgesOnEdge % array(i)
-
- k = mpas_binary_search(edgeIDSorted, 2, 1, domain % blocklist % mesh % nEdges, &
- domain % blocklist % mesh % edgesOnEdge % array(j,i))
- if (k <= domain % blocklist % mesh % nEdges) then
- domain % blocklist % mesh % edgesOnEdge % array(j,i) = edgeIDSorted(2,k)
- else
- domain % blocklist % mesh % edgesOnEdge % array(j,i) = domain % blocklist % mesh % nEdges + 1
- end if
-
- end do
- end do
-
- do i=1,domain % blocklist % mesh % nVertices
- do j=1,vertexDegree
-
- k = mpas_binary_search(cellIDSorted, 2, 1, domain % blocklist % mesh % nCells, &
- domain % blocklist % mesh % cellsOnVertex % array(j,i))
- if (k <= domain % blocklist % mesh % nCells) then
- domain % blocklist % mesh % cellsOnVertex % array(j,i) = cellIDSorted(2,k)
- else
- domain % blocklist % mesh % cellsOnVertex % array(j,i) = domain % blocklist % mesh % nCells + 1
- end if
-
- k = mpas_binary_search(edgeIDSorted, 2, 1, domain % blocklist % mesh % nEdges, &
- domain % blocklist % mesh % edgesOnVertex % array(j,i))
- if (k <= domain % blocklist % mesh % nEdges) then
- domain % blocklist % mesh % edgesOnVertex % array(j,i) = edgeIDSorted(2,k)
- else
- domain % blocklist % mesh % edgesOnVertex % array(j,i) = domain % blocklist % mesh % nEdges + 1
- end if
-
- end do
- end do
-
deallocate(cellIDSorted)
deallocate(edgeIDSorted)
deallocate(vertexIDSorted)
@@ -1262,11 +1464,10 @@
deallocate(zCell)
#endif
#endif
- end subroutine mpas_input_state_for_domain
+ end subroutine mpas_input_state_for_domain!}}}
-
!CR:TODO: an identical subroutine is found in module_io_output - merge
- subroutine mpas_insert_string_suffix(stream, suffix, filename)
+ subroutine mpas_insert_string_suffix(stream, suffix, filename)!{{{
implicit none
@@ -1289,10 +1490,9 @@
if (filename(i:i) == ':') filename(i:i) = '.'
end do
- end subroutine mpas_insert_string_suffix
+ end subroutine mpas_insert_string_suffix!}}}
-
- subroutine mpas_read_and_distribute_fields(input_obj)
+ subroutine mpas_read_and_distribute_fields(input_obj)!{{{
implicit none
@@ -1304,11 +1504,9 @@
call MPAS_readStream(input_obj % io_stream, 1, ierr)
- end subroutine mpas_read_and_distribute_fields
+ end subroutine mpas_read_and_distribute_fields!}}}
-
-
- subroutine mpas_io_input_init(input_obj, blocklist, dminfo)
+ subroutine mpas_io_input_init(input_obj, blocklist, dminfo)!{{{
implicit none
@@ -1334,10 +1532,9 @@
#include "add_input_fields.inc"
- end subroutine mpas_io_input_init
+ end subroutine mpas_io_input_init!}}}
-
- subroutine mpas_io_input_get_dimension(input_obj, dimname, dimsize)
+ subroutine mpas_io_input_get_dimension(input_obj, dimname, dimsize)!{{{
implicit none
@@ -1347,10 +1544,9 @@
!include "get_dimension_by_name.inc"
- end subroutine mpas_io_input_get_dimension
+ end subroutine mpas_io_input_get_dimension!}}}
-
- subroutine mpas_io_input_get_att_real(input_obj, attname, attvalue)
+ subroutine mpas_io_input_get_att_real(input_obj, attname, attvalue)!{{{
implicit none
@@ -1360,10 +1556,9 @@
integer :: nferr
- end subroutine mpas_io_input_get_att_real
+ end subroutine mpas_io_input_get_att_real!}}}
-
- subroutine mpas_io_input_get_att_text(input_obj, attname, attvalue)
+ subroutine mpas_io_input_get_att_text(input_obj, attname, attvalue)!{{{
implicit none
@@ -1373,11 +1568,10 @@
integer :: nferr
- end subroutine mpas_io_input_get_att_text
+ end subroutine mpas_io_input_get_att_text!}}}
+ subroutine mpas_exch_input_field_halos(domain, input_obj)!{{{
- subroutine mpas_exch_input_field_halos(domain, input_obj)
-
implicit none
type (domain_type), intent(inout) :: domain
@@ -1385,10 +1579,9 @@
#include "exchange_input_field_halos.inc"
- end subroutine mpas_exch_input_field_halos
+ end subroutine mpas_exch_input_field_halos!}}}
-
- subroutine mpas_io_input_finalize(input_obj, dminfo)
+ subroutine mpas_io_input_finalize(input_obj, dminfo)!{{{
implicit none
@@ -1399,6 +1592,6 @@
call MPAS_closeStream(input_obj % io_stream, nferr)
- end subroutine mpas_io_input_finalize
+ end subroutine mpas_io_input_finalize!}}}
end module mpas_io_input
</font>
</pre>