<p><b>dwj07@fsu.edu</b> 2012-04-24 19:34:37 -0600 (Tue, 24 Apr 2012)</p><p><br>
        -- BRANCH COMMIT --<br>
<br>
        Modifying mpas_io_output.F and mpas_block_decomp.F to support multiple blocks.<br>
        <br>
        These are just example versions, and might not be full final versions.<br>
<br>
        The mpas_io_output changes include mapping blocks of local indices to global, and back to local.<br>
        Right now each conversion is a copy, but that could be changed to a pointer swap.<br>
<br>
        The mpas_block_decomp changes include a new routine to build exchange lists for files, which can<br>
        be built for multiple blocks.<br>
</p><hr noshade><pre><font color="gray">Modified: branches/omp_blocks/multiple_blocks/src/framework/mpas_block_decomp.F
===================================================================
--- branches/omp_blocks/multiple_blocks/src/framework/mpas_block_decomp.F        2012-04-25 01:23:10 UTC (rev 1803)
+++ branches/omp_blocks/multiple_blocks/src/framework/mpas_block_decomp.F        2012-04-25 01:34:37 UTC (rev 1804)
@@ -1,8 +1,10 @@
module mpas_block_decomp
use mpas_dmpar
+ use mpas_dmpar_types
use mpas_hash
use mpas_sort
+ use mpas_grid_types
type graph
integer :: nVerticesTotal
@@ -395,16 +397,28 @@
integer, intent(out) :: blocks_per_proc !< Output: Number of blocks proc_number computes on
integer :: blocks_per_proc_min, even_blocks, remaining_blocks
+ integer :: i, owning_proc, local_block_id
- blocks_per_proc_min = total_blocks / dminfo % nProcs
- remaining_blocks = total_blocks - (blocks_per_proc_min * dminfo % nProcs)
- even_blocks = total_blocks - remaining_blocks
+! if(.not. explicitDecomp) then
+ blocks_per_proc_min = total_blocks / dminfo % nProcs
+ remaining_blocks = total_blocks - (blocks_per_proc_min * dminfo % nProcs)
+ even_blocks = total_blocks - remaining_blocks
- blocks_per_proc = blocks_per_proc_min
+ blocks_per_proc = blocks_per_proc_min
- if(proc_number .le. remaining_blocks) then
- block_per_proc = blocks_per_proc + 1
- endif
+ if(proc_number .le. remaining_blocks) then
+ block_per_proc = blocks_per_proc + 1
+ endif
+! else
+! blocks_per_proc = 0
+! do i = 1, total_blocks
+! call mpas_get_owning_proc(dminfo, i, owning_proc)
+! if(owning_proc == proc_number) then
+! call mpas_get_local_block_id(dminfo, i, local_block_id)
+! blocks_per_proc = integer(max(real(block_per_proc), real(local_block_id)))
+! end if
+! end do
+! end if
end subroutine mpas_get_blocks_per_proc!}}}
@@ -515,4 +529,285 @@
deallocate(block_local_id_list)
end subroutine mpas_finish_block_proc_list!}}}
+ subroutine mpas_get_exchange_lists(dminfo, ownedListField, ownedBlockListField, ownedDecomposed, neededListField, neededBlockListField, neededDecomposed, sendList, recvList)!{{{
+ type (dm_info), intent(in) :: dminfo !< Input: Domain information
+ type (field1dInteger), pointer :: ownedListField !< Input: pointer to the field which contains owned elements for exchange list.
+ type (field1dInteger), pointer :: ownedBlockListField !< Input: pointer to a field which contains block id's for the elements in ownedList.
+ logical, intent(in) :: ownedDecomposed !< Input: logical flag determining if the ownedList is decomposed using block_decomp or not.
+ type (field1dInteger), pointer :: neededListField !< Input: pointer to a field which contains needed elements for exchange list.
+ type (field1dInteger), pointer :: neededBlockListField !< Input: pointer to a field which contains block id's for elements in neededList
+ logical, intent(in) :: neededDecomposed !< Input: logical flag determining if the neededList is decomposed using block_decomp or not.
+ type (exchange_list), pointer :: sendList !< Output: exchange list containing the information to send from the owned elements.
+ type (exchange_list), pointer :: recvList !< Output: exchange list containing the information to recieve for the needed elements.
+
+ type (field1dInteger), pointer :: field_ptr, field_block_ptr
+ type (exchange_list), pointer :: sendListPtr, recvListPtr
+
+ integer :: i, j, k, n, iBlock
+ integer :: nBlocksNeeded, nBlocksOwned
+ integer :: nBlocksNeededMax, nBlocksOwnedMax
+ integer :: totalBlocksOwned, totalBlocksNeeded
+ integer :: nOwnedElelemts, nNeededElements, nNeededElementsMax
+ integer :: recvNeighbor, sendNeighbor
+ integer :: current_proc, nMesgRecv, nMesgSend
+ integer :: localBlockID, globalBlockID, owningProc
+
+ integer :: mpi_ierr, mpi_rreq1, mpi_rreq2, mpi_sreq1, mpi_sreq2
+
+ integer, dimension(:), pointer :: ownedList, ownedBlockList
+ integer, dimension(:), pointer :: neededList, neededBlockList
+ integer, dimension(:), pointer :: numToSend, numToRecv
+
+ integer, dimension(:), pointer :: ownerListIn, ownerBlockListIn, ownerListOut, ownerBlockListOut
+
+ integer, dimension(:,:), pointer :: ownedListSorted, elementRecipients
+
+ allocate(sendList)
+ sendListPtr => sendList
+ nullify(sendListPtr % next)
+
+ allocate(recvList)
+ recvListPtr => recvList
+ nullify(recvListPtr % next)
+
+ if(ownedDecomposed) then
+ call mpas_get_blocks_per_proc(dminfo, dminfo % my_proc_id, nBlocksOwned)
+ totalBlocksOwned = total_blocks
+ else
+ nBlocksOwned = 1
+ totalBlocksOwned = dminfo % nProcs
+ end if
+
+ if(neededDecomposed) then
+ call mpas_get_blocks_per_proc(dminfo, dminfo % my_proc_id, nBlocksNeeded)
+ totalBlocksNeeded = total_blocks
+ else
+ nBlocksNeeded = 1
+ totalBlocksNeeded = dminfo % nProcs
+ end if
+
+ ! Determine number of blocks on current processor, and maximum number of blocks on any processor
+#ifdef _MPI
+ call MPI_Allreduce(nBlocksNeeded, nBlocksNeededMax, 1, MPI_INTEGERKIND, MPI_MAX, dminfo % comm, mpi_ierr)
+ call MPI_Allreduce(nBlocksOwned, nBlocksOwnedMax, 1, MPI_INTEGERKIND, MPI_MAX, dminfo % comm, mpi_ierr)
+#else
+ nBlocksNeededMax = nBlocksNeeded
+ nBlocksOwnedMax = nBlocksNeeded
+#endif
+
+ ! Determine number of owned and needed elements by looping over linked list
+ field_ptr => ownedListField
+ nOwnedElements = 0
+ do while(associated(field_ptr))
+ nOwnedElements = nOwnedElements + size(field_ptr % array)
+ field_ptr => field_ptr % next
+ end do
+
+ field_ptr => neededListField
+ nNeededElements = 0
+ do while(associated(field_ptr))
+ nNeededElements = nNeededElements + size(field_ptr % array)
+ field_ptr => field_ptr % next
+ end do
+
+ ! Allocate ownedList, ownedBlockList, neededList, and neededBlockList to hold all elements.
+ allocate(ownedList(nOwnedElements))
+ allocate(ownedListSorted(2, nOwnedElements))
+ allocate(ownedBlockList(nOwnedElements))
+ allocate(elementRecipients(2, nOwnedElements))
+ allocate(neededList(nNeededElements))
+ allocate(neededBlockList(nNeededElements))
+
+ ! Transfer ownedList and neededList fields into 1D arrays
+ field_ptr => ownedListField
+ field_block_ptr => ownedBlockListField
+ j = 0
+ do while(associated(field_ptr))
+ do i = 1, size(field_ptr % array)
+ j = j + 1
+ ownedList(j) = field_ptr % array(i)
+ ownedBlockList(j) = field_block_ptr % array(i)
+ ownedListSorted(1, j) = ownedList(j)
+ ownedListSorted(2, j) = j
+ end do
+
+ field_ptr => field_ptr % next
+ field_block_ptr => field_block_ptr % next
+ end do
+
+ field_ptr => neededListField
+ field_block_ptr => neededBlockListField
+ j = 0
+ do while(associated(field_ptr))
+ do i = 1, size(field_ptr % array)
+ j = j + 1
+ neededList(j) = field_ptr % array(i)
+ neededBlockList(j) = field_block_ptr % array(i)
+ end do
+
+ field_ptr => field_ptr % next
+ field_block_ptr => field_block_ptr % next
+ end do
+
+ ! Sort ownedList to enable binary search
+ call quicksort(nOwnedElements, ownedListSorted)
+
+ ! Find maximum number of needed elements on any processor
+#ifdef _MPI
+ call MPI_Allreduce(nNeededElements, nNeededElementsMax, 1, MPI_INTEGERKIND, MPI_MAX, dminfo % comm, mpi_ierr)
+#else
+ nNeededElementsMax = nNeededElements
+#endif
+
+ ! Allocate owner arrays
+ allocate(ownerListIn(nNeededElementsMax))
+ allocate(ownerBlockListIn(nNeededElementsMax))
+ allocate(ownerListOut(nNeededElementsMax))
+ allocate(ownerBlockListOut(nNeededElementsMax))
+
+ ownerListIn(1:nNeededElements) = neededList(1:nNeededElements)
+ ownerBlockListIn(1:nNeededElements) = neededBlockList(1:nNeededElements)
+ ownerBlockListOut(1:nNeededElements) = neededBlockList(1:nNeededElements)
+ nMesgRecv = nNeededElements
+
+ ! recieve from the left, send to the right. Assume circularly connected
+ recvNeighbor = mod(dminfo % my_proc_id + dminfo % nprocs - 1, dminfo % nprocs)
+ sendNeighbor = mod(dminfo % my_proc_id + 1, dminfo % nprocs)
+
+ ! Allocate numToSend array. Representing the number of elements to send from owned blocks.
+ allocate(numToSend(totalBlocksOwned))
+
+ do i = 1, dminfo % nProcs
+ ! current_proc is the index to the processor that is currently being worked on by this MPI task
+ current_proc = mod(dminfo % my_proc_id + dminfo % nprocs - i + 1, dminfo % nprocs)
+ numToSend(:) = 0
+ elementRecipients(:,:) = -1
+
+ ! Check to see if this MPI task owns any of the elements in the current ownerListIn.
+ do j = 1, nMesgRecv
+
+ if(ownerListIn(j) > 0) then
+ k = mpas_binary_search(ownedListSorted, 2, 1, nOwnedElements, ownerListIn(j))
+ if(k <= nOwnedElements) then
+ globalBlockID = ownedBlockList(ownedListSorted(2,k))
+
+
+ ownerListOut(j) = -1 * globalBlockID
+ numToSend(globalBlockID + 1) = numToSend(globalBlockID + 1) + 1
+ elementRecipients(1, ownedListSorted(2,k)) = globalBlockID
+ elementRecipients(2, ownedListSorted(2,k)) = numToSend(globalBlockID+1)
+ else
+ ownerListOut(j) = ownerListIn(j)
+ end if
+ else
+ ownerListOut(j) = ownerListIn(j)
+ end if
+ end do ! j loop over nMsgRecv
+
+ ! Check to see if this MPI task has to send any messages to current_proc
+ ! If it does, build a new sendList pointer for the messages.
+ do j = 0, totalBlocksOwned-1
+ if(numToSend(j+1) > 0) then
+ allocate(sendListPtr % next)
+ sendListPtr => sendListPtr % next
+ nullify(sendListPtr % next)
+
+ sendListPtr % procID = current_proc
+ sendListPtr % blockID = j
+ sendListPtr % nList = numToSend(j+1)
+ allocate(sendListPtr % list(numToSend(j+1)))
+
+ n = 0
+ do k = 1, nOwnedElements
+ if(j == elementRecipients(1, k)) then
+ n = n + 1
+ sendListPtr % list(elementRecipients(2,k)) = k
+ end if
+ end do ! k loop over nOwnedElements
+ end if
+ end do ! j loop over nBlocksMax
+
+ nMesgSend = nMesgRecv
+#ifdef _MPI
+ call MPI_Irecv(nMesgRecv, 1, MPI_INTEGERKIND, recvNeighbor, i, dminfo % comm, mpi_rreq1, mpi_ierr)
+ call MPI_Isend(nMesgSend, 1, MPI_INTEGERKIND, sendNeighbor, i, dminfo % comm, mpi_sreq1, mpi_ierr)
+ call MPI_Wait(mpi_rreq1, MPI_STATUS_IGNORE, mpi_ierr)
+ call MPI_Wait(mpi_sreq1, MPI_STATUS_IGNORE, mpi_ierr)
+ call MPI_Irecv(ownerListIn, nMesgRecv, MPI_INTEGERKIND, recvNeighbor, i, dminfo % comm, mpi_rreq1, mpi_ierr)
+ call MPI_Isend(ownerListOut, nMesgSend, MPI_INTEGERKIND, sendNeighbor, i, dminfo % comm, mpi_sreq1, mpi_ierr)
+ call MPI_Wait(mpi_rreq1, MPI_STATUS_IGNORE, mpi_ierr)
+ call MPI_Wait(mpi_sreq1, MPI_STATUS_IGNORE, mpi_ierr)
+ call MPI_Irecv(ownerBlockListIn, nMesgRecv, MPI_INTEGERKIND, recvNeighbor, i, dminfo % comm, mpi_rreq1, mpi_ierr)
+ call MPI_Isend(ownerBlockListOut, nMesgSend, MPI_INTEGERKIND, sendNeighbor, i, dminfo % comm, mpi_sreq1, mpi_ierr)
+ call MPI_Wait(mpi_rreq1, MPI_STATUS_IGNORE, mpi_ierr)
+ call MPI_Wait(mpi_sreq1, MPI_STATUS_IGNORE, mpi_ierr)
+#else
+ ownerListIn = ownerListOut
+#endif
+
+ ownerBlockListOut(1:nMesgRecv) = ownerBlockListIn(1:nMesgRecv)
+ end do ! i loop over nProcs
+
+ ! sendLists are build. ownerListIn should be the same as the original ownerListIn
+ ! time to build recvLists
+
+ ! Allocate numToRecv. Representing the number of elements to recieve from owned blocks.
+ allocate(numToRecv(totalBlocksOwned))
+ do iBlock = 0, nBlocksNeeded-1
+ numToRecv(:) = 0
+
+ do i = 0, totalBlocksOwned-1
+ do j = 1, nNeededElements
+ if(ownerListIn(j) == -i) then
+ numToRecv(i+1) = numToRecv(i+1) + 1
+ end if
+ end do ! j loop over nNeededElements
+ end do ! i loop over totalBlocksOwned
+
+ do i = 0, totalBlocksOwned-1
+ if(numToRecv(i+1) > 0) then
+ call mpas_get_owning_proc(dminfo, i, owningProc)
+
+ allocate(recvListPtr % next)
+ recvListPtr => recvListPtr % next
+ nullify(recvListPtr % next)
+
+ recvListPtr % procID = owningProc
+ recvListPtr % blockID = i
+ recvListPtr % nList = numToRecv(i+1)
+
+ allocate(recvListPtr % list(numToRecv(i+1)))
+ n = 0
+ do j = 1, nNeededElements
+ if(ownerListIn(j) == -i) then
+ n = n + 1
+ recvListPtr % list(n) = j
+ end if
+ end do
+ end if
+ end do ! i loop over totalBlocksOwned
+ end do ! iBlock loop over nBlocksNeeded (local)
+
+ sendListPtr => sendList
+ sendList => sendList % next
+ deallocate(sendListPtr)
+
+ recvListPtr => recvList
+ recvList => recvList % next
+ deallocate(recvListPtr)
+
+ ! Deallocate all allocated memory
+ deallocate(numToSend)
+ deallocate(ownedList)
+ deallocate(ownedBlockList)
+ deallocate(elementRecipients)
+ deallocate(neededList)
+ deallocate(neededBlockList)
+ deallocate(ownerListIn)
+ deallocate(ownerBlockListIn)
+ deallocate(ownerListOut)
+ deallocate(ownerBlockListOut)
+
+ end subroutine mpas_get_exchange_lists!}}}
+
end module mpas_block_decomp
Modified: branches/omp_blocks/multiple_blocks/src/framework/mpas_io_output.F
===================================================================
--- branches/omp_blocks/multiple_blocks/src/framework/mpas_io_output.F        2012-04-25 01:23:10 UTC (rev 1803)
+++ branches/omp_blocks/multiple_blocks/src/framework/mpas_io_output.F        2012-04-25 01:34:37 UTC (rev 1804)
@@ -103,6 +103,8 @@
type (domain_type), intent(inout) :: domain
integer, intent(in) :: itime
+ type(block_type), pointer :: block_ptr
+
integer :: ierr
integer :: i, j
integer, dimension(:,:), pointer :: cellsOnCell, edgesOnCell, verticesOnCell, &
@@ -110,8 +112,16 @@
integer, dimension(:,:), pointer :: cellsOnCell_save, edgesOnCell_save, verticesOnCell_save, &
cellsOnEdge_save, verticesOnEdge_save, edgesOnEdge_save, &
cellsOnVertex_save, edgesOnVertex_save
+ type (field2dInteger), target :: cellsOnCell1_save, edgesOnCell1_save, verticesOnCell1_save, &
+ cellsOnEdge1_save, verticesOnEdge1_save, edgesOnEdge1_save, &
+ cellsOnVertex1_save, edgesOnVertex1_save
+
+ type (field2dInteger), pointer :: cellsOnCell1_ptr, edgesOnCell1_ptr, verticesOnCell1_ptr, &
+ cellsOnEdge1_ptr, verticesOnEdge1_ptr, edgesOnEdge1_ptr, &
+ cellsOnVertex1_ptr, edgesOnVertex1_ptr
+
type (field1dInteger) :: int1d
- type (field2dInteger) :: int2d
+ type (field2dInteger), pointer :: int2d
type (field0dReal) :: real0d
type (field1dReal) :: real1d
type (field2dReal) :: real2d
@@ -121,104 +131,194 @@
output_obj % time = itime
- allocate(cellsOnCell(domain % blocklist % mesh % maxEdges, domain % blocklist % mesh % nCellsSolve))
- allocate(edgesOnCell(domain % blocklist % mesh % maxEdges, domain % blocklist % mesh % nCellsSolve))
- allocate(verticesOnCell(domain % blocklist % mesh % maxEdges, domain % blocklist % mesh % nCellsSolve))
- allocate(cellsOnEdge(2, domain % blocklist % mesh % nEdgesSolve))
- allocate(verticesOnEdge(2, domain % blocklist % mesh % nEdgesSolve))
- allocate(edgesOnEdge(2 * domain % blocklist % mesh % maxEdges, domain % blocklist % mesh % nEdgesSolve))
- allocate(cellsOnVertex(domain % blocklist % mesh % vertexDegree, domain % blocklist % mesh % nVerticesSolve))
- allocate(edgesOnVertex(domain % blocklist % mesh % vertexDegree, domain % blocklist % mesh % nVerticesSolve))
-
-
!
! Convert connectivity information from local to global indices
+ ! Needs to be done block by block
!
- do i=1,domain % blocklist % mesh % nCellsSolve
- do j=1,domain % blocklist % mesh % nEdgesOnCell % array(i)
- cellsOnCell(j,i) = domain % blocklist % mesh % indexToCellID % array( &
- domain % blocklist % mesh % cellsOnCell % array(j,i))
- edgesOnCell(j,i) = domain % blocklist % mesh % indexToEdgeID % array( &
- domain % blocklist % mesh % edgesOnCell % array(j,i))
- verticesOnCell(j,i) = domain % blocklist % mesh % indexToVertexID % array( &
- domain % blocklist % mesh % verticesOnCell % array(j,i))
- end do
- do j=domain % blocklist % mesh % nEdgesOnCell % array(i)+1,domain % blocklist % mesh % maxEdges
- cellsOnCell(j,i) = domain % blocklist % mesh % indexToCellID % array( &
- domain % blocklist % mesh % nEdgesOnCell % array(i))
- edgesOnCell(j,i) = domain % blocklist % mesh % indexToEdgeID % array( &
- domain % blocklist % mesh % nEdgesOnCell % array(i))
- verticesOnCell(j,i) = domain % blocklist % mesh % indexToVertexID % array( &
- domain % blocklist % mesh % nEdgesOnCell % array(i))
- end do
- end do
- do i=1,domain % blocklist % mesh % nEdgesSolve
- cellsOnEdge(1,i) = domain % blocklist % mesh % indexToCellID % array(domain % blocklist % mesh % cellsOnEdge % array(1,i))
- cellsOnEdge(2,i) = domain % blocklist % mesh % indexToCellID % array(domain % blocklist % mesh % cellsOnEdge % array(2,i))
- verticesOnEdge(1,i) = domain % blocklist % mesh % indexToVertexID % array( &
- domain % blocklist % mesh % verticesOnEdge % array(1,i))
- verticesOnEdge(2,i) = domain % blocklist % mesh % indexToVertexID % array( &
- domain % blocklist % mesh % verticesOnEdge % array(2,i))
- do j=1,domain % blocklist % mesh % nEdgesOnEdge % array(i)
- edgesOnEdge(j,i) = domain % blocklist % mesh % indexToEdgeID % array( &
- domain % blocklist % mesh % edgesOnEdge % array(j,i))
- end do
- do j=domain % blocklist % mesh % nEdgesOnEdge % array(i)+1,2*domain % blocklist % mesh % maxEdges
- if(domain % blocklist % mesh % nEdgesOnEdge % array(i) .eq. 0) then
- edgesOnEdge(j,i) = domain % blocklist % mesh % nEdgesSolve + 1
- else
- edgesOnEdge(j,i) = domain % blocklist % mesh % indexToEdgeID % array( &
- domain % blocklist % mesh % nEdgesOnEdge % array(i))
- endif
- end do
- end do
- do i=1,domain % blocklist % mesh % nVerticesSolve
- do j=1,domain % blocklist % mesh % vertexDegree
- cellsOnVertex(j,i) = domain % blocklist % mesh % indexToCellID % array( &
- domain % blocklist % mesh % cellsOnVertex % array(j,i))
- edgesOnVertex(j,i) = domain % blocklist % mesh % indexToEdgeID % array( &
- domain % blocklist % mesh % edgesOnVertex % array(j,i))
- end do
- end do
+ ! Also, backup local indices to be copied back into blocks after output is complete.
+ !
+ cellsOnCell1_ptr => cellsOnCell1_save
+ edgesOnCell1_ptr => edgesOnCell1_save
+ verticesOnCell1_ptr => verticesOnCell1_save
+ cellsOnEdge1_ptr => cellsOnEdge1_save
+ verticesOnEdge1_ptr => verticesOnEdge1_save
+ edgesOnEdge1_ptr => edgesOnEdge1_save
+ cellsOnVertex1_ptr => cellsOnVertex1_save
+ edgesOnVertex1_ptr => edgesOnVertex1_save
- cellsOnCell_save => domain % blocklist % mesh % cellsOnCell % array
- edgesOnCell_save => domain % blocklist % mesh % edgesOnCell % array
- verticesOnCell_save => domain % blocklist % mesh % verticesOnCell % array
- cellsOnEdge_save => domain % blocklist % mesh % cellsOnEdge % array
- verticesOnEdge_save => domain % blocklist % mesh % verticesOnEdge % array
- edgesOnEdge_save => domain % blocklist % mesh % edgesOnEdge % array
- cellsOnVertex_save => domain % blocklist % mesh % cellsOnVertex % array
- edgesOnVertex_save => domain % blocklist % mesh % edgesOnVertex % array
+ block_ptr => domain % blocklist
+ do while(associated(block_ptr))
+ allocate(cellsOnCell1_ptr % array(block_ptr % mesh % maxEdges, block_ptr % mesh % nCellsSolve))
+ allocate(edgesonCell1_ptr % array(block_ptr % mesh % maxEdges, block_ptr % mesh % nCellsSolve))
+ allocate(verticesOnCell1_ptr % array(block_ptr % mesh % maxEdges, block_ptr % mesh % nCellsSolve))
+ allocate(cellsOnEdge1_ptr % array(2, block_ptr % mesh % nEdgesSolve))
+ allocate(verticesOnEdge1_ptr % array(2, block_ptr % mesh % nEdgesSolve))
+ allocate(edgesOnEdge1_ptr % array(2 * block_ptr % mesh % maxEdges, block_ptr % mesh % nEdgesSolve))
+ allocate(cellsOnVertex1_ptr % array(block_ptr % mesh % vertexDegree, block_ptr % mesh % nVerticesSolve))
+ allocate(edgesOnVertex1_ptr % array(block_ptr % mesh % vertexDegree, block_ptr % mesh % nVerticesSolve))
- domain % blocklist % mesh % cellsOnCell % array => cellsOnCell
- domain % blocklist % mesh % edgesOnCell % array => edgesOnCell
- domain % blocklist % mesh % verticesOnCell % array => verticesOnCell
- domain % blocklist % mesh % cellsOnEdge % array => cellsOnEdge
- domain % blocklist % mesh % verticesOnEdge % array => verticesOnEdge
- domain % blocklist % mesh % edgesOnEdge % array => edgesOnEdge
- domain % blocklist % mesh % cellsOnVertex % array => cellsOnVertex
- domain % blocklist % mesh % edgesOnVertex % array => edgesOnVertex
+ do i=1,block_ptr % mesh % nCellsSolve
+ do j=1,block_ptr % mesh % nEdgesOnCell % array(i)
+ cellsonCell1_ptr % array(j, i) = block_ptr % mesh % cellsOnCell % array(j,i)
+ block_ptr % mesh % cellsOnCell % array(j,i) = block_ptr % mesh % indexToCellID % array(block_ptr % mesh % cellsOnCell % array(j,i))
+ edgesOnCell1_ptr % array(j, i) = block_ptr % mesh % edgesOnCell % array(j,i)
+ block_ptr % mesh % edgesOnCell % array(j,i) = block_ptr % mesh % indexToEdgeID % array(block_ptr % mesh % edgesOnCell % array(j,i))
+
+ verticesOnCell1_ptr % array(j, i) = block_ptr % mesh % verticesOnCell % array(j, i)
+ block_ptr % mesh % verticesOnCell % array(j,i) = block_ptr % mesh % indexToVertexID % array(block_ptr % mesh % verticesOnCell % array(j,i))
+ end do
+
+ do j=block_ptr % mesh % nEdgesOnCell % array(i)+1,block_ptr % mesh % maxEdges
+ block_ptr % mesh % cellsOnCell % array(j,i) = block_ptr % mesh % nCellsSolve+1
+ block_ptr % mesh % edgesOnCell % array(j,i) = block_ptr % mesh % nEdgessolve+1
+ block_ptr % mesh % verticesOnCell % array(j,i) = block_ptr % mesh % nVerticesSolve+1
+ end do
+ end do
+
+ do i=1,block_ptr % mesh % nEdgesSolve
+ cellsOnEdge1_ptr % array(1,i) = block_ptr % mesh % cellsOnEdge % array(1,i)
+ cellsOnEdge1_ptr % array(2,i) = block_ptr % mesh % cellsOnEdge % array(2,i)
+ block_ptr % mesh % cellsOnEdge % array(1,i) = block_ptr % mesh % indexToCellID % array(block_ptr % mesh % cellsOnEdge % array(1,i))
+ block_ptr % mesh % cellsOnEdge % array(2,i) = block_ptr % mesh % indexToCellID % array(block_ptr % mesh % cellsOnEdge % array(2,i))
+
+ verticesOnEdge1_ptr % array(1,i) = block_ptr % mesh % verticesOnEdge % array(1,i)
+ verticesOnEdge1_ptr % array(2,i) = block_ptr % mesh % verticesOnEdge % array(2,i)
+ block_ptr % mesh % verticesOnEdge % array(1,i) = block_ptr % mesh % indexToVertexID % array(block_ptr % mesh % verticesOnEdge % array(1,i))
+ block_ptr % mesh % verticesOnEdge % array(2,i) = block_ptr % mesh % indexToVertexID % array(block_ptr % mesh % verticesOnEdge % array(2,i))
+
+ do j=1,block_ptr % mesh % nEdgesOnEdge % array(i)
+ edgesOnEdge1_ptr % array(j,i) = block_ptr % mesh % edgesOnEdge % array(j,i)
+ block_ptr % mesh % edgesOnEdge % array(j,i) = block_ptr % mesh % indexToEdgeID % array(block_ptr % mesh % edgesOnEdge % array(j,i))
+ end do
+
+ do j=block_ptr % mesh % nEdgesOnEdge % array(i)+1,2*block_ptr % mesh % maxEdges
+ edgesOnEdge1_ptr % array(j,i) = block_ptr % mesh % nEdgesSolve + 1
+ block_ptr % mesh % edgesOnEdge % array(j,i) = block_ptr % mesh % nEdgesSolve + 1
+ end do
+
+ end do
+
+ do i=1,block_ptr % mesh % nVerticesSolve
+ do j=1,block_ptr % mesh % vertexDegree
+ cellsOnVertex1_ptr % array(j,i) = block_ptr % mesh % cellsOnVertex % array(j,i)
+ block_ptr % mesh % cellsOnVertex % array(j,i) = block_ptr % mesh % indexToCellID % array(block_ptr % mesh % cellsOnVertex % array(j,i))
+
+ edgesOnVertex1_ptr % array(j,i) = block_ptr % mesh % edgesOnVertex % array(j,i)
+ block_ptr % mesh % edgesOnVertex % array(j,i) = block_ptr % mesh % indexToCellID % array(block_ptr % mesh % edgesOnVertex % array(j,i))
+ end do
+ end do
+
+ block_ptr => block_ptr % next
+
+ if(associated(block_ptr)) then
+ allocate(cellsOnCell1_ptr % next)
+ allocate(edgesonCell1_ptr % next)
+ allocate(verticesOnCell1_ptr % next)
+ allocate(cellsOnEdge1_ptr % next)
+ allocate(verticesOnEdge1_ptr % next)
+ allocate(edgesOnEdge1_ptr % next)
+ allocate(cellsOnVertex1_ptr % next)
+ allocate(edgesOnVertex1_ptr % next)
+
+ cellsOnCell1_ptr => cellsOnCell1_ptr % next
+ edgesOnCell1_ptr => edgesOnCell1_ptr % next
+ verticesOnCell1_ptr => verticesOnCell1_ptr % next
+ cellsOnEdge1_ptr => cellsOnEdge1_ptr % next
+ verticesOnEdge1_ptr => verticesOnEdge1_ptr % next
+ edgesOnEdge1_ptr => edgesOnEdge1_ptr % next
+ cellsOnVertex1_ptr => cellsOnVertex1_ptr % next
+ edgesOnVertex1_ptr => edgesOnVertex1_ptr % next
+ end if
+ end do
+
+ ! Write output file
call MPAS_writeStream(output_obj % io_stream, output_obj % time, ierr)
- domain % blocklist % mesh % cellsOnCell % array => cellsOnCell_save
- domain % blocklist % mesh % edgesOnCell % array => edgesOnCell_save
- domain % blocklist % mesh % verticesOnCell % array => verticesOnCell_save
- domain % blocklist % mesh % cellsOnEdge % array => cellsOnEdge_save
- domain % blocklist % mesh % verticesOnEdge % array => verticesOnEdge_save
- domain % blocklist % mesh % edgesOnEdge % array => edgesOnEdge_save
- domain % blocklist % mesh % cellsOnVertex % array => cellsOnVertex_save
- domain % blocklist % mesh % edgesOnVertex % array => edgesOnVertex_save
+ ! Converge indices back to local indices, and deallocate all temporary arrays.
+ cellsOnCell1_ptr => cellsOnCell1_save
+ edgesOnCell1_ptr => edgesOnCell1_save
+ verticesOnCell1_ptr => verticesOnCell1_save
+ cellsOnEdge1_ptr => cellsOnEdge1_save
+ verticesOnEdge1_ptr => verticesOnEdge1_save
+ edgesOnEdge1_ptr => edgesOnEdge1_save
+ cellsOnVertex1_ptr => cellsOnVertex1_save
+ edgesOnVertex1_ptr => edgesOnVertex1_save
- deallocate(cellsOnCell)
- deallocate(edgesOnCell)
- deallocate(verticesOnCell)
- deallocate(cellsOnEdge)
- deallocate(verticesOnEdge)
- deallocate(edgesOnEdge)
- deallocate(cellsOnVertex)
- deallocate(edgesOnVertex)
+ block_ptr => domain % blocklist
+ do while(associated(block_ptr))
+ do i=1,block_ptr % mesh % nCellsSolve
+ do j=1,block_ptr % mesh % nEdgesOnCell % array(i)
+ block_ptr % mesh % cellsOnCell % array(j,i) = cellsonCell1_ptr % array(j, i)
+ block_ptr % mesh % edgesOnCell % array(j,i) = edgesOnCell1_ptr % array(j, i)
+ block_ptr % mesh % verticesOnCell % array(j,i) = verticesOnCell1_ptr % array(j, i)
+ end do
+ end do
+ do i=1,block_ptr % mesh % nEdgesSolve
+ block_ptr % mesh % cellsOnEdge % array(1,i) = cellsOnEdge1_ptr % array(1,i)
+ block_ptr % mesh % cellsOnEdge % array(2,i) = cellsOnEdge1_ptr % array(2,i)
+
+ block_ptr % mesh % verticesOnEdge % array(1,i) = verticesOnEdge1_ptr % array(1,i)
+ block_ptr % mesh % verticesOnEdge % array(2,i) = verticesOnEdge1_ptr % array(2,i)
+
+ do j=1,block_ptr % mesh % nEdgesOnEdge % array(i)
+ block_ptr % mesh % edgesOnEdge % array(j,i) = edgesOnEdge1_ptr % array(j,i)
+ end do
+ end do
+
+ do i=1,block_ptr % mesh % nVerticesSolve
+ do j=1,block_ptr % mesh % vertexDegree
+ block_ptr % mesh % cellsOnVertex % array(j,i) = cellsOnVertex1_ptr % array(j,i)
+ block_ptr % mesh % edgesOnVertex % array(j,i) = edgesOnVertex1_ptr % array(j,i)
+ end do
+ end do
+
+ deallocate(cellsOnCell1_ptr % array)
+ deallocate(edgesOnCell1_ptr % array)
+ deallocate(verticesOnCell1_ptr % array)
+ deallocate(cellsOnedge1_ptr % array)
+ deallocate(verticesOnedge1_ptr % array)
+ deallocate(edgesOnEdge1_ptr % array)
+ deallocate(cellsOnVertex1_ptr % array)
+ deallocate(edgesOnVertex1_ptr % array)
+
+ block_ptr => block_ptr % next
+ if(associated(block_ptr)) then
+ int2d => cellsOnCell1_ptr % next
+ deallocate(cellsOnCell1_ptr)
+ cellsOnCell1_ptr => int2d
+
+ int2d => edgesOnCell1_ptr % next
+ deallocate(edgesOnCell1_ptr)
+ edgesOnCell1_ptr => int2d
+
+ int2d => verticesOnCell1_ptr % next
+ deallocate(verticesOnCell1_ptr)
+ verticesOnCell1_ptr => int2d
+
+ int2d => cellsOnEdge1_ptr % next
+ deallocate(cellsOnEdge1_ptr)
+ cellsOnEdge1_ptr => int2d
+
+ int2d => verticesOnEdge1_ptr % next
+ deallocate(verticesOnEdge1_ptr)
+ verticesOnEdge1_ptr => int2d
+
+ int2d => edgesOnEdge1_ptr % next
+ deallocate(edgesOnEdge1_ptr)
+ edgesOnEdge1_ptr => int2d
+
+ int2d => cellsOnVertex1_ptr % next
+ deallocate(cellsOnVertex1_ptr)
+ cellsOnVertex1_ptr => int2d
+
+ int2d => edgesOnVertex1_ptr % next
+ deallocate(edgesOnVertex1_ptr)
+ edgesOnVertex1_ptr => int2d
+ end if
+ end do
+
end subroutine mpas_output_state_for_domain
</font>
</pre>