<p><b>duda</b> 2011-10-17 15:43:19 -0600 (Mon, 17 Oct 2011)</p><p>BRANCH COMMIT<br>
<br>
Rename modules, files, and subroutines in framework and driver directories.<br>
</p><hr noshade><pre><font color="gray">Modified: branches/source_renaming/Makefile
===================================================================
--- branches/source_renaming/Makefile        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/Makefile        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,4 +1,3 @@
-CORE=ocean
#MODEL_FORMULATION = -DNCAR_FORMULATION
MODEL_FORMULATION = -DLANL_FORMULATION
Modified: branches/source_renaming/convertFile.sh
===================================================================
--- branches/source_renaming/convertFile.sh        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/convertFile.sh        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,10 +1,10 @@
#!/bin/bash
#Convert Subroutine Names
-SEARCHFILES=`ls src/core_sw/*.F`
-REPLACEFILES=`ls src/core_sw/*.F`
-SUBPREFIX="sw_"
-FILEPREFIX="mpas_sw_"
+SEARCHFILES=`ls src/framework/*.F`
+REPLACEFILES=`ls src/*/*.F`
+SUBPREFIX="mpas_"
+FILEPREFIX="mpas_"
for FILE in $SEARCHFILES
do
Modified: branches/source_renaming/src/core_hyd_atmos/module_advection.F
===================================================================
--- branches/source_renaming/src/core_hyd_atmos/module_advection.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_hyd_atmos/module_advection.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,8 +1,8 @@
module advection
- use grid_types
- use configure
- use constants
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_constants
contains
Modified: branches/source_renaming/src/core_hyd_atmos/module_mpas_core.F
===================================================================
--- branches/source_renaming/src/core_hyd_atmos/module_mpas_core.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_hyd_atmos/module_mpas_core.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -18,8 +18,8 @@
subroutine mpas_core_init(domain, startTimeStamp)
- use configure
- use grid_types
+ use mpas_configure
+ use mpas_grid_types
use test_cases
implicit none
@@ -65,47 +65,47 @@
type (MPAS_TimeInterval_type) :: runDuration, timeStep, alarmTimeStep
integer :: ierr
- call MPAS_setTime(curr_time=startTime, dateTimeString=config_start_time, ierr=ierr)
- call MPAS_setTimeInterval(timeStep, dt=dt, ierr=ierr)
+ call mpas_set_time(curr_time=startTime, dateTimeString=config_start_time, ierr=ierr)
+ call mpas_set_timeInterval(timeStep, dt=dt, ierr=ierr)
if (trim(config_run_duration) /= "none") then
- call MPAS_setTimeInterval(runDuration, timeString=config_run_duration, ierr=ierr)
- call MPAS_createClock(clock, startTime=startTime, timeStep=timeStep, runDuration=runDuration, ierr=ierr)
+ call mpas_set_timeInterval(runDuration, timeString=config_run_duration, ierr=ierr)
+ call mpas_create_clock(clock, startTime=startTime, timeStep=timeStep, runDuration=runDuration, ierr=ierr)
if (trim(config_stop_time) /= "none") then
- call MPAS_setTime(curr_time=stopTime, dateTimeString=config_stop_time, ierr=ierr)
+ call mpas_set_time(curr_time=stopTime, dateTimeString=config_stop_time, ierr=ierr)
if(startTime + runduration /= stopTime) then
write(0,*) 'Warning: config_run_duration and config_stop_time are inconsitent: using config_run_duration.'
end if
end if
else if (trim(config_stop_time) /= "none") then
- call MPAS_setTime(curr_time=stopTime, dateTimeString=config_stop_time, ierr=ierr)
- call MPAS_createClock(clock, startTime=startTime, timeStep=timeStep, stopTime=stopTime, ierr=ierr)
+ call mpas_set_time(curr_time=stopTime, dateTimeString=config_stop_time, ierr=ierr)
+ call mpas_create_clock(clock, startTime=startTime, timeStep=timeStep, stopTime=stopTime, ierr=ierr)
else
write(0,*) 'Error: Neither config_run_duration nor config_stop_time were specified.'
- call dmpar_abort(domain % dminfo)
+ call mpas_dmpar_abort(domain % dminfo)
end if
! set output alarm
- call MPAS_setTimeInterval(alarmTimeStep, timeString=config_output_interval, ierr=ierr)
+ call mpas_set_timeInterval(alarmTimeStep, timeString=config_output_interval, ierr=ierr)
alarmStartTime = startTime + alarmTimeStep
- call MPAS_addClockAlarm(clock, outputAlarmID, alarmStartTime, alarmTimeStep, ierr=ierr)
+ call mpas_add_clock_alarm(clock, outputAlarmID, alarmStartTime, alarmTimeStep, ierr=ierr)
! set restart alarm, if necessary
if (trim(config_restart_interval) /= "none") then
- call MPAS_setTimeInterval(alarmTimeStep, timeString=config_restart_interval, ierr=ierr)
+ call mpas_set_timeInterval(alarmTimeStep, timeString=config_restart_interval, ierr=ierr)
alarmStartTime = startTime + alarmTimeStep
- call MPAS_addClockAlarm(clock, restartAlarmID, alarmStartTime, alarmTimeStep, ierr=ierr)
+ call mpas_add_clock_alarm(clock, restartAlarmID, alarmStartTime, alarmTimeStep, ierr=ierr)
end if
- call MPAS_getTime(curr_time=startTime, dateTimeString=startTimeStamp, ierr=ierr)
+ call mpas_get_time(curr_time=startTime, dateTimeString=startTimeStamp, ierr=ierr)
end subroutine simulation_clock_init
subroutine mpas_init_block(block, mesh, dt)
- use grid_types
+ use mpas_grid_types
use advection
use time_integration
use rbf_interpolation
@@ -138,9 +138,9 @@
subroutine mpas_core_run(domain, output_obj, output_frame)
- use grid_types
- use io_output
- use timer
+ use mpas_grid_types
+ use mpas_io_output
+ use mpas_timer
implicit none
@@ -159,39 +159,39 @@
! Eventually, dt should be domain specific
dt = config_dt
- currTime = MPAS_getClockTime(clock, MPAS_NOW, ierr)
- call MPAS_getTime(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr)
+ currTime = mpas_get_clock_time(clock, MPAS_NOW, ierr)
+ call mpas_get_time(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr)
write(0,*) 'Initial time ', timeStamp
call write_output_frame(output_obj, output_frame, domain)
! During integration, time level 1 stores the model state at the beginning of the
! time step, and time level 2 stores the state advanced dt in time by timestep(...)
- do while (.not. MPAS_isClockStopTime(clock))
+ do while (.not. mpas_is_clock_stop_time(clock))
- call MPAS_advanceClock(clock)
+ call mpas_advance_clock(clock)
- currTime = MPAS_getClockTime(clock, MPAS_NOW, ierr)
- call MPAS_getTime(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr)
+ currTime = mpas_get_clock_time(clock, MPAS_NOW, ierr)
+ call mpas_get_time(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr)
write(0,*) 'Doing timestep ', timeStamp
- call timer_start("time integration")
+ call mpas_timer_start("time integration")
call mpas_timestep(domain, dt, timeStamp)
- call timer_stop("time integration")
+ call mpas_timer_stop("time integration")
! Move time level 2 fields back into time level 1 for next time step
- call shift_time_levels_state(domain % blocklist % state)
+ call mpas_shift_time_levels_state(domain % blocklist % state)
- if (MPAS_isAlarmRinging(clock, outputAlarmID, ierr=ierr)) then
- call MPAS_resetClockAlarm(clock, outputAlarmID, ierr=ierr)
- if(output_frame == 1) call output_state_init(output_obj, domain, "OUTPUT", trim(timeStamp)) ! output_frame will always be > 1 here unless it is reset after the output file is finalized
+ if (mpas_is_alarm_ringing(clock, outputAlarmID, ierr=ierr)) then
+ call mpas_reset_clock_alarm(clock, outputAlarmID, ierr=ierr)
+ if(output_frame == 1) call mpas_output_state_init(output_obj, domain, "OUTPUT", trim(timeStamp)) ! output_frame will always be > 1 here unless it is reset after the output file is finalized
call write_output_frame(output_obj, output_frame, domain)
end if
- if (MPAS_isAlarmRinging(clock, restartAlarmID, ierr=ierr)) then
- call MPAS_resetClockAlarm(clock, restartAlarmID, ierr=ierr)
- if (restart_frame == 1) call output_state_init(restart_obj, domain, "RESTART")
- call output_state_for_domain(restart_obj, domain, restart_frame)
+ if (mpas_is_alarm_ringing(clock, restartAlarmID, ierr=ierr)) then
+ call mpas_reset_clock_alarm(clock, restartAlarmID, ierr=ierr)
+ if (restart_frame == 1) call mpas_output_state_init(restart_obj, domain, "RESTART")
+ call mpas_output_state_for_domain(restart_obj, domain, restart_frame)
restart_frame = restart_frame + 1
end if
@@ -208,8 +208,8 @@
! before returning
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- use grid_types
- use io_output
+ use mpas_grid_types
+ use mpas_io_output
implicit none
@@ -227,7 +227,7 @@
block_ptr => block_ptr % next
end do
- call output_state_for_domain(output_obj, domain, output_frame)
+ call mpas_output_state_for_domain(output_obj, domain, output_frame)
output_frame = output_frame + 1
! if the maximum number of frames per outfile has been reached, finalize outfile and reset frame
@@ -235,7 +235,7 @@
current_outfile_frames = current_outfile_frames + 1
if(current_outfile_frames >= config_frames_per_outfile) then
current_outfile_frames = 0
- call output_state_finalize(output_obj, domain % dminfo)
+ call mpas_output_state_finalize(output_obj, domain % dminfo)
output_frame = 1
end if
end if
@@ -253,7 +253,7 @@
! Output: state - upon returning, diagnostic fields will have be computed
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- use grid_types
+ use mpas_grid_types
implicit none
@@ -268,7 +268,7 @@
subroutine mpas_timestep(domain, dt, timeStamp)
- use grid_types
+ use mpas_grid_types
use time_integration
implicit none
@@ -284,7 +284,7 @@
subroutine mpas_core_finalize(domain)
- use grid_types
+ use mpas_grid_types
implicit none
@@ -292,9 +292,9 @@
type (domain_type), intent(inout) :: domain
- if (restart_frame > 1) call output_state_finalize(restart_obj, domain % dminfo)
+ if (restart_frame > 1) call mpas_output_state_finalize(restart_obj, domain % dminfo)
- call MPAS_destroyClock(clock, ierr)
+ call mpas_destroy_clock(clock, ierr)
end subroutine mpas_core_finalize
Modified: branches/source_renaming/src/core_hyd_atmos/module_test_cases.F
===================================================================
--- branches/source_renaming/src/core_hyd_atmos/module_test_cases.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_hyd_atmos/module_test_cases.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,8 +1,8 @@
module test_cases
- use grid_types
- use configure
- use constants
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_constants
contains
@@ -37,7 +37,7 @@
do while (associated(block_ptr))
call hyd_test_case_1(block_ptr % mesh, block_ptr % state % time_levs(1) % state, config_test_case)
do i=2,nTimeLevs
- call copy_state(block_ptr % state % time_levs(i) % state, block_ptr % state % time_levs(1) % state)
+ call mpas_copy_state(block_ptr % state % time_levs(i) % state, block_ptr % state % time_levs(1) % state)
end do
block_ptr => block_ptr % next
Modified: branches/source_renaming/src/core_hyd_atmos/module_time_integration.F
===================================================================
--- branches/source_renaming/src/core_hyd_atmos/module_time_integration.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_hyd_atmos/module_time_integration.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,9 +1,9 @@
module time_integration
- use grid_types
- use configure
- use constants
- use dmpar
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_constants
+ use mpas_dmpar
use vector_reconstruction
@@ -101,7 +101,7 @@
block => domain % blocklist
do while (associated(block))
- call copy_state( block % state % time_levs(2) % state, block % state % time_levs(1) % state )
+ call mpas_copy_state( block % state % time_levs(2) % state, block % state % time_levs(1) % state )
block => block % next
end do
@@ -116,32 +116,32 @@
block => domain % blocklist
do while (associated(block))
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % mesh % qtot % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % mesh % qtot % array(:,:), &
block % mesh % nVertLevels, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % mesh % cqu % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % mesh % cqu % array(:,:), &
block % mesh % nVertLevels, block % mesh % nEdges, &
block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % h % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % h % array(:,:), &
block % mesh % nVertLevels, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % pressure % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % pressure % array(:,:), &
block % mesh % nVertLevels+1, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % geopotential % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % geopotential % array(:,:), &
block % mesh % nVertLevels+1, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % alpha % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % alpha % array(:,:), &
block % mesh % nVertLevels, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % pv_edge % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % pv_edge % array(:,:), &
block % mesh % nVertLevels, block % mesh % nEdges, &
block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
if (config_h_mom_eddy_visc4 > 0.0) then
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % divergence % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % divergence % array(:,:), &
block % mesh % nVertLevels, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % vorticity % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % vorticity % array(:,:), &
block % mesh % nVertLevels, block % mesh % nVertices, &
block % parinfo % verticesToSend, block % parinfo % verticesToRecv)
end if
@@ -163,10 +163,10 @@
!
block => domain % blocklist
do while (associated(block))
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % tend % u % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % u % array(:,:), &
block % mesh % nVertLevels, block % mesh % nEdges, &
block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % tend % theta % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % theta % array(:,:), &
block % mesh % nVertLevels, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
block => block % next
@@ -183,7 +183,7 @@
! A cleaner way of preserving scalars should be added in future.
!
block % mesh % scalars_old % array(:,:,:) = block % state % time_levs(2) % state % scalars % array(:,:,:)
- call copy_state( block % state % time_levs(2) % state, block % state % time_levs(1) % state )
+ call mpas_copy_state( block % state % time_levs(2) % state, block % state % time_levs(1) % state )
block % state % time_levs(2) % state % scalars % array(:,:,:) = block % mesh % scalars_old % array(:,:,:)
block => block % next
end do
@@ -210,46 +210,46 @@
!
block => domain % blocklist
do while (associated(block))
-!! call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % u % array(:,:), &
+!! call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % u % array(:,:), &
!! block % mesh % nVertLevels, block % mesh % nEdges, &
!! block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % h_edge % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % h_edge % array(:,:), &
block % mesh % nVertLevels, block % mesh % nEdges, &
block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
-!! call dmpar_exch_halo_field2dReal(domain % dminfo, block % mesh % uhAvg % array(:,:), &
+!! call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % mesh % uhAvg % array(:,:), &
!! block % mesh % nVertLevels, block % mesh % nEdges, &
!! block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
-!! call dmpar_exch_halo_field2dReal(domain % dminfo, block % mesh % wwAvg % array(:,:), &
+!! call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % mesh % wwAvg % array(:,:), &
!! block % mesh % nVertLevels+1, block % mesh % nCells, &
!! block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % theta % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % theta % array(:,:), &
block % mesh % nVertLevels, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
-!! call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % h % array(:,:), &
+!! call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % h % array(:,:), &
!! block % mesh % nVertLevels, block % mesh % nCells, &
!! block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
-!! call dmpar_exch_halo_field2dReal(domain % dminfo, block % tend % h % array(:,:), &
+!! call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % h % array(:,:), &
!! block % mesh % nVertLevels, block % mesh % nCells, &
!! block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field1dReal(domain % dminfo, block % mesh % dpsdt % array(:), &
+ call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, block % mesh % dpsdt % array(:), &
block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field1dReal(domain % dminfo, block % state % time_levs(2) % state % surface_pressure % array(:), &
+ call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, block % state % time_levs(2) % state % surface_pressure % array(:), &
block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % alpha % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % alpha % array(:,:), &
block % mesh % nVertLevels, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % ww % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % ww % array(:,:), &
block % mesh % nVertLevels+1, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % pressure % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % pressure % array(:,:), &
block % mesh % nVertLevels+1, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
-!! call dmpar_exch_halo_field2dReal(domain % dminfo, block % mesh % pressure_old % array(:,:), &
+!! call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % mesh % pressure_old % array(:,:), &
!! block % mesh % nVertLevels+1, block % mesh % nCells, &
!! block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % geopotential % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % geopotential % array(:,:), &
block % mesh % nVertLevels+1, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
block => block % next
@@ -284,10 +284,10 @@
block => domain % blocklist
do while (associated(block))
- call dmpar_exch_halo_field3dReal(domain % dminfo, block % tend % scalars % array(:,:,:), &
+ call mpas_dmpar_exch_halo_field3d_real(domain % dminfo, block % tend % scalars % array(:,:,:), &
block % tend % num_scalars, block % mesh % nVertLevels, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field3dReal(domain % dminfo, block % state % time_levs(2) % state % scalars % array(:,:,:), &
+ call mpas_dmpar_exch_halo_field3d_real(domain % dminfo, block % state % time_levs(2) % state % scalars % array(:,:,:), &
block % tend % num_scalars, block % mesh % nVertLevels, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
block => block % next
@@ -358,10 +358,10 @@
end do
block => block % next
end do
- call dmpar_sum_real(domain % dminfo, domain_mass, global_domain_mass)
- call dmpar_sum_real(domain % dminfo, scalar_mass, global_scalar_mass)
- call dmpar_min_real(domain % dminfo, scalar_min, global_scalar_min)
- call dmpar_max_real(domain % dminfo, scalar_max, global_scalar_max)
+ call mpas_dmpar_sum_real(domain % dminfo, domain_mass, global_domain_mass)
+ call mpas_dmpar_sum_real(domain % dminfo, scalar_mass, global_scalar_mass)
+ call mpas_dmpar_min_real(domain % dminfo, scalar_min, global_scalar_min)
+ call mpas_dmpar_max_real(domain % dminfo, scalar_max, global_scalar_max)
write(0,*) ' mass in the domain = ',global_domain_mass
write(0,*) ' scalar mass in the domain = ',global_scalar_mass
write(0,*) ' scalar_min, scalar_max ',global_scalar_min, global_scalar_max
@@ -1695,16 +1695,16 @@
end do ! end loop over cells to compute scale factor
- call dmpar_exch_halo_field2dReal(dminfo, scale_out(:,:,1), &
+ call mpas_dmpar_exch_halo_field2d_real(dminfo, scale_out(:,:,1), &
num_scalars, grid % nCells, &
cellsToSend, cellsToRecv)
- call dmpar_exch_halo_field2dReal(dminfo, scale_out(:,:,2), &
+ call mpas_dmpar_exch_halo_field2d_real(dminfo, scale_out(:,:,2), &
num_scalars, grid % nCells, &
cellsToSend, cellsToRecv)
- call dmpar_exch_halo_field2dReal(dminfo, scale_in(:,:,1), &
+ call mpas_dmpar_exch_halo_field2d_real(dminfo, scale_in(:,:,1), &
num_scalars, grid % nCells, &
cellsToSend, cellsToRecv)
- call dmpar_exch_halo_field2dReal(dminfo, scale_in(:,:,2), &
+ call mpas_dmpar_exch_halo_field2d_real(dminfo, scale_in(:,:,2), &
num_scalars, grid % nCells, &
cellsToSend, cellsToRecv)
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_advection.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_advection.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_advection.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,8 +1,8 @@
module advection
- use grid_types
- use configure
- use constants
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_constants
contains
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_equation_of_state.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_equation_of_state.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_equation_of_state.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -14,9 +14,9 @@
module ocn_equation_of_state
- use grid_types
- use configure
- use timer
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_timer
use ocn_equation_of_state_linear
use ocn_equation_of_state_jm
@@ -99,7 +99,7 @@
if(.not.eosOn) return
- call timer_start("ocn_equation_of_state_rho")
+ call mpas_timer_start("ocn_equation_of_state_rho")
tracers => s % tracers % array
indexT = s % index_temperature
@@ -122,7 +122,7 @@
endif
- call timer_stop("ocn_equation_of_state_rho")
+ call mpas_timer_stop("ocn_equation_of_state_rho")
end subroutine ocn_equation_of_state_rho!}}}
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_equation_of_state_jm.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_equation_of_state_jm.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_equation_of_state_jm.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -14,9 +14,9 @@
module ocn_equation_of_state_jm
- use grid_types
- use configure
- use timer
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_timer
implicit none
private
@@ -194,7 +194,7 @@
err = 0
- call timer_start("equation_of_state_jm")
+ call mpas_timer_start("equation_of_state_jm")
nCells = grid % nCells
maxLevelCell => grid % maxLevelCell % array
@@ -239,7 +239,7 @@
write(0,*) 'Abort: In equation_of_state_jm', &
' k_displaced must be between 1 and nVertLevels for ', &
'displacement_type = absolute'
- call dmpar_abort(dminfo)
+ call mpas_dmpar_abort(dminfo)
endif
if (k_displaced == 0) then
@@ -309,7 +309,7 @@
deallocate(pRefEOS,p,p2)
- call timer_stop("equation_of_state_jm")
+ call mpas_timer_stop("equation_of_state_jm")
end subroutine ocn_equation_of_state_jm_rho!}}}
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_equation_of_state_linear.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_equation_of_state_linear.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_equation_of_state_linear.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -14,9 +14,9 @@
module ocn_equation_of_state_linear
- use grid_types
- use configure
- use timer
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_timer
implicit none
private
@@ -87,7 +87,7 @@
integer :: nCells, iCell, k
type (dm_info) :: dminfo
- call timer_start("ocn_equation_of_state_linear")
+ call mpas_timer_start("ocn_equation_of_state_linear")
maxLevelCell => grid % maxLevelCell % array
nCells = grid % nCells
@@ -103,7 +103,7 @@
end do
end do
- call timer_stop("ocn_equation_of_state_linear")
+ call mpas_timer_stop("ocn_equation_of_state_linear")
end subroutine ocn_equation_of_state_linear_rho!}}}
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_global_diagnostics.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_global_diagnostics.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_global_diagnostics.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,9 +1,9 @@
module global_diagnostics
- use grid_types
- use configure
- use constants
- use dmpar
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_constants
+ use mpas_dmpar
implicit none
save
@@ -218,7 +218,7 @@
nMaxes = nMaxes + nVariables
! global reduction of the 5 arrays (packed into 3 to minimize global communication)
- call dmpar_sum_real_array(dminfo, nSums, sums(1:nSums), reductions(1:nSums))
+ call mpas_dmpar_sum_real_array(dminfo, nSums, sums(1:nSums), reductions(1:nSums))
sums(1:nVariables) = reductions(1:nVariables)
areaCellGlobal = reductions(nVariables+1)
areaEdgeGlobal = reductions(nVariables+2)
@@ -227,11 +227,11 @@
nEdgesGlobal = int(reductions(nVariables+5))
nVerticesGlobal = int(reductions(nVariables+6))
- call dmpar_min_real_array(dminfo, nMins, mins(1:nMins), reductions(1:nMins))
+ call mpas_dmpar_min_real_array(dminfo, nMins, mins(1:nMins), reductions(1:nMins))
mins(1:nVariables) = reductions(1:nVariables)
verticalSumMins(1:nVariables) = reductions(nMins-nVariables+1:nMins)
- call dmpar_max_real_array(dminfo, nMaxes, maxes(1:nMaxes), reductions(1:nMaxes))
+ call mpas_dmpar_max_real_array(dminfo, nMaxes, maxes(1:nMaxes), reductions(1:nMaxes))
maxes(1:nVariables) = reductions(1:nVariables)
CFLNumberGlobal = reductions(nVariables+1)
verticalSumMaxes(1:nVariables) = reductions(nMaxes-nVariables+1:nMaxes)
@@ -474,7 +474,7 @@
real (kind=RKIND) :: localSum
localSum = sum(field)
- call dmpar_sum_real(dminfo, localSum, globalSum)
+ call mpas_dmpar_sum_real(dminfo, localSum, globalSum)
end subroutine computeGlobalSum
@@ -496,7 +496,7 @@
localSum = localSum + areas(elementIndex) * sum(field(:,elementIndex))
end do
- call dmpar_sum_real(dminfo, localSum, globalSum)
+ call mpas_dmpar_sum_real(dminfo, localSum, globalSum)
end subroutine computeAreaWeightedGlobalSum
@@ -531,7 +531,7 @@
real (kind=RKIND) :: localMin
localMin = minval(field)
- call dmpar_min_real(dminfo, localMin, globalMin)
+ call mpas_dmpar_min_real(dminfo, localMin, globalMin)
end subroutine computeGlobalMin
@@ -547,7 +547,7 @@
real (kind=RKIND) :: localMax
localMax = maxval(field)
- call dmpar_max_real(dminfo, localMax, globalMax)
+ call mpas_dmpar_max_real(dminfo, localMax, globalMax)
end subroutine computeGlobalMax
@@ -563,7 +563,7 @@
real (kind=RKIND) :: localMin
localMin = minval(sum(field,1))
- call dmpar_min_real(dminfo, localMin, globalMin)
+ call mpas_dmpar_min_real(dminfo, localMin, globalMin)
end subroutine computeGlobalVertSumHorizMin
@@ -579,7 +579,7 @@
real (kind=RKIND) :: localMax
localMax = maxval(sum(field,1))
- call dmpar_max_real(dminfo, localMax, globalMax)
+ call mpas_dmpar_max_real(dminfo, localMax, globalMax)
end subroutine computeGlobalVertSumHorizMax
@@ -595,7 +595,7 @@
real (kind=RKIND) :: localMin
localMin = minval(sum(h*field,1))
- call dmpar_min_real(dminfo, localMin, globalMin)
+ call mpas_dmpar_min_real(dminfo, localMin, globalMin)
end subroutine computeGlobalVertThicknessWeightedSumHorizMin
@@ -611,7 +611,7 @@
real (kind=RKIND) :: localMax
localMax = maxval(sum(h*field,1))
- call dmpar_max_real(dminfo, localMax, globalMax)
+ call mpas_dmpar_max_real(dminfo, localMax, globalMax)
end subroutine computeGlobalVertThicknessWeightedSumHorizMax
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_mpas_core.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_mpas_core.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_mpas_core.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -2,7 +2,7 @@
use mpas_framework
use mpas_timekeeping
- use dmpar
+ use mpas_dmpar
use test_cases
use ocn_time_integration
@@ -38,8 +38,8 @@
subroutine mpas_core_init(domain, startTimeStamp)!{{{
- use configure
- use grid_types
+ use mpas_configure
+ use mpas_grid_types
implicit none
@@ -81,7 +81,7 @@
err = err .or. err_tmp
if(err) then
- call dmpar_abort(dminfo)
+ call mpas_dmpar_abort(dminfo)
endif
if (.not. config_do_restart) call setup_sw_test_case(domain)
@@ -96,14 +96,14 @@
else
print *, ' Incorrect choice of config_vert_grid_type:',&
config_vert_grid_type
- call dmpar_abort(dminfo)
+ call mpas_dmpar_abort(dminfo)
endif
if (trim(config_new_btr_variables_from) == 'btr_avg' &
.and.trim(config_time_integration) == 'unsplit_explicit') then
print *, ' unsplit_explicit option must use',&
' config_new_btr_variables_from==last_subcycle'
- call dmpar_abort(dminfo)
+ call mpas_dmpar_abort(dminfo)
endif
!
@@ -127,10 +127,10 @@
! input arguement into mpas_init. Ask about that later. For now, there will be
! no initial statistics write.
- ! call timer_start("global diagnostics")
+ ! call mpas_timer_start("global diagnostics")
! call computeGlobalDiagnostics(domain % dminfo, block % state % time_levs(1) % state, mesh, 0, dt)
- ! call timer_stop("global diagnostics")
- ! call output_state_init(output_obj, domain, "OUTPUT")
+ ! call mpas_timer_stop("global diagnostics")
+ ! call mpas_output_state_init(output_obj, domain, "OUTPUT")
! call write_output_frame(output_obj, domain)
restart_frame = 1
@@ -150,55 +150,55 @@
type (MPAS_TimeInterval_type) :: runDuration, timeStep, alarmTimeStep
integer :: ierr
- call MPAS_setTime(curr_time=startTime, dateTimeString=config_start_time, ierr=ierr)
- call MPAS_setTimeInterval(timeStep, dt=dt, ierr=ierr)
+ call mpas_set_time(curr_time=startTime, dateTimeString=config_start_time, ierr=ierr)
+ call mpas_set_timeInterval(timeStep, dt=dt, ierr=ierr)
if (trim(config_run_duration) /= "none") then
- call MPAS_setTimeInterval(runDuration, timeString=config_run_duration, ierr=ierr)
- call MPAS_createClock(clock, startTime=startTime, timeStep=timeStep, runDuration=runDuration, ierr=ierr)
+ call mpas_set_timeInterval(runDuration, timeString=config_run_duration, ierr=ierr)
+ call mpas_create_clock(clock, startTime=startTime, timeStep=timeStep, runDuration=runDuration, ierr=ierr)
if (trim(config_stop_time) /= "none") then
- call MPAS_setTime(curr_time=stopTime, dateTimeString=config_stop_time, ierr=ierr)
+ call mpas_set_time(curr_time=stopTime, dateTimeString=config_stop_time, ierr=ierr)
if(startTime + runduration /= stopTime) then
write(0,*) 'Warning: config_run_duration and config_stop_time are inconsitent: using config_run_duration.'
end if
end if
else if (trim(config_stop_time) /= "none") then
- call MPAS_setTime(curr_time=stopTime, dateTimeString=config_stop_time, ierr=ierr)
- call MPAS_createClock(clock, startTime=startTime, timeStep=timeStep, stopTime=stopTime, ierr=ierr)
+ call mpas_set_time(curr_time=stopTime, dateTimeString=config_stop_time, ierr=ierr)
+ call mpas_create_clock(clock, startTime=startTime, timeStep=timeStep, stopTime=stopTime, ierr=ierr)
else
write(0,*) 'Error: Neither config_run_duration nor config_stop_time were specified.'
- call dmpar_finalize(domain % dminfo)
+ call mpas_dmpar_finalize(domain % dminfo)
end if
! set output alarm
- call MPAS_setTimeInterval(alarmTimeStep, timeString=config_output_interval, ierr=ierr)
+ call mpas_set_timeInterval(alarmTimeStep, timeString=config_output_interval, ierr=ierr)
alarmStartTime = startTime + alarmTimeStep
- call MPAS_addClockAlarm(clock, outputAlarmID, alarmStartTime, alarmTimeStep, ierr=ierr)
+ call mpas_add_clock_alarm(clock, outputAlarmID, alarmStartTime, alarmTimeStep, ierr=ierr)
! set restart alarm, if necessary
if (trim(config_restart_interval) /= "none") then
- call MPAS_setTimeInterval(alarmTimeStep, timeString=config_restart_interval, ierr=ierr)
+ call mpas_set_timeInterval(alarmTimeStep, timeString=config_restart_interval, ierr=ierr)
alarmStartTime = startTime + alarmTimeStep
- call MPAS_addClockAlarm(clock, restartAlarmID, alarmStartTime, alarmTimeStep, ierr=ierr)
+ call mpas_add_clock_alarm(clock, restartAlarmID, alarmStartTime, alarmTimeStep, ierr=ierr)
end if
!TODO: use this code if we desire to convert config_stats_interval to alarms
!(must also change config_stats_interval type to character)
! set stats alarm, if necessary
!if (trim(config_stats_interval) /= "none") then
- ! call MPAS_setTimeInterval(alarmTimeStep, timeString=config_stats_interval, ierr=ierr)
+ ! call mpas_set_timeInterval(alarmTimeStep, timeString=config_stats_interval, ierr=ierr)
! alarmStartTime = startTime + alarmTimeStep
- ! call MPAS_addClockAlarm(clock, statsAlarmID, alarmStartTime, alarmTimeStep, ierr=ierr)
+ ! call mpas_add_clock_alarm(clock, statsAlarmID, alarmStartTime, alarmTimeStep, ierr=ierr)
!end if
- call MPAS_getTime(curr_time=startTime, dateTimeString=startTimeStamp, ierr=ierr)
+ call mpas_get_time(curr_time=startTime, dateTimeString=startTimeStamp, ierr=ierr)
end subroutine simulation_clock_init!}}}
subroutine mpas_init_block(block, mesh, dt)!{{{
- use grid_types
+ use mpas_grid_types
use rbf_interpolation
use vector_reconstruction
@@ -258,7 +258,7 @@
! mrp 110808 add, so that variables are copied to * variables for split explicit
do i=2,nTimeLevs
- call copy_state(block % state % time_levs(i) % state, &
+ call mpas_copy_state(block % state % time_levs(i) % state, &
block % state % time_levs(1) % state)
end do
! mrp 110808 add end
@@ -266,7 +266,7 @@
else
do i=2,nTimeLevs
- call copy_state(block % state % time_levs(i) % state, &
+ call mpas_copy_state(block % state % time_levs(i) % state, &
block % state % time_levs(1) % state)
end do
endif
@@ -275,9 +275,9 @@
subroutine mpas_core_run(domain, output_obj, output_frame)!{{{
- use grid_types
- use io_output
- use timer
+ use mpas_grid_types
+ use mpas_io_output
+ use mpas_timer
implicit none
@@ -296,8 +296,8 @@
! Eventually, dt should be domain specific
dt = config_dt
- currTime = MPAS_getClockTime(clock, MPAS_NOW, ierr)
- call MPAS_getTime(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr)
+ currTime = mpas_get_clock_time(clock, MPAS_NOW, ierr)
+ call mpas_get_time(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr)
write(0,*) 'Initial time ', timeStamp
call write_output_frame(output_obj, output_frame, domain)
@@ -305,32 +305,32 @@
! During integration, time level 1 stores the model state at the beginning of the
! time step, and time level 2 stores the state advanced dt in time by timestep(...)
itimestep = 0
- do while (.not. MPAS_isClockStopTime(clock))
+ do while (.not. mpas_is_clock_stop_time(clock))
itimestep = itimestep + 1
- call MPAS_advanceClock(clock)
+ call mpas_advance_clock(clock)
- currTime = MPAS_getClockTime(clock, MPAS_NOW, ierr)
- call MPAS_getTime(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr)
+ currTime = mpas_get_clock_time(clock, MPAS_NOW, ierr)
+ call mpas_get_time(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr)
write(0,*) 'Doing timestep ', timeStamp
- call timer_start("time integration")
+ call mpas_timer_start("time integration")
call mpas_timestep(domain, itimestep, dt, timeStamp)
- call timer_stop("time integration")
+ call mpas_timer_stop("time integration")
! Move time level 2 fields back into time level 1 for next time step
- call shift_time_levels_state(domain % blocklist % state)
+ call mpas_shift_time_levels_state(domain % blocklist % state)
- if (MPAS_isAlarmRinging(clock, outputAlarmID, ierr=ierr)) then
- call MPAS_resetClockAlarm(clock, outputAlarmID, ierr=ierr)
- if(output_frame == 1) call output_state_init(output_obj, domain, "OUTPUT", trim(timeStamp)) ! output_frame will always be > 1 here unless it is reset after the output file is finalized
+ if (mpas_is_alarm_ringing(clock, outputAlarmID, ierr=ierr)) then
+ call mpas_reset_clock_alarm(clock, outputAlarmID, ierr=ierr)
+ if(output_frame == 1) call mpas_output_state_init(output_obj, domain, "OUTPUT", trim(timeStamp)) ! output_frame will always be > 1 here unless it is reset after the output file is finalized
call write_output_frame(output_obj, output_frame, domain)
end if
- if (MPAS_isAlarmRinging(clock, restartAlarmID, ierr=ierr)) then
- call MPAS_resetClockAlarm(clock, restartAlarmID, ierr=ierr)
- if (restart_frame == 1) call output_state_init(restart_obj, domain, "RESTART")
- call output_state_for_domain(restart_obj, domain, restart_frame)
+ if (mpas_is_alarm_ringing(clock, restartAlarmID, ierr=ierr)) then
+ call mpas_reset_clock_alarm(clock, restartAlarmID, ierr=ierr)
+ if (restart_frame == 1) call mpas_output_state_init(restart_obj, domain, "RESTART")
+ call mpas_output_state_for_domain(restart_obj, domain, restart_frame)
restart_frame = restart_frame + 1
end if
@@ -346,8 +346,8 @@
! before returning
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- use grid_types
- use io_output
+ use mpas_grid_types
+ use mpas_io_output
implicit none
@@ -365,7 +365,7 @@
block_ptr => block_ptr % next
end do
- call output_state_for_domain(output_obj, domain, output_frame)
+ call mpas_output_state_for_domain(output_obj, domain, output_frame)
output_frame = output_frame + 1
! if the maximum number of frames per outfile has been reached, finalize outfile and reset frame
@@ -373,7 +373,7 @@
current_outfile_frames = current_outfile_frames + 1
if(current_outfile_frames >= config_frames_per_outfile) then
current_outfile_frames = 0
- call output_state_finalize(output_obj, domain % dminfo)
+ call mpas_output_state_finalize(output_obj, domain % dminfo)
output_frame = 1
end if
end if
@@ -390,7 +390,7 @@
! Output: state - upon returning, diagnostic fields will have be computed
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- use grid_types
+ use mpas_grid_types
implicit none
@@ -404,8 +404,8 @@
subroutine mpas_timestep(domain, itimestep, dt, timeStamp)!{{{
- use grid_types
- use timer
+ use mpas_grid_types
+ use mpas_timer
use global_diagnostics
implicit none
@@ -428,17 +428,17 @@
'that there is only one block per processor.'
end if
- call timer_start("global diagnostics")
+ call mpas_timer_start("global diagnostics")
call computeGlobalDiagnostics(domain % dminfo, &
block_ptr % state % time_levs(2) % state, block_ptr % mesh, &
itimestep, dt)
- call timer_stop("global diagnostics")
+ call mpas_timer_stop("global diagnostics")
end if
end if
!TODO: replace the above code block with this if we desire to convert config_stats_interval to use alarms
- !if (MPAS_isAlarmRinging(clock, statsAlarmID, ierr=ierr)) then
- ! call MPAS_resetClockAlarm(clock, statsAlarmID, ierr=ierr)
+ !if (mpas_is_alarm_ringing(clock, statsAlarmID, ierr=ierr)) then
+ ! call mpas_reset_clock_alarm(clock, statsAlarmID, ierr=ierr)
! block_ptr => domain % blocklist
! if (associated(block_ptr % next)) then
@@ -446,11 +446,11 @@
! 'that there is only one block per processor.'
! end if
- ! call timer_start("global diagnostics")
+ ! call mpas_timer_start("global diagnostics")
! call computeGlobalDiagnostics(domain % dminfo, &
! block_ptr % state % time_levs(2) % state, block_ptr % mesh, &
! timeStamp, dt)
- ! call timer_stop("global diagnostics")
+ ! call mpas_timer_stop("global diagnostics")
!end if
end subroutine mpas_timestep!}}}
@@ -458,8 +458,8 @@
subroutine init_ZLevel(domain)!{{{
! Initialize maxLevel and bouncary grid variables.
- use grid_types
- use configure
+ use mpas_grid_types
+ use mpas_configure
implicit none
@@ -622,9 +622,9 @@
subroutine compute_maxLevel(domain)!{{{
! Initialize maxLevel and bouncary grid variables.
- use grid_types
- use configure
- use constants
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_constants
implicit none
@@ -744,7 +744,7 @@
subroutine mpas_core_finalize(domain)!{{{
- use grid_types
+ use mpas_grid_types
implicit none
@@ -752,16 +752,16 @@
type (domain_type), intent(inout) :: domain
- if (restart_frame > 1) call output_state_finalize(restart_obj, domain % dminfo)
+ if (restart_frame > 1) call mpas_output_state_finalize(restart_obj, domain % dminfo)
- call MPAS_destroyClock(clock, ierr)
+ call mpas_destroy_clock(clock, ierr)
end subroutine mpas_core_finalize!}}}
subroutine compute_mesh_scaling(mesh)!{{{
- use grid_types
- use configure
+ use mpas_grid_types
+ use mpas_configure
implicit none
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_restoring.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_restoring.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_restoring.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -14,8 +14,8 @@
module ocn_restoring
- use grid_types
- use configure
+ use mpas_grid_types
+ use mpas_configure
implicit none
private
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_tendency.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_tendency.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_tendency.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -16,10 +16,10 @@
module ocn_tendency
- use grid_types
- use configure
- use constants
- use timer
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_constants
+ use mpas_timer
use ocn_thick_hadv
use ocn_thick_vadv
@@ -131,7 +131,7 @@
real (kind=RKIND), allocatable, dimension(:,:) :: delsq_u
real (kind=RKIND), allocatable, dimension(:,:) :: delsq_circulation, delsq_vorticity
- call timer_start("ocn_tend_h")
+ call mpas_timer_start("ocn_tend_h")
h => s % h % array
u => s % u % array
@@ -192,22 +192,22 @@
!
! for z-level, only compute height tendency for top layer.
- call timer_start("ocn_tend_h-horiz adv")
+ call mpas_timer_start("ocn_tend_h-horiz adv")
call ocn_thick_hadv_tend(grid, u, h_edge, tend_h, err)
- call timer_stop("ocn_tend_h-horiz adv")
+ call mpas_timer_stop("ocn_tend_h-horiz adv")
!
! height tendency: vertical advection term -d/dz(hw)
!
! Vertical advection computed for top layer of a z grid only.
- call timer_start("ocn_tend_h-vert adv")
+ call mpas_timer_start("ocn_tend_h-vert adv")
call ocn_thick_vadv_tend(grid, wtop, tend_h, err)
- call timer_stop("ocn_tend_h-vert adv")
- call timer_stop("ocn_tend_h")
+ call mpas_timer_stop("ocn_tend_h-vert adv")
+ call mpas_timer_stop("ocn_tend_h")
end subroutine ocn_tend_h!}}}
@@ -274,7 +274,7 @@
real (kind=RKIND), dimension(:,:), pointer :: u_src
real (kind=RKIND), parameter :: rho_ref = 1000.0
- call timer_start("ocn_tend_u")
+ call mpas_timer_start("ocn_tend_u")
h => s % h % array
u => s % u % array
@@ -338,25 +338,25 @@
! velocity tendency: nonlinear Coriolis term and grad of kinetic energy
!
- call timer_start("ocn_tend_u-coriolis")
+ call mpas_timer_start("ocn_tend_u-coriolis")
call ocn_vel_coriolis_tend(grid, pv_edge, h_edge, u, ke, tend_u, err)
- call timer_stop("ocn_tend_u-coriolis")
+ call mpas_timer_stop("ocn_tend_u-coriolis")
!
! velocity tendency: vertical advection term -w du/dz
!
- call timer_start("ocn_tend_u-vert adv")
+ call mpas_timer_start("ocn_tend_u-vert adv")
call ocn_vel_vadv_tend(grid, u, wtop, tend_u, err)
- call timer_stop("ocn_tend_u-vert adv")
+ call mpas_timer_stop("ocn_tend_u-vert adv")
!
! velocity tendency: pressure gradient
!
- call timer_start("ocn_tend_u-pressure grad")
+ call mpas_timer_start("ocn_tend_u-pressure grad")
if (config_vert_grid_type.eq.'isopycnal') then
call ocn_vel_pressure_grad_tend(grid, MontPot, tend_u, err)
@@ -364,18 +364,18 @@
call ocn_vel_pressure_grad_tend(grid, pressure, tend_u, err)
end if
- call timer_stop("ocn_tend_u-pressure grad")
+ call mpas_timer_stop("ocn_tend_u-pressure grad")
!
! velocity tendency: del2 dissipation, \nu_2 \nabla^2 u
! computed as \nu( \nabla divergence + k \times \nabla vorticity )
! strictly only valid for config_h_mom_eddy_visc2 == constant
!
- call timer_start("ocn_tend_u-horiz mix")
+ call mpas_timer_start("ocn_tend_u-horiz mix")
call ocn_vel_hmix_tend(grid, divergence, vorticity, tend_u, err)
- call timer_stop("ocn_tend_u-horiz mix")
+ call mpas_timer_stop("ocn_tend_u-horiz mix")
!
! velocity tendency: forcing and bottom drag
@@ -383,23 +383,23 @@
! mrp 101115 note: in order to include flux boundary conditions, we will need to
! know the bottom edge with nonzero velocity and place the drag there.
- call timer_start("ocn_tend_u-forcings")
+ call mpas_timer_start("ocn_tend_u-forcings")
call ocn_vel_forcing_tend(grid, u, u_src, ke_edge, h_edge, tend_u, err)
- call timer_stop("ocn_tend_u-forcings")
+ call mpas_timer_stop("ocn_tend_u-forcings")
!
! velocity tendency: vertical mixing d/dz( nu_v du/dz))
!
if (.not.config_implicit_vertical_mix) then
- call timer_start("ocn_tend_u-explicit vert mix")
+ call mpas_timer_start("ocn_tend_u-explicit vert mix")
call ocn_vel_vmix_tend_explicit(grid, u, h_edge, vertvisctopofedge, tend_u, err)
- call timer_stop("ocn_tend_u-explicit vert mix")
+ call mpas_timer_stop("ocn_tend_u-explicit vert mix")
endif
- call timer_stop("ocn_tend_u")
+ call mpas_timer_stop("ocn_tend_u")
end subroutine ocn_tend_u!}}}
@@ -465,7 +465,7 @@
integer :: index_temperature, index_salinity, rrr
real (kind=RKIND), dimension(:), pointer :: temperatureRestore, salinityRestore
- call timer_start("ocn_tend_scalar")
+ call mpas_timer_start("ocn_tend_scalar")
u => s % u % array
h => s % h % array
@@ -515,31 +515,31 @@
! and then change maxLevelEdgeTop to maxLevelEdgeBot in the following section.
! tracer_edge at the boundary will also need to be defined for flux boundaries.
- call timer_start("ocn_tend_scalar-horiz adv")
+ call mpas_timer_start("ocn_tend_scalar-horiz adv")
call ocn_tracer_hadv_tend(grid, u, h_edge, tracers, tend_tr, err)
- call timer_stop("ocn_tend_scalar-horiz adv")
+ call mpas_timer_stop("ocn_tend_scalar-horiz adv")
!
! tracer tendency: vertical advection term -d/dz( h \phi w)
!
- call timer_start("ocn_tend_scalar-vert adv")
+ call mpas_timer_start("ocn_tend_scalar-vert adv")
call ocn_tracer_vadv_tend(grid, wtop, tracers, tend_tr, err)
- call timer_stop("ocn_tend_scalar-vert adv")
+ call mpas_timer_stop("ocn_tend_scalar-vert adv")
!
! tracer tendency: del2 horizontal tracer diffusion, div(h \kappa_2 \nabla \phi)
!
- call timer_start("ocn_tend_scalar-horiz diff")
+ call mpas_timer_start("ocn_tend_scalar-horiz diff")
call ocn_tracer_hmix_tend(grid, h_edge, tracers, tend_tr, err)
- call timer_stop("ocn_tend_scalar-horiz diff")
+ call mpas_timer_stop("ocn_tend_scalar-horiz diff")
! mrp 110516 printing
!print *, 'tend_tr 1',minval(tend_tr(3,1,1:nCells)),&
@@ -552,11 +552,11 @@
! tracer tendency: vertical diffusion h d/dz( \kappa_v d\phi/dz)
!
if (.not.config_implicit_vertical_mix) then
- call timer_start("ocn_tend_scalar-explicit vert diff")
+ call mpas_timer_start("ocn_tend_scalar-explicit vert diff")
call ocn_tracer_vmix_tend_explicit(grid, h, vertdifftopofcell, tracers, tend_tr, err)
- call timer_stop("ocn_tend_scalar-explicit vert diff")
+ call mpas_timer_stop("ocn_tend_scalar-explicit vert diff")
endif
! mrp 110516 printing
@@ -567,14 +567,14 @@
!
! add restoring to T and S in top model layer
!
- call timer_start("ocn_tend_scalar-restoring")
+ call mpas_timer_start("ocn_tend_scalar-restoring")
call ocn_restoring_tend(grid, h, s%index_temperature, s%index_salinity, tracers, tend_tr, err)
- call timer_stop("ocn_tend_scalar-restoring")
+ call mpas_timer_stop("ocn_tend_scalar-restoring")
10 format(2i8,10e20.10)
- call timer_stop("ocn_tend_scalar")
+ call mpas_timer_stop("ocn_tend_scalar")
end subroutine ocn_tend_scalar!}}}
@@ -637,7 +637,7 @@
real (kind=RKIND) :: coef_3rd_order
real (kind=RKIND) :: r, h1, h2
- call timer_start("ocn_diagnostic_solve")
+ call mpas_timer_start("ocn_diagnostic_solve")
h => s % h % array
u => s % u % array
@@ -703,14 +703,14 @@
! mrp 110516 efficiency note: For z-level, only do this on level 1. h_edge for all
! lower levels is defined by hZlevel.
- call timer_start("ocn_diagnostic_solve-hEdge")
+ call mpas_timer_start("ocn_diagnostic_solve-hEdge")
coef_3rd_order = 0.
if (config_thickness_adv_order == 3) coef_3rd_order = 1.0
if (config_thickness_adv_order == 3 .and. config_monotonic) coef_3rd_order = 0.25
if (config_thickness_adv_order == 2) then
- call timer_start("ocn_diagnostic_solve-hEdge 2")
+ call mpas_timer_start("ocn_diagnostic_solve-hEdge 2")
do iEdge=1,nEdges
cell1 = cellsOnEdge(1,iEdge)
@@ -719,10 +719,10 @@
h_edge(k,iEdge) = 0.5 * (h(k,cell1) + h(k,cell2))
end do
end do
- call timer_stop("ocn_diagnostic_solve-hEdge 2")
+ call mpas_timer_stop("ocn_diagnostic_solve-hEdge 2")
else if (config_thickness_adv_order == 3) then
- call timer_start("ocn_diagnostic_solve-hEdge 3")
+ call mpas_timer_start("ocn_diagnostic_solve-hEdge 3")
do iEdge=1,nEdges
cell1 = cellsOnEdge(1,iEdge)
@@ -770,9 +770,9 @@
end do ! do k
end do ! do iEdge
- call timer_stop("ocn_diagnostic_solve-hEdge 3")
+ call mpas_timer_stop("ocn_diagnostic_solve-hEdge 3")
else if (config_thickness_adv_order == 4) then
- call timer_start("ocn_diagnostic_solve-hEdge 4")
+ call mpas_timer_start("ocn_diagnostic_solve-hEdge 4")
do iEdge=1,nEdges
cell1 = cellsOnEdge(1,iEdge)
@@ -810,9 +810,9 @@
end do ! do k
end do ! do iEdge
- call timer_stop("ocn_diagnostic_solve-hEdge 4")
+ call mpas_timer_stop("ocn_diagnostic_solve-hEdge 4")
endif ! if(config_thickness_adv_order == 2)
- call timer_stop("ocn_diagnostic_solve-hEdge")
+ call mpas_timer_stop("ocn_diagnostic_solve-hEdge")
!
! set the velocity and height at dummy address
@@ -1070,7 +1070,7 @@
call ocn_wtop(s,grid)
- call timer_stop("ocn_diagnostic_solve")
+ call mpas_timer_stop("ocn_diagnostic_solve")
end subroutine ocn_diagnostic_solve!}}}
@@ -1121,7 +1121,7 @@
maxLevelCell, maxLevelEdgeTop, maxLevelEdgeBot, &
maxLevelVertexBot, maxLevelVertexTop
- call timer_start("wTop")
+ call mpas_timer_start("wTop")
u => s % u % array
wTop => s % wTop % array
@@ -1176,7 +1176,7 @@
endif
- call timer_stop("wTop")
+ call mpas_timer_stop("wTop")
end subroutine ocn_wtop!}}}
@@ -1241,7 +1241,7 @@
real (kind=RKIND), dimension(:,:), pointer :: u_src
real (kind=RKIND), parameter :: rho_ref = 1000.0
- call timer_start("ocn_fuperp")
+ call mpas_timer_start("ocn_fuperp")
h => s % h % array
u => s % u % array
@@ -1305,7 +1305,7 @@
end do
end do
- call timer_stop("ocn_fuperp")
+ call mpas_timer_stop("ocn_fuperp")
end subroutine ocn_fuperp!}}}
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_test_cases.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_test_cases.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_test_cases.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,8 +1,8 @@
module test_cases
- use grid_types
- use configure
- use constants
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_constants
contains
@@ -72,14 +72,14 @@
write(0,*) 'Abort: config_test_case=',config_test_case
write(0,*) 'Only test case 1, 2, 5, and 6 ', &
'are currently supported. '
- call dmpar_abort(dminfo)
+ call mpas_dmpar_abort(dminfo)
end if
block_ptr => domain % blocklist
do while (associated(block_ptr))
do i=2,nTimeLevs
- call copy_state(block_ptr % state % time_levs(i) % state, &
+ call mpas_copy_state(block_ptr % state % time_levs(i) % state, &
block_ptr % state % time_levs(1) % state)
end do
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_thick_hadv.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_thick_hadv.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_thick_hadv.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -14,8 +14,8 @@
module ocn_thick_hadv
- use grid_types
- use configure
+ use mpas_grid_types
+ use mpas_configure
implicit none
private
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_thick_vadv.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_thick_vadv.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_thick_vadv.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -14,8 +14,8 @@
module ocn_thick_vadv
- use grid_types
- use configure
+ use mpas_grid_types
+ use mpas_configure
implicit none
private
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_time_integration.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_time_integration.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_time_integration.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -14,13 +14,13 @@
module ocn_time_integration
- use grid_types
- use configure
- use constants
- use dmpar
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_constants
+ use mpas_dmpar
use vector_reconstruction
use spline_interpolation
- use timer
+ use mpas_timer
use ocn_time_integration_rk4
use ocn_time_integration_split
@@ -100,7 +100,7 @@
if (isNaN(sum(block % state % time_levs(2) % state % u % array))) then
write(0,*) 'Abort: NaN detected'
- call dmpar_abort(dminfo)
+ call mpas_dmpar_abort(dminfo)
endif
block => block % next
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_time_integration_rk4.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_time_integration_rk4.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_time_integration_rk4.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -13,13 +13,13 @@
module ocn_time_integration_rk4
- use grid_types
- use configure
- use constants
- use dmpar
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_constants
+ use mpas_dmpar
use vector_reconstruction
use spline_interpolation
- use timer
+ use mpas_timer
use ocn_tendency
@@ -95,7 +95,7 @@
block => domain % blocklist
- call allocate_state(provis, &
+ call mpas_allocate_state(provis, &
block % mesh % nCells, block % mesh % nEdges, block % mesh % maxEdges, block % mesh % maxEdges2, &
block % mesh % nVertices, block % mesh % vertexDegree, block % mesh % nVertLevels )
@@ -117,7 +117,7 @@
end do
end do
- call copy_state(provis, block % state % time_levs(1) % state)
+ call mpas_copy_state(provis, block % state % time_levs(1) % state)
block => block % next
end do
@@ -133,36 +133,36 @@
rk_substep_weights(4) = 0.
- call timer_start("RK4-main loop")
+ call mpas_timer_start("RK4-main loop")
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! BEGIN RK loop
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
do rk_step = 1, 4
! --- update halos for diagnostic variables
- call timer_start("RK4-diagnostic halo update")
+ call mpas_timer_start("RK4-diagnostic halo update")
block => domain % blocklist
do while (associated(block))
- call dmpar_exch_halo_field2dReal(domain % dminfo, provis % pv_edge % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, provis % pv_edge % array(:,:), &
block % mesh % nVertLevels, block % mesh % nEdges, &
block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
if (config_h_mom_eddy_visc4 > 0.0) then
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % divergence % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % divergence % array(:,:), &
block % mesh % nVertLevels, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % vorticity % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % vorticity % array(:,:), &
block % mesh % nVertLevels, block % mesh % nVertices, &
block % parinfo % verticesToSend, block % parinfo % verticesToRecv)
end if
block => block % next
end do
- call timer_stop("RK4-diagnostic halo update")
+ call mpas_timer_stop("RK4-diagnostic halo update")
! --- compute tendencies
- call timer_start("RK4-tendency computations")
+ call mpas_timer_start("RK4-tendency computations")
block => domain % blocklist
do while (associated(block))
if (.not.config_implicit_vertical_mix) then
@@ -181,29 +181,29 @@
call enforce_boundaryEdge(block % tend, block % mesh)
block => block % next
end do
- call timer_stop("RK4-tendency computations")
+ call mpas_timer_stop("RK4-tendency computations")
! --- update halos for prognostic variables
- call timer_start("RK4-pronostic halo update")
+ call mpas_timer_start("RK4-pronostic halo update")
block => domain % blocklist
do while (associated(block))
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % tend % u % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % u % array(:,:), &
block % mesh % nVertLevels, block % mesh % nEdges, &
block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % tend % h % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % h % array(:,:), &
block % mesh % nVertLevels, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field3dReal(domain % dminfo, block % tend % tracers % array(:,:,:), &
+ call mpas_dmpar_exch_halo_field3d_real(domain % dminfo, block % tend % tracers % array(:,:,:), &
block % tend % num_tracers, block % mesh % nVertLevels, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
block => block % next
end do
- call timer_stop("RK4-pronostic halo update")
+ call mpas_timer_stop("RK4-pronostic halo update")
! --- compute next substep state
- call timer_start("RK4-update diagnostic variables")
+ call mpas_timer_start("RK4-update diagnostic variables")
if (rk_step < 4) then
block => domain % blocklist
do while (associated(block))
@@ -232,13 +232,13 @@
block => block % next
end do
end if
- call timer_stop("RK4-update diagnostic variables")
+ call mpas_timer_stop("RK4-update diagnostic variables")
!--- accumulate update (for RK4)
- call timer_start("RK4-RK4 accumulate update")
+ call mpas_timer_start("RK4-RK4 accumulate update")
block => domain % blocklist
do while (associated(block))
block % state % time_levs(2) % state % u % array(:,:) = block % state % time_levs(2) % state % u % array(:,:) &
@@ -257,18 +257,18 @@
block => block % next
end do
- call timer_stop("RK4-RK4 accumulate update")
+ call mpas_timer_stop("RK4-RK4 accumulate update")
end do
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! END RK loop
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- call timer_stop("RK4-main loop")
+ call mpas_timer_stop("RK4-main loop")
!
! A little clean up at the end: decouple new scalar fields and compute diagnostics for new state
!
- call timer_start("RK4-cleaup phase")
+ call mpas_timer_start("RK4-cleaup phase")
block => domain % blocklist
do while (associated(block))
@@ -294,7 +294,7 @@
end do
if (config_implicit_vertical_mix) then
- call timer_start("RK4-implicit vert mix")
+ call mpas_timer_start("RK4-implicit vert mix")
allocate(A(nVertLevels),C(nVertLevels),uTemp(nVertLevels), &
tracersTemp(num_tracers,nVertLevels))
@@ -320,7 +320,7 @@
! mrp 110725 momentum decay term
if (config_mom_decay) then
- call timer_start("RK4-momentum decay")
+ call mpas_timer_start("RK4-momentum decay")
!
! Implicit solve for momentum decay
@@ -337,7 +337,7 @@
end do
end do
- call timer_stop("RK4-momentum decay")
+ call mpas_timer_stop("RK4-momentum decay")
end if
@@ -357,9 +357,9 @@
block => block % next
end do
- call timer_stop("RK4-cleaup phase")
+ call mpas_timer_stop("RK4-cleaup phase")
- call deallocate_state(provis)
+ call mpas_deallocate_state(provis)
end subroutine ocn_time_integrator_rk4!}}}
@@ -412,7 +412,7 @@
real (kind=RKIND), dimension(:,:), pointer :: u_src
real (kind=RKIND), parameter :: rho_ref = 1000.0
- call timer_start("filter_btr_mode_tend_u")
+ call mpas_timer_start("filter_btr_mode_tend_u")
h => s % h % array
u => s % u % array
@@ -484,7 +484,7 @@
enddo ! iEdge
- call timer_stop("filter_btr_mode_tend_u")
+ call mpas_timer_stop("filter_btr_mode_tend_u")
end subroutine filter_btr_mode_tend_u!}}}
@@ -535,7 +535,7 @@
real (kind=RKIND), dimension(:,:), pointer :: u_src
real (kind=RKIND), parameter :: rho_ref = 1000.0
- call timer_start("filter_btr_mode_u")
+ call mpas_timer_start("filter_btr_mode_u")
h => s % h % array
u => s % u % array
@@ -603,7 +603,7 @@
enddo ! iEdge
- call timer_stop("filter_btr_mode_u")
+ call mpas_timer_stop("filter_btr_mode_u")
end subroutine filter_btr_mode_u!}}}
@@ -627,7 +627,7 @@
integer :: nCells, nEdges, nVertices, nVertLevels
integer :: iEdge, k
- call timer_start("enforce_boundaryEdge")
+ call mpas_timer_start("enforce_boundaryEdge")
nCells = grid % nCells
nEdges = grid % nEdges
@@ -648,7 +648,7 @@
enddo
enddo
- call timer_stop("enforce_boundaryEdge")
+ call mpas_timer_stop("enforce_boundaryEdge")
end subroutine enforce_boundaryEdge!}}}
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_time_integration_split.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_time_integration_split.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_time_integration_split.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -15,13 +15,13 @@
module ocn_time_integration_split
- use grid_types
- use configure
- use constants
- use dmpar
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_constants
+ use mpas_dmpar
use vector_reconstruction
use spline_interpolation
- use timer
+ use mpas_timer
use ocn_tendency
@@ -99,7 +99,7 @@
real (kind=RKIND), dimension(:), allocatable:: A,C,uTemp, hNew
real (kind=RKIND), dimension(:,:), allocatable:: tracersTemp
- call timer_start("split_explicit_timestep")
+ call mpas_timer_start("split_explicit_timestep")
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!
@@ -160,15 +160,15 @@
block => domain % blocklist
do while (associated(block))
! mrp 110512 not sure if I need the following three. Leave be, assume I need it.
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % pv_edge % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % pv_edge % array(:,:), &
block % mesh % nVertLevels, block % mesh % nEdges, &
block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
if (config_h_mom_eddy_visc4 > 0.0) then
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % divergence % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % divergence % array(:,:), &
block % mesh % nVertLevels, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % vorticity % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % vorticity % array(:,:), &
block % mesh % nVertLevels, block % mesh % nVertices, &
block % parinfo % verticesToSend, block % parinfo % verticesToRecv)
end if
@@ -267,7 +267,7 @@
block => domain % blocklist
do while (associated(block))
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % uBcl % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % uBcl % array(:,:), &
block % mesh % nVertLevels, block % mesh % nEdges, &
block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
@@ -415,7 +415,7 @@
block => domain % blocklist
do while (associated(block))
- call dmpar_exch_halo_field1dReal(domain % dminfo, &
+ call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, &
block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(:), &
block % mesh % nEdges, &
block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
@@ -479,7 +479,7 @@
! block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle % array(:), &
- call dmpar_exch_halo_field1dReal(domain % dminfo, &
+ call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, &
block % state % time_levs(newBtrSubcycleTime) % state % sshSubcycle % array(:), &
block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
@@ -652,7 +652,7 @@
block => domain % blocklist
do while (associated(block))
- call dmpar_exch_halo_field1dReal(domain % dminfo, &
+ call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, &
block % state % time_levs(newBtrSubcycleTime) % state % uBtrSubcycle % array(:), &
block % mesh % nEdges, &
block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
@@ -714,7 +714,7 @@
else
write(0,*) 'Abort: Unknown config_SSH_from option: '&
//trim(config_SSH_from)
- call dmpar_abort(dminfo)
+ call mpas_dmpar_abort(dminfo)
endif
block => block % next
@@ -725,7 +725,7 @@
block => domain % blocklist
do while (associated(block))
- call dmpar_exch_halo_field1dReal(domain % dminfo, &
+ call mpas_dmpar_exch_halo_field1d_real(domain % dminfo, &
block % state % time_levs(1) % state % FBtr % array(:), &
block % mesh % nEdges, &
block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
@@ -878,10 +878,10 @@
block => domain % blocklist
do while (associated(block))
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % tend % h % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % h % array(:,:), &
block % mesh % nVertLevels, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field3dReal(domain % dminfo, block % tend % tracers % array(:,:,:), &
+ call mpas_dmpar_exch_halo_field3d_real(domain % dminfo, block % tend % tracers % array(:,:,:), &
block % tend % num_tracers, block % mesh % nVertLevels, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
block => block % next
@@ -1021,7 +1021,7 @@
else
write(0,*) 'Abort: Unknown config_new_btr_variables_from: '&
//trim(config_time_integration)
- call dmpar_abort(dminfo)
+ call mpas_dmpar_abort(dminfo)
endif
! Recompute final u to go on to next step.
@@ -1147,7 +1147,7 @@
block => block % next
end do
- call timer_stop("split_explicit_timestep")
+ call mpas_timer_stop("split_explicit_timestep")
end subroutine ocn_time_integrator_split!}}}
@@ -1200,7 +1200,7 @@
real (kind=RKIND), dimension(:,:), pointer :: u_src
real (kind=RKIND), parameter :: rho_ref = 1000.0
- call timer_start("filter_btr_mode_tend_u")
+ call mpas_timer_start("filter_btr_mode_tend_u")
h => s % h % array
u => s % u % array
@@ -1272,7 +1272,7 @@
enddo ! iEdge
- call timer_stop("filter_btr_mode_tend_u")
+ call mpas_timer_stop("filter_btr_mode_tend_u")
end subroutine filter_btr_mode_tend_u!}}}
@@ -1323,7 +1323,7 @@
real (kind=RKIND), dimension(:,:), pointer :: u_src
real (kind=RKIND), parameter :: rho_ref = 1000.0
- call timer_start("filter_btr_mode_u")
+ call mpas_timer_start("filter_btr_mode_u")
h => s % h % array
u => s % u % array
@@ -1391,7 +1391,7 @@
enddo ! iEdge
- call timer_stop("filter_btr_mode_u")
+ call mpas_timer_stop("filter_btr_mode_u")
end subroutine filter_btr_mode_u!}}}
@@ -1415,7 +1415,7 @@
integer :: nCells, nEdges, nVertices, nVertLevels
integer :: iEdge, k
- call timer_start("enforce_boundaryEdge")
+ call mpas_timer_start("enforce_boundaryEdge")
nCells = grid % nCells
nEdges = grid % nEdges
@@ -1436,7 +1436,7 @@
enddo
enddo
- call timer_stop("enforce_boundaryEdge")
+ call mpas_timer_stop("enforce_boundaryEdge")
end subroutine enforce_boundaryEdge!}}}
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_tracer_hadv.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_tracer_hadv.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_tracer_hadv.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -14,8 +14,8 @@
module ocn_tracer_hadv
- use grid_types
- use configure
+ use mpas_grid_types
+ use mpas_configure
use ocn_tracer_hadv2
use ocn_tracer_hadv3
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_tracer_hadv2.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_tracer_hadv2.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_tracer_hadv2.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -14,9 +14,9 @@
module ocn_tracer_hadv2
- use grid_types
- use configure
- use timer
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_timer
implicit none
private
@@ -127,7 +127,7 @@
if(.not.hadv2On) return
- call timer_start("compute_scalar_tend-horiz adv 2")
+ call mpas_timer_start("compute_scalar_tend-horiz adv 2")
nEdges = grid % nEdges
maxLevelEdgeTop => grid % maxLevelEdgeTop % array
@@ -149,7 +149,7 @@
end do
end do
- call timer_stop("compute_scalar_tend-horiz adv 2")
+ call mpas_timer_stop("compute_scalar_tend-horiz adv 2")
!--------------------------------------------------------------------
end subroutine ocn_tracer_hadv2_tend!}}}
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_tracer_hadv3.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_tracer_hadv3.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_tracer_hadv3.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -14,9 +14,9 @@
module ocn_tracer_hadv3
- use grid_types
- use configure
- use timer
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_timer
implicit none
private
@@ -142,7 +142,7 @@
areaCell => grid % areaCell % array
deriv_two => grid % deriv_two % array
- call timer_start("compute_scalar_tend-horiz adv 3")
+ call mpas_timer_start("compute_scalar_tend-horiz adv 3")
do iEdge=1,nEdges
cell1 = cellsOnEdge(1,iEdge)
cell2 = cellsOnEdge(2,iEdge)
@@ -194,7 +194,7 @@
enddo
end do
end do
- call timer_stop("compute_scalar_tend-horiz adv 3")
+ call mpas_timer_stop("compute_scalar_tend-horiz adv 3")
!--------------------------------------------------------------------
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_tracer_hadv4.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_tracer_hadv4.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_tracer_hadv4.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -14,9 +14,9 @@
module ocn_tracer_hadv4
- use grid_types
- use configure
- use timer
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_timer
implicit none
private
@@ -139,7 +139,7 @@
areaCell => grid % areaCell % array
deriv_two => grid % deriv_two % array
- call timer_start("compute_scalar_tend-horiz adv 4")
+ call mpas_timer_start("compute_scalar_tend-horiz adv 4")
do iEdge=1,nEdges
cell1 = cellsOnEdge(1,iEdge)
@@ -182,7 +182,7 @@
enddo
end do
end do
- call timer_stop("compute_scalar_tend-horiz adv 4")
+ call mpas_timer_stop("compute_scalar_tend-horiz adv 4")
!--------------------------------------------------------------------
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_tracer_hmix.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_tracer_hmix.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_tracer_hmix.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -16,8 +16,8 @@
module ocn_tracer_hmix
- use grid_types
- use configure
+ use mpas_grid_types
+ use mpas_configure
use ocn_tracer_hmix_del2
use ocn_tracer_hmix_del4
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_tracer_hmix_del2.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_tracer_hmix_del2.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_tracer_hmix_del2.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -16,9 +16,9 @@
module ocn_tracer_hmix_del2
- use grid_types
- use configure
- use timer
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_timer
implicit none
private
@@ -134,7 +134,7 @@
if (.not.del2On) return
- call timer_start("compute_scalar_tend-horiz diff 2")
+ call mpas_timer_start("compute_scalar_tend-horiz diff 2")
nEdges = grid % nEdges
nVertLevels = grid % nVertLevels
@@ -179,7 +179,7 @@
end do
deallocate(boundaryMask)
- call timer_stop("compute_scalar_tend-horiz diff 2")
+ call mpas_timer_stop("compute_scalar_tend-horiz diff 2")
!--------------------------------------------------------------------
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_tracer_hmix_del4.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_tracer_hmix_del4.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_tracer_hmix_del4.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -16,9 +16,9 @@
module ocn_tracer_hmix_del4
- use grid_types
- use configure
- use timer
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_timer
implicit none
private
@@ -135,7 +135,7 @@
if (.not.Del4On) return
- call timer_start("compute_scalar_tend-horiz diff 4")
+ call mpas_timer_start("compute_scalar_tend-horiz diff 4")
nEdges = grid % nEdges
nCells = grid % nCells
@@ -212,7 +212,7 @@
end do
deallocate(delsq_tracer)
- call timer_stop("compute_scalar_tend-horiz diff 4")
+ call mpas_timer_stop("compute_scalar_tend-horiz diff 4")
!--------------------------------------------------------------------
end subroutine ocn_tracer_hmix_del4_tend!}}}
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_tracer_vadv.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_tracer_vadv.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_tracer_vadv.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -14,8 +14,8 @@
module ocn_tracer_vadv
- use grid_types
- use configure
+ use mpas_grid_types
+ use mpas_configure
use ocn_tracer_vadv_stencil
use ocn_tracer_vadv_spline
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_tracer_vadv_spline.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_tracer_vadv_spline.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_tracer_vadv_spline.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -14,8 +14,8 @@
module ocn_tracer_vadv_spline
- use grid_types
- use configure
+ use mpas_grid_types
+ use mpas_configure
use ocn_tracer_vadv_spline2
use ocn_tracer_vadv_spline3
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_tracer_vadv_spline2.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_tracer_vadv_spline2.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_tracer_vadv_spline2.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -14,9 +14,9 @@
module ocn_tracer_vadv_spline2
- use grid_types
- use configure
- use timer
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_timer
implicit none
private
@@ -125,7 +125,7 @@
if(.not.spline2On) return
! Compute tracerTop using linear interpolation.
- call timer_start("compute_scalar_tend-vert adv spline 2")
+ call mpas_timer_start("compute_scalar_tend-vert adv spline 2")
nCells = grid % nCells
nCellsSolve = grid % nCellsSolve
@@ -162,7 +162,7 @@
deallocate(tracerTop)
- call timer_stop("compute_scalar_tend-vert adv spline 2")
+ call mpas_timer_stop("compute_scalar_tend-vert adv spline 2")
!--------------------------------------------------------------------
end subroutine ocn_tracer_vadv_spline2_tend!}}}
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_tracer_vadv_spline3.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_tracer_vadv_spline3.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_tracer_vadv_spline3.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -14,9 +14,9 @@
module ocn_tracer_vadv_spline3
- use grid_types
- use configure
- use timer
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_timer
use spline_interpolation
implicit none
@@ -129,7 +129,7 @@
if(.not.spline3On) return
! Compute tracerTop using linear interpolation.
- call timer_start("compute_scalar_tend-vert adv spline 3")
+ call mpas_timer_start("compute_scalar_tend-vert adv spline 3")
nCells = grid % nCells
nCellsSolve = grid % nCellsSolve
@@ -191,7 +191,7 @@
deallocate(tracersIn,tracersOut, posZMidZLevel, posZTopZLevel)
deallocate(tracerTop)
- call timer_stop("compute_scalar_tend-vert adv spline 3")
+ call mpas_timer_stop("compute_scalar_tend-vert adv spline 3")
!--------------------------------------------------------------------
end subroutine ocn_tracer_vadv_spline3_tend!}}}
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_tracer_vadv_stencil.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_tracer_vadv_stencil.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_tracer_vadv_stencil.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -14,8 +14,8 @@
module ocn_tracer_vadv_stencil
- use grid_types
- use configure
+ use mpas_grid_types
+ use mpas_configure
use ocn_tracer_vadv_stencil2
use ocn_tracer_vadv_stencil3
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_tracer_vadv_stencil2.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_tracer_vadv_stencil2.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_tracer_vadv_stencil2.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -14,9 +14,9 @@
module ocn_tracer_vadv_stencil2
- use grid_types
- use configure
- use timer
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_timer
implicit none
private
@@ -125,7 +125,7 @@
if(.not. stencil2On) return
- call timer_start("compute_scalar_tend-vert adv stencil 2")
+ call mpas_timer_start("compute_scalar_tend-vert adv stencil 2")
nCells = grid % nCells
nCellsSolve = grid % nCellsSolve
@@ -158,7 +158,7 @@
end do
deallocate(tracerTop)
- call timer_stop("compute_scalar_tend-vert adv stencil 2")
+ call mpas_timer_stop("compute_scalar_tend-vert adv stencil 2")
!--------------------------------------------------------------------
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_tracer_vadv_stencil3.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_tracer_vadv_stencil3.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_tracer_vadv_stencil3.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -14,9 +14,9 @@
module ocn_tracer_vadv_stencil3
- use grid_types
- use configure
- use timer
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_timer
implicit none
private
@@ -134,7 +134,7 @@
hRatioZLevelK => grid % hRatioZLevelK % array
hRatioZLevelKm1 => grid % hRatioZLevelKm1 % array
- call timer_start("compute_scalar_tend-vert adv stencil 3")
+ call mpas_timer_start("compute_scalar_tend-vert adv stencil 3")
allocate(tracerTop(num_tracers,nVertLevels+1,nCells))
@@ -181,7 +181,7 @@
end do
deallocate(tracerTop)
- call timer_stop("compute_scalar_tend-vert adv stencil 3")
+ call mpas_timer_stop("compute_scalar_tend-vert adv stencil 3")
!--------------------------------------------------------------------
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_tracer_vadv_stencil4.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_tracer_vadv_stencil4.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_tracer_vadv_stencil4.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -14,9 +14,9 @@
module ocn_tracer_vadv_stencil4
- use grid_types
- use configure
- use timer
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_timer
implicit none
private
@@ -126,7 +126,7 @@
if(.not. Stencil4On) return
- call timer_start("compute_scalar_tend-vert adv stencil 4")
+ call mpas_timer_start("compute_scalar_tend-vert adv stencil 4")
nCells = grid % nCells
nCellsSolve = grid % nCellsSolve
@@ -176,7 +176,7 @@
end do
deallocate(tracerTop)
- call timer_stop("compute_scalar_tend-vert adv stencil 4")
+ call mpas_timer_stop("compute_scalar_tend-vert adv stencil 4")
!--------------------------------------------------------------------
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_vel_coriolis.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_vel_coriolis.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_vel_coriolis.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -15,8 +15,8 @@
module ocn_vel_coriolis
- use grid_types
- use configure
+ use mpas_grid_types
+ use mpas_configure
implicit none
private
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_vel_forcing.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_vel_forcing.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_vel_forcing.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -14,8 +14,8 @@
module ocn_vel_forcing
- use grid_types
- use configure
+ use mpas_grid_types
+ use mpas_configure
use ocn_vel_forcing_windstress
use ocn_vel_forcing_bottomdrag
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_vel_forcing_bottomdrag.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_vel_forcing_bottomdrag.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_vel_forcing_bottomdrag.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -14,8 +14,8 @@
module ocn_vel_forcing_bottomdrag
- use grid_types
- use configure
+ use mpas_grid_types
+ use mpas_configure
implicit none
private
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_vel_forcing_windstress.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_vel_forcing_windstress.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_vel_forcing_windstress.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -14,8 +14,8 @@
module ocn_vel_forcing_windstress
- use grid_types
- use configure
+ use mpas_grid_types
+ use mpas_configure
implicit none
private
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_vel_hmix.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_vel_hmix.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_vel_hmix.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -16,8 +16,8 @@
module ocn_vel_hmix
- use grid_types
- use configure
+ use mpas_grid_types
+ use mpas_configure
use ocn_vel_hmix_del2
use ocn_vel_hmix_del4
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_vel_hmix_del2.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_vel_hmix_del2.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_vel_hmix_del2.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -14,9 +14,9 @@
module ocn_vel_hmix_del2
- use grid_types
- use configure
- use timer
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_timer
implicit none
private
@@ -132,7 +132,7 @@
if(.not.hmixDel2On) return
- call timer_start("compute_tend_u-horiz mix-del2")
+ call mpas_timer_start("compute_tend_u-horiz mix-del2")
nEdgesSolve = grid % nEdgesSolve
maxLevelEdgeTop => grid % maxLevelEdgeTop % array
@@ -165,7 +165,7 @@
end do
end do
- call timer_stop("compute_tend_u-horiz mix-del2")
+ call mpas_timer_stop("compute_tend_u-horiz mix-del2")
!--------------------------------------------------------------------
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_vel_hmix_del4.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_vel_hmix_del4.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_vel_hmix_del4.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -14,9 +14,9 @@
module ocn_vel_hmix_del4
- use grid_types
- use configure
- use timer
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_timer
implicit none
private
@@ -134,7 +134,7 @@
if(.not.hmixDel4On) return
- call timer_start("compute_tend-horiz mix-del4")
+ call mpas_timer_start("compute_tend-horiz mix-del4")
nCells = grid % nCells
nEdges = grid % nEdges
@@ -243,7 +243,7 @@
deallocate(delsq_circulation)
deallocate(delsq_vorticity)
- call timer_stop("compute_tend-horiz mix-del4")
+ call mpas_timer_stop("compute_tend-horiz mix-del4")
!--------------------------------------------------------------------
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_vel_pressure_grad.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_vel_pressure_grad.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_vel_pressure_grad.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -15,8 +15,8 @@
module ocn_vel_pressure_grad
- use grid_types
- use configure
+ use mpas_grid_types
+ use mpas_configure
implicit none
private
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_vel_vadv.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_vel_vadv.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_vel_vadv.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -15,8 +15,8 @@
module ocn_vel_vadv
- use grid_types
- use configure
+ use mpas_grid_types
+ use mpas_configure
implicit none
private
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_vmix.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_vmix.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_vmix.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -15,9 +15,9 @@
module ocn_vmix
- use grid_types
- use configure
- use timer
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_timer
use ocn_vmix_coefs_const
use ocn_vmix_coefs_tanh
@@ -199,7 +199,7 @@
if(implicitOn) return
- call timer_start("compute_tend_u-explicit vert mix")
+ call mpas_timer_start("compute_tend_u-explicit vert mix")
nEdgessolve = grid % nEdgesSolve
nVertLevels = grid % nVertLevels
@@ -224,7 +224,7 @@
end do
deallocate(fluxVertTop)
- call timer_stop("compute_tend_u-explicit vert mix")
+ call mpas_timer_stop("compute_tend_u-explicit vert mix")
!--------------------------------------------------------------------
@@ -418,7 +418,7 @@
if(implicitOn) return
- call timer_start("compute_scalar_tend-explicit vert diff")
+ call mpas_timer_start("compute_scalar_tend-explicit vert diff")
nCellsSolve = grid % nCellsSolve
nVertLevels = grid % nVertLevels
@@ -455,7 +455,7 @@
enddo ! iCell loop
deallocate(fluxVertTop)
- call timer_stop("compute_scalar_tend-explicit vert diff")
+ call mpas_timer_stop("compute_scalar_tend-explicit vert diff")
!--------------------------------------------------------------------
@@ -637,7 +637,7 @@
real (KIND=RKIND) :: m
integer i
- call timer_start("tridiagonal_solve")
+ call mpas_timer_start("tridiagonal_solve")
! Use work variables for b and r
bTemp(1) = b(1)
@@ -656,7 +656,7 @@
x(i) = (rTemp(i) - c(i)*x(i+1))/bTemp(i)
end do
- call timer_stop("tridiagonal_solve")
+ call mpas_timer_stop("tridiagonal_solve")
end subroutine tridiagonal_solve!}}}
@@ -684,7 +684,7 @@
real (KIND=RKIND) :: m
integer i,j
- call timer_start("tridiagonal_solve_mult")
+ call mpas_timer_start("tridiagonal_solve_mult")
! Use work variables for b and r
bTemp(1) = b(1)
@@ -711,7 +711,7 @@
end do
end do
- call timer_stop("tridiagonal_solve_mult")
+ call mpas_timer_stop("tridiagonal_solve_mult")
end subroutine tridiagonal_solve_mult!}}}
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_vmix_coefs_const.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_vmix_coefs_const.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_vmix_coefs_const.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -15,9 +15,9 @@
module ocn_vmix_coefs_const
- use grid_types
- use configure
- use timer
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_timer
implicit none
private
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_vmix_coefs_rich.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_vmix_coefs_rich.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_vmix_coefs_rich.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -15,10 +15,10 @@
module ocn_vmix_coefs_rich
- use grid_types
- use configure
- use constants
- use timer
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_constants
+ use mpas_timer
use ocn_equation_of_state
Modified: branches/source_renaming/src/core_ocean/mpas_ocn_vmix_coefs_tanh.F
===================================================================
--- branches/source_renaming/src/core_ocean/mpas_ocn_vmix_coefs_tanh.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_ocean/mpas_ocn_vmix_coefs_tanh.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -15,9 +15,9 @@
module ocn_vmix_coefs_tanh
- use grid_types
- use configure
- use timer
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_timer
implicit none
private
Modified: branches/source_renaming/src/core_sw/mpas_sw_advection.F
===================================================================
--- branches/source_renaming/src/core_sw/mpas_sw_advection.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_sw/mpas_sw_advection.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,8 +1,8 @@
module advection
- use grid_types
- use configure
- use constants
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_constants
contains
Modified: branches/source_renaming/src/core_sw/mpas_sw_global_diagnostics.F
===================================================================
--- branches/source_renaming/src/core_sw/mpas_sw_global_diagnostics.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_sw/mpas_sw_global_diagnostics.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,9 +1,9 @@
module global_diagnostics
- use grid_types
- use configure
- use constants
- use dmpar
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_constants
+ use mpas_dmpar
implicit none
save
@@ -313,7 +313,7 @@
real (kind=RKIND) :: localSum
localSum = sum(field)
- call dmpar_sum_real(dminfo, localSum, globalSum)
+ call mpas_dmpar_sum_real(dminfo, localSum, globalSum)
end subroutine sw_compute_global_sum
@@ -329,7 +329,7 @@
real (kind=RKIND) :: localMin
localMin = minval(field)
- call dmpar_min_real(dminfo, localMin, globalMin)
+ call mpas_dmpar_min_real(dminfo, localMin, globalMin)
end subroutine sw_compute_global_min
@@ -345,7 +345,7 @@
real (kind=RKIND) :: localMax
localMax = maxval(field)
- call dmpar_max_real(dminfo, localMax, globalMax)
+ call mpas_dmpar_max_real(dminfo, localMax, globalMax)
end subroutine sw_compute_global_max
@@ -361,7 +361,7 @@
real (kind=RKIND) :: localMin
localMin = minval(sum(field,1))
- call dmpar_min_real(dminfo, localMin, globalMin)
+ call mpas_dmpar_min_real(dminfo, localMin, globalMin)
end subroutine compute_global_vert_sum_horiz_min
@@ -377,7 +377,7 @@
real (kind=RKIND) :: localMax
localMax = maxval(sum(field,1))
- call dmpar_max_real(dminfo, localMax, globalMax)
+ call mpas_dmpar_max_real(dminfo, localMax, globalMax)
end subroutine sw_compute_global_vert_sum_horiz_max
Modified: branches/source_renaming/src/core_sw/mpas_sw_mpas_core.F
===================================================================
--- branches/source_renaming/src/core_sw/mpas_sw_mpas_core.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_sw/mpas_sw_mpas_core.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -17,8 +17,8 @@
subroutine mpas_core_init(domain, startTimeStamp)
- use configure
- use grid_types
+ use mpas_configure
+ use mpas_grid_types
use test_cases
implicit none
@@ -64,56 +64,56 @@
type (MPAS_TimeInterval_type) :: runDuration, timeStep, alarmTimeStep
integer :: ierr
- call MPAS_setTime(curr_time=startTime, dateTimeString=config_start_time, ierr=ierr)
- call MPAS_setTimeInterval(timeStep, dt=dt, ierr=ierr)
+ call mpas_set_time(curr_time=startTime, dateTimeString=config_start_time, ierr=ierr)
+ call mpas_set_timeInterval(timeStep, dt=dt, ierr=ierr)
if (trim(config_run_duration) /= "none") then
- call MPAS_setTimeInterval(runDuration, timeString=config_run_duration, ierr=ierr)
- call MPAS_createClock(clock, startTime=startTime, timeStep=timeStep, runDuration=runDuration, ierr=ierr)
+ call mpas_set_timeInterval(runDuration, timeString=config_run_duration, ierr=ierr)
+ call mpas_create_clock(clock, startTime=startTime, timeStep=timeStep, runDuration=runDuration, ierr=ierr)
if (trim(config_stop_time) /= "none") then
- call MPAS_setTime(curr_time=stopTime, dateTimeString=config_stop_time, ierr=ierr)
+ call mpas_set_time(curr_time=stopTime, dateTimeString=config_stop_time, ierr=ierr)
if(startTime + runduration /= stopTime) then
write(0,*) 'Warning: config_run_duration and config_stop_time are inconsistent: using config_run_duration.'
end if
end if
else if (trim(config_stop_time) /= "none") then
- call MPAS_setTime(curr_time=stopTime, dateTimeString=config_stop_time, ierr=ierr)
- call MPAS_createClock(clock, startTime=startTime, timeStep=timeStep, stopTime=stopTime, ierr=ierr)
+ call mpas_set_time(curr_time=stopTime, dateTimeString=config_stop_time, ierr=ierr)
+ call mpas_create_clock(clock, startTime=startTime, timeStep=timeStep, stopTime=stopTime, ierr=ierr)
else
write(0,*) 'Error: Neither config_run_duration nor config_stop_time were specified.'
- call dmpar_abort(domain % dminfo)
+ call mpas_dmpar_abort(domain % dminfo)
end if
! set output alarm
- call MPAS_setTimeInterval(alarmTimeStep, timeString=config_output_interval, ierr=ierr)
+ call mpas_set_timeInterval(alarmTimeStep, timeString=config_output_interval, ierr=ierr)
alarmStartTime = startTime + alarmTimeStep
- call MPAS_addClockAlarm(clock, outputAlarmID, alarmStartTime, alarmTimeStep, ierr=ierr)
+ call mpas_add_clock_alarm(clock, outputAlarmID, alarmStartTime, alarmTimeStep, ierr=ierr)
! set restart alarm, if necessary
if (trim(config_restart_interval) /= "none") then
- call MPAS_setTimeInterval(alarmTimeStep, timeString=config_restart_interval, ierr=ierr)
+ call mpas_set_timeInterval(alarmTimeStep, timeString=config_restart_interval, ierr=ierr)
alarmStartTime = startTime + alarmTimeStep
- call MPAS_addClockAlarm(clock, restartAlarmID, alarmStartTime, alarmTimeStep, ierr=ierr)
+ call mpas_add_clock_alarm(clock, restartAlarmID, alarmStartTime, alarmTimeStep, ierr=ierr)
end if
!TODO: use this code if we desire to convert config_stats_interval to alarms
!(must also change config_stats_interval type to character)
! set stats alarm, if necessary
!if (trim(config_stats_interval) /= "none") then
- ! call MPAS_setTimeInterval(alarmTimeStep, timeString=config_stats_interval, ierr=ierr)
+ ! call mpas_set_timeInterval(alarmTimeStep, timeString=config_stats_interval, ierr=ierr)
! alarmStartTime = startTime + alarmTimeStep
- ! call MPAS_addClockAlarm(clock, statsAlarmID, alarmStartTime, alarmTimeStep, ierr=ierr)
+ ! call mpas_add_clock_alarm(clock, statsAlarmID, alarmStartTime, alarmTimeStep, ierr=ierr)
!end if
- call MPAS_getTime(curr_time=startTime, dateTimeString=startTimeStamp, ierr=ierr)
+ call mpas_get_time(curr_time=startTime, dateTimeString=startTimeStamp, ierr=ierr)
end subroutine simulation_clock_init
subroutine mpas_init_block(block, mesh, dt)
- use grid_types
+ use mpas_grid_types
use time_integration
use rbf_interpolation
use vector_reconstruction
@@ -144,9 +144,9 @@
subroutine mpas_core_run(domain, output_obj, output_frame)
- use grid_types
- use io_output
- use timer
+ use mpas_grid_types
+ use mpas_io_output
+ use mpas_timer
implicit none
@@ -165,8 +165,8 @@
! Eventually, dt should be domain specific
dt = config_dt
- currTime = MPAS_getClockTime(clock, MPAS_NOW, ierr)
- call MPAS_getTime(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr)
+ currTime = mpas_get_clock_time(clock, MPAS_NOW, ierr)
+ call mpas_get_time(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr)
write(0,*) 'Initial timestep ', timeStamp
call write_output_frame(output_obj, output_frame, domain)
@@ -174,34 +174,34 @@
! During integration, time level 1 stores the model state at the beginning of the
! time step, and time level 2 stores the state advanced dt in time by timestep(...)
itimestep = 0
- do while (.not. MPAS_isClockStopTime(clock))
+ do while (.not. mpas_is_clock_stop_time(clock))
itimestep = itimestep + 1
- call MPAS_advanceClock(clock)
+ call mpas_advance_clock(clock)
- currTime = MPAS_getClockTime(clock, MPAS_NOW, ierr)
- call MPAS_getTime(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr)
+ currTime = mpas_get_clock_time(clock, MPAS_NOW, ierr)
+ call mpas_get_time(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr)
write(0,*) 'Doing timestep ', timeStamp
- call timer_start("time integration")
+ call mpas_timer_start("time integration")
call mpas_timestep(domain, itimestep, dt, timeStamp)
- call timer_stop("time integration")
+ call mpas_timer_stop("time integration")
! Move time level 2 fields back into time level 1 for next time step
- call shift_time_levels_state(domain % blocklist % state)
+ call mpas_shift_time_levels_state(domain % blocklist % state)
- !TODO: MPAS_getClockRingingAlarms is probably faster than multiple MPAS_isAlarmRinging...
+ !TODO: mpas_get_clock_ringing_alarms is probably faster than multiple mpas_is_alarm_ringing...
- if (MPAS_isAlarmRinging(clock, outputAlarmID, ierr=ierr)) then
- call MPAS_resetClockAlarm(clock, outputAlarmID, ierr=ierr)
- if(output_frame == 1) call output_state_init(output_obj, domain, "OUTPUT", trim(timeStamp)) ! output_frame will always be > 1 here unless it is reset after the output file is finalized
+ if (mpas_is_alarm_ringing(clock, outputAlarmID, ierr=ierr)) then
+ call mpas_reset_clock_alarm(clock, outputAlarmID, ierr=ierr)
+ if(output_frame == 1) call mpas_output_state_init(output_obj, domain, "OUTPUT", trim(timeStamp)) ! output_frame will always be > 1 here unless it is reset after the output file is finalized
call write_output_frame(output_obj, output_frame, domain)
end if
- if (MPAS_isAlarmRinging(clock, restartAlarmID, ierr=ierr)) then
- call MPAS_resetClockAlarm(clock, restartAlarmID, ierr=ierr)
- if (restart_frame == 1) call output_state_init(restart_obj, domain, "RESTART")
- call output_state_for_domain(restart_obj, domain, restart_frame)
+ if (mpas_is_alarm_ringing(clock, restartAlarmID, ierr=ierr)) then
+ call mpas_reset_clock_alarm(clock, restartAlarmID, ierr=ierr)
+ if (restart_frame == 1) call mpas_output_state_init(restart_obj, domain, "RESTART")
+ call mpas_output_state_for_domain(restart_obj, domain, restart_frame)
restart_frame = restart_frame + 1
end if
@@ -218,8 +218,8 @@
! before returning
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- use grid_types
- use io_output
+ use mpas_grid_types
+ use mpas_io_output
implicit none
@@ -237,7 +237,7 @@
block_ptr => block_ptr % next
end do
- call output_state_for_domain(output_obj, domain, output_frame)
+ call mpas_output_state_for_domain(output_obj, domain, output_frame)
output_frame = output_frame + 1
! if the maximum number of frames per outfile has been reached, finalize outfile and reset frame
@@ -245,7 +245,7 @@
current_outfile_frames = current_outfile_frames + 1
if(current_outfile_frames >= config_frames_per_outfile) then
current_outfile_frames = 0
- call output_state_finalize(output_obj, domain % dminfo)
+ call mpas_output_state_finalize(output_obj, domain % dminfo)
output_frame = 1
end if
end if
@@ -263,7 +263,7 @@
! Output: state - upon returning, diagnostic fields will have be computed
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- use grid_types
+ use mpas_grid_types
implicit none
@@ -278,9 +278,9 @@
subroutine mpas_timestep(domain, itimestep, dt, timeStamp)
- use grid_types
+ use mpas_grid_types
use time_integration
- use timer
+ use mpas_timer
use global_diagnostics
implicit none
@@ -303,17 +303,17 @@
'that there is only one block per processor.'
end if
- call timer_start("global_diagnostics")
+ call mpas_timer_start("global_diagnostics")
call sw_compute_global_diagnostics(domain % dminfo, &
block_ptr % state % time_levs(2) % state, block_ptr % mesh, &
itimestep, dt)
- call timer_stop("global_diagnostics")
+ call mpas_timer_stop("global_diagnostics")
end if
end if
!TODO: replace the above code block with this if we desire to convert config_stats_interval to use alarms
- !if (MPAS_isAlarmRinging(clock, statsAlarmID, ierr=ierr)) then
- ! call MPAS_resetClockAlarm(clock, statsAlarmID, ierr=ierr)
+ !if (mpas_is_alarm_ringing(clock, statsAlarmID, ierr=ierr)) then
+ ! call mpas_reset_clock_alarm(clock, statsAlarmID, ierr=ierr)
! block_ptr => domain % blocklist
! if(associated(block_ptr % next)) then
@@ -321,11 +321,11 @@
! 'that there is only one block per processor.'
! end if
- ! call timer_start("global_diagnostics")
+ ! call mpas_timer_start("global_diagnostics")
! call sw_compute_global_diagnostics(domain % dminfo, &
! block_ptr % state % time_levs(2) % state, block_ptr % mesh, &
! timeStamp, dt)
- ! call timer_stop("global_diagnostics")
+ ! call mpas_timer_stop("global_diagnostics")
!end if
end subroutine mpas_timestep
@@ -333,7 +333,7 @@
subroutine mpas_core_finalize(domain)
- use grid_types
+ use mpas_grid_types
implicit none
@@ -341,16 +341,16 @@
type (domain_type), intent(inout) :: domain
- if (restart_frame > 1) call output_state_finalize(restart_obj, domain % dminfo)
+ if (restart_frame > 1) call mpas_output_state_finalize(restart_obj, domain % dminfo)
- call MPAS_destroyClock(clock, ierr)
+ call mpas_destroy_clock(clock, ierr)
end subroutine mpas_core_finalize
subroutine compute_mesh_scaling(mesh)
- use grid_types
+ use mpas_grid_types
implicit none
Modified: branches/source_renaming/src/core_sw/mpas_sw_test_cases.F
===================================================================
--- branches/source_renaming/src/core_sw/mpas_sw_test_cases.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_sw/mpas_sw_test_cases.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,8 +1,8 @@
module test_cases
- use grid_types
- use configure
- use constants
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_constants
contains
@@ -35,7 +35,7 @@
do while (associated(block_ptr))
call sw_test_case_1(block_ptr % mesh, block_ptr % state % time_levs(1) % state)
do i=2,nTimeLevs
- call copy_state(block_ptr % state % time_levs(i) % state, block_ptr % state % time_levs(1) % state)
+ call mpas_copy_state(block_ptr % state % time_levs(i) % state, block_ptr % state % time_levs(1) % state)
end do
block_ptr => block_ptr % next
@@ -49,7 +49,7 @@
do while (associated(block_ptr))
call sw_test_case_2(block_ptr % mesh, block_ptr % state % time_levs(1) % state)
do i=2,nTimeLevs
- call copy_state(block_ptr % state % time_levs(i) % state, block_ptr % state % time_levs(1) % state)
+ call mpas_copy_state(block_ptr % state % time_levs(i) % state, block_ptr % state % time_levs(1) % state)
end do
block_ptr => block_ptr % next
@@ -63,7 +63,7 @@
do while (associated(block_ptr))
call sw_test_case_5(block_ptr % mesh, block_ptr % state % time_levs(1) % state)
do i=2,nTimeLevs
- call copy_state(block_ptr % state % time_levs(i) % state, block_ptr % state % time_levs(1) % state)
+ call mpas_copy_state(block_ptr % state % time_levs(i) % state, block_ptr % state % time_levs(1) % state)
end do
block_ptr => block_ptr % next
@@ -77,7 +77,7 @@
do while (associated(block_ptr))
call sw_test_case_6(block_ptr % mesh, block_ptr % state % time_levs(1) % state)
do i=2,nTimeLevs
- call copy_state(block_ptr % state % time_levs(i) % state, block_ptr % state % time_levs(1) % state)
+ call mpas_copy_state(block_ptr % state % time_levs(i) % state, block_ptr % state % time_levs(1) % state)
end do
block_ptr => block_ptr % next
Modified: branches/source_renaming/src/core_sw/mpas_sw_time_integration.F
===================================================================
--- branches/source_renaming/src/core_sw/mpas_sw_time_integration.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/core_sw/mpas_sw_time_integration.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,10 +1,10 @@
module time_integration
use vector_reconstruction
- use grid_types
- use configure
- use constants
- use dmpar
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_constants
+ use mpas_dmpar
contains
@@ -70,7 +70,7 @@
real (kind=RKIND), dimension(4) :: rk_weights, rk_substep_weights
block => domain % blocklist
- call allocate_state(provis, &
+ call mpas_allocate_state(provis, &
block % mesh % nCells, block % mesh % nEdges, block % mesh % maxEdges, block % mesh % maxEdges2, &
block % mesh % nVertices, block % mesh % vertexDegree, block % mesh % nVertLevels, &
block % mesh % nTracers)
@@ -93,7 +93,7 @@
end do
end do
- call copy_state(provis, block % state % time_levs(1) % state)
+ call mpas_copy_state(provis, block % state % time_levs(1) % state)
block => block % next
end do
@@ -118,15 +118,15 @@
block => domain % blocklist
do while (associated(block))
- call dmpar_exch_halo_field2dReal(domain % dminfo, provis % pv_edge % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, provis % pv_edge % array(:,:), &
block % mesh % nVertLevels, block % mesh % nEdges, &
block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
if (config_h_mom_eddy_visc4 > 0.0) then
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % divergence % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % divergence % array(:,:), &
block % mesh % nVertLevels, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % state % time_levs(2) % state % vorticity % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % state % time_levs(2) % state % vorticity % array(:,:), &
block % mesh % nVertLevels, block % mesh % nVertices, &
block % parinfo % verticesToSend, block % parinfo % verticesToRecv)
end if
@@ -148,13 +148,13 @@
block => domain % blocklist
do while (associated(block))
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % tend % u % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % u % array(:,:), &
block % mesh % nVertLevels, block % mesh % nEdges, &
block % parinfo % edgesToSend, block % parinfo % edgesToRecv)
- call dmpar_exch_halo_field2dReal(domain % dminfo, block % tend % h % array(:,:), &
+ call mpas_dmpar_exch_halo_field2d_real(domain % dminfo, block % tend % h % array(:,:), &
block % mesh % nVertLevels, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
- call dmpar_exch_halo_field3dReal(domain % dminfo, block % tend % tracers % array(:,:,:), &
+ call mpas_dmpar_exch_halo_field3d_real(domain % dminfo, block % tend % tracers % array(:,:,:), &
block % mesh % nTracers, block % mesh % nVertLevels, block % mesh % nCells, &
block % parinfo % cellsToSend, block % parinfo % cellsToRecv)
block => block % next
@@ -240,7 +240,7 @@
block => block % next
end do
- call deallocate_state(provis)
+ call mpas_deallocate_state(provis)
end subroutine sw_rk4
Modified: branches/source_renaming/src/driver/Makefile
===================================================================
--- branches/source_renaming/src/driver/Makefile        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/driver/Makefile        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,13 +1,13 @@
.SUFFIXES: .F .o
-OBJS = module_mpas_subdriver.o \
+OBJS = mpas_subdriver.o \
mpas.o
all: $(OBJS)
-module_mpas_subdriver.o:
+mpas_subdriver.o:
-mpas.o: module_mpas_subdriver.o
+mpas.o: mpas_subdriver.o
clean:
        $(RM) *.o *.mod *.f90
Deleted: branches/source_renaming/src/driver/module_mpas_subdriver.F
===================================================================
--- branches/source_renaming/src/driver/module_mpas_subdriver.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/driver/module_mpas_subdriver.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,93 +0,0 @@
-module mpas_subdriver
-
- use mpas_framework
- use mpas_core
-
- type (dm_info), pointer :: dminfo
- type (domain_type), pointer :: domain
- type (io_output_object) :: output_obj
- integer :: output_frame
-
-
- contains
-
-
- subroutine mpas_init()
-
- implicit none
-
- real (kind=RKIND) :: dt
- character(len=32) :: timeStamp
-
- call timer_start("total time")
- call timer_start("initialize")
-
-
- !
- ! Initialize infrastructure
- !
- call mpas_framework_init(dminfo, domain)
-
-
- call input_state_for_domain(domain)
-
-
- !
- ! Initialize core
- !
- call mpas_core_init(domain, timeStamp)
-
- call timer_stop("initialize")
-
-
- !
- ! Set up output streams to be written to by the MPAS core
- !
- output_frame = 1
-
- if(config_frames_per_outfile > 0) then
- call output_state_init(output_obj, domain, "OUTPUT", trim(timeStamp))
- else
- call output_state_init(output_obj, domain, "OUTPUT")
- end if
-
-
- end subroutine mpas_init
-
-
- subroutine mpas_run()
-
- implicit none
-
- call mpas_core_run(domain, output_obj, output_frame)
-
- end subroutine mpas_run
-
-
- subroutine mpas_finalize()
-
- implicit none
-
- !
- ! Finalize output streams
- !
- call output_state_finalize(output_obj, domain % dminfo)
-
-
- !
- ! Finalize core
- !
- call mpas_core_finalize(domain)
-
- call timer_stop("total time")
- call timer_write()
-
-
- !
- ! Finalize infrastructure
- !
- call mpas_framework_finalize(dminfo, domain)
-
- end subroutine mpas_finalize
-
-end module mpas_subdriver
Copied: branches/source_renaming/src/driver/mpas_subdriver.F (from rev 1104, branches/source_renaming/src/driver/module_mpas_subdriver.F)
===================================================================
--- branches/source_renaming/src/driver/mpas_subdriver.F         (rev 0)
+++ branches/source_renaming/src/driver/mpas_subdriver.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -0,0 +1,93 @@
+module mpas_subdriver
+
+ use mpas_framework
+ use mpas_core
+
+ type (dm_info), pointer :: dminfo
+ type (domain_type), pointer :: domain
+ type (io_output_object) :: output_obj
+ integer :: output_frame
+
+
+ contains
+
+
+ subroutine mpas_init()
+
+ implicit none
+
+ real (kind=RKIND) :: dt
+ character(len=32) :: timeStamp
+
+ call mpas_timer_start("total time")
+ call mpas_timer_start("initialize")
+
+
+ !
+ ! Initialize infrastructure
+ !
+ call mpas_framework_init(dminfo, domain)
+
+
+ call mpas_input_state_for_domain(domain)
+
+
+ !
+ ! Initialize core
+ !
+ call mpas_core_init(domain, timeStamp)
+
+ call mpas_timer_stop("initialize")
+
+
+ !
+ ! Set up output streams to be written to by the MPAS core
+ !
+ output_frame = 1
+
+ if(config_frames_per_outfile > 0) then
+ call mpas_output_state_init(output_obj, domain, "OUTPUT", trim(timeStamp))
+ else
+ call mpas_output_state_init(output_obj, domain, "OUTPUT")
+ end if
+
+
+ end subroutine mpas_init
+
+
+ subroutine mpas_run()
+
+ implicit none
+
+ call mpas_core_run(domain, output_obj, output_frame)
+
+ end subroutine mpas_run
+
+
+ subroutine mpas_finalize()
+
+ implicit none
+
+ !
+ ! Finalize output streams
+ !
+ call mpas_output_state_finalize(output_obj, domain % dminfo)
+
+
+ !
+ ! Finalize core
+ !
+ call mpas_core_finalize(domain)
+
+ call mpas_timer_stop("total time")
+ call mpas_timer_write()
+
+
+ !
+ ! Finalize infrastructure
+ !
+ call mpas_framework_finalize(dminfo, domain)
+
+ end subroutine mpas_finalize
+
+end module mpas_subdriver
Modified: branches/source_renaming/src/framework/Makefile
===================================================================
--- branches/source_renaming/src/framework/Makefile        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/framework/Makefile        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,21 +1,21 @@
.SUFFIXES: .F .o
ifdef ZOLTAN_HOME
- ZOLTANOBJ = module_zoltan_interface.o
+ ZOLTANOBJ = mpas_zoltan_interface.o
endif
-OBJS = module_mpas_framework.o \
- module_timer.o \
- module_mpas_timekeeping.o \
- module_configure.o \
- module_constants.o \
- module_grid_types.o \
- module_hash.o \
- module_sort.o \
- module_block_decomp.o \
- module_dmpar.o \
- module_io_input.o \
- module_io_output.o \
+OBJS = mpas_framework.o \
+ mpas_timer.o \
+ mpas_timekeeping.o \
+ mpas_configure.o \
+ mpas_constants.o \
+ mpas_grid_types.o \
+ mpas_hash.o \
+ mpas_sort.o \
+ mpas_block_decomp.o \
+ mpas_dmpar.o \
+ mpas_io_input.o \
+ mpas_io_output.o \
$(ZOLTANOBJ) \
streams.o
@@ -24,19 +24,19 @@
framework: $(OBJS)
        ar -ru libframework.a $(OBJS)
-module_mpas_framework.o: module_dmpar.o module_io_input.o module_io_output.o module_grid_types.o module_configure.o module_timer.o
+mpas_framework.o: mpas_dmpar.o mpas_io_input.o mpas_io_output.o mpas_grid_types.o mpas_configure.o mpas_timer.o
-module_configure.o: module_dmpar.o
+mpas_configure.o: mpas_dmpar.o
-module_grid_types.o: module_dmpar.o
+mpas_grid_types.o: mpas_dmpar.o
-module_dmpar.o: module_sort.o streams.o
+mpas_dmpar.o: mpas_sort.o streams.o
-module_block_decomp.o: module_grid_types.o module_hash.o module_configure.o
+mpas_block_decomp.o: mpas_grid_types.o mpas_hash.o mpas_configure.o
-module_io_input.o: module_grid_types.o module_dmpar.o module_block_decomp.o module_sort.o module_configure.o module_mpas_timekeeping.o $(ZOLTANOBJ)
+mpas_io_input.o: mpas_grid_types.o mpas_dmpar.o mpas_block_decomp.o mpas_sort.o mpas_configure.o mpas_timekeeping.o $(ZOLTANOBJ)
-module_io_output.o: module_grid_types.o module_dmpar.o module_sort.o module_configure.o
+mpas_io_output.o: mpas_grid_types.o mpas_dmpar.o mpas_sort.o mpas_configure.o
clean:
        $(RM) *.o *.mod *.f90 libframework.a
Deleted: branches/source_renaming/src/framework/module_block_decomp.F
===================================================================
--- branches/source_renaming/src/framework/module_block_decomp.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/framework/module_block_decomp.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,306 +0,0 @@
-module block_decomp
-
- use dmpar
- use hash
-
- type graph
- integer :: nVerticesTotal
- integer :: nVertices, maxDegree
- integer :: ghostStart
- integer, dimension(:), pointer :: vertexID
- integer, dimension(:), pointer :: nAdjacent
- integer, dimension(:,:), pointer :: adjacencyList
- end type graph
-
-
- contains
-
-
- subroutine block_decomp_cells_for_proc(dminfo, partial_global_graph_info, local_cell_list)
-
- use configure
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- type (graph), intent(in) :: partial_global_graph_info
- integer, dimension(:), pointer :: local_cell_list
-
- integer, dimension(:), pointer :: global_cell_list
- integer, dimension(:), pointer :: global_start
-
- integer :: i, j, owner, iunit, istatus
- integer, dimension(:), pointer :: local_nvertices
- character (len=256) :: filename
-
- if (dminfo % nprocs > 1) then
-
- allocate(local_nvertices(dminfo % nprocs))
- allocate(global_start(dminfo % nprocs))
- allocate(global_cell_list(partial_global_graph_info % nVerticesTotal))
-
- if (dminfo % my_proc_id == IO_NODE) then
-
- iunit = 50 + dminfo % my_proc_id
- if (dminfo % nprocs < 10) then
- write(filename,'(a,i1)') trim(config_decomp_file_prefix), dminfo % nprocs
- else if (dminfo % nprocs < 100) then
- write(filename,'(a,i2)') trim(config_decomp_file_prefix), dminfo % nprocs
- else if (dminfo % nprocs < 1000) then
- write(filename,'(a,i3)') trim(config_decomp_file_prefix), dminfo % nprocs
- else if (dminfo % nprocs < 10000) then
- write(filename,'(a,i4)') trim(config_decomp_file_prefix), dminfo % nprocs
- else if (dminfo % nprocs < 100000) then
- write(filename,'(a,i5)') trim(config_decomp_file_prefix), dminfo % nprocs
- end if
-
- open(unit=iunit, file=trim(filename), form='formatted', status='old', iostat=istatus)
-
- if (istatus /= 0) then
- write(0,*) 'Could not open block decomposition file for ',dminfo % nprocs,' tasks.'
- write(0,*) 'Filename: ',trim(filename)
- call dmpar_abort(dminfo)
- end if
-
- local_nvertices(:) = 0
- do i=1,partial_global_graph_info % nVerticesTotal
- read(unit=iunit, fmt=*) owner
- local_nvertices(owner+1) = local_nvertices(owner+1) + 1
- end do
-
-! allocate(global_cell_list(partial_global_graph_info % nVerticesTotal))
-
- global_start(1) = 1
- do i=2,dminfo % nprocs
- global_start(i) = global_start(i-1) + local_nvertices(i-1)
- end do
-
- rewind(unit=iunit)
-
- do i=1,partial_global_graph_info % nVerticesTotal
- read(unit=iunit, fmt=*) owner
- global_cell_list(global_start(owner+1)) = i
- global_start(owner+1) = global_start(owner+1) + 1
- end do
-
- global_start(1) = 0
- do i=2,dminfo % nprocs
- global_start(i) = global_start(i-1) + local_nvertices(i-1)
- end do
-
- close(unit=iunit)
-
- call dmpar_bcast_ints(dminfo, dminfo % nprocs, local_nvertices)
- allocate(local_cell_list(local_nvertices(dminfo % my_proc_id + 1)))
-
- call dmpar_scatter_ints(dminfo, dminfo % nprocs, local_nvertices(dminfo % my_proc_id + 1), &
- global_start, local_nvertices, global_cell_list, local_cell_list)
-
- else
-
- call dmpar_bcast_ints(dminfo, dminfo % nprocs, local_nvertices)
- allocate(local_cell_list(local_nvertices(dminfo % my_proc_id + 1)))
-
- call dmpar_scatter_ints(dminfo, dminfo % nprocs, local_nvertices(dminfo % my_proc_id + 1), &
- global_start, local_nvertices, global_cell_list, local_cell_list)
-
- end if
-
- deallocate(local_nvertices)
- deallocate(global_start)
- deallocate(global_cell_list)
- else
- allocate(local_cell_list(partial_global_graph_info % nVerticesTotal))
- do i=1,size(local_cell_list)
- local_cell_list(i) = i
- end do
- endif
-
- end subroutine block_decomp_cells_for_proc
-
-
- subroutine block_decomp_partitioned_edge_list(nCells, cellIDList, maxCells, nEdges, cellsOnEdge, edgeIDList, ghostEdgeStart)
-
- implicit none
-
- integer, intent(in) :: nCells, maxCells, nEdges
- integer, dimension(nCells), intent(in) :: cellIDList
- integer, dimension(maxCells, nEdges), intent(in) :: cellsOnEdge
- integer, dimension(nEdges), intent(inout) :: edgeIDList
- integer, intent(inout) :: ghostEdgeStart
-
- integer :: i, j, lastEdge
- integer, dimension(nEdges) :: edgeIDListLocal
- type (hashtable) :: h
-
- call hash_init(h)
-
- do i=1,nCells
- ! OPTIMIZATION: Actually, if we can assume that all cellIDs are unique, the if-test is unnecessary
- if (.not. hash_search(h, cellIDList(i))) call hash_insert(h, cellIDList(i))
- end do
-
- lastEdge = 0
- ghostEdgeStart = nEdges+1
-
- edgeIDListLocal(:) = edgeIDList(:)
-
- do i=1,nEdges
- do j=1,maxCells
- if (cellsOnEdge(j,i) /= 0) exit
- end do
- if (j > maxCells) &
- write(0,*) 'Error in block_decomp_partitioned_edge_list: ',&
- 'edge/vertex is not adjacent to any valid cells'
- if (hash_search(h, cellsOnEdge(j,i))) then
- lastEdge = lastEdge + 1
- edgeIDList(lastEdge) = edgeIDListLocal(i)
- else
- ghostEdgeStart = ghostEdgeStart - 1
- edgeIDList(ghostEdgeStart) = edgeIDListLocal(i)
- end if
- if (ghostEdgeStart <= lastEdge) then
- write(0,*) 'block_decomp_partitioned_edge_list: ',&
- 'Somehow we have more edges than we thought we should.'
- end if
- end do
-
- if (ghostEdgeStart /= lastEdge + 1) then
- write(0,*) 'block_decomp_partitioned_edge_list:',&
- ' Somehow we didn''t have enough edges to fill edgeIDList.'
- end if
-
- call hash_destroy(h)
-
- end subroutine block_decomp_partitioned_edge_list
-
-
- subroutine block_decomp_all_edges_in_block(maxEdges, nCells, nEdgesOnCell, edgesOnCell, nEdges, edgeList)
-
- implicit none
-
- integer, intent(in) :: maxEdges, nCells
- integer, dimension(nCells), intent(in) :: nEdgesOnCell
- integer, dimension(maxEdges, nCells), intent(in) :: edgesOnCell
- integer, intent(out) :: nEdges
- integer, dimension(:), pointer :: edgeList
-
- integer :: i, j, k
- type (hashtable) :: h
-
- call hash_init(h)
-
- do i=1,nCells
- do j=1,nEdgesOnCell(i)
- if (.not. hash_search(h, edgesOnCell(j,i))) call hash_insert(h, edgesOnCell(j,i))
- end do
- end do
-
- nEdges = hash_size(h)
- allocate(edgeList(nEdges))
-
- call hash_destroy(h)
-
- call hash_init(h)
-
- k = 0
- do i=1,nCells
- do j=1,nEdgesOnCell(i)
- if (.not. hash_search(h, edgesOnCell(j,i))) then
- k = k + 1
- if (k > nEdges) then
- write(0,*) 'block_decomp_all_edges_in_block: ',&
- 'Trying to add more edges than expected.'
- return
- end if
- edgeList(k) = edgesOnCell(j,i)
- call hash_insert(h, edgesOnCell(j,i))
- end if
- end do
- end do
-
- call hash_destroy(h)
-
- if (k < nEdges) then
- write(0,*) 'block_decomp_all_edges_in_block: ',&
- 'Listed fewer edges than expected.'
- end if
-
- end subroutine block_decomp_all_edges_in_block
-
-
- subroutine block_decomp_add_halo(dminfo, local_graph_info, local_graph_with_halo)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- type (graph), intent(in) :: local_graph_info
- type (graph), intent(out) :: local_graph_with_halo
-
- integer :: i, j, k
- type (hashtable) :: h
-
-
- call hash_init(h)
-
- do i=1,local_graph_info % nVertices
- call hash_insert(h, local_graph_info % vertexID(i))
- end do
-
- do i=1,local_graph_info % nVertices
- do j=1,local_graph_info % nAdjacent(i)
- if (local_graph_info % adjacencyList(j,i) /= 0) then
- if (.not. hash_search(h, local_graph_info % adjacencyList(j,i))) then
- call hash_insert(h, local_graph_info % adjacencyList(j,i))
- end if
- end if
- end do
- end do
-
-
- local_graph_with_halo % nVertices = local_graph_info % nVertices
- local_graph_with_halo % maxDegree = local_graph_info % maxDegree
- local_graph_with_halo % nVerticesTotal = hash_size(h)
- local_graph_with_halo % ghostStart = local_graph_with_halo % nVertices + 1
- allocate(local_graph_with_halo % vertexID(local_graph_with_halo % nVerticesTotal))
- allocate(local_graph_with_halo % nAdjacent(local_graph_with_halo % nVerticesTotal))
- allocate(local_graph_with_halo % adjacencyList(local_graph_with_halo % maxDegree, local_graph_with_halo % nVerticesTotal))
-
- call hash_destroy(h)
-
- call hash_init(h)
-
- do i=1,local_graph_info % nVertices
- if (hash_search(h, local_graph_info % vertexID(i))) &
- write(0,*) 'block_decomp_add_halo: ', &
- 'There appear to be duplicates in vertexID list.'
- call hash_insert(h, local_graph_info % vertexID(i))
- local_graph_with_halo % vertexID(i) = local_graph_info % vertexID(i)
- local_graph_with_halo % nAdjacent(i) = local_graph_info % nAdjacent(i)
- local_graph_with_halo % adjacencyList(:,i) = local_graph_info % adjacencyList(:,i)
- end do
-
- k = local_graph_with_halo % ghostStart
- if (hash_size(h) /= k-1) &
- write(0,*) 'block_decomp_add_halo: ',&
- 'Somehow we don''t have the right number of non-ghost cells.'
- do i=1,local_graph_info % nVertices
- do j=1,local_graph_info % nAdjacent(i)
- if (local_graph_info % adjacencyList(j,i) /= 0) then
- if (.not. hash_search(h, local_graph_info % adjacencyList(j,i))) then
- call hash_insert(h, local_graph_info % adjacencyList(j,i))
- local_graph_with_halo % vertexID(k) = local_graph_info % adjacencyList(j,i)
- k = k + 1
- end if
- end if
- end do
- end do
- if (local_graph_with_halo % nVerticesTotal /= k-1) &
- write(0,*) 'block_decomp_add_halo: ',&
- 'Somehow we don''t have the right number of total cells.'
-
- call hash_destroy(h)
-
- end subroutine block_decomp_add_halo
-
-end module block_decomp
Deleted: branches/source_renaming/src/framework/module_configure.F
===================================================================
--- branches/source_renaming/src/framework/module_configure.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/framework/module_configure.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,36 +0,0 @@
-module configure
-
- use dmpar
-
-#include "config_defs.inc"
-
- contains
-
-
- subroutine read_namelist(dminfo)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
-
- integer :: funit
-
-#include "config_namelist_defs.inc"
-
- funit = 21
-
- ! Set default values for namelist options
-#include "config_set_defaults.inc"
-
- if (dminfo % my_proc_id == IO_NODE) then
- open(funit,file='namelist.input',status='old',form='formatted')
-
-#include "config_namelist_reads.inc"
- close(funit)
- end if
-
-#include "config_bcast_namelist.inc"
-
- end subroutine read_namelist
-
-end module configure
Deleted: branches/source_renaming/src/framework/module_constants.F
===================================================================
--- branches/source_renaming/src/framework/module_constants.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/framework/module_constants.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,20 +0,0 @@
-module constants
-
- real (kind=RKIND), parameter :: pii = 3.141592653589793
- real (kind=RKIND), parameter :: a = 6371229.0
- real (kind=RKIND), parameter :: omega = 7.29212e-5
- real (kind=RKIND), parameter :: gravity = 9.80616
- real (kind=RKIND), parameter :: rgas = 287.
- real (kind=RKIND), parameter :: cp = 1003.
- real (kind=RKIND), parameter :: cv = 716. ! cp - rgas
- real (kind=RKIND), parameter :: cvpm = -.71385842 ! -cv/cp
- real (kind=RKIND), parameter :: prandtl = 1.0
-
-
- contains
-
- subroutine dummy()
-
- end subroutine dummy
-
-end module constants
Deleted: branches/source_renaming/src/framework/module_dmpar.F
===================================================================
--- branches/source_renaming/src/framework/module_dmpar.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/framework/module_dmpar.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,1928 +0,0 @@
-module dmpar
-
- use sort
-
-#ifdef _MPI
-include 'mpif.h'
- integer, parameter :: MPI_INTEGERKIND = MPI_INTEGER
-
-#if (RKIND == 8)
- integer, parameter :: MPI_REALKIND = MPI_DOUBLE_PRECISION
-#else
- integer, parameter :: MPI_REALKIND = MPI_REAL
-#endif
-#endif
-
- integer, parameter :: IO_NODE = 0
- integer, parameter :: BUFSIZE = 6000
-
-
- type dm_info
- integer :: nprocs, my_proc_id, comm, info
- end type dm_info
-
-
- type exchange_list
- integer :: procID
- integer :: nlist
- integer, dimension(:), pointer :: list
- type (exchange_list), pointer :: next
- real (kind=RKIND), dimension(:), pointer :: rbuffer
- integer, dimension(:), pointer :: ibuffer
- integer :: reqID
- end type exchange_list
-
-
- interface dmpar_alltoall_field
- module procedure dmpar_alltoall_field1dInteger
- module procedure dmpar_alltoall_field2dInteger
- module procedure dmpar_alltoall_field1dReal
- module procedure dmpar_alltoall_field2dReal
- module procedure dmpar_alltoall_field3dReal
- end interface
-
-
- contains
-
-
- subroutine dmpar_init(dminfo)
-
- implicit none
-
- type (dm_info), intent(inout) :: dminfo
-
-#ifdef _MPI
- integer :: mpi_rank, mpi_size
- integer :: mpi_ierr
-
- ! Find out our rank and the total number of processors
- call MPI_Init(mpi_ierr)
- call MPI_Comm_rank(MPI_COMM_WORLD, mpi_rank, mpi_ierr)
- call MPI_Comm_size(MPI_COMM_WORLD, mpi_size, mpi_ierr)
-
- dminfo % comm = MPI_COMM_WORLD
-
- dminfo % nprocs = mpi_size
- dminfo % my_proc_id = mpi_rank
-
- write(0,'(a,i5,a,i5,a)') 'task ', mpi_rank, ' of ', mpi_size, &
- ' is running'
-
- call open_streams(dminfo % my_proc_id)
-
- dminfo % info = MPI_INFO_NULL
-#else
- dminfo % comm = 0
- dminfo % my_proc_id = IO_NODE
- dminfo % nprocs = 1
-#endif
-
- end subroutine dmpar_init
-
-
- subroutine dmpar_finalize(dminfo)
-
- implicit none
-
- type (dm_info), intent(inout) :: dminfo
-
-#ifdef _MPI
- integer :: mpi_ierr
-
- call MPI_Finalize(mpi_ierr)
-#endif
-
- end subroutine dmpar_finalize
-
-
- subroutine dmpar_abort(dminfo)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
-
-#ifdef _MPI
- integer :: mpi_ierr, mpi_errcode
-
- call MPI_Abort(dminfo % comm, mpi_errcode, mpi_ierr)
-#endif
-
- stop
-
- end subroutine dmpar_abort
-
-
- subroutine dmpar_global_abort(mesg)
-
- implicit none
-
- character (len=*), intent(in) :: mesg
-
-#ifdef _MPI
- integer :: mpi_ierr, mpi_errcode
-
- write(0,*) trim(mesg)
- call MPI_Abort(MPI_COMM_WORLD, mpi_errcode, mpi_ierr)
-#endif
-
- write(0,*) trim(mesg)
- stop
-
- end subroutine dmpar_global_abort
-
-
- subroutine dmpar_bcast_int(dminfo, i)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- integer, intent(inout) :: i
-
-#ifdef _MPI
- integer :: mpi_ierr
-
- call MPI_Bcast(i, 1, MPI_INTEGERKIND, IO_NODE, dminfo % comm, mpi_ierr)
-#endif
-
- end subroutine dmpar_bcast_int
-
-
- subroutine dmpar_bcast_ints(dminfo, n, iarray)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: n
- integer, dimension(n), intent(inout) :: iarray
-
-#ifdef _MPI
- integer :: mpi_ierr
-
- call MPI_Bcast(iarray, n, MPI_INTEGERKIND, IO_NODE, dminfo % comm, mpi_ierr)
-#endif
-
- end subroutine dmpar_bcast_ints
-
-
- subroutine dmpar_bcast_real(dminfo, r)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- real (kind=RKIND), intent(inout) :: r
-
-#ifdef _MPI
- integer :: mpi_ierr
-
- call MPI_Bcast(r, 1, MPI_REALKIND, IO_NODE, dminfo % comm, mpi_ierr)
-#endif
-
- end subroutine dmpar_bcast_real
-
-
- subroutine dmpar_bcast_reals(dminfo, n, rarray)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: n
- real (kind=RKIND), dimension(n), intent(inout) :: rarray
-
-#ifdef _MPI
- integer :: mpi_ierr
-
- call MPI_Bcast(rarray, n, MPI_REALKIND, IO_NODE, dminfo % comm, mpi_ierr)
-#endif
-
- end subroutine dmpar_bcast_reals
-
-
- subroutine dmpar_bcast_logical(dminfo, l)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- logical, intent(inout) :: l
-
-#ifdef _MPI
- integer :: mpi_ierr
- integer :: itemp
-
- if (dminfo % my_proc_id == IO_NODE) then
- if (l) then
- itemp = 1
- else
- itemp = 0
- end if
- end if
-
- call MPI_Bcast(itemp, 1, MPI_INTEGERKIND, IO_NODE, dminfo % comm, mpi_ierr)
-
- if (itemp == 1) then
- l = .true.
- else
- l = .false.
- end if
-#endif
-
- end subroutine dmpar_bcast_logical
-
-
- subroutine dmpar_bcast_char(dminfo, c)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- character (len=*), intent(inout) :: c
-
-#ifdef _MPI
- integer :: mpi_ierr
-
- call MPI_Bcast(c, len(c), MPI_CHARACTER, IO_NODE, dminfo % comm, mpi_ierr)
-#endif
-
- end subroutine dmpar_bcast_char
-
-
- subroutine dmpar_sum_int(dminfo, i, isum)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: i
- integer, intent(out) :: isum
-
- integer :: mpi_ierr
-
-#ifdef _MPI
- call MPI_Allreduce(i, isum, 1, MPI_INTEGERKIND, MPI_SUM, dminfo % comm, mpi_ierr)
-#else
- isum = i
-#endif
-
- end subroutine dmpar_sum_int
-
-
- subroutine dmpar_sum_real(dminfo, r, rsum)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- real(kind=RKIND), intent(in) :: r
- real(kind=RKIND), intent(out) :: rsum
-
- integer :: mpi_ierr
-
-#ifdef _MPI
- call MPI_Allreduce(r, rsum, 1, MPI_REALKIND, MPI_SUM, dminfo % comm, mpi_ierr)
-#else
- rsum = r
-#endif
-
- end subroutine dmpar_sum_real
-
-
- subroutine dmpar_min_int(dminfo, i, imin)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: i
- integer, intent(out) :: imin
-
- integer :: mpi_ierr
-
-#ifdef _MPI
- call MPI_Allreduce(i, imin, 1, MPI_INTEGERKIND, MPI_MIN, dminfo % comm, mpi_ierr)
-#else
- imin = i
-#endif
-
- end subroutine dmpar_min_int
-
-
- subroutine dmpar_min_real(dminfo, r, rmin)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- real(kind=RKIND), intent(in) :: r
- real(kind=RKIND), intent(out) :: rmin
-
- integer :: mpi_ierr
-
-#ifdef _MPI
- call MPI_Allreduce(r, rmin, 1, MPI_REALKIND, MPI_MIN, dminfo % comm, mpi_ierr)
-#else
- rmin = r
-#endif
-
- end subroutine dmpar_min_real
-
-
- subroutine dmpar_max_int(dminfo, i, imax)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: i
- integer, intent(out) :: imax
-
- integer :: mpi_ierr
-
-#ifdef _MPI
- call MPI_Allreduce(i, imax, 1, MPI_INTEGERKIND, MPI_MAX, dminfo % comm, mpi_ierr)
-#else
- imax = i
-#endif
-
- end subroutine dmpar_max_int
-
-
- subroutine dmpar_max_real(dminfo, r, rmax)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- real(kind=RKIND), intent(in) :: r
- real(kind=RKIND), intent(out) :: rmax
-
- integer :: mpi_ierr
-
-#ifdef _MPI
- call MPI_Allreduce(r, rmax, 1, MPI_REALKIND, MPI_MAX, dminfo % comm, mpi_ierr)
-#else
- rmax = r
-#endif
-
- end subroutine dmpar_max_real
-
-
- subroutine dmpar_sum_int_array(dminfo, nElements, inArray, outArray)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: nElements
- integer, dimension(nElements), intent(in) :: inArray
- integer, dimension(nElements), intent(out) :: outArray
-
- integer :: mpi_ierr
-
-#ifdef _MPI
- call MPI_Allreduce(inArray, outArray, nElements, MPI_INTEGERKIND, MPI_SUM, dminfo % comm, mpi_ierr)
-#else
- outArray = inArray
-#endif
-
- end subroutine dmpar_sum_int_array
-
-
- subroutine dmpar_min_int_array(dminfo, nElements, inArray, outArray)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: nElements
- integer, dimension(nElements), intent(in) :: inArray
- integer, dimension(nElements), intent(out) :: outArray
-
- integer :: mpi_ierr
-
-#ifdef _MPI
- call MPI_Allreduce(inArray, outArray, nElements, MPI_INTEGERKIND, MPI_MIN, dminfo % comm, mpi_ierr)
-#else
- outArray = inArray
-#endif
-
- end subroutine dmpar_min_int_array
-
-
- subroutine dmpar_max_int_array(dminfo, nElements, inArray, outArray)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: nElements
- integer, dimension(nElements), intent(in) :: inArray
- integer, dimension(nElements), intent(out) :: outArray
-
- integer :: mpi_ierr
-
-#ifdef _MPI
- call MPI_Allreduce(inArray, outArray, nElements, MPI_INTEGERKIND, MPI_MAX, dminfo % comm, mpi_ierr)
-#else
- outArray = inArray
-#endif
-
- end subroutine dmpar_max_int_array
-
-
- subroutine dmpar_sum_real_array(dminfo, nElements, inArray, outArray)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: nElements
- real(kind=RKIND), dimension(nElements), intent(in) :: inArray
- real(kind=RKIND), dimension(nElements), intent(out) :: outArray
-
- integer :: mpi_ierr
-
-#ifdef _MPI
- call MPI_Allreduce(inArray, outArray, nElements, MPI_REALKIND, MPI_SUM, dminfo % comm, mpi_ierr)
-#else
- outArray = inArray
-#endif
-
- end subroutine dmpar_sum_real_array
-
-
- subroutine dmpar_min_real_array(dminfo, nElements, inArray, outArray)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: nElements
- real(kind=RKIND), dimension(nElements), intent(in) :: inArray
- real(kind=RKIND), dimension(nElements), intent(out) :: outArray
-
- integer :: mpi_ierr
-
-#ifdef _MPI
- call MPI_Allreduce(inArray, outArray, nElements, MPI_REALKIND, MPI_MIN, dminfo % comm, mpi_ierr)
-#else
- outArray = inArray
-#endif
-
- end subroutine dmpar_min_real_array
-
-
- subroutine dmpar_max_real_array(dminfo, nElements, inArray, outArray)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: nElements
- real(kind=RKIND), dimension(nElements), intent(in) :: inArray
- real(kind=RKIND), dimension(nElements), intent(out) :: outArray
-
- integer :: mpi_ierr
-
-#ifdef _MPI
- call MPI_Allreduce(inArray, outArray, nElements, MPI_REALKIND, MPI_MAX, dminfo % comm, mpi_ierr)
-#else
- outArray = inArray
-#endif
-
- end subroutine dmpar_max_real_array
-
-
- subroutine dmpar_scatter_ints(dminfo, nprocs, noutlist, displs, counts, inlist, outlist)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: nprocs, noutlist
- integer, dimension(nprocs), intent(in) :: displs, counts
- integer, dimension(:), pointer :: inlist
- integer, dimension(noutlist), intent(inout) :: outlist
-
-#ifdef _MPI
- integer :: mpi_ierr
-
- call MPI_Scatterv(inlist, counts, displs, MPI_INTEGERKIND, outlist, noutlist, MPI_INTEGERKIND, IO_NODE, dminfo % comm, mpi_ierr)
-#endif
-
- end subroutine dmpar_scatter_ints
-
-
- subroutine dmpar_get_index_range(dminfo, &
- global_start, global_end, &
- local_start, local_end)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: global_start, global_end
- integer, intent(out) :: local_start, local_end
-
- local_start = nint(real(dminfo % my_proc_id) * real(global_end - global_start + 1) / real(dminfo % nprocs)) + 1
- local_end = nint(real(dminfo % my_proc_id + 1) * real(global_end - global_start + 1) / real(dminfo % nprocs))
-
- end subroutine dmpar_get_index_range
-
-
- subroutine dmpar_compute_index_range(dminfo, &
- local_start, local_end, &
- global_start, global_end)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: local_start, local_end
- integer, intent(inout) :: global_start, global_end
-
- integer :: n
- integer :: mpi_ierr
-
- n = local_end - local_start + 1
-
- if (dminfo % my_proc_id == 0) then
- global_start = 1
- global_end = global_start + n - 1
-
-#ifdef _MPI
- else if (dminfo % my_proc_id == dminfo % nprocs - 1) then
- call MPI_Recv(global_start, 1, MPI_INTEGERKIND, dminfo % my_proc_id - 1, 0, dminfo % comm, MPI_STATUS_IGNORE, mpi_ierr)
- global_end = global_start + n - 1
-
- else
- call MPI_Recv(global_start, 1, MPI_INTEGERKIND, dminfo % my_proc_id - 1, 0, dminfo % comm, MPI_STATUS_IGNORE, mpi_ierr)
- global_end = global_start + n
- call MPI_Send(global_end, 1, MPI_INTEGERKIND, dminfo % my_proc_id + 1, 0, dminfo % comm, mpi_ierr)
- global_end = global_end - 1
-#endif
-
- end if
-
-
- end subroutine dmpar_compute_index_range
-
-
- subroutine dmpar_get_owner_list(dminfo, &
- nOwnedList, nNeededList, &
- ownedList, neededList, &
- sendList, recvList)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: nOwnedList, nNeededList
- integer, dimension(nOwnedList), intent(in) :: ownedList
- integer, dimension(nNeededList), intent(in) :: neededList
- type (exchange_list), pointer :: sendList
- type (exchange_list), pointer :: recvList
-
- integer :: i, j, k, kk
- integer :: totalSize, nMesgRecv, nMesgSend, recvNeighbor, sendNeighbor, currentProc
- integer :: numToSend, numToRecv
- integer, dimension(nOwnedList) :: recipientList
- integer, dimension(2,nOwnedList) :: ownedListSorted
- integer, allocatable, dimension(:) :: ownerListIn, ownerListOut
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: mpi_ierr, mpi_rreq, mpi_sreq
-
-#ifdef _MPI
- allocate(sendList)
- allocate(recvList)
- nullify(sendList % next)
- nullify(recvList % next)
- sendListPtr => sendList
- recvListPtr => recvList
-
- do i=1,nOwnedList
- ownedListSorted(1,i) = ownedList(i)
- ownedListSorted(2,i) = i
- end do
- call quicksort(nOwnedList, ownedListSorted)
-
- call MPI_Allreduce(nNeededList, totalSize, 1, MPI_INTEGERKIND, MPI_MAX, dminfo % comm, mpi_ierr)
-
- allocate(ownerListIn(totalSize))
- allocate(ownerListOut(totalSize))
-
- nMesgRecv = nNeededList
- ownerListIn(1:nNeededList) = neededList(1:nNeededList)
-
- recvNeighbor = mod(dminfo % my_proc_id + dminfo % nprocs - 1, dminfo % nprocs)
- sendNeighbor = mod(dminfo % my_proc_id + 1, dminfo % nprocs)
-
- do i=1, dminfo % nprocs
-
- recipientList(:) = -1
- numToSend = 0
-
- currentProc = mod(dminfo % my_proc_id + dminfo % nprocs - i + 1, dminfo % nprocs)
- do j=1,nMesgRecv
- if (ownerListIn(j) > 0) then
- k = binary_search(ownedListSorted, 2, 1, nOwnedList, ownerListIn(j))
- if (k <= nOwnedList) then
- ownerListOut(j) = -1 * dminfo % my_proc_id
- numToSend = numToSend + 1
- recipientList(ownedListSorted(2,k)) = numToSend
- else
- ownerListOut(j) = ownerListIn(j)
- end if
- else
- ownerListOut(j) = ownerListIn(j)
- end if
- end do
-
- if (numToSend > 0) then
- allocate(sendListPtr % next)
- sendListPtr => sendListPtr % next
- sendListPtr % procID = currentProc
- sendListPtr % nlist = numToSend
- allocate(sendListPtr % list(numToSend))
- nullify(sendListPtr % next)
- kk = 1
- do j=1,nOwnedList
- if (recipientList(j) /= -1) then
- sendListPtr % list(recipientList(j)) = j
- kk = kk + 1
- end if
- end do
- end if
-
- nMesgSend = nMesgRecv
- call MPI_Irecv(nMesgRecv, 1, MPI_INTEGERKIND, recvNeighbor, i, dminfo % comm, mpi_rreq, mpi_ierr)
- call MPI_Isend(nMesgSend, 1, MPI_INTEGERKIND, sendNeighbor, i, dminfo % comm, mpi_sreq, mpi_ierr)
- call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr)
- call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr)
- call MPI_Irecv(ownerListIn, nMesgRecv, MPI_INTEGERKIND, recvNeighbor, i, dminfo % comm, mpi_rreq, mpi_ierr)
- call MPI_Isend(ownerListOut, nMesgSend, MPI_INTEGERKIND, sendNeighbor, i, dminfo % comm, mpi_sreq, mpi_ierr)
- call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr)
- call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr)
- end do
-
- do i=0, dminfo % nprocs - 1
-
- numToRecv = 0
- do j=1,nNeededList
- if (ownerListIn(j) == -i) numToRecv = numToRecv + 1
- end do
- if (numToRecv > 0) then
- allocate(recvListPtr % next)
- recvListPtr => recvListPtr % next
- recvListPtr % procID = i
- recvListPtr % nlist = numToRecv
- allocate(recvListPtr % list(numToRecv))
- nullify(recvListPtr % next)
- kk = 1
- do j=1,nNeededList
- if (ownerListIn(j) == -i) then
- recvListPtr % list(kk) = j
- kk = kk + 1
- end if
- end do
- end if
-
- end do
-
- deallocate(ownerListIn)
- deallocate(ownerListOut)
-
- sendListPtr => sendList
- sendList => sendList % next
- deallocate(sendListPtr)
-
- recvListPtr => recvList
- recvList => recvList % next
- deallocate(recvListPtr)
-
-#else
- allocate(recvList)
- recvList % procID = dminfo % my_proc_id
- recvList % nlist = nNeededList
- allocate(recvList % list(nNeededList))
- nullify(recvList % next)
- do j=1,nNeededList
- recvList % list(j) = j
- end do
-
- allocate(sendList)
- sendList % procID = dminfo % my_proc_id
- sendList % nlist = nOwnedList
- allocate(sendList % list(nOwnedList))
- nullify(sendList % next)
- do j=1,nOwnedList
- sendList % list(j) = j
- end do
-#endif
-
- end subroutine dmpar_get_owner_list
-
-
- subroutine dmpar_alltoall_field1dInteger(dminfo, arrayIn, arrayOut, nOwnedList, nNeededList, sendList, recvList)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- integer, dimension(*), intent(in) :: arrayIn
- integer, dimension(*), intent(inout) :: arrayOut
- integer, intent(in) :: nOwnedList, nNeededList
- type (exchange_list), pointer :: sendList, recvList
-
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer :: i
-
-#ifdef _MPI
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID == dminfo % my_proc_id) exit
- sendListPtr => sendListPtr % next
- end do
-
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID == dminfo % my_proc_id) exit
- recvListPtr => recvListPtr % next
- end do
-
- if (associated(recvListPtr) .and. associated(sendListPtr)) then
- do i=1,recvListPtr % nlist
- arrayOut(recvListPtr % list(i)) = arrayIn(sendListPtr % list(i))
- end do
- end if
-
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- allocate(recvListPtr % ibuffer(recvListPtr % nlist))
- call MPI_Irecv(recvListPtr % ibuffer, recvListPtr % nlist, MPI_INTEGERKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
- end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- allocate(sendListPtr % ibuffer(sendListPtr % nlist))
- call packSendBuf1dInteger(nOwnedList, arrayIn, sendListPtr, 1, sendListPtr % nlist, &
- sendListPtr % ibuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % ibuffer, sendListPtr % nlist, MPI_INTEGERKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
- end do
-
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- call unpackRecvBuf1dInteger(nNeededList, arrayOut, recvListPtr, 1, recvListPtr % nlist, &
- recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % ibuffer)
- end if
- recvListPtr => recvListPtr % next
- end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % ibuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
-
-#else
- if (nOwnedList /= nNeededList) then
- write(0,*) 'Error in dmpar_alltoall_field: For non-dmpar, ', &
- 'arrayIn and arrayOut dims must match.'
- call dmpar_abort(dminfo)
- else
- arrayOut(1:nNeededList) = arrayIn(1:nOwnedList)
- end if
-#endif
-
- end subroutine dmpar_alltoall_field1dInteger
-
-
- subroutine dmpar_alltoall_field2dInteger(dminfo, arrayIn, arrayOut, dim1, nOwnedList, nNeededList, sendList, recvList)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1, nOwnedList, nNeededList
- integer, dimension(dim1,*), intent(in) :: arrayIn
- integer, dimension(dim1,*), intent(inout) :: arrayOut
- type (exchange_list), pointer :: sendList, recvList
-
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer :: i, d2
-
-#ifdef _MPI
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID == dminfo % my_proc_id) exit
- sendListPtr => sendListPtr % next
- end do
-
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID == dminfo % my_proc_id) exit
- recvListPtr => recvListPtr % next
- end do
-
- if (associated(recvListPtr) .and. associated(sendListPtr)) then
- do i=1,recvListPtr % nlist
- arrayOut(:,recvListPtr % list(i)) = arrayIn(:,sendListPtr % list(i))
- end do
- end if
-
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * recvListPtr % nlist
- allocate(recvListPtr % ibuffer(d2))
- call MPI_Irecv(recvListPtr % ibuffer, d2, MPI_INTEGERKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
- end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * sendListPtr % nlist
- allocate(sendListPtr % ibuffer(d2))
- call packSendBuf2dInteger(1, dim1, nOwnedList, arrayIn, sendListPtr, 1, d2, &
- sendListPtr % ibuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % ibuffer, d2, MPI_INTEGERKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
- end do
-
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d2 = dim1 * recvListPtr % nlist
- call unpackRecvBuf2dInteger(1, dim1, nNeededList, arrayOut, recvListPtr, 1, d2, &
- recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % ibuffer)
- end if
- recvListPtr => recvListPtr % next
- end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % ibuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
-
-#else
- if (nOwnedList /= nNeededList) then
- write(0,*) 'Error in dmpar_alltoall_field: For non-dmpar, ', &
- 'arrayIn and arrayOut dims must match.'
- call dmpar_abort(dminfo)
- else
- arrayOut(:,1:nNeededList) = arrayIn(:,1:nOwnedList)
- end if
-#endif
-
- end subroutine dmpar_alltoall_field2dInteger
-
-
- subroutine dmpar_alltoall_field1dReal(dminfo, arrayIn, arrayOut, nOwnedList, nNeededList, sendList, recvList)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- real (kind=RKIND), dimension(*), intent(in) :: arrayIn
- real (kind=RKIND), dimension(*), intent(inout) :: arrayOut
- integer, intent(in) :: nOwnedList, nNeededList
- type (exchange_list), pointer :: sendList, recvList
-
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer :: i
-
-#ifdef _MPI
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID == dminfo % my_proc_id) exit
- sendListPtr => sendListPtr % next
- end do
-
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID == dminfo % my_proc_id) exit
- recvListPtr => recvListPtr % next
- end do
-
- if (associated(recvListPtr) .and. associated(sendListPtr)) then
- do i=1,recvListPtr % nlist
- arrayOut(recvListPtr % list(i)) = arrayIn(sendListPtr % list(i))
- end do
- end if
-
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- allocate(recvListPtr % rbuffer(recvListPtr % nlist))
- call MPI_Irecv(recvListPtr % rbuffer, recvListPtr % nlist, MPI_REALKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
- end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- allocate(sendListPtr % rbuffer(sendListPtr % nlist))
- call packSendBuf1dReal(nOwnedList, arrayIn, sendListPtr, 1, sendListPtr % nlist, &
- sendListPtr % rbuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % rbuffer, sendListPtr % nlist, MPI_REALKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
- end do
-
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- call unpackRecvBuf1dReal(nNeededList, arrayOut, recvListPtr, 1, recvListPtr % nlist, &
- recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % rbuffer)
- end if
- recvListPtr => recvListPtr % next
- end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % rbuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
-
-#else
- if (nOwnedList /= nNeededList) then
- write(0,*) 'Error in dmpar_alltoall_field: For non-dmpar, ', &
- 'arrayIn and arrayOut dims must match.'
- call dmpar_abort(dminfo)
- else
- arrayOut(1:nNeededList) = arrayIn(1:nOwnedList)
- end if
-#endif
-
- end subroutine dmpar_alltoall_field1dReal
-
-
- subroutine dmpar_alltoall_field2dReal(dminfo, arrayIn, arrayOut, dim1, nOwnedList, nNeededList, sendList, recvList)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1, nOwnedList, nNeededList
- real (kind=RKIND), dimension(dim1,*), intent(in) :: arrayIn
- real (kind=RKIND), dimension(dim1,*), intent(inout) :: arrayOut
- type (exchange_list), pointer :: sendList, recvList
-
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer :: i, d2
-
-#ifdef _MPI
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID == dminfo % my_proc_id) exit
- sendListPtr => sendListPtr % next
- end do
-
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID == dminfo % my_proc_id) exit
- recvListPtr => recvListPtr % next
- end do
-
- if (associated(recvListPtr) .and. associated(sendListPtr)) then
- do i=1,recvListPtr % nlist
- arrayOut(:,recvListPtr % list(i)) = arrayIn(:,sendListPtr % list(i))
- end do
- end if
-
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * recvListPtr % nlist
- allocate(recvListPtr % rbuffer(d2))
- call MPI_Irecv(recvListPtr % rbuffer, d2, MPI_REALKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
- end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * sendListPtr % nlist
- allocate(sendListPtr % rbuffer(d2))
- call packSendBuf2dReal(1, dim1, nOwnedList, arrayIn, sendListPtr, 1, d2, &
- sendListPtr % rbuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % rbuffer, d2, MPI_REALKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
- end do
-
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d2 = dim1 * recvListPtr % nlist
- call unpackRecvBuf2dReal(1, dim1, nNeededList, arrayOut, recvListPtr, 1, d2, &
- recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % rbuffer)
- end if
- recvListPtr => recvListPtr % next
- end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % rbuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
-
-#else
- if (nOwnedList /= nNeededList) then
- write(0,*) 'Error in dmpar_alltoall_field: For non-dmpar, ', &
- 'arrayIn and arrayOut dims must match.'
- call dmpar_abort(dminfo)
- else
- arrayOut(:,1:nNeededList) = arrayIn(:,1:nOwnedList)
- end if
-#endif
-
- end subroutine dmpar_alltoall_field2dReal
-
-
- subroutine dmpar_alltoall_field3dReal(dminfo, arrayIn, arrayOut, dim1, dim2, nOwnedList, nNeededList, sendList, recvList)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1, dim2, nOwnedList, nNeededList
- real (kind=RKIND), dimension(dim1,dim2,*), intent(in) :: arrayIn
- real (kind=RKIND), dimension(dim1,dim2,*), intent(inout) :: arrayOut
- type (exchange_list), pointer :: sendList, recvList
-
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer :: i, d3
-
-#ifdef _MPI
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID == dminfo % my_proc_id) exit
- sendListPtr => sendListPtr % next
- end do
-
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID == dminfo % my_proc_id) exit
- recvListPtr => recvListPtr % next
- end do
-
- if (associated(recvListPtr) .and. associated(sendListPtr)) then
- do i=1,recvListPtr % nlist
- arrayOut(:,:,recvListPtr % list(i)) = arrayIn(:,:,sendListPtr % list(i))
- end do
- end if
-
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- d3 = dim1 * dim2 * recvListPtr % nlist
- allocate(recvListPtr % rbuffer(d3))
- call MPI_Irecv(recvListPtr % rbuffer, d3, MPI_REALKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
- end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- d3 = dim1 * dim2 * sendListPtr % nlist
- allocate(sendListPtr % rbuffer(d3))
- call packSendBuf3dReal(1, dim1, 1, dim2, nOwnedList, arrayIn, sendListPtr, 1, d3, &
- sendListPtr % rbuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % rbuffer, d3, MPI_REALKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
- end do
-
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d3 = dim1 * dim2 * recvListPtr % nlist
- call unpackRecvBuf3dReal(1, dim1, 1, dim2, nNeededList, arrayOut, recvListPtr, 1, d3, &
- recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % rbuffer)
- end if
- recvListPtr => recvListPtr % next
- end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % rbuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
-
-#else
- if (nOwnedList /= nNeededList) then
- write(0,*) 'Error in dmpar_alltoall_field: For non-dmpar, ', &
- 'arrayIn and arrayOut dims must match.'
- call dmpar_abort(dminfo)
- else
- arrayOut(:,:,1:nNeededList) = arrayIn(:,:,1:nOwnedList)
- end if
-#endif
-
- end subroutine dmpar_alltoall_field3dReal
-
-
- subroutine packSendBuf1dInteger(nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
-
- implicit none
-
- integer, intent(in) :: nField, nBuffer, startPackIdx
- integer, dimension(*), intent(in) :: field
- type (exchange_list), intent(in) :: sendList
- integer, dimension(nBuffer), intent(out) :: buffer
- integer, intent(inout) :: nPacked, lastPackedIdx
-
- integer :: i
-
- nPacked = 0
- do i=startPackIdx, sendList % nlist
- nPacked = nPacked + 1
- if (nPacked > nBuffer) then
- nPacked = nPacked - 1
- lastPackedIdx = i - 1
- return
- end if
- buffer(nPacked) = field(sendList % list(i))
- end do
- lastPackedIdx = sendList % nlist
-
- end subroutine packSendBuf1dInteger
-
-
- subroutine packSendBuf2dInteger(ds, de, nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
-
- implicit none
-
- integer, intent(in) :: ds, de, nField, nBuffer, startPackIdx
- integer, dimension(ds:de,*), intent(in) :: field
- type (exchange_list), intent(in) :: sendList
- integer, dimension(nBuffer), intent(out) :: buffer
- integer, intent(inout) :: nPacked, lastPackedIdx
-
- integer :: i, n
-
- n = de-ds+1
-
- if (n > nBuffer) then
- write(0,*) 'packSendBuf2dInteger: Not enough space in buffer', &
- ' to fit a single slice.'
- return
- end if
-
- nPacked = 0
- do i=startPackIdx, sendList % nlist
- nPacked = nPacked + n
- if (nPacked > nBuffer) then
- nPacked = nPacked - n
- lastPackedIdx = i - 1
- return
- end if
- buffer(nPacked-n+1:nPacked) = field(ds:de,sendList % list(i))
- end do
- lastPackedIdx = sendList % nlist
-
- end subroutine packSendBuf2dInteger
-
-
- subroutine packSendBuf3dInteger(d1s, d1e, d2s, d2e, nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
-
- implicit none
-
- integer, intent(in) :: d1s, d1e, d2s, d2e, nField, nBuffer, startPackIdx
- integer, dimension(d1s:d1e,d2s:d2e,*), intent(in) :: field
- type (exchange_list), intent(in) :: sendList
- integer, dimension(nBuffer), intent(out) :: buffer
- integer, intent(inout) :: nPacked, lastPackedIdx
-
- integer :: i, j, k, n
-
- n = (d1e-d1s+1) * (d2e-d2s+1)
-
- if (n > nBuffer) then
- write(0,*) 'packSendBuf3dInteger: Not enough space in buffer', &
- ' to fit a single slice.'
- return
- end if
-
- nPacked = 0
- do i=startPackIdx, sendList % nlist
- nPacked = nPacked + n
- if (nPacked > nBuffer) then
- nPacked = nPacked - n
- lastPackedIdx = i - 1
- return
- end if
- k = nPacked-n+1
- do j=d2s,d2e
- buffer(k:k+d1e-d1s) = field(d1s:d1e,j,sendList % list(i))
- k = k + d1e-d1s+1
- end do
- end do
- lastPackedIdx = sendList % nlist
-
- end subroutine packSendBuf3dInteger
-
-
- subroutine packSendBuf1dReal(nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
-
- implicit none
-
- integer, intent(in) :: nField, nBuffer, startPackIdx
- real (kind=RKIND), dimension(*), intent(in) :: field
- type (exchange_list), intent(in) :: sendList
- real (kind=RKIND), dimension(nBuffer), intent(out) :: buffer
- integer, intent(inout) :: nPacked, lastPackedIdx
-
- integer :: i
-
- nPacked = 0
- do i=startPackIdx, sendList % nlist
- nPacked = nPacked + 1
- if (nPacked > nBuffer) then
- nPacked = nPacked - 1
- lastPackedIdx = i - 1
- return
- end if
- buffer(nPacked) = field(sendList % list(i))
- end do
- lastPackedIdx = sendList % nlist
-
- end subroutine packSendBuf1dReal
-
-
- subroutine packSendBuf2dReal(ds, de, nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
-
- implicit none
-
- integer, intent(in) :: ds, de, nField, nBuffer, startPackIdx
- real (kind=RKIND), dimension(ds:de,*), intent(in) :: field
- type (exchange_list), intent(in) :: sendList
- real (kind=RKIND), dimension(nBuffer), intent(out) :: buffer
- integer, intent(inout) :: nPacked, lastPackedIdx
-
- integer :: i, n
-
- n = de-ds+1
-
- if (n > nBuffer) then
- write(0,*) 'packSendBuf2dReal: Not enough space in buffer', &
- ' to fit a single slice.'
- return
- end if
-
- nPacked = 0
- do i=startPackIdx, sendList % nlist
- nPacked = nPacked + n
- if (nPacked > nBuffer) then
- nPacked = nPacked - n
- lastPackedIdx = i - 1
- return
- end if
- buffer(nPacked-n+1:nPacked) = field(ds:de,sendList % list(i))
- end do
- lastPackedIdx = sendList % nlist
-
- end subroutine packSendBuf2dReal
-
-
- subroutine packSendBuf3dReal(d1s, d1e, d2s, d2e, nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
-
- implicit none
-
- integer, intent(in) :: d1s, d1e, d2s, d2e, nField, nBuffer, startPackIdx
- real (kind=RKIND), dimension(d1s:d1e,d2s:d2e,*), intent(in) :: field
- type (exchange_list), intent(in) :: sendList
- real (kind=RKIND), dimension(nBuffer), intent(out) :: buffer
- integer, intent(inout) :: nPacked, lastPackedIdx
-
- integer :: i, j, k, n
-
- n = (d1e-d1s+1) * (d2e-d2s+1)
-
- if (n > nBuffer) then
- write(0,*) 'packSendBuf3dReal: Not enough space in buffer', &
- ' to fit a single slice.'
- return
- end if
-
- nPacked = 0
- do i=startPackIdx, sendList % nlist
- nPacked = nPacked + n
- if (nPacked > nBuffer) then
- nPacked = nPacked - n
- lastPackedIdx = i - 1
- return
- end if
- k = nPacked-n+1
- do j=d2s,d2e
- buffer(k:k+d1e-d1s) = field(d1s:d1e,j,sendList % list(i))
- k = k + d1e-d1s+1
- end do
- end do
- lastPackedIdx = sendList % nlist
-
- end subroutine packSendBuf3dReal
-
-
- subroutine unpackRecvBuf1dInteger(nField, field, recvList, startUnpackIdx, nBuffer, buffer, nUnpacked, lastUnpackedIdx)
-
- implicit none
-
- integer, intent(in) :: nField, nBuffer, startUnpackIdx
- integer, dimension(*), intent(inout) :: field
- type (exchange_list), intent(in) :: recvList
- integer, dimension(nBuffer), intent(in) :: buffer
- integer, intent(inout) :: nUnpacked, lastUnpackedIdx
-
- integer :: i
-
- nUnpacked = 0
- do i=startUnpackIdx, recvList % nlist
- nUnpacked = nUnpacked + 1
- if (nUnpacked > nBuffer) then
- nUnpacked = nUnpacked - 1
- lastUnpackedIdx = i - 1
- return
- end if
- field(recvList % list(i)) = buffer(nUnpacked)
- end do
- lastUnpackedIdx = recvList % nlist
-
- end subroutine unpackRecvBuf1dInteger
-
-
- subroutine unpackRecvBuf2dInteger(ds, de, nField, field, recvList, startUnpackIdx, nBuffer, buffer, nUnpacked, lastUnpackedIdx)
-
- implicit none
-
- integer, intent(in) :: ds, de, nField, nBuffer, startUnpackIdx
- integer, dimension(ds:de,*), intent(inout) :: field
- type (exchange_list), intent(in) :: recvList
- integer, dimension(nBuffer), intent(in) :: buffer
- integer, intent(inout) :: nUnpacked, lastUnpackedIdx
-
- integer :: i, n
-
- n = de-ds+1
-
- nUnpacked = 0
- do i=startUnpackIdx, recvList % nlist
- nUnpacked = nUnpacked + n
- if (nUnpacked > nBuffer) then
- nUnpacked = nUnpacked - n
- lastUnpackedIdx = i - 1
- return
- end if
- field(ds:de,recvList % list(i)) = buffer(nUnpacked-n+1:nUnpacked)
- end do
- lastUnpackedIdx = recvList % nlist
-
- end subroutine unpackRecvBuf2dInteger
-
-
- subroutine unpackRecvBuf3dInteger(d1s, d1e, d2s, d2e, nField, field, recvList, startUnpackIdx, nBuffer, buffer, &
- nUnpacked, lastUnpackedIdx)
-
- implicit none
-
- integer, intent(in) :: d1s, d1e, d2s, d2e, nField, nBuffer, startUnpackIdx
- integer, dimension(d1s:d1e,d2s:d2e,*), intent(inout) :: field
- type (exchange_list), intent(in) :: recvList
- integer, dimension(nBuffer), intent(in) :: buffer
- integer, intent(inout) :: nUnpacked, lastUnpackedIdx
-
- integer :: i, j, k, n
-
- n = (d1e-d1s+1) * (d2e-d2s+1)
-
- nUnpacked = 0
- do i=startUnpackIdx, recvList % nlist
- nUnpacked = nUnpacked + n
- if (nUnpacked > nBuffer) then
- nUnpacked = nUnpacked - n
- lastUnpackedIdx = i - 1
- return
- end if
- k = nUnpacked-n+1
- do j=d2s,d2e
- field(d1s:d1e,j,recvList % list(i)) = buffer(k:k+d1e-d1s)
- k = k + d1e-d1s+1
- end do
- end do
- lastUnpackedIdx = recvList % nlist
-
- end subroutine unpackRecvBuf3dInteger
-
-
- subroutine dmpar_exch_halo_field1dInteger(dminfo, array, dim1, sendList, recvList)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1
- integer, dimension(*), intent(inout) :: array
- type (exchange_list), pointer :: sendList, recvList
-
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
-
-#ifdef _MPI
-
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- allocate(recvListPtr % ibuffer(recvListPtr % nlist))
- call MPI_Irecv(recvListPtr % ibuffer, recvListPtr % nlist, MPI_INTEGERKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
- end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- allocate(sendListPtr % ibuffer(sendListPtr % nlist))
- call packSendBuf1dInteger(dim1, array, sendListPtr, 1, sendListPtr % nlist, sendListPtr % ibuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % ibuffer, sendListPtr % nlist, MPI_INTEGERKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
- end do
-
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- call unpackRecvBuf1dInteger(dim1, array, recvListPtr, 1, recvListPtr % nlist, recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % ibuffer)
- end if
- recvListPtr => recvListPtr % next
- end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % ibuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
-
-#endif
-
- end subroutine dmpar_exch_halo_field1dInteger
-
-
- subroutine dmpar_exch_halo_field2dInteger(dminfo, array, dim1, dim2, sendList, recvList)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1, dim2
- integer, dimension(dim1,*), intent(inout) :: array
- type (exchange_list), pointer :: sendList, recvList
-
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer :: d2
-
-#ifdef _MPI
-
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * recvListPtr % nlist
- allocate(recvListPtr % ibuffer(d2))
- call MPI_Irecv(recvListPtr % ibuffer, d2, MPI_INTEGERKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
- end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * sendListPtr % nlist
- allocate(sendListPtr % ibuffer(d2))
- call packSendBuf2dInteger(1, dim1, dim2, array, sendListPtr, 1, d2, sendListPtr % ibuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % ibuffer, d2, MPI_INTEGERKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
- end do
-
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d2 = dim1 * recvListPtr % nlist
- call unpackRecvBuf2dInteger(1, dim1, dim2, array, recvListPtr, 1, d2, recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % ibuffer)
- end if
- recvListPtr => recvListPtr % next
- end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % ibuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
-
-#endif
-
- end subroutine dmpar_exch_halo_field2dInteger
-
-
- subroutine dmpar_exch_halo_field3dInteger(dminfo, array, dim1, dim2, dim3, sendList, recvList)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1, dim2, dim3
- integer, dimension(dim1,dim2,*), intent(inout) :: array
- type (exchange_list), pointer :: sendList, recvList
-
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer :: d3
-
-#ifdef _MPI
-
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- d3 = dim1 * dim2 * recvListPtr % nlist
- allocate(recvListPtr % ibuffer(d3))
- call MPI_Irecv(recvListPtr % ibuffer, d3, MPI_INTEGERKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
- end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- d3 = dim1 * dim2 * sendListPtr % nlist
- allocate(sendListPtr % ibuffer(d3))
- call packSendBuf3dInteger(1, dim1, 1, dim2, dim3, array, sendListPtr, 1, d3, &
- sendListPtr % ibuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % ibuffer, d3, MPI_INTEGERKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
- end do
-
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d3 = dim1 * dim2 * recvListPtr % nlist
- call unpackRecvBuf3dInteger(1, dim1, 1, dim2, dim3, array, recvListPtr, 1, d3, &
- recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % ibuffer)
- end if
- recvListPtr => recvListPtr % next
- end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % ibuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
-
-#endif
-
- end subroutine dmpar_exch_halo_field3dInteger
-
-
- subroutine unpackRecvBuf1dReal(nField, field, recvList, startUnpackIdx, nBuffer, buffer, nUnpacked, lastUnpackedIdx)
-
- implicit none
-
- integer, intent(in) :: nField, nBuffer, startUnpackIdx
- real (kind=RKIND), dimension(*), intent(inout) :: field
- type (exchange_list), intent(in) :: recvList
- real (kind=RKIND), dimension(nBuffer), intent(in) :: buffer
- integer, intent(inout) :: nUnpacked, lastUnpackedIdx
-
- integer :: i
-
- nUnpacked = 0
- do i=startUnpackIdx, recvList % nlist
- nUnpacked = nUnpacked + 1
- if (nUnpacked > nBuffer) then
- nUnpacked = nUnpacked - 1
- lastUnpackedIdx = i - 1
- return
- end if
- field(recvList % list(i)) = buffer(nUnpacked)
- end do
- lastUnpackedIdx = recvList % nlist
-
- end subroutine unpackRecvBuf1dReal
-
-
- subroutine unpackRecvBuf2dReal(ds, de, nField, field, recvList, startUnpackIdx, nBuffer, buffer, nUnpacked, lastUnpackedIdx)
-
- implicit none
-
- integer, intent(in) :: ds, de, nField, nBuffer, startUnpackIdx
- real (kind=RKIND), dimension(ds:de,*), intent(inout) :: field
- type (exchange_list), intent(in) :: recvList
- real (kind=RKIND), dimension(nBuffer), intent(in) :: buffer
- integer, intent(inout) :: nUnpacked, lastUnpackedIdx
-
- integer :: i, n
-
- n = de-ds+1
-
- nUnpacked = 0
- do i=startUnpackIdx, recvList % nlist
- nUnpacked = nUnpacked + n
- if (nUnpacked > nBuffer) then
- nUnpacked = nUnpacked - n
- lastUnpackedIdx = i - 1
- return
- end if
- field(ds:de,recvList % list(i)) = buffer(nUnpacked-n+1:nUnpacked)
- end do
- lastUnpackedIdx = recvList % nlist
-
- end subroutine unpackRecvBuf2dReal
-
-
- subroutine unpackRecvBuf3dReal(d1s, d1e, d2s, d2e, nField, field, recvList, startUnpackIdx, nBuffer, buffer, &
- nUnpacked, lastUnpackedIdx)
-
- implicit none
-
- integer, intent(in) :: d1s, d1e, d2s, d2e, nField, nBuffer, startUnpackIdx
- real (kind=RKIND), dimension(d1s:d1e,d2s:d2e,*), intent(inout) :: field
- type (exchange_list), intent(in) :: recvList
- real (kind=RKIND), dimension(nBuffer), intent(in) :: buffer
- integer, intent(inout) :: nUnpacked, lastUnpackedIdx
-
- integer :: i, j, k, n
-
- n = (d1e-d1s+1) * (d2e-d2s+1)
-
- nUnpacked = 0
- do i=startUnpackIdx, recvList % nlist
- nUnpacked = nUnpacked + n
- if (nUnpacked > nBuffer) then
- nUnpacked = nUnpacked - n
- lastUnpackedIdx = i - 1
- return
- end if
- k = nUnpacked-n+1
- do j=d2s,d2e
- field(d1s:d1e,j,recvList % list(i)) = buffer(k:k+d1e-d1s)
- k = k + d1e-d1s+1
- end do
- end do
- lastUnpackedIdx = recvList % nlist
-
- end subroutine unpackRecvBuf3dReal
-
-
- subroutine dmpar_exch_halo_field1dReal(dminfo, array, dim1, sendList, recvList)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1
- real (kind=RKIND), dimension(*), intent(inout) :: array
- type (exchange_list), pointer :: sendList, recvList
-
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
-
-#ifdef _MPI
-
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- allocate(recvListPtr % rbuffer(recvListPtr % nlist))
- call MPI_Irecv(recvListPtr % rbuffer, recvListPtr % nlist, MPI_REALKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
- end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- allocate(sendListPtr % rbuffer(sendListPtr % nlist))
- call packSendBuf1dReal(dim1, array, sendListPtr, 1, sendListPtr % nlist, sendListPtr % rbuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % rbuffer, sendListPtr % nlist, MPI_REALKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
- end do
-
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- call unpackRecvBuf1dReal(dim1, array, recvListPtr, 1, recvListPtr % nlist, recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % rbuffer)
- end if
- recvListPtr => recvListPtr % next
- end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % rbuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
-
-#endif
-
- end subroutine dmpar_exch_halo_field1dReal
-
-
- subroutine dmpar_exch_halo_field2dReal(dminfo, array, dim1, dim2, sendList, recvList)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1, dim2
- real (kind=RKIND), dimension(dim1,*), intent(inout) :: array
- type (exchange_list), pointer :: sendList, recvList
-
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer :: d2
-
-#ifdef _MPI
-
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * recvListPtr % nlist
- allocate(recvListPtr % rbuffer(d2))
- call MPI_Irecv(recvListPtr % rbuffer, d2, MPI_REALKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
- end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- d2 = dim1 * sendListPtr % nlist
- allocate(sendListPtr % rbuffer(d2))
- call packSendBuf2dReal(1, dim1, dim2, array, sendListPtr, 1, d2, sendListPtr % rbuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % rbuffer, d2, MPI_REALKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
- end do
-
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d2 = dim1 * recvListPtr % nlist
- call unpackRecvBuf2dReal(1, dim1, dim2, array, recvListPtr, 1, d2, recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % rbuffer)
- end if
- recvListPtr => recvListPtr % next
- end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % rbuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
-
-#endif
-
- end subroutine dmpar_exch_halo_field2dReal
-
-
- subroutine dmpar_exch_halo_field3dReal(dminfo, array, dim1, dim2, dim3, sendList, recvList)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- integer, intent(in) :: dim1, dim2, dim3
- real (kind=RKIND), dimension(dim1,dim2,*), intent(inout) :: array
- type (exchange_list), pointer :: sendList, recvList
-
- type (exchange_list), pointer :: sendListPtr, recvListPtr
- integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
- integer :: mpi_ierr
- integer :: d3
-
-#ifdef _MPI
-
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- d3 = dim1 * dim2 * recvListPtr % nlist
- allocate(recvListPtr % rbuffer(d3))
- call MPI_Irecv(recvListPtr % rbuffer, d3, MPI_REALKIND, &
- recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
- end if
- recvListPtr => recvListPtr % next
- end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- d3 = dim1 * dim2 * sendListPtr % nlist
- allocate(sendListPtr % rbuffer(d3))
- call packSendBuf3dReal(1, dim1, 1, dim2, dim3, array, sendListPtr, 1, d3, &
- sendListPtr % rbuffer, nPacked, lastPackedIdx)
- call MPI_Isend(sendListPtr % rbuffer, d3, MPI_REALKIND, &
- sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
- end if
- sendListPtr => sendListPtr % next
- end do
-
- recvListPtr => recvList
- do while (associated(recvListPtr))
- if (recvListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- d3 = dim1 * dim2 * recvListPtr % nlist
- call unpackRecvBuf3dReal(1, dim1, 1, dim2, dim3, array, recvListPtr, 1, d3, &
- recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
- deallocate(recvListPtr % rbuffer)
- end if
- recvListPtr => recvListPtr % next
- end do
-
- sendListPtr => sendList
- do while (associated(sendListPtr))
- if (sendListPtr % procID /= dminfo % my_proc_id) then
- call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
- deallocate(sendListPtr % rbuffer)
- end if
- sendListPtr => sendListPtr % next
- end do
-
-#endif
-
- end subroutine dmpar_exch_halo_field3dReal
-
-
-end module dmpar
Deleted: branches/source_renaming/src/framework/module_grid_types.F
===================================================================
--- branches/source_renaming/src/framework/module_grid_types.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/framework/module_grid_types.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,219 +0,0 @@
-module grid_types
-
- use dmpar
-
- integer, parameter :: nTimeLevs = 2
-
-
- ! Derived type describing info for doing I/O specific to a field
- type io_info
- character (len=1024) :: fieldName
- integer, dimension(4) :: start
- integer, dimension(4) :: count
- logical :: input
- logical :: sfc
- logical :: restart
- logical :: output
- end type io_info
-
-
- ! Derived type for storing fields
- type field3DReal
- type (block_type), pointer :: block
- real (kind=RKIND), dimension(:,:,:), pointer :: array
- type (io_info), pointer :: ioinfo
- end type field3DReal
-
-
- ! Derived type for storing fields
- type field2DReal
- type (block_type), pointer :: block
- real (kind=RKIND), dimension(:,:), pointer :: array
- type (io_info), pointer :: ioinfo
- end type field2DReal
-
-
- ! Derived type for storing fields
- type field1DReal
- type (block_type), pointer :: block
- real (kind=RKIND), dimension(:), pointer :: array
- type (io_info), pointer :: ioinfo
- end type field1DReal
-
-
- ! Derived type for storing fields
- type field0DReal
- type (block_type), pointer :: block
- real (kind=RKIND) :: scalar
- type (io_info), pointer :: ioinfo
- end type field0DReal
-
-
- ! Derived type for storing fields
- type field2DInteger
- type (block_type), pointer :: block
- integer, dimension(:,:), pointer :: array
- type (io_info), pointer :: ioinfo
- end type field2DInteger
-
-
- ! Derived type for storing fields
- type field1DInteger
- type (block_type), pointer :: block
- integer, dimension(:), pointer :: array
- type (io_info), pointer :: ioinfo
- end type field1DInteger
-
-
- ! Derived type for storing fields
- type field1DChar
- type (block_type), pointer :: block
- character (len=64), dimension(:), pointer :: array
- type (io_info), pointer :: ioinfo
- end type field1DChar
-
-
- ! Derived type for storing fields
- type field0DChar
- type (block_type), pointer :: block
- character (len=64) :: scalar
- type (io_info), pointer :: ioinfo
- end type field0DChar
-
-
- ! Derived type for storing grid meta-data
- type mesh_type
-
-#include "field_dimensions.inc"
-
- logical :: on_a_sphere
- real (kind=RKIND) :: sphere_radius
-
-#include "time_invariant_fields.inc"
-
- end type mesh_type
-
-
-#include "variable_groups.inc"
-
-
- ! Type for storing (possibly architecture specific) information concerning to parallelism
- type parallel_info
- type (exchange_list), pointer :: cellsToSend ! List of types describing which cells to send to other blocks
- type (exchange_list), pointer :: cellsToRecv ! List of types describing which cells to receive from other blocks
- type (exchange_list), pointer :: edgesToSend ! List of types describing which edges to send to other blocks
- type (exchange_list), pointer :: edgesToRecv ! List of types describing which edges to receive from other blocks
- type (exchange_list), pointer :: verticesToSend ! List of types describing which vertices to send to other blocks
- type (exchange_list), pointer :: verticesToRecv ! List of types describing which vertices to receive from other blocks
- end type parallel_info
-
-
- ! Derived type for storing part of a domain; used as a basic unit of work for a process
- type block_type
-
-#include "block_group_members.inc"
-
- type (domain_type), pointer :: domain
-
- type (parallel_info), pointer :: parinfo
-
- type (block_type), pointer :: prev, next
- end type block_type
-
-
- ! Derived type for storing list of blocks from a domain to be handled by a process
- type domain_type
- type (block_type), pointer :: blocklist
-
- ! Also store parallelization info here
- type (dm_info), pointer :: dminfo
- end type domain_type
-
-
- contains
-
-
- subroutine allocate_domain(dom, dminfo)
-
- implicit none
-
- type (domain_type), pointer :: dom
- type (dm_info), pointer :: dminfo
-
- allocate(dom)
- nullify(dom % blocklist)
- dom % dminfo => dminfo
-
- end subroutine allocate_domain
-
-
- subroutine allocate_block(b, dom, &
-#include "dim_dummy_args.inc"
- )
-
- implicit none
-
- type (block_type), pointer :: b
- type (domain_type), pointer :: dom
-#include "dim_dummy_decls.inc"
-
- integer :: i
-
- nullify(b % prev)
- nullify(b % next)
-
- allocate(b % parinfo)
-
- b % domain => dom
-
-#include "block_allocs.inc"
-
- end subroutine allocate_block
-
-
-#include "group_alloc_routines.inc"
-
-
- subroutine deallocate_domain(dom)
-
- implicit none
-
- type (domain_type), pointer :: dom
-
- type (block_type), pointer :: block_ptr
-
- block_ptr => dom % blocklist
- do while (associated(block_ptr))
- call deallocate_block(block_ptr)
- block_ptr => block_ptr % next
- end do
-
- deallocate(dom)
-
- end subroutine deallocate_domain
-
-
- subroutine deallocate_block(b)
-
- implicit none
-
- type (block_type), intent(inout) :: b
-
- integer :: i
-
- deallocate(b % parinfo)
-
-#include "block_deallocs.inc"
-
- end subroutine deallocate_block
-
-
-#include "group_dealloc_routines.inc"
-
-
-#include "group_copy_routines.inc"
-
-
-#include "group_shift_level_routines.inc"
-
-end module grid_types
Deleted: branches/source_renaming/src/framework/module_hash.F
===================================================================
--- branches/source_renaming/src/framework/module_hash.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/framework/module_hash.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,175 +0,0 @@
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-! MODULE HASH
-!
-! Purpose: This module provides a dictionary/hashtable with insert, search, and
-! remove routines.
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-module hash
-
- ! Parameters
- integer, parameter :: TABLESIZE=27183 ! Number of spaces in the table (the
- ! number of linked lists)
-
- type hashnode
- integer :: key
- type (hashnode), pointer :: next
- end type hashnode
-
- type hashnode_ptr
- type (hashnode), pointer :: p ! Pointer to a list of entries
- end type hashnode_ptr
-
- type hashtable
- integer :: size
- type (hashnode_ptr), dimension(TABLESIZE) :: table ! The hashtable array
- end type hashtable
-
-
- contains
-
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! Name: hash_init
- !
- ! Purpose: To initialize a hashtable
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- subroutine hash_init(h)
-
- implicit none
-
- ! Arguments
- type (hashtable), intent(inout) :: h
-
- ! Local variables
- integer :: i
-
- h%size = 0
-
- do i=1,TABLESIZE
- nullify(h%table(i)%p)
- end do
-
- end subroutine hash_init
-
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! Name: hash_insert
- !
- ! Purpose: Given a hashtable h and a key to be inserted into the hashtable,
- ! this routine adds key to the table.
- !
- ! NOTE: If the key already exists in the table, a second copy of the
- ! key is added to the table
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- subroutine hash_insert(h, key)
-
- implicit none
-
- ! Arguments
- integer, intent(in) :: key
- type (hashtable), intent(inout) :: h
-
- ! Local variables
- integer :: hashval, i
- type (hashnode), pointer :: hn
-
- hashval = mod(key, TABLESIZE) + 1
-
- allocate(hn)
- hn%key = key
- hn%next => h%table(hashval)%p
- h%table(hashval)%p => hn
-
- h%size = h%size + 1
-
- end subroutine hash_insert
-
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! Name: hash_search
- !
- ! Purpose: This function returns TRUE if the specified key was found in the
- ! hashtable h, and FALSE otherwise.
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- logical function hash_search(h, key)
-
- implicit none
-
- ! Arguments
- integer, intent(in) :: key
- type (hashtable), intent(inout) :: h
-
- ! Local variables
- integer :: hashval, i
- type (hashnode), pointer :: cursor
-
- hash_search = .false.
-
- hashval = mod(key, TABLESIZE) + 1
-
- cursor => h%table(hashval)%p
- do while(associated(cursor))
- if (cursor%key == key) then
- hash_search = .true.
- return
- else
- cursor => cursor%next
- end if
- end do
-
- return
-
- end function hash_search
-
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! Name: hash_size
- !
- ! Purpose: Returns the number of items in the hash table h.
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- integer function hash_size(h)
-
- implicit none
-
- ! Arguments
- type (hashtable) :: h
-
- hash_size = h%size
-
- return
-
- end function hash_size
-
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! Name: hash_destroy
- !
- ! Purpose: Frees all memory associated with hashtable h. This routine may be
- ! used to remove all entries from a hashtable.
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- subroutine hash_destroy(h)
-
- implicit none
-
- ! Arguments
- type (hashtable), intent(inout) :: h
-
- ! Local variables
- integer :: i
- type (hashnode), pointer :: cursor, cursor_prev
-
- do i=1,TABLESIZE
- cursor => h%table(i)%p
- do while(associated(cursor))
- cursor_prev => cursor
- cursor => cursor%next
- deallocate(cursor_prev)
- end do
- nullify(h%table(i)%p)
- end do
-
- h%size = 0
-
- end subroutine hash_destroy
-
-end module hash
Deleted: branches/source_renaming/src/framework/module_io_input.F
===================================================================
--- branches/source_renaming/src/framework/module_io_input.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/framework/module_io_input.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,1614 +0,0 @@
-module io_input
-
- use grid_types
- use dmpar
- use block_decomp
- use sort
- use configure
- use mpas_timekeeping
-
-
-#ifdef HAVE_ZOLTAN
- use zoltan_interface
-#endif
-
- integer, parameter :: STREAM_INPUT=1, STREAM_SFC=2, STREAM_RESTART=3
-
- type io_input_object
- character (len=1024) :: filename
- integer :: rd_ncid
- integer :: stream
-
- integer :: time
-
-#include "io_input_obj_decls.inc"
- end type io_input_object
-
-
- interface io_input_field
- module procedure io_input_field0dReal
- module procedure io_input_field1dReal
- module procedure io_input_field2dReal
- module procedure io_input_field3dReal
- module procedure io_input_field1dInteger
- module procedure io_input_field2dInteger
- module procedure io_input_field0dChar
- module procedure io_input_field1dChar
- end interface io_input_field
-
- interface io_input_field_time
- module procedure io_input_field0dReal_time
- module procedure io_input_field1dReal_time
- module procedure io_input_field2dReal_time
- module procedure io_input_field3dReal_time
- module procedure io_input_field1dInteger_time
- module procedure io_input_field0dChar_time
- module procedure io_input_field1dChar_time
- end interface io_input_field_time
-
- type (exchange_list), pointer :: sendCellList, recvCellList
- type (exchange_list), pointer :: sendEdgeList, recvEdgeList
- type (exchange_list), pointer :: sendVertexList, recvVertexList
- type (exchange_list), pointer :: sendVertLevelList, recvVertLevelList
-
- integer :: readCellStart, readCellEnd, nReadCells
- integer :: readEdgeStart, readEdgeEnd, nReadEdges
- integer :: readVertexStart, readVertexEnd, nReadVertices
- integer :: readVertLevelStart, readVertLevelEnd, nReadVertLevels
-
-
- contains
-
-
- subroutine input_state_for_domain(domain)
-
- implicit none
-
- type (domain_type), pointer :: domain
-
- integer :: i, j, k
- type (io_input_object) :: input_obj
-#include "dim_decls.inc"
-
- character (len=16) :: c_on_a_sphere
- real (kind=RKIND) :: r_sphere_radius
-
- type (field1dInteger) :: indexToCellIDField
- type (field1dInteger) :: indexToEdgeIDField
- type (field1dInteger) :: indexToVertexIDField
- type (field1dInteger) :: nEdgesOnCellField
- type (field2dInteger) :: cellsOnCellField
- type (field2dInteger) :: edgesOnCellField
- type (field2dInteger) :: verticesOnCellField
- type (field2dInteger) :: cellsOnEdgeField
- type (field2dInteger) :: cellsOnVertexField
-
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- type (field1dReal) :: xCellField, yCellField, zCellField
- type (field1dReal) :: xEdgeField, yEdgeField, zEdgeField
- type (field1dReal) :: xVertexField, yVertexField, zVertexField
-#endif
-#endif
-
- type (field1DChar) :: xtime
-
- integer, dimension(:), pointer :: indexToCellID_0Halo
- integer, dimension(:), pointer :: nEdgesOnCell_0Halo
- integer, dimension(:,:), pointer :: cellsOnCell_0Halo
-
- integer, dimension(:,:), pointer :: edgesOnCell_2Halo
- integer, dimension(:,:), pointer :: verticesOnCell_2Halo
- integer, dimension(:,:), pointer :: cellsOnEdge_2Halo
- integer, dimension(:,:), pointer :: cellsOnVertex_2Halo
-
- integer, dimension(:,:), pointer :: cellIDSorted
- integer, dimension(:,:), pointer :: edgeIDSorted
- integer, dimension(:,:), pointer :: vertexIDSorted
-
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- real (kind=RKIND), dimension(:), pointer :: xCell, yCell, zCell
- real (kind=RKIND), dimension(:), pointer :: xEdge, yEdge, zEdge
- real (kind=RKIND), dimension(:), pointer :: xVertex, yVertex, zVertex
-#endif
-#endif
-
- integer, dimension(:), pointer :: local_cell_list, local_edge_list, local_vertex_list
- integer, dimension(:), pointer :: local_vertlevel_list, needed_vertlevel_list
- integer :: nlocal_edges, nlocal_vertices
- type (exchange_list), pointer :: send1Halo, recv1Halo
- type (exchange_list), pointer :: send2Halo, recv2Halo
- type (graph) :: partial_global_graph_info
- type (graph) :: block_graph_0Halo, block_graph_1Halo, block_graph_2Halo
- integer :: ghostEdgeStart, ghostVertexStart
-
- type (MPAS_Time_type) :: startTime
- type (MPAS_Time_type) :: sliceTime
- type (MPAS_TimeInterval_type) :: timeDiff
- type (MPAS_TimeInterval_type) :: minTimeDiff
- character(len=32) :: timeStamp
-
- if (config_do_restart) then
- input_obj % filename = trim(config_restart_name)
- input_obj % stream = STREAM_RESTART
- else
- input_obj % filename = trim(config_input_name)
- input_obj % stream = STREAM_INPUT
- end if
- call io_input_init(input_obj, domain % dminfo)
-
-
- !
- ! Read global number of cells/edges/vertices
- !
-#include "read_dims.inc"
-
- !
- ! Determine the range of cells/edges/vertices that a processor will initially read
- ! from the input file
- !
- call dmpar_get_index_range(domain % dminfo, 1, nCells, readCellStart, readCellEnd)
- nReadCells = readCellEnd - readCellStart + 1
-
- call dmpar_get_index_range(domain % dminfo, 1, nEdges, readEdgeStart, readEdgeEnd)
- nReadEdges = readEdgeEnd - readEdgeStart + 1
-
- call dmpar_get_index_range(domain % dminfo, 1, nVertices, readVertexStart, readVertexEnd)
- nReadVertices = readVertexEnd - readVertexStart + 1
-
- readVertLevelStart = 1
- readVertLevelEnd = nVertLevels
- nReadVertLevels = nVertLevels
-
-
- !
- ! Allocate and read fields that we will need in order to ultimately work out
- ! which cells/edges/vertices are owned by each block, and which are ghost
- !
-
- ! Global cell indices
- allocate(indexToCellIDField % ioinfo)
- indexToCellIDField % ioinfo % fieldName = 'indexToCellID'
- indexToCellIDField % ioinfo % start(1) = readCellStart
- indexToCellIDField % ioinfo % count(1) = nReadCells
- allocate(indexToCellIDField % array(nReadCells))
- call io_input_field(input_obj, indexToCellIDField)
-
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- ! Cell x-coordinates (in 3d Cartesian space)
- allocate(xCellField % ioinfo)
- xCellField % ioinfo % fieldName = 'xCell'
- xCellField % ioinfo % start(1) = readCellStart
- xCellField % ioinfo % count(1) = nReadCells
- allocate(xCellField % array(nReadCells))
- call io_input_field(input_obj, xCellField)
-
- ! Cell y-coordinates (in 3d Cartesian space)
- allocate(yCellField % ioinfo)
- yCellField % ioinfo % fieldName = 'yCell'
- yCellField % ioinfo % start(1) = readCellStart
- yCellField % ioinfo % count(1) = nReadCells
- allocate(yCellField % array(nReadCells))
- call io_input_field(input_obj, yCellField)
-
- ! Cell z-coordinates (in 3d Cartesian space)
- allocate(zCellField % ioinfo)
- zCellField % ioinfo % fieldName = 'zCell'
- zCellField % ioinfo % start(1) = readCellStart
- zCellField % ioinfo % count(1) = nReadCells
- allocate(zCellField % array(nReadCells))
- call io_input_field(input_obj, zCellField)
-#endif
-#endif
-
-
- ! Global edge indices
- allocate(indexToEdgeIDField % ioinfo)
- indexToEdgeIDField % ioinfo % fieldName = 'indexToEdgeID'
- indexToEdgeIDField % ioinfo % start(1) = readEdgeStart
- indexToEdgeIDField % ioinfo % count(1) = nReadEdges
- allocate(indexToEdgeIDField % array(nReadEdges))
- call io_input_field(input_obj, indexToEdgeIDField)
-
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- ! Edge x-coordinates (in 3d Cartesian space)
- allocate(xEdgeField % ioinfo)
- xEdgeField % ioinfo % fieldName = 'xEdge'
- xEdgeField % ioinfo % start(1) = readEdgeStart
- xEdgeField % ioinfo % count(1) = nReadEdges
- allocate(xEdgeField % array(nReadEdges))
- call io_input_field(input_obj, xEdgeField)
-
- ! Edge y-coordinates (in 3d Cartesian space)
- allocate(yEdgeField % ioinfo)
- yEdgeField % ioinfo % fieldName = 'yEdge'
- yEdgeField % ioinfo % start(1) = readEdgeStart
- yEdgeField % ioinfo % count(1) = nReadEdges
- allocate(yEdgeField % array(nReadEdges))
- call io_input_field(input_obj, yEdgeField)
-
- ! Edge z-coordinates (in 3d Cartesian space)
- allocate(zEdgeField % ioinfo)
- zEdgeField % ioinfo % fieldName = 'zEdge'
- zEdgeField % ioinfo % start(1) = readEdgeStart
- zEdgeField % ioinfo % count(1) = nReadEdges
- allocate(zEdgeField % array(nReadEdges))
- call io_input_field(input_obj, zEdgeField)
-#endif
-#endif
-
- ! Global vertex indices
- allocate(indexToVertexIDField % ioinfo)
- indexToVertexIDField % ioinfo % fieldName = 'indexToVertexID'
- indexToVertexIDField % ioinfo % start(1) = readVertexStart
- indexToVertexIDField % ioinfo % count(1) = nReadVertices
- allocate(indexToVertexIDField % array(nReadVertices))
- call io_input_field(input_obj, indexToVertexIDField)
-
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- ! Vertex x-coordinates (in 3d Cartesian space)
- allocate(xVertexField % ioinfo)
- xVertexField % ioinfo % fieldName = 'xVertex'
- xVertexField % ioinfo % start(1) = readVertexStart
- xVertexField % ioinfo % count(1) = nReadVertices
- allocate(xVertexField % array(nReadVertices))
- call io_input_field(input_obj, xVertexField)
-
- ! Vertex y-coordinates (in 3d Cartesian space)
- allocate(yVertexField % ioinfo)
- yVertexField % ioinfo % fieldName = 'yVertex'
- yVertexField % ioinfo % start(1) = readVertexStart
- yVertexField % ioinfo % count(1) = nReadVertices
- allocate(yVertexField % array(nReadVertices))
- call io_input_field(input_obj, yVertexField)
-
- ! Vertex z-coordinates (in 3d Cartesian space)
- allocate(zVertexField % ioinfo)
- zVertexField % ioinfo % fieldName = 'zVertex'
- zVertexField % ioinfo % start(1) = readVertexStart
- zVertexField % ioinfo % count(1) = nReadVertices
- allocate(zVertexField % array(nReadVertices))
- call io_input_field(input_obj, zVertexField)
-#endif
-#endif
-
- ! Number of cell/edges/vertices adjacent to each cell
- allocate(nEdgesOnCellField % ioinfo)
- nEdgesOnCellField % ioinfo % fieldName = 'nEdgesOnCell'
- nEdgesOnCellField % ioinfo % start(1) = readCellStart
- nEdgesOnCellField % ioinfo % count(1) = nReadCells
- allocate(nEdgesOnCellField % array(nReadCells))
- call io_input_field(input_obj, nEdgesOnCellField)
-
- ! Global indices of cells adjacent to each cell
- allocate(cellsOnCellField % ioinfo)
- cellsOnCellField % ioinfo % fieldName = 'cellsOnCell'
- cellsOnCellField % ioinfo % start(1) = 1
- cellsOnCellField % ioinfo % start(2) = readCellStart
- cellsOnCellField % ioinfo % count(1) = maxEdges
- cellsOnCellField % ioinfo % count(2) = nReadCells
- allocate(cellsOnCellField % array(maxEdges,nReadCells))
- call io_input_field(input_obj, cellsOnCellField)
-
- ! Global indices of edges adjacent to each cell
- allocate(edgesOnCellField % ioinfo)
- edgesOnCellField % ioinfo % fieldName = 'edgesOnCell'
- edgesOnCellField % ioinfo % start(1) = 1
- edgesOnCellField % ioinfo % start(2) = readCellStart
- edgesOnCellField % ioinfo % count(1) = maxEdges
- edgesOnCellField % ioinfo % count(2) = nReadCells
- allocate(edgesOnCellField % array(maxEdges,nReadCells))
- call io_input_field(input_obj, edgesOnCellField)
-
- ! Global indices of vertices adjacent to each cell
- allocate(verticesOnCellField % ioinfo)
- verticesOnCellField % ioinfo % fieldName = 'verticesOnCell'
- verticesOnCellField % ioinfo % start(1) = 1
- verticesOnCellField % ioinfo % start(2) = readCellStart
- verticesOnCellField % ioinfo % count(1) = maxEdges
- verticesOnCellField % ioinfo % count(2) = nReadCells
- allocate(verticesOnCellField % array(maxEdges,nReadCells))
- call io_input_field(input_obj, verticesOnCellField)
-
- ! Global indices of cells adjacent to each edge
- ! used for determining which edges are owned by a block, where
- ! iEdge is owned iff cellsOnEdge(1,iEdge) is an owned cell
- allocate(cellsOnEdgeField % ioinfo)
- cellsOnEdgeField % ioinfo % fieldName = 'cellsOnEdge'
- cellsOnEdgeField % ioinfo % start(1) = 1
- cellsOnEdgeField % ioinfo % start(2) = readEdgeStart
- cellsOnEdgeField % ioinfo % count(1) = 2
- cellsOnEdgeField % ioinfo % count(2) = nReadEdges
- allocate(cellsOnEdgeField % array(2,nReadEdges))
- call io_input_field(input_obj, cellsOnEdgeField)
-
- ! Global indices of cells adjacent to each vertex
- ! used for determining which vertices are owned by a block, where
- ! iVtx is owned iff cellsOnVertex(1,iVtx) is an owned cell
- allocate(cellsOnVertexField % ioinfo)
- cellsOnVertexField % ioinfo % fieldName = 'cellsOnVertex'
- cellsOnVertexField % ioinfo % start(1) = 1
- cellsOnVertexField % ioinfo % start(2) = readVertexStart
- cellsOnVertexField % ioinfo % count(1) = vertexDegree
- cellsOnVertexField % ioinfo % count(2) = nReadVertices
- allocate(cellsOnVertexField % array(vertexDegree,nReadVertices))
- call io_input_field(input_obj, cellsOnVertexField)
-
-
- !
- ! Set up a graph derived data type describing the connectivity for the cells
- ! that were read by this process
- ! A partial description is passed to the block decomp module by each process,
- ! and the block decomp module returns with a list of global cell indices
- ! that belong to the block on this process
- !
- partial_global_graph_info % nVertices = nReadCells
- partial_global_graph_info % nVerticesTotal = nCells
- partial_global_graph_info % maxDegree = maxEdges
- partial_global_graph_info % ghostStart = nVertices+1
- allocate(partial_global_graph_info % vertexID(nReadCells))
- allocate(partial_global_graph_info % nAdjacent(nReadCells))
- allocate(partial_global_graph_info % adjacencyList(maxEdges, nReadCells))
-
- partial_global_graph_info % vertexID(:) = indexToCellIDField % array(:)
- partial_global_graph_info % nAdjacent(:) = nEdgesOnCellField % array(:)
- partial_global_graph_info % adjacencyList(:,:) = cellsOnCellField % array(:,:)
-
-
- ! TODO: Ensure (by renaming or exchanging) that initial cell range on each proc is contiguous
- ! This situation may occur when reading a restart file with cells/edges/vertices written
- ! in a scrambled order
-
-
- ! Determine which cells are owned by this process
- call block_decomp_cells_for_proc(domain % dminfo, partial_global_graph_info, local_cell_list)
-
- deallocate(partial_global_graph_info % vertexID)
- deallocate(partial_global_graph_info % nAdjacent)
- deallocate(partial_global_graph_info % adjacencyList)
-
-
- allocate(indexToCellID_0Halo(size(local_cell_list)))
- allocate(nEdgesOnCell_0Halo(size(local_cell_list)))
- allocate(cellsOnCell_0Halo(maxEdges, size(local_cell_list)))
-
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- allocate(xCell(size(local_cell_list)))
- allocate(yCell(size(local_cell_list)))
- allocate(zCell(size(local_cell_list)))
-#endif
-#endif
-
- !
- ! Now that each process has a list of cells that it owns, exchange cell connectivity
- ! information between the processes that read info for a cell and those that own that cell
- !
- call dmpar_get_owner_list(domain % dminfo, &
- size(indexToCellIDField % array), size(local_cell_list), &
- indexToCellIDField % array, local_cell_list, &
- sendCellList, recvCellList)
-
- call dmpar_alltoall_field(domain % dminfo, indexToCellIDField % array, indexToCellID_0Halo, &
- size(indexToCellIDField % array), size(local_cell_list), &
- sendCellList, recvCellList)
-
- call dmpar_alltoall_field(domain % dminfo, nEdgesOnCellField % array, nEdgesOnCell_0Halo, &
- size(indexToCellIDField % array), size(local_cell_list), &
- sendCellList, recvCellList)
-
- call dmpar_alltoall_field(domain % dminfo, cellsOnCellField % array, cellsOnCell_0Halo, &
- size(cellsOnCellField % array, 1), size(indexToCellIDField % array), size(local_cell_list), &
- sendCellList, recvCellList)
-
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- call dmpar_alltoall_field(domain % dminfo, xCellField % array, xCell, &
- size(xCellField % array), size(local_cell_list), &
- sendCellList, recvCellList)
-
- call dmpar_alltoall_field(domain % dminfo, yCellField % array, yCell, &
- size(yCellField % array), size(local_cell_list), &
- sendCellList, recvCellList)
-
- call dmpar_alltoall_field(domain % dminfo, zCellField % array, zCell, &
- size(zCellField % array), size(local_cell_list), &
- sendCellList, recvCellList)
-#endif
-#endif
-
-
- deallocate(sendCellList % list)
- deallocate(sendCellList)
- deallocate(recvCellList % list)
- deallocate(recvCellList)
-
-
-
- !
- ! Build a graph of cell connectivity based on cells owned by this process
- !
- block_graph_0Halo % nVerticesTotal = size(local_cell_list)
- block_graph_0Halo % nVertices = size(local_cell_list)
- block_graph_0Halo % maxDegree = maxEdges
- block_graph_0Halo % ghostStart = size(local_cell_list) + 1
- allocate(block_graph_0Halo % vertexID(size(local_cell_list)))
- allocate(block_graph_0Halo % nAdjacent(size(local_cell_list)))
- allocate(block_graph_0Halo % adjacencyList(maxEdges, size(local_cell_list)))
-
- block_graph_0Halo % vertexID(:) = indexToCellID_0Halo(:)
- block_graph_0Halo % nAdjacent(:) = nEdgesOnCell_0Halo(:)
- block_graph_0Halo % adjacencyList(:,:) = cellsOnCell_0Halo(:,:)
-
- ! Get back a graph describing the owned cells plus the cells in the 1-halo
- call block_decomp_add_halo(domain % dminfo, block_graph_0Halo, block_graph_1Halo)
-
-
- !
- ! Work out exchange lists for 1-halo and exchange cell information for 1-halo
- !
- call dmpar_get_owner_list(domain % dminfo, &
- block_graph_0Halo % nVertices, block_graph_1Halo % nVerticesTotal, &
- block_graph_0Halo % vertexID, block_graph_1Halo % vertexID, &
- send1Halo, recv1Halo)
-
- call dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % vertexID, block_graph_1Halo % vertexID, &
- block_graph_0Halo % nVertices, block_graph_1Halo % nVerticesTotal, &
- send1Halo, recv1Halo)
-
- call dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % nAdjacent, block_graph_1Halo % nAdjacent, &
- block_graph_0Halo % nVertices, block_graph_1Halo % nVerticesTotal, &
- send1Halo, recv1Halo)
-
- call dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % adjacencyList, block_graph_1Halo % adjacencyList, &
- block_graph_0Halo % maxDegree, block_graph_0Halo % nVertices, block_graph_1Halo % nVerticesTotal, &
- send1Halo, recv1Halo)
-
-
- !
- ! Work out exchange lists for 2-halo and exchange cell information for 2-halo
- !
- block_graph_1Halo % nVertices = block_graph_1Halo % nVerticesTotal
- block_graph_1Halo % ghostStart = block_graph_1Halo % nVerticesTotal + 1
-
- ! Get back a graph describing the owned and 1-halo cells plus the cells in the 2-halo
- call block_decomp_add_halo(domain % dminfo, block_graph_1Halo, block_graph_2Halo)
-
- block_graph_2Halo % nVertices = block_graph_0Halo % nVertices
- block_graph_2Halo % ghostStart = block_graph_2Halo % nVertices + 1
-
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- !! For now, only use Zoltan with MPI
- !! Zoltan initialization
- call zoltanStart()
-
- !! Zoltan hook for cells
- call zoltanOrderLocHSFC_Cells(block_graph_2Halo%nVertices,block_graph_2Halo%VertexID,3,xCell,yCell,zCell)
-#endif
-#endif
-
- call dmpar_get_owner_list(domain % dminfo, &
- block_graph_0Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
- block_graph_0Halo % vertexID, block_graph_2Halo % vertexID, &
- send2Halo, recv2Halo)
-
- call dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % vertexID, block_graph_2Halo % vertexID, &
- block_graph_0Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
- send2Halo, recv2Halo)
-
- call dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % nAdjacent, block_graph_2Halo % nAdjacent, &
- block_graph_0Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
- send2Halo, recv2Halo)
-
- call dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % adjacencyList, block_graph_2Halo % adjacencyList, &
- block_graph_0Halo % maxDegree, block_graph_0Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
- send2Halo, recv2Halo)
-
-
-
- !
- ! Knowing which cells are in block and the 2-halo, we can exchange lists of which edges are
- ! on each cell and which vertices are on each cell from the processes that read these
- ! fields for each cell to the processes that own the cells
- !
- allocate(edgesOnCell_2Halo(maxEdges, block_graph_2Halo % nVerticesTotal))
- allocate(verticesOnCell_2Halo(maxEdges, block_graph_2Halo % nVerticesTotal))
-
- call dmpar_get_owner_list(domain % dminfo, &
- size(indexToCellIDField % array), block_graph_2Halo % nVerticesTotal, &
- indexToCellIDField % array, block_graph_2Halo % vertexID, &
- sendCellList, recvCellList)
-
- call dmpar_alltoall_field(domain % dminfo, edgesOnCellField % array, edgesOnCell_2Halo, &
- maxEdges, nReadCells, block_graph_2Halo % nVerticesTotal, &
- sendCellList, recvCellList)
-
- call dmpar_alltoall_field(domain % dminfo, verticesOnCellField % array, verticesOnCell_2Halo, &
- maxEdges, nReadCells, block_graph_2Halo % nVerticesTotal, &
- sendCellList, recvCellList)
-
-
- !
- ! Get a list of which edges and vertices are adjacent to cells (including halo cells) in block
- !
- call block_decomp_all_edges_in_block(maxEdges, block_graph_2Halo % nVerticesTotal, block_graph_2Halo % nAdjacent, &
- edgesOnCell_2Halo, nlocal_edges, local_edge_list)
- call block_decomp_all_edges_in_block(maxEdges, block_graph_2Halo % nVerticesTotal, block_graph_2Halo % nAdjacent, &
- verticesOnCell_2Halo, nlocal_vertices, local_vertex_list)
-
- call dmpar_get_owner_list(domain % dminfo, &
- size(indexToEdgeIDField % array), nlocal_edges, &
- indexToEdgeIDField % array, local_edge_list, &
- sendEdgeList, recvEdgeList)
-
- call dmpar_get_owner_list(domain % dminfo, &
- size(indexToVertexIDField % array), nlocal_vertices, &
- indexToVertexIDField % array, local_vertex_list, &
- sendVertexList, recvVertexList)
-
-
-
- !
- ! Work out which edges and vertices are owned by this process, and which are ghost
- !
- allocate(cellsOnEdge_2Halo(2,nlocal_edges))
- allocate(cellsOnVertex_2Halo(vertexDegree,nlocal_vertices))
-
- call dmpar_alltoall_field(domain % dminfo, cellsOnEdgeField % array, cellsOnEdge_2Halo, &
- 2, size(cellsOnEdgeField % array, 2), nlocal_edges, &
- sendEdgeList, recvEdgeList)
-
- call dmpar_alltoall_field(domain % dminfo, cellsOnVertexField % array, cellsOnVertex_2Halo, &
- vertexDegree, size(cellsOnVertexField % array, 2), nlocal_vertices, &
- sendVertexList, recvVertexList)
-
-
- call block_decomp_partitioned_edge_list(block_graph_2Halo % nVertices, &
- block_graph_2Halo % vertexID(1:block_graph_2Halo % nVertices), &
- 2, nlocal_edges, cellsOnEdge_2Halo, local_edge_list, ghostEdgeStart)
- call block_decomp_partitioned_edge_list(block_graph_2Halo % nVertices, &
- block_graph_2Halo % vertexID(1:block_graph_2Halo % nVertices), &
- vertexDegree, nlocal_vertices, cellsOnVertex_2Halo, local_vertex_list, ghostVertexStart)
-
-
- ! At this point, local_edge_list(1;ghostEdgeStart-1) contains all of the owned edges for this block
- ! and local_edge_list(ghostEdgeStart:nlocal_edges) contains all of the ghost edges
-
- ! At this point, local_vertex_list(1;ghostVertexStart-1) contains all of the owned vertices for this block
- ! and local_vertex_list(ghostVertexStart:nlocal_vertices) contains all of the ghost vertices
-
- ! Also, at this point, block_graph_2Halo % vertexID(1:block_graph_2Halo%nVertices) contains all of the owned
- ! cells for this block, and block_graph_2Halo % vertexID(block_graph_2Halo%nVertices+1:block_graph_2Halo%nVerticesTotal)
- ! contains all of the ghost cells
-
-
- deallocate(sendEdgeList % list)
- deallocate(sendEdgeList)
- deallocate(recvEdgeList % list)
- deallocate(recvEdgeList)
-
- deallocate(sendVertexList % list)
- deallocate(sendVertexList)
- deallocate(recvVertexList % list)
- deallocate(recvVertexList)
-
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- allocate(xEdge(nlocal_edges))
- allocate(yEdge(nlocal_edges))
- allocate(zEdge(nlocal_edges))
- allocate(xVertex(nlocal_vertices))
- allocate(yVertex(nlocal_vertices))
- allocate(zVertex(nlocal_vertices))
-#endif
-#endif
-
- !
- ! Knowing which edges/vertices are owned by this block and which are actually read
- ! from the input or restart file, we can build exchange lists to perform
- ! all-to-all field exchanges from process that reads a field to the processes that
- ! need them
- !
- call dmpar_get_owner_list(domain % dminfo, &
- size(indexToEdgeIDField % array), nlocal_edges, &
- indexToEdgeIDField % array, local_edge_list, &
- sendEdgeList, recvEdgeList)
-
- call dmpar_get_owner_list(domain % dminfo, &
- size(indexToVertexIDField % array), nlocal_vertices, &
- indexToVertexIDField % array, local_vertex_list, &
- sendVertexList, recvVertexList)
-
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- call dmpar_alltoall_field(domain % dminfo, xEdgeField % array, xEdge, &
- size(xEdgeField % array), nlocal_edges, &
- sendEdgeList, recvEdgeList)
- call dmpar_alltoall_field(domain % dminfo, yEdgeField % array, yEdge, &
- size(yEdgeField % array), nlocal_edges, &
- sendEdgeList, recvEdgeList)
- call dmpar_alltoall_field(domain % dminfo, zEdgeField % array, zEdge, &
- size(zEdgeField % array), nlocal_edges, &
- sendEdgeList, recvEdgeList)
-
- call dmpar_alltoall_field(domain % dminfo, xVertexField % array, xVertex, &
- size(xVertexField % array), nlocal_vertices, &
- sendVertexList, recvVertexList)
- call dmpar_alltoall_field(domain % dminfo, yVertexField % array, yVertex, &
- size(yVertexField % array), nlocal_vertices, &
- sendVertexList, recvVertexList)
- call dmpar_alltoall_field(domain % dminfo, zVertexField % array, zVertex, &
- size(zVertexField % array), nlocal_vertices, &
- sendVertexList, recvVertexList)
- !!!!!!!!!!!!!!!!!!
- !! Reorder edges
- !!!!!!!!!!!!!!!!!!
- call zoltanOrderLocHSFC_Edges(ghostEdgeStart-1,local_edge_list,3,xEdge,yEdge,zEdge)
- !!!!!!!!!!!!!!!!!!
-
- !!!!!!!!!!!!!!!!!!
- !! Reorder vertices
- !!!!!!!!!!!!!!!!!!
- call zoltanOrderLocHSFC_Verts(ghostVertexStart-1,local_vertex_list,3,xVertex,yVertex,zVertex)
- !!!!!!!!!!!!!!!!!!
-
- deallocate(sendEdgeList % list)
- deallocate(sendEdgeList)
- deallocate(recvEdgeList % list)
- deallocate(recvEdgeList)
-
- deallocate(sendVertexList % list)
- deallocate(sendVertexList)
- deallocate(recvVertexList % list)
- deallocate(recvVertexList)
-
- !
- ! Knowing which edges/vertices are owned by this block and which are actually read
- ! from the input or restart file, we can build exchange lists to perform
- ! all-to-all field exchanges from process that reads a field to the processes that
- ! need them
- !
- call dmpar_get_owner_list(domain % dminfo, &
- size(indexToEdgeIDField % array), nlocal_edges, &
- indexToEdgeIDField % array, local_edge_list, &
- sendEdgeList, recvEdgeList)
-
- call dmpar_get_owner_list(domain % dminfo, &
- size(indexToVertexIDField % array), nlocal_vertices, &
- indexToVertexIDField % array, local_vertex_list, &
- sendVertexList, recvVertexList)
-
-#endif
-#endif
-
- !
- ! Build ownership and exchange lists for vertical levels
- ! Essentially, process 0 owns all vertical levels when reading and writing,
- ! and it distributes them or gathers them to/from all other processes
- !
- if (domain % dminfo % my_proc_id == 0) then
- allocate(local_vertlevel_list(nVertLevels))
- do i=1,nVertLevels
- local_vertlevel_list(i) = i
- end do
- else
- allocate(local_vertlevel_list(0))
- end if
- allocate(needed_vertlevel_list(nVertLevels))
- do i=1,nVertLevels
- needed_vertlevel_list(i) = i
- end do
-
- call dmpar_get_owner_list(domain % dminfo, &
- size(local_vertlevel_list), size(needed_vertlevel_list), &
- local_vertlevel_list, needed_vertlevel_list, &
- sendVertLevelList, recvVertLevelList)
-
- deallocate(local_vertlevel_list)
- deallocate(needed_vertlevel_list)
-
-
- !
- ! Read and distribute all fields given ownership lists and exchange lists (maybe already in block?)
- !
- allocate(domain % blocklist)
-
- nCells = block_graph_2Halo % nVerticesTotal
- nEdges = nlocal_edges
- nVertices = nlocal_vertices
-
- call allocate_block(domain % blocklist, domain, &
-#include "dim_dummy_args.inc"
- )
-
- !
- ! Read attributes
- !
- call io_input_get_att_text(input_obj, 'on_a_sphere', c_on_a_sphere)
- call io_input_get_att_real(input_obj, 'sphere_radius', r_sphere_radius)
- if (index(c_on_a_sphere, 'YES') /= 0) then
- domain % blocklist % mesh % on_a_sphere = .true.
- else
- domain % blocklist % mesh % on_a_sphere = .false.
- end if
- domain % blocklist % mesh % sphere_radius = r_sphere_radius
-
- if (.not. config_do_restart) then
- input_obj % time = 1
- else
- input_obj % time = 1
-
- !
- ! If doing a restart, we need to decide which time slice to read from the
- ! restart file
- !
- if (input_obj % rdLocalTime <= 0) then
- write(0,*) 'Error: Couldn''t find any times in restart file.'
- call dmpar_abort(domain % dminfo)
- end if
- if (domain % dminfo % my_proc_id == IO_NODE) then
- allocate(xtime % ioinfo)
- xtime % ioinfo % start(1) = 1
- xtime % ioinfo % count(1) = input_obj % rdLocalTime
- allocate(xtime % array(input_obj % rdLocalTime))
-
- xtime % ioinfo % fieldName = 'xtime'
- call io_input_field(input_obj, xtime)
-
- call MPAS_setTimeInterval(interval=minTimeDiff, DD=10000)
- call MPAS_setTime(curr_time=startTime, dateTimeString=config_start_time)
-
- do i=1,input_obj % rdLocalTime
- call MPAS_setTime(curr_time=sliceTime, dateTimeString=xtime % array(i))
- timeDiff = abs(sliceTime - startTime)
- if (timeDiff < minTimeDiff) then
- minTimeDiff = timeDiff
- input_obj % time = i
- end if
- end do
-
- timeStamp = xtime % array(input_obj % time)
-
- deallocate(xtime % ioinfo)
- deallocate(xtime % array)
- end if
-
- call dmpar_bcast_int(domain % dminfo, input_obj % time)
- call dmpar_bcast_char(domain % dminfo, timeStamp)
-
- write(0,*) 'Restarting model from time ', timeStamp
-
- end if
-
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ! Do the actual work of reading all fields in from the input or restart file
- ! For each field:
- ! 1) Each process reads a contiguous range of cell/edge/vertex indices, which
- ! may not correspond with the cells/edges/vertices that are owned by the
- ! process
- ! 2) All processes then send the global indices that were read to the
- ! processes that own those indices based on
- ! {send,recv}{Cell,Edge,Vertex,VertLevel}List
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- call read_and_distribute_fields(domain % dminfo, input_obj, domain % blocklist, &
- readCellStart, nReadCells, readEdgeStart, nReadEdges, readVertexStart, nReadVertices, &
- readVertLevelStart, nReadVertLevels, &
- sendCellList, recvCellList, sendEdgeList, recvEdgeList, sendVertexList, recvVertexList, &
- sendVertLevelList, recvVertLevelList)
-
-
- call io_input_finalize(input_obj, domain % dminfo)
-
-
- !
- ! Rename vertices in cellsOnCell, edgesOnCell, etc. to local indices
- !
- allocate(cellIDSorted(2,domain % blocklist % mesh % nCells))
- allocate(edgeIDSorted(2,domain % blocklist % mesh % nEdges))
- allocate(vertexIDSorted(2,domain % blocklist % mesh % nVertices))
-
- do i=1,domain % blocklist % mesh % nCells
- cellIDSorted(1,i) = domain % blocklist % mesh % indexToCellID % array(i)
- cellIDSorted(2,i) = i
- end do
- call quicksort(block_graph_2Halo % nVerticesTotal, cellIDSorted)
-
- do i=1,domain % blocklist % mesh % nEdges
- edgeIDSorted(1,i) = domain % blocklist % mesh % indexToEdgeID % array(i)
- edgeIDSorted(2,i) = i
- end do
- call quicksort(nlocal_edges, edgeIDSorted)
-
- do i=1,domain % blocklist % mesh % nVertices
- vertexIDSorted(1,i) = domain % blocklist % mesh % indexToVertexID % array(i)
- vertexIDSorted(2,i) = i
- end do
- call quicksort(nlocal_vertices, vertexIDSorted)
-
-
- do i=1,domain % blocklist % mesh % nCells
- do j=1,domain % blocklist % mesh % nEdgesOnCell % array(i)
-
- k = binary_search(cellIDSorted, 2, 1, domain % blocklist % mesh % nCells, &
- domain % blocklist % mesh % cellsOnCell % array(j,i))
- if (k <= domain % blocklist % mesh % nCells) then
- domain % blocklist % mesh % cellsOnCell % array(j,i) = cellIDSorted(2,k)
- else
- domain % blocklist % mesh % cellsOnCell % array(j,i) = domain % blocklist % mesh % nCells + 1
-! domain % blocklist % mesh % cellsOnCell % array(j,i) = 0
- end if
-
- k = binary_search(edgeIDSorted, 2, 1, domain % blocklist % mesh % nEdges, &
- domain % blocklist % mesh % edgesOnCell % array(j,i))
- if (k <= domain % blocklist % mesh % nEdges) then
- domain % blocklist % mesh % edgesOnCell % array(j,i) = edgeIDSorted(2,k)
- else
- domain % blocklist % mesh % edgesOnCell % array(j,i) = domain % blocklist % mesh % nEdges + 1
-! domain % blocklist % mesh % edgesOnCell % array(j,i) = 0
- end if
-
- k = binary_search(vertexIDSorted, 2, 1, domain % blocklist % mesh % nVertices, &
- domain % blocklist % mesh % verticesOnCell % array(j,i))
- if (k <= domain % blocklist % mesh % nVertices) then
- domain % blocklist % mesh % verticesOnCell % array(j,i) = vertexIDSorted(2,k)
- else
- domain % blocklist % mesh % verticesOnCell % array(j,i) = domain % blocklist % mesh % nVertices + 1
-! domain % blocklist % mesh % verticesOnCell % array(j,i) = 0
- end if
-
- end do
- end do
-
- do i=1,domain % blocklist % mesh % nEdges
- do j=1,2
-
- k = binary_search(cellIDSorted, 2, 1, domain % blocklist % mesh % nCells, &
- domain % blocklist % mesh % cellsOnEdge % array(j,i))
- if (k <= domain % blocklist % mesh % nCells) then
- domain % blocklist % mesh % cellsOnEdge % array(j,i) = cellIDSorted(2,k)
- else
- domain % blocklist % mesh % cellsOnEdge % array(j,i) = domain % blocklist % mesh % nCells + 1
-! domain % blocklist % mesh % cellsOnEdge % array(j,i) = 0
- end if
-
- k = binary_search(vertexIDSorted, 2, 1, domain % blocklist % mesh % nVertices, &
- domain % blocklist % mesh % verticesOnEdge % array(j,i))
- if (k <= domain % blocklist % mesh % nVertices) then
- domain % blocklist % mesh % verticesOnEdge % array(j,i) = vertexIDSorted(2,k)
- else
- domain % blocklist % mesh % verticesOnEdge % array(j,i) = domain % blocklist % mesh % nVertices + 1
-! domain % blocklist % mesh % verticesOnEdge % array(j,i) = 0
- end if
-
- end do
-
- do j=1,domain % blocklist % mesh % nEdgesOnEdge % array(i)
-
- k = binary_search(edgeIDSorted, 2, 1, domain % blocklist % mesh % nEdges, &
- domain % blocklist % mesh % edgesOnEdge % array(j,i))
- if (k <= domain % blocklist % mesh % nEdges) then
- domain % blocklist % mesh % edgesOnEdge % array(j,i) = edgeIDSorted(2,k)
- else
- domain % blocklist % mesh % edgesOnEdge % array(j,i) = domain % blocklist % mesh % nEdges + 1
-! domain % blocklist % mesh % edgesOnEdge % array(j,i) = 0
- end if
-
- end do
- end do
-
- do i=1,domain % blocklist % mesh % nVertices
- do j=1,vertexDegree
-
- k = binary_search(cellIDSorted, 2, 1, domain % blocklist % mesh % nCells, &
- domain % blocklist % mesh % cellsOnVertex % array(j,i))
- if (k <= domain % blocklist % mesh % nCells) then
- domain % blocklist % mesh % cellsOnVertex % array(j,i) = cellIDSorted(2,k)
- else
- domain % blocklist % mesh % cellsOnVertex % array(j,i) = domain % blocklist % mesh % nCells + 1
-! domain % blocklist % mesh % cellsOnVertex % array(j,i) = 0
- end if
-
- k = binary_search(edgeIDSorted, 2, 1, domain % blocklist % mesh % nEdges, &
- domain % blocklist % mesh % edgesOnVertex % array(j,i))
- if (k <= domain % blocklist % mesh % nEdges) then
- domain % blocklist % mesh % edgesOnVertex % array(j,i) = edgeIDSorted(2,k)
- else
- domain % blocklist % mesh % edgesOnVertex % array(j,i) = domain % blocklist % mesh % nEdges + 1
-! domain % blocklist % mesh % edgesOnVertex % array(j,i) = 0
- end if
-
- end do
- end do
-
- deallocate(cellIDSorted)
- deallocate(edgeIDSorted)
- deallocate(vertexIDSorted)
-
-
- !
- ! Work out halo exchange lists for cells, edges, and vertices
- !
- call dmpar_get_owner_list(domain % dminfo, &
- block_graph_2Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
- block_graph_2Halo % vertexID(1:block_graph_2Halo % nVertices), block_graph_2Halo % vertexID, &
- domain % blocklist % parinfo % cellsToSend, domain % blocklist % parinfo % cellsToRecv)
-
- call dmpar_get_owner_list(domain % dminfo, &
- ghostEdgeStart-1, nlocal_edges, &
- local_edge_list(1:ghostEdgeStart-1), local_edge_list, &
- domain % blocklist % parinfo % edgesToSend, domain % blocklist % parinfo % edgesToRecv)
-
- call dmpar_get_owner_list(domain % dminfo, &
- ghostVertexStart-1, nlocal_vertices, &
- local_vertex_list(1:ghostVertexStart-1), local_vertex_list, &
- domain % blocklist % parinfo % verticesToSend, domain % blocklist % parinfo % verticesToRecv)
-
- domain % blocklist % mesh % nCellsSolve = block_graph_2Halo % nVertices
- domain % blocklist % mesh % nEdgesSolve = ghostEdgeStart-1
- domain % blocklist % mesh % nVerticesSolve = ghostVertexStart-1
- domain % blocklist % mesh % nVertLevelsSolve = domain % blocklist % mesh % nVertLevels ! No vertical decomp yet...
-
-
- !
- ! Deallocate fields, graphs, and other memory
- !
- deallocate(indexToCellIDField % ioinfo)
- deallocate(indexToCellIDField % array)
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- deallocate(xCellField % ioinfo)
- deallocate(xCellField % array)
- deallocate(yCellField % ioinfo)
- deallocate(yCellField % array)
- deallocate(zCellField % ioinfo)
- deallocate(zCellField % array)
-#endif
-#endif
- deallocate(indexToEdgeIDField % ioinfo)
- deallocate(indexToEdgeIDField % array)
- deallocate(indexToVertexIDField % ioinfo)
- deallocate(indexToVertexIDField % array)
- deallocate(cellsOnCellField % ioinfo)
- deallocate(cellsOnCellField % array)
- deallocate(edgesOnCellField % ioinfo)
- deallocate(edgesOnCellField % array)
- deallocate(verticesOnCellField % ioinfo)
- deallocate(verticesOnCellField % array)
- deallocate(cellsOnEdgeField % ioinfo)
- deallocate(cellsOnEdgeField % array)
- deallocate(cellsOnVertexField % ioinfo)
- deallocate(cellsOnVertexField % array)
- deallocate(cellsOnCell_0Halo)
- deallocate(nEdgesOnCell_0Halo)
- deallocate(indexToCellID_0Halo)
- deallocate(cellsOnEdge_2Halo)
- deallocate(cellsOnVertex_2Halo)
- deallocate(edgesOnCell_2Halo)
- deallocate(verticesOnCell_2Halo)
- deallocate(block_graph_0Halo % vertexID)
- deallocate(block_graph_0Halo % nAdjacent)
- deallocate(block_graph_0Halo % adjacencyList)
-#ifdef HAVE_ZOLTAN
-#ifdef _MPI
- deallocate(xCell)
- deallocate(yCell)
- deallocate(zCell)
-#endif
-#endif
- end subroutine input_state_for_domain
-
-
- subroutine read_and_distribute_fields(dminfo, input_obj, block, &
- readCellsStart, readCellsCount, &
- readEdgesStart, readEdgesCount, &
- readVerticesStart, readVerticesCount, &
- readVertLevelsStart, readVertLevelsCount, &
- sendCellsList, recvCellsList, &
- sendEdgesList, recvEdgesList, &
- sendVerticesList, recvVerticesList, &
- sendVertLevelsList, recvVertLevelsList)
-
- implicit none
-
- type (dm_info), intent(in) :: dminfo
- type (io_input_object), intent(in) :: input_obj
- type (block_type), intent(inout) :: block
- integer, intent(in) :: readCellsStart, readCellsCount, readEdgesStart, readEdgesCount, readVerticesStart, readVerticesCount
- integer, intent(in) :: readVertLevelsStart, readVertLevelsCount
- type (exchange_list), pointer :: sendCellsList, recvCellsList
- type (exchange_list), pointer :: sendEdgesList, recvEdgesList
- type (exchange_list), pointer :: sendVerticesList, recvVerticesList
- type (exchange_list), pointer :: sendVertLevelsList, recvVertLevelsList
-
- type (field1dInteger) :: int1d
- type (field2dInteger) :: int2d
- type (field0dReal) :: real0d
- type (field1dReal) :: real1d
- type (field2dReal) :: real2d
- type (field3dReal) :: real3d
- type (field0dChar) :: char0d
- type (field1dChar) :: char1d
-
- integer :: i1, i2, i3, i4
-
- integer, dimension(:), pointer :: super_int1d
- integer, dimension(:,:), pointer :: super_int2d
- real (kind=RKIND) :: super_real0d
- real (kind=RKIND), dimension(:), pointer :: super_real1d
- real (kind=RKIND), dimension(:,:), pointer :: super_real2d
- real (kind=RKIND), dimension(:,:,:), pointer :: super_real3d
- character (len=64) :: super_char0d
- character (len=64), dimension(:), pointer :: super_char1d
-
- integer :: i, k
-
-#include "nondecomp_dims.inc"
-
- allocate(int1d % ioinfo)
- allocate(int2d % ioinfo)
- allocate(real0d % ioinfo)
- allocate(real1d % ioinfo)
- allocate(real2d % ioinfo)
- allocate(real3d % ioinfo)
- allocate(char0d % ioinfo)
- allocate(char1d % ioinfo)
-
-
-#include "io_input_fields.inc"
-
-#include "nondecomp_dims_dealloc.inc"
-
- end subroutine read_and_distribute_fields
-
-
-
- subroutine io_input_init(input_obj, dminfo)
-
- implicit none
-
- type (io_input_object), intent(inout) :: input_obj
- type (dm_info), intent(in) :: dminfo
-
- include 'netcdf.inc'
-
- integer :: nferr
-
-
-#ifdef OFFSET64BIT
- nferr = nf_open(trim(input_obj % filename), ior(NF_SHARE,NF_64BIT_OFFSET), input_obj % rd_ncid)
-#else
- nferr = nf_open(trim(input_obj % filename), NF_SHARE, input_obj % rd_ncid)
-#endif
-
- if (nferr /= NF_NOERR) then
- write(0,*) ' '
- if (input_obj % stream == STREAM_RESTART) then
- write(0,*) 'Error opening restart file ''', trim(input_obj % filename), ''''
- else if (input_obj % stream == STREAM_INPUT) then
- write(0,*) 'Error opening input file ''', trim(input_obj % filename), ''''
- else if (input_obj % stream == STREAM_SFC) then
- write(0,*) 'Error opening sfc file ''', trim(input_obj % filename), ''''
- end if
- write(0,*) ' '
- call dmpar_abort(dminfo)
- end if
-
-#include "netcdf_read_ids.inc"
-
- end subroutine io_input_init
-
-
- subroutine io_input_get_dimension(input_obj, dimname, dimsize)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- character (len=*), intent(in) :: dimname
- integer, intent(out) :: dimsize
-
-#include "get_dimension_by_name.inc"
-
- end subroutine io_input_get_dimension
-
-
- subroutine io_input_get_att_real(input_obj, attname, attvalue)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- character (len=*), intent(in) :: attname
- real (kind=RKIND), intent(out) :: attvalue
-
- include 'netcdf.inc'
-
- integer :: nferr
-
- if (RKIND == 8) then
- nferr = nf_get_att_double(input_obj % rd_ncid, NF_GLOBAL, attname, attvalue)
- else
- nferr = nf_get_att_real(input_obj % rd_ncid, NF_GLOBAL, attname, attvalue)
- end if
- if (nferr /= NF_NOERR) then
- write(0,*) 'Warning: Attribute '//trim(attname)//&
- ' not found in '//trim(input_obj % filename)
- if (index(attname, 'sphere_radius') /= 0) then
- write(0,*) ' Setting '//trim(attname)//' to 1.0'
- attvalue = 1.0
- end if
- end if
-
- end subroutine io_input_get_att_real
-
-
- subroutine io_input_get_att_text(input_obj, attname, attvalue)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- character (len=*), intent(in) :: attname
- character (len=*), intent(out) :: attvalue
-
- include 'netcdf.inc'
-
- integer :: nferr
-
- nferr = nf_get_att_text(input_obj % rd_ncid, NF_GLOBAL, attname, attvalue)
- if (nferr /= NF_NOERR) then
- write(0,*) 'Warning: Attribute '//trim(attname)//&
- ' not found in '//trim(input_obj % filename)
- if (index(attname, 'on_a_sphere') /= 0) then
- write(0,*) ' Setting '//trim(attname)//' to ''YES'''
- attvalue = 'YES'
- end if
- end if
-
- end subroutine io_input_get_att_text
-
-
- subroutine io_input_field0dReal(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field0dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(1) :: start1, count1
-
- start1(1) = 1
- count1(1) = 1
-
-#include "input_field0dreal.inc"
-
-#if (RKIND == 8)
- nferr = nf_get_vara_double(input_obj % rd_ncid, varID, start1, count1, field % scalar)
-#else
- nferr = nf_get_vara_real(input_obj % rd_ncid, varID, start1, count1, field % scalar)
-#endif
-
- end subroutine io_input_field0dReal
-
-
- subroutine io_input_field1dReal(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field1dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(1) :: start1, count1
-
- start1(1) = field % ioinfo % start(1)
- count1(1) = field % ioinfo % count(1)
-
- !
- ! Special case: we may want to read the xtime variable across the
- ! time dimension as a 1d array.
- !
- if (trim(field % ioinfo % fieldName) == 'xtime') then
- varID = input_obj % rdVarIDxtime
- end if
-
-#include "input_field1dreal.inc"
-
-#if (RKIND == 8)
- nferr = nf_get_vara_double(input_obj % rd_ncid, varID, start1, count1, field % array)
-#else
- nferr = nf_get_vara_real(input_obj % rd_ncid, varID, start1, count1, field % array)
-#endif
-
- end subroutine io_input_field1dReal
-
-
- subroutine io_input_field2dReal(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field2dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start2, count2
-
- start2(1) = field % ioinfo % start(1)
- start2(2) = field % ioinfo % start(2)
- count2(1) = field % ioinfo % count(1)
- count2(2) = field % ioinfo % count(2)
-
-#include "input_field2dreal.inc"
-
-#if (RKIND == 8)
- nferr = nf_get_vara_double(input_obj % rd_ncid, varID, start2, count2, field % array)
-#else
- nferr = nf_get_vara_real(input_obj % rd_ncid, varID, start2, count2, field % array)
-#endif
-
- end subroutine io_input_field2dReal
-
-
- subroutine io_input_field3dReal(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field3dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(3) :: start3, count3
-
- start3(1) = field % ioinfo % start(1)
- start3(2) = field % ioinfo % start(2)
- start3(3) = field % ioinfo % start(3)
- count3(1) = field % ioinfo % count(1)
- count3(2) = field % ioinfo % count(2)
- count3(3) = field % ioinfo % count(3)
-
-#include "input_field3dreal.inc"
-
-#if (RKIND == 8)
- nferr = nf_get_vara_double(input_obj % rd_ncid, varID, start3, count3, field % array)
-#else
- nferr = nf_get_vara_real(input_obj % rd_ncid, varID, start3, count3, field % array)
-#endif
-
- end subroutine io_input_field3dReal
-
-
- subroutine io_input_field0dReal_time(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field0dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(1) :: start1, count1
-
- start1(1) = input_obj % time
- count1(1) = 1
-
-#include "input_field0dreal_time.inc"
-
-#if (RKIND == 8)
- nferr = nf_get_vara_double(input_obj % rd_ncid, varID, start1, count1, field % scalar)
-#else
- nferr = nf_get_vara_real(input_obj % rd_ncid, varID, start1, count1, field % scalar)
-#endif
-
- end subroutine io_input_field0dReal_time
-
-
- subroutine io_input_field1dReal_time(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field1dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start2, count2
-
- start2(1) = field % ioinfo % start(1)
- start2(2) = input_obj % time
- count2(1) = field % ioinfo % count(1)
- count2(2) = 1
-
-#include "input_field1dreal_time.inc"
-
-#if (RKIND == 8)
- nferr = nf_get_vara_double(input_obj % rd_ncid, varID, start2, count2, field % array)
-#else
- nferr = nf_get_vara_real(input_obj % rd_ncid, varID, start2, count2, field % array)
-#endif
-
- end subroutine io_input_field1dReal_time
-
-
- subroutine io_input_field2dReal_time(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field2dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(3) :: start3, count3
-
- start3(1) = field % ioinfo % start(1)
- start3(2) = field % ioinfo % start(2)
- start3(3) = input_obj % time
- count3(1) = field % ioinfo % count(1)
- count3(2) = field % ioinfo % count(2)
- count3(3) = 1
-
-#include "input_field2dreal_time.inc"
-
-#if (RKIND == 8)
- nferr = nf_get_vara_double(input_obj % rd_ncid, varID, start3, count3, field % array)
-#else
- nferr = nf_get_vara_real(input_obj % rd_ncid, varID, start3, count3, field % array)
-#endif
-
- end subroutine io_input_field2dReal_time
-
-
- subroutine io_input_field3dReal_time(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field3dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(4) :: start4, count4
-
- start4(1) = field % ioinfo % start(1)
- start4(2) = field % ioinfo % start(2)
- start4(3) = field % ioinfo % start(3)
- start4(4) = input_obj % time
- count4(1) = field % ioinfo % count(1)
- count4(2) = field % ioinfo % count(2)
- count4(3) = field % ioinfo % count(3)
- count4(4) = 1
-
-#include "input_field3dreal_time.inc"
-
-#if (RKIND == 8)
- nferr = nf_get_vara_double(input_obj % rd_ncid, varID, start4, count4, field % array)
-#else
- nferr = nf_get_vara_real(input_obj % rd_ncid, varID, start4, count4, field % array)
-#endif
-
- end subroutine io_input_field3dReal_time
-
-
- subroutine io_input_field1dInteger(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field1dInteger), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(1) :: start1, count1
-
- start1(1) = field % ioinfo % start(1)
- count1(1) = field % ioinfo % count(1)
-
-#include "input_field1dinteger.inc"
-
- nferr = nf_get_vara_int(input_obj % rd_ncid, varID, start1, count1, field % array)
-
- end subroutine io_input_field1dInteger
-
-
- subroutine io_input_field2dInteger(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field2dInteger), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start2, count2
-
- start2(1) = field % ioinfo % start(1)
- start2(2) = field % ioinfo % start(2)
- count2(1) = field % ioinfo % count(1)
- count2(2) = field % ioinfo % count(2)
-
-#include "input_field2dinteger.inc"
-
- nferr = nf_get_vara_int(input_obj % rd_ncid, varID, start2, count2, field % array)
-
- end subroutine io_input_field2dInteger
-
-
- subroutine io_input_field1dInteger_time(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field1dInteger), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start2, count2
-
- start2(1) = field % ioinfo % start(1)
- start2(2) = input_obj % time
- count2(1) = field % ioinfo % count(1)
- count2(2) = 1
-
-#include "input_field1dinteger_time.inc"
-
- nferr = nf_get_vara_int(input_obj % rd_ncid, varID, start2, count2, field % array)
-
- end subroutine io_input_field1dInteger_time
-
-
- subroutine io_input_field0dChar_time(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field0dChar), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start1, count1
-
- start1(1) = 1
- count1(1) = 64
- start1(2) = input_obj % time
- count1(2) = 1
-
-#include "input_field0dchar_time.inc"
-
- nferr = nf_get_vara_text(input_obj % rd_ncid, varID, start1, count1, field % scalar)
-
- end subroutine io_input_field0dChar_time
-
-
- subroutine io_input_field1dChar_time(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field1dChar), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(3) :: start2, count2
-
- start2(1) = 1
- start2(2) = field % ioinfo % start(1)
- start2(3) = input_obj % time
- count2(1) = 64
- count2(2) = field % ioinfo % count(1)
- count2(3) = 1
-
-#include "input_field1dchar_time.inc"
-
- nferr = nf_get_vara_text(input_obj % rd_ncid, varID, start2, count2, field % array)
-
- end subroutine io_input_field1dChar_time
-
-
- subroutine io_input_field0dChar(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field0dChar), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start1, count1
-
- start1(1) = 1
- count1(1) = 64
- start1(2) = 1
- count1(2) = 1
-
-#include "input_field0dchar.inc"
-
- nferr = nf_get_vara_text(input_obj % rd_ncid, varID, start1, count1, field % scalar)
-
- end subroutine io_input_field0dChar
-
-
- subroutine io_input_field1dChar(input_obj, field)
-
- implicit none
-
- type (io_input_object), intent(in) :: input_obj
- type (field1dChar), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start1, count1
-
- start1(1) = 1
- count1(1) = 64
- start1(2) = field % ioinfo % start(1)
- count1(2) = field % ioinfo % count(1)
-
- !
- ! Special case: we may want to read the xtime variable across the
- ! time dimension as a 1d array.
- !
- if (trim(field % ioinfo % fieldName) == 'xtime') then
- varID = input_obj % rdVarIDxtime
- end if
-
-#include "input_field1dchar.inc"
-
- nferr = nf_get_vara_text(input_obj % rd_ncid, varID, start1, count1, field % array)
-
- end subroutine io_input_field1dChar
-
-
- subroutine io_input_finalize(input_obj, dminfo)
-
- implicit none
-
- type (io_input_object), intent(inout) :: input_obj
- type (dm_info), intent(in) :: dminfo
-
- include 'netcdf.inc'
-
- integer :: nferr
-
- nferr = nf_close(input_obj % rd_ncid)
-
- end subroutine io_input_finalize
-
-end module io_input
Deleted: branches/source_renaming/src/framework/module_io_output.F
===================================================================
--- branches/source_renaming/src/framework/module_io_output.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/framework/module_io_output.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,866 +0,0 @@
-module io_output
-
- use grid_types
- use dmpar
- use sort
- use configure
-
- integer, parameter :: OUTPUT = 1
- integer, parameter :: RESTART = 2
- integer, parameter :: SFC = 3
-
- type io_output_object
- integer :: wr_ncid
- character (len=1024) :: filename
-
- integer :: time
-
- integer :: stream
-
- integer :: wrDimIDStrLen
-#include "io_output_obj_decls.inc"
-
- logical :: validExchangeLists
- type (exchange_list), pointer :: sendCellsList, recvCellsList
- type (exchange_list), pointer :: sendEdgesList, recvEdgesList
- type (exchange_list), pointer :: sendVerticesList, recvVerticesList
- type (exchange_list), pointer :: sendVertLevelsList, recvVertLevelsList
- end type io_output_object
-
-
- interface io_output_field
- module procedure io_output_field0dReal
- module procedure io_output_field1dReal
- module procedure io_output_field2dReal
- module procedure io_output_field3dReal
- module procedure io_output_field1dInteger
- module procedure io_output_field2dInteger
- module procedure io_output_field0dChar
- module procedure io_output_field1dChar
- end interface io_output_field
-
- interface io_output_field_time
- module procedure io_output_field0dReal_time
- module procedure io_output_field1dReal_time
- module procedure io_output_field2dReal_time
- module procedure io_output_field3dReal_time
- module procedure io_output_field1dInteger_time
- module procedure io_output_field0dChar_time
- module procedure io_output_field1dChar_time
- end interface io_output_field_time
-
-
- contains
-
-
- subroutine output_state_init(output_obj, domain, stream, outputSuffix)
-
- implicit none
-
- type (io_output_object), intent(inout) :: output_obj
- type (domain_type), intent(in) :: domain
- character (len=*) :: stream
- character (len=*), optional :: outputSuffix
-
- character (len=128) :: tempfilename
-
- type (block_type), pointer :: block_ptr
-#include "output_dim_actual_decls.inc"
-
- block_ptr => domain % blocklist
- nullify(output_obj % sendCellsList)
- nullify(output_obj % recvCellsList)
- nullify(output_obj % sendEdgesList)
- nullify(output_obj % recvEdgesList)
- nullify(output_obj % sendVerticesList)
- nullify(output_obj % recvVerticesList)
- nullify(output_obj % sendVertLevelsList)
- nullify(output_obj % recvVertLevelsList)
- output_obj % validExchangeLists = .false.
-
-#include "output_dim_inits.inc"
-
- call dmpar_sum_int(domain % dminfo, block_ptr % mesh % nCellsSolve, nCellsGlobal)
- call dmpar_sum_int(domain % dminfo, block_ptr % mesh % nEdgesSolve, nEdgesGlobal)
- call dmpar_sum_int(domain % dminfo, block_ptr % mesh % nVerticesSolve, nVerticesGlobal)
- nVertLevelsGlobal = block_ptr % mesh % nVertLevels
-
- if (trim(stream) == 'OUTPUT') then
- if(present(outputSuffix)) then
- call insert_string_suffix(config_output_name, outputSuffix, tempfilename)
- else
- tempfilename = config_output_name
- end if
- output_obj % filename = trim(tempfilename)
- output_obj % stream = OUTPUT
- else if (trim(stream) == 'RESTART') then
- output_obj % filename = trim(config_restart_name)
- output_obj % stream = RESTART
- else if (trim(stream) == 'SFC') then
- ! Keep filename as whatever was set by the user
- output_obj % stream = SFC
- end if
-
- ! For now, we assume that a domain consists only of one block,
- ! although in future, work needs to be done to write model state
- ! from many distributed blocks
- call io_output_init(output_obj, domain % dminfo, &
- block_ptr % mesh, &
-#include "output_dim_actual_args.inc"
- )
-
- end subroutine output_state_init
-
-
- subroutine insert_string_suffix(stream, suffix, filename)
-
- implicit none
-
- character (len=*), intent(in) :: stream
- character (len=*), intent(in) :: suffix
- character (len=*), intent(out) :: filename
- integer :: length, i
-
- filename = trim(stream) // '.' // trim(suffix)
-
- length = len_trim(stream)
- do i=length-1,1,-1
- if(stream(i:i) == '.') then
- filename = trim(stream(:i)) // trim(suffix) // trim(stream(i:))
- exit
- end if
- end do
-
- end subroutine insert_string_suffix
-
-
- subroutine output_state_for_domain(output_obj, domain, itime)
-
- implicit none
-
- type (io_output_object), intent(inout) :: output_obj
- type (domain_type), intent(inout) :: domain
- integer, intent(in) :: itime
-
- integer :: i, j
- integer :: nCellsGlobal
- integer :: nEdgesGlobal
- integer :: nVerticesGlobal
- integer :: nVertLevelsGlobal
- integer, dimension(:), pointer :: neededCellList
- integer, dimension(:), pointer :: neededEdgeList
- integer, dimension(:), pointer :: neededVertexList
- integer, dimension(:), pointer :: neededVertLevelList
- integer, dimension(:,:), pointer :: cellsOnCell, edgesOnCell, verticesOnCell, &
- cellsOnEdge, verticesOnEdge, edgesOnEdge, cellsOnVertex, edgesOnVertex
- integer, dimension(:,:), pointer :: cellsOnCell_save, edgesOnCell_save, verticesOnCell_save, &
- cellsOnEdge_save, verticesOnEdge_save, edgesOnEdge_save, &
- cellsOnVertex_save, edgesOnVertex_save
- type (field1dInteger) :: int1d
- type (field2dInteger) :: int2d
- type (field0dReal) :: real0d
- type (field1dReal) :: real1d
- type (field2dReal) :: real2d
- type (field3dReal) :: real3d
- type (field0dChar) :: char0d
- type (field1dChar) :: char1d
-
- integer :: i1, i2, i3, i4
-
- integer, dimension(:), pointer :: super_int1d
- integer, dimension(:,:), pointer :: super_int2d
- real (kind=RKIND) :: super_real0d
- real (kind=RKIND), dimension(:), pointer :: super_real1d
- real (kind=RKIND), dimension(:,:), pointer :: super_real2d
- real (kind=RKIND), dimension(:,:,:), pointer :: super_real3d
- character (len=64) :: super_char0d
- character (len=64), dimension(:), pointer :: super_char1d
-
-#include "nondecomp_outputs.inc"
-
- output_obj % time = itime
-
- allocate(int1d % ioinfo)
- allocate(int2d % ioinfo)
- allocate(real0d % ioinfo)
- allocate(real1d % ioinfo)
- allocate(real2d % ioinfo)
- allocate(real3d % ioinfo)
- allocate(char0d % ioinfo)
- allocate(char1d % ioinfo)
-
- call dmpar_sum_int(domain % dminfo, domain % blocklist % mesh % nCellsSolve, nCellsGlobal)
- call dmpar_sum_int(domain % dminfo, domain % blocklist % mesh % nEdgesSolve, nEdgesGlobal)
- call dmpar_sum_int(domain % dminfo, domain % blocklist % mesh % nVerticesSolve, nVerticesGlobal)
- nVertLevelsGlobal = domain % blocklist % mesh % nVertLevels
-
- allocate(cellsOnCell(domain % blocklist % mesh % maxEdges, domain % blocklist % mesh % nCellsSolve))
- allocate(edgesOnCell(domain % blocklist % mesh % maxEdges, domain % blocklist % mesh % nCellsSolve))
- allocate(verticesOnCell(domain % blocklist % mesh % maxEdges, domain % blocklist % mesh % nCellsSolve))
- allocate(cellsOnEdge(2, domain % blocklist % mesh % nEdgesSolve))
- allocate(verticesOnEdge(2, domain % blocklist % mesh % nEdgesSolve))
- allocate(edgesOnEdge(2 * domain % blocklist % mesh % maxEdges, domain % blocklist % mesh % nEdgesSolve))
- allocate(cellsOnVertex(domain % blocklist % mesh % vertexDegree, domain % blocklist % mesh % nVerticesSolve))
- allocate(edgesOnVertex(domain % blocklist % mesh % vertexDegree, domain % blocklist % mesh % nVerticesSolve))
-
-
- !
- ! Convert connectivity information from local to global indices
- !
- do i=1,domain % blocklist % mesh % nCellsSolve
- do j=1,domain % blocklist % mesh % nEdgesOnCell % array(i)
- cellsOnCell(j,i) = domain % blocklist % mesh % indexToCellID % array( &
- domain % blocklist % mesh % cellsOnCell % array(j,i))
- edgesOnCell(j,i) = domain % blocklist % mesh % indexToEdgeID % array( &
- domain % blocklist % mesh % edgesOnCell % array(j,i))
- verticesOnCell(j,i) = domain % blocklist % mesh % indexToVertexID % array( &
- domain % blocklist % mesh % verticesOnCell % array(j,i))
- end do
- do j=domain % blocklist % mesh % nEdgesOnCell % array(i)+1,domain % blocklist % mesh % maxEdges
- cellsOnCell(j,i) = domain % blocklist % mesh % indexToCellID % array( &
- domain % blocklist % mesh % nEdgesOnCell % array(i))
- edgesOnCell(j,i) = domain % blocklist % mesh % indexToEdgeID % array( &
- domain % blocklist % mesh % nEdgesOnCell % array(i))
- verticesOnCell(j,i) = domain % blocklist % mesh % indexToVertexID % array( &
- domain % blocklist % mesh % nEdgesOnCell % array(i))
- end do
- end do
- do i=1,domain % blocklist % mesh % nEdgesSolve
- cellsOnEdge(1,i) = domain % blocklist % mesh % indexToCellID % array(domain % blocklist % mesh % cellsOnEdge % array(1,i))
- cellsOnEdge(2,i) = domain % blocklist % mesh % indexToCellID % array(domain % blocklist % mesh % cellsOnEdge % array(2,i))
- verticesOnEdge(1,i) = domain % blocklist % mesh % indexToVertexID % array( &
- domain % blocklist % mesh % verticesOnEdge % array(1,i))
- verticesOnEdge(2,i) = domain % blocklist % mesh % indexToVertexID % array( &
- domain % blocklist % mesh % verticesOnEdge % array(2,i))
- do j=1,domain % blocklist % mesh % nEdgesOnEdge % array(i)
- edgesOnEdge(j,i) = domain % blocklist % mesh % indexToEdgeID % array( &
- domain % blocklist % mesh % edgesOnEdge % array(j,i))
- end do
- do j=domain % blocklist % mesh % nEdgesOnEdge % array(i)+1,2*domain % blocklist % mesh % maxEdges
- if(domain % blocklist % mesh % nEdgesOnEdge % array(i) .eq. 0) then
- edgesOnEdge(j,i) = domain % blocklist % mesh % nEdgesSolve + 1
- else
- edgesOnEdge(j,i) = domain % blocklist % mesh % indexToEdgeID % array( &
- domain % blocklist % mesh % nEdgesOnEdge % array(i))
- endif
- end do
- end do
- do i=1,domain % blocklist % mesh % nVerticesSolve
- do j=1,domain % blocklist % mesh % vertexDegree
- cellsOnVertex(j,i) = domain % blocklist % mesh % indexToCellID % array( &
- domain % blocklist % mesh % cellsOnVertex % array(j,i))
- edgesOnVertex(j,i) = domain % blocklist % mesh % indexToEdgeID % array( &
- domain % blocklist % mesh % edgesOnVertex % array(j,i))
- end do
- end do
-
- if (domain % dminfo % my_proc_id == 0) then
- allocate(neededCellList(nCellsGlobal))
- allocate(neededEdgeList(nEdgesGlobal))
- allocate(neededVertexList(nVerticesGlobal))
- allocate(neededVertLevelList(nVertLevelsGlobal))
- do i=1,nCellsGlobal
- neededCellList(i) = i
- end do
- do i=1,nEdgesGlobal
- neededEdgeList(i) = i
- end do
- do i=1,nVerticesGlobal
- neededVertexList(i) = i
- end do
- do i=1,nVertLevelsGlobal
- neededVertLevelList(i) = i
- end do
- else
- allocate(neededCellList(0))
- allocate(neededEdgeList(0))
- allocate(neededVertexList(0))
- allocate(neededVertLevelList(0))
- end if
-
- if (.not. output_obj % validExchangeLists) then
- call dmpar_get_owner_list(domain % dminfo, &
- domain % blocklist % mesh % nCellsSolve, size(neededCellList), &
- domain % blocklist % mesh % indexToCellID % array, neededCellList, &
- output_obj % sendCellsList, output_obj % recvCellsList)
-
- call dmpar_get_owner_list(domain % dminfo, &
- domain % blocklist % mesh % nEdgesSolve, size(neededEdgeList), &
- domain % blocklist % mesh % indexToEdgeID % array, neededEdgeList, &
- output_obj % sendEdgesList, output_obj % recvEdgesList)
-
- call dmpar_get_owner_list(domain % dminfo, &
- domain % blocklist % mesh % nVerticesSolve, size(neededVertexList), &
- domain % blocklist % mesh % indexToVertexID % array, neededVertexList, &
- output_obj % sendVerticesList, output_obj % recvVerticesList)
-
- call dmpar_get_owner_list(domain % dminfo, &
- size(neededVertLevelList), size(neededVertLevelList), &
- neededVertLevelList, neededVertLevelList, &
- output_obj % sendVertLevelsList, output_obj % recvVertLevelsList)
-
- output_obj % validExchangeLists = .true.
- end if
-
- deallocate(neededCellList)
- deallocate(neededEdgeList)
- deallocate(neededVertexList)
-
- cellsOnCell_save => domain % blocklist % mesh % cellsOnCell % array
- edgesOnCell_save => domain % blocklist % mesh % edgesOnCell % array
- verticesOnCell_save => domain % blocklist % mesh % verticesOnCell % array
- cellsOnEdge_save => domain % blocklist % mesh % cellsOnEdge % array
- verticesOnEdge_save => domain % blocklist % mesh % verticesOnEdge % array
- edgesOnEdge_save => domain % blocklist % mesh % edgesOnEdge % array
- cellsOnVertex_save => domain % blocklist % mesh % cellsOnVertex % array
- edgesOnVertex_save => domain % blocklist % mesh % edgesOnVertex % array
-
- domain % blocklist % mesh % cellsOnCell % array => cellsOnCell
- domain % blocklist % mesh % edgesOnCell % array => edgesOnCell
- domain % blocklist % mesh % verticesOnCell % array => verticesOnCell
- domain % blocklist % mesh % cellsOnEdge % array => cellsOnEdge
- domain % blocklist % mesh % verticesOnEdge % array => verticesOnEdge
- domain % blocklist % mesh % edgesOnEdge % array => edgesOnEdge
- domain % blocklist % mesh % cellsOnVertex % array => cellsOnVertex
- domain % blocklist % mesh % edgesOnVertex % array => edgesOnVertex
-
-#include "io_output_fields.inc"
-
- domain % blocklist % mesh % cellsOnCell % array => cellsOnCell_save
- domain % blocklist % mesh % edgesOnCell % array => edgesOnCell_save
- domain % blocklist % mesh % verticesOnCell % array => verticesOnCell_save
- domain % blocklist % mesh % cellsOnEdge % array => cellsOnEdge_save
- domain % blocklist % mesh % verticesOnEdge % array => verticesOnEdge_save
- domain % blocklist % mesh % edgesOnEdge % array => edgesOnEdge_save
- domain % blocklist % mesh % cellsOnVertex % array => cellsOnVertex_save
- domain % blocklist % mesh % edgesOnVertex % array => edgesOnVertex_save
-
- deallocate(cellsOnCell)
- deallocate(edgesOnCell)
- deallocate(verticesOnCell)
- deallocate(cellsOnEdge)
- deallocate(verticesOnEdge)
- deallocate(edgesOnEdge)
- deallocate(cellsOnVertex)
- deallocate(edgesOnVertex)
-
-#include "nondecomp_outputs_dealloc.inc"
-
- end subroutine output_state_for_domain
-
-
- subroutine output_state_finalize(output_obj, dminfo)
-
- implicit none
-
- type (io_output_object), intent(inout) :: output_obj
- type (dm_info), intent(in) :: dminfo
-
- call io_output_finalize(output_obj, dminfo)
-
- end subroutine output_state_finalize
-
-
- subroutine io_output_init( output_obj, &
- dminfo, &
- mesh, &
-#include "dim_dummy_args.inc"
- )
-
- implicit none
-
- include 'netcdf.inc'
-
- type (io_output_object), intent(inout) :: output_obj
- type (dm_info), intent(in) :: dminfo
- type (mesh_type), intent(in) :: mesh
-#include "dim_dummy_decls.inc"
-
- integer :: nferr
- integer, dimension(10) :: dimlist
-
- if (dminfo % my_proc_id == 0) then
-#ifdef OFFSET64BIT
- nferr = nf_create(trim(output_obj % filename), ior(NF_CLOBBER,NF_64BIT_OFFSET), output_obj % wr_ncid)
-#else
- nferr = nf_create(trim(output_obj % filename), NF_CLOBBER, output_obj % wr_ncid)
-#endif
-
- nferr = nf_def_dim(output_obj % wr_ncid, 'StrLen', 64, output_obj % wrDimIDStrLen)
-#include "netcdf_def_dims_vars.inc"
-
- if (mesh % on_a_sphere) then
- nferr = nf_put_att_text(output_obj % wr_ncid, NF_GLOBAL, 'on_a_sphere', 16, 'YES ')
- else
- nferr = nf_put_att_text(output_obj % wr_ncid, NF_GLOBAL, 'on_a_sphere', 16, 'NO ')
- end if
- if (RKIND == 8) then
- nferr = nf_put_att_double(output_obj % wr_ncid, NF_GLOBAL, 'sphere_radius', NF_DOUBLE, 1, mesh % sphere_radius)
- else
- nferr = nf_put_att_real(output_obj % wr_ncid, NF_GLOBAL, 'sphere_radius', NF_FLOAT, 1, mesh % sphere_radius)
- end if
-
- nferr = nf_enddef(output_obj % wr_ncid)
- end if
-
- end subroutine io_output_init
-
-
- subroutine io_output_field0dReal(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field0dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(1) :: start1, count1
-
- start1(1) = 1
- count1(1) = 1
-
-#include "output_field0dreal.inc"
-
-#if (RKIND == 8)
- nferr = nf_put_vara_double(output_obj % wr_ncid, varID, start1, count1, field % scalar)
-#else
- nferr = nf_put_vara_real(output_obj % wr_ncid, varID, start1, count1, field % scalar)
-#endif
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine io_output_field0dReal
-
-
- subroutine io_output_field1dReal(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field1dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(1) :: start1, count1
-
- start1(1) = field % ioinfo % start(1)
- count1(1) = field % ioinfo % count(1)
-
-#include "output_field1dreal.inc"
-
-#if (RKIND == 8)
- nferr = nf_put_vara_double(output_obj % wr_ncid, VarID, start1, count1, field % array)
-#else
- nferr = nf_put_vara_real(output_obj % wr_ncid, VarID, start1, count1, field % array)
-#endif
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine io_output_field1dReal
-
-
- subroutine io_output_field2dReal(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field2dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start2, count2
-
- start2(1) = field % ioinfo % start(1)
- start2(2) = field % ioinfo % start(2)
- count2(1) = field % ioinfo % count(1)
- count2(2) = field % ioinfo % count(2)
-
-#include "output_field2dreal.inc"
-
-#if (RKIND == 8)
- nferr = nf_put_vara_double(output_obj % wr_ncid, varID, start2, count2, field % array)
-#else
- nferr = nf_put_vara_real(output_obj % wr_ncid, varID, start2, count2, field % array)
-#endif
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine io_output_field2dReal
-
-
- subroutine io_output_field3dReal(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field3dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(3) :: start3, count3
-
- start3(1) = field % ioinfo % start(1)
- start3(2) = field % ioinfo % start(2)
- start3(3) = field % ioinfo % start(3)
- count3(1) = field % ioinfo % count(1)
- count3(2) = field % ioinfo % count(2)
- count3(3) = field % ioinfo % count(3)
-
-#include "output_field3dreal.inc"
-
-#if (RKIND == 8)
- nferr = nf_put_vara_double(output_obj % wr_ncid, varID, start3, count3, field % array)
-#else
- nferr = nf_put_vara_real(output_obj % wr_ncid, varID, start3, count3, field % array)
-#endif
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine io_output_field3dReal
-
-
- subroutine io_output_field0dReal_time(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field0dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(1) :: start1, count1
-
- start1(1) = output_obj % time
- count1(1) = 1
-
-#include "output_field0dreal_time.inc"
-
-#if (RKIND == 8)
- nferr = nf_put_vara_double(output_obj % wr_ncid, varID, start1, count1, field % scalar)
-#else
- nferr = nf_put_vara_real(output_obj % wr_ncid, varID, start1, count1, field % scalar)
-#endif
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine io_output_field0dReal_time
-
-
- subroutine io_output_field1dReal_time(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field1dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start2, count2
-
- start2(1) = field % ioinfo % start(1)
- start2(2) = output_obj % time
- count2(1) = field % ioinfo % count(1)
- count2(2) = 1
-
-#include "output_field1dreal_time.inc"
-
-#if (RKIND == 8)
- nferr = nf_put_vara_double(output_obj % wr_ncid, varID, start2, count2, field % array)
-#else
- nferr = nf_put_vara_real(output_obj % wr_ncid, varID, start2, count2, field % array)
-#endif
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine io_output_field1dReal_time
-
-
- subroutine io_output_field2dReal_time(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field2dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(3) :: start3, count3
-
- start3(1) = field % ioinfo % start(1)
- start3(2) = field % ioinfo % start(2)
- start3(3) = output_obj % time
- count3(1) = field % ioinfo % count(1)
- count3(2) = field % ioinfo % count(2)
- count3(3) = 1
-
-#include "output_field2dreal_time.inc"
-
-#if (RKIND == 8)
- nferr = nf_put_vara_double(output_obj % wr_ncid, varID, start3, count3, field % array)
-#else
- nferr = nf_put_vara_real(output_obj % wr_ncid, varID, start3, count3, field % array)
-#endif
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine io_output_field2dReal_time
-
-
- subroutine io_output_field3dReal_time(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field3dReal), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(4) :: start4, count4
-
- start4(1) = field % ioinfo % start(1)
- start4(2) = field % ioinfo % start(2)
- start4(3) = field % ioinfo % start(3)
- start4(4) = output_obj % time
- count4(1) = field % ioinfo % count(1)
- count4(2) = field % ioinfo % count(2)
- count4(3) = field % ioinfo % count(3)
- count4(4) = 1
-
-#include "output_field3dreal_time.inc"
-
-#if (RKIND == 8)
- nferr = nf_put_vara_double(output_obj % wr_ncid, varID, start4, count4, field % array)
-#else
- nferr = nf_put_vara_real(output_obj % wr_ncid, varID, start4, count4, field % array)
-#endif
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine io_output_field3dReal_time
-
-
- subroutine io_output_field1dInteger(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field1dInteger), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(1) :: start1, count1
-
- start1(1) = field % ioinfo % start(1)
- count1(1) = field % ioinfo % count(1)
-
-#include "output_field1dinteger.inc"
-
- nferr = nf_put_vara_int(output_obj % wr_ncid, varID, start1, count1, field % array)
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine io_output_field1dInteger
-
-
- subroutine io_output_field2dInteger(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field2dInteger), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start2, count2
-
- start2(1) = field % ioinfo % start(1)
- start2(2) = field % ioinfo % start(2)
- count2(1) = field % ioinfo % count(1)
- count2(2) = field % ioinfo % count(2)
-
-#include "output_field2dinteger.inc"
-
- nferr = nf_put_vara_int(output_obj % wr_ncid, varID, start2, count2, field % array)
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine io_output_field2dInteger
-
-
- subroutine io_output_field1dInteger_time(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field1dInteger), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start2, count2
-
- start2(1) = field % ioinfo % start(1)
- start2(2) = output_obj % time
- count2(1) = field % ioinfo % count(1)
- count2(2) = 1
-
-#include "output_field1dinteger_time.inc"
-
- nferr = nf_put_vara_int(output_obj % wr_ncid, varID, start2, count2, field % array)
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine io_output_field1dInteger_time
-
-
- subroutine io_output_field0dChar_time(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field0dChar), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start1, count1
-
- start1(1) = 1
- count1(1) = 64
- start1(2) = output_obj % time
- count1(2) = 1
-
-#include "output_field0dchar_time.inc"
-
- nferr = nf_put_vara_text(output_obj % wr_ncid, varID, start1, count1, field % scalar)
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine io_output_field0dChar_time
-
-
- subroutine io_output_field1dChar_time(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field1dChar), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(3) :: start2, count2
-
- start2(1) = 1
- start2(2) = field % ioinfo % start(1)
- start2(3) = output_obj % time
- count2(1) = 64
- count2(2) = field % ioinfo % count(1)
- count2(3) = 1
-
-#include "output_field1dchar_time.inc"
-
- nferr = nf_put_vara_text(output_obj % wr_ncid, varID, start2, count2, field % array)
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine io_output_field1dChar_time
-
-
- subroutine io_output_field0dChar(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field0dChar), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start1, count1
-
- start1(1) = 1
- count1(1) = 64
- start1(2) = 1
- count1(2) = 1
-
-#include "output_field0dchar.inc"
-
- nferr = nf_put_vara_text(output_obj % wr_ncid, varID, start1, count1, field % scalar)
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine io_output_field0dChar
-
-
- subroutine io_output_field1dChar(output_obj, field)
-
- implicit none
-
- type (io_output_object), intent(in) :: output_obj
- type (field1dChar), intent(inout) :: field
-
- include 'netcdf.inc'
-
- integer :: nferr
- integer :: varID
- integer, dimension(2) :: start1, count1
-
- start1(1) = 1
- count1(1) = 64
- start1(2) = field % ioinfo % start(1)
- count1(2) = field % ioinfo % count(1)
-
-#include "output_field1dchar.inc"
-
- nferr = nf_put_vara_text(output_obj % wr_ncid, VarID, start1, count1, field % array)
-
- nferr = nf_sync(output_obj % wr_ncid)
-
- end subroutine io_output_field1dChar
-
-
- subroutine io_output_finalize(output_obj, dminfo)
-
- implicit none
-
- include 'netcdf.inc'
-
- type (io_output_object), intent(inout) :: output_obj
- type (dm_info), intent(in) :: dminfo
-
- integer :: nferr
-
- if (dminfo % my_proc_id == 0) then
- nferr = nf_close(output_obj % wr_ncid)
- end if
-
- end subroutine io_output_finalize
-
-end module io_output
-
Deleted: branches/source_renaming/src/framework/module_mpas_framework.F
===================================================================
--- branches/source_renaming/src/framework/module_mpas_framework.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/framework/module_mpas_framework.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,49 +0,0 @@
-module mpas_framework
-
- use dmpar
- use grid_types
- use io_input
- use io_output
- use configure
- use timer
- use mpas_timekeeping
-
-
- contains
-
-
- subroutine mpas_framework_init(dminfo, domain)
-
- implicit none
-
- type (dm_info), pointer :: dminfo
- type (domain_type), pointer :: domain
-
- allocate(dminfo)
- call dmpar_init(dminfo)
-
- call read_namelist(dminfo)
-
- call allocate_domain(domain, dminfo)
-
- call mpas_timekeeping_init(config_calendar_type)
-
- end subroutine mpas_framework_init
-
-
- subroutine mpas_framework_finalize(dminfo, domain)
-
- implicit none
-
- type (dm_info), pointer :: dminfo
- type (domain_type), pointer :: domain
-
- call deallocate_domain(domain)
-
- call dmpar_finalize(dminfo)
-
- call mpas_timekeeping_finalize()
-
- end subroutine mpas_framework_finalize
-
-end module mpas_framework
Deleted: branches/source_renaming/src/framework/module_mpas_timekeeping.F
===================================================================
--- branches/source_renaming/src/framework/module_mpas_timekeeping.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/framework/module_mpas_timekeeping.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,1625 +0,0 @@
-module mpas_timekeeping
-
- use ESMF_BaseMod
- use ESMF_Stubs
- use ESMF_CalendarMod
- use ESMF_ClockMod
- use ESMF_TimeMod
- use ESMF_TimeIntervalMod
-
- private :: MPAS_CalibrateAlarms
- private :: MPAS_inRingingEnvelope
-
- integer, parameter :: MPAS_MAX_ALARMS = 20
- integer, parameter :: MPAS_NOW = 0, &
- MPAS_START_TIME = 1, &
- MPAS_STOP_TIME = 2
- integer, parameter :: MPAS_FORWARD = 1, &
- MPAS_BACKWARD = -1
- integer, parameter :: MPAS_GREGORIAN = 0, &
- MPAS_GREGORIAN_NOLEAP = 1, &
- MPAS_360DAY = 2
-
- integer :: TheCalendar
-
- integer, dimension(12), parameter :: daysInMonth = (/31,28,31,30,31,30,31,31,30,31,30,31/)
- integer, dimension(12), parameter :: daysInMonthLeap = (/31,29,31,30,31,30,31,31,30,31,30,31/)
-
-
- type MPAS_Time_type
- type (ESMF_Time) :: t
- end type
-
- type MPAS_TimeInterval_type
- type (ESMF_TimeInterval) :: ti
- end type
-
- type MPAS_Alarm_type
- integer :: alarmID
- logical :: isRecurring
- logical :: isSet
- type (MPAS_Time_type) :: ringTime
- type (MPAS_Time_type) :: prevRingTime
- type (MPAS_TimeInterval_type) :: ringTimeInterval
- type (MPAS_Alarm_type), pointer :: next
- end type
-
- type MPAS_Clock_type
- integer :: direction
- integer :: nAlarms
- type (ESMF_Clock) :: c
- type (MPAS_Alarm_type), pointer :: alarmListHead
- end type
-
- interface operator (+)
- module procedure add_t_ti
- module procedure add_ti_ti
- end interface
-
- interface operator (-)
- module procedure sub_t_t
- module procedure sub_t_ti
- module procedure sub_ti_ti
- module procedure neg_ti
- end interface
-
- interface operator (*)
- module procedure mul_ti_n
- end interface
-
- interface operator (/)
- module procedure div_ti_n
- end interface
-
- interface operator (.EQ.)
- module procedure eq_t_t
- module procedure eq_ti_ti
- end interface
-
- interface operator (.NE.)
- module procedure ne_t_t
- module procedure ne_ti_ti
- end interface
-
- interface operator (.LT.)
- module procedure lt_t_t
- module procedure lt_ti_ti
- end interface
-
- interface operator (.GT.)
- module procedure gt_t_t
- module procedure gt_ti_ti
- end interface
-
- interface operator (.LE.)
- module procedure le_t_t
- module procedure le_ti_ti
- end interface
-
- interface operator (.GE.)
- module procedure ge_t_t
- module procedure ge_ti_ti
- end interface
-
- interface abs
- module procedure abs_ti
- end interface
-
-
- contains
-
-
- subroutine mpas_timekeeping_init(calendar)
-
- implicit none
-
- integer, intent(in) :: calendar
-
- TheCalendar = calendar
-
- if (TheCalendar == MPAS_GREGORIAN) then
- call ESMF_Initialize(defaultCalendar=ESMF_CAL_GREGORIAN)
- else if (TheCalendar == MPAS_GREGORIAN_NOLEAP) then
- call ESMF_Initialize(defaultCalendar=ESMF_CAL_NOLEAP)
- else if (TheCalendar == MPAS_360DAY) then
- call ESMF_Initialize(defaultCalendar=ESMF_CAL_360DAY)
- else
- write(0,*) 'ERROR: mpas_timekeeping_init: Invalid calendar type'
- end if
-
- end subroutine mpas_timekeeping_init
-
-
- subroutine mpas_timekeeping_finalize()
-
- implicit none
-
- call ESMF_Finalize()
-
- end subroutine mpas_timekeeping_finalize
-
-
- subroutine MPAS_createClock(clock, startTime, timeStep, stopTime, runDuration, ierr)
-
- implicit none
-
- type (MPAS_Clock_type), intent(out) :: clock
- type (MPAS_Time_type), intent(in) :: startTime
- type (MPAS_TimeInterval_type), intent(in) :: timeStep
- type (MPAS_Time_type), intent(in), optional :: stopTime
- type (MPAS_TimeInterval_type), intent(in), optional :: runDuration
- integer, intent(out), optional :: ierr
-
- type (MPAS_Time_type) :: stop_time
-
- if (present(runDuration)) then
- stop_time = startTime + runDuration
- if (present(stopTime)) then
- if (stopTime /= stop_time) then
- if (present(ierr)) ierr = 1 ! stopTime and runDuration are inconsistent
- write(0,*) 'ERROR: MPAS_createClock: stopTime and runDuration are inconsistent'
- return
- end if
- end if
- else if (present(stopTime)) then
- stop_time = stopTime
- else
- if (present(ierr)) ierr = 1 ! neither stopTime nor runDuration are specified
- write(0,*) 'ERROR: MPAS_createClock: neither stopTime nor runDuration are specified'
- return
- end if
-
- clock % c = ESMF_ClockCreate(TimeStep=timeStep%ti, StartTime=startTime%t, StopTime=stop_time%t, rc=ierr)
- if (present(ierr)) then
- if (ierr == ESMF_SUCCESS) ierr = 0
- end if
- clock % direction = MPAS_FORWARD
- clock % nAlarms = 0
- nullify(clock % alarmListHead)
-
- end subroutine MPAS_createClock
-
-
- subroutine MPAS_destroyClock(clock, ierr)
-
- implicit none
-
- type (MPAS_Clock_type), intent(inout) :: clock
- integer, intent(out), optional :: ierr
-
- type (MPAS_Alarm_type), pointer :: alarmPtr
-
- alarmPtr => clock % alarmListHead
- do while (associated(alarmPtr))
- clock % alarmListHead => alarmPtr % next
- deallocate(alarmPtr)
- alarmPtr => clock % alarmListHead
- end do
-
- call ESMF_ClockDestroy(clock % c, rc=ierr)
- if (present(ierr)) then
- if (ierr == ESMF_SUCCESS) ierr = 0
- end if
-
- end subroutine MPAS_destroyClock
-
-
- logical function MPAS_isClockStartTime(clock, ierr)
-
- implicit none
-
- type (MPAS_Clock_type), intent(in) :: clock
- integer, intent(out), optional :: ierr
-
- type (ESMF_Time) :: currTime, startTime, stopTime
-
- call ESMF_ClockGet(clock % c, CurrTime=currTime, rc=ierr)
- call ESMF_ClockGet(clock % c, StartTime=startTime, rc=ierr)
- call ESMF_ClockGet(clock % c, StopTime=stopTime, rc=ierr)
- if (present(ierr)) then
- if (ierr == ESMF_SUCCESS) ierr = 0
- end if
-
- if (startTime <= stopTime) then
- MPAS_isClockStartTime = (currTime <= startTime)
- else
- MPAS_isClockStartTime = (currTime >= startTime)
- end if
-
- end function MPAS_isClockStartTime
-
-
- logical function MPAS_isClockStopTime(clock, ierr)
-
- implicit none
-
- type (MPAS_Clock_type), intent(in) :: clock
- integer, intent(out), optional :: ierr
-
- type (ESMF_Time) :: currTime, startTime, stopTime
-
- call ESMF_ClockGet(clock % c, CurrTime=currTime, rc=ierr)
- call ESMF_ClockGet(clock % c, StartTime=startTime, rc=ierr)
- call ESMF_ClockGet(clock % c, StopTime=stopTime, rc=ierr)
- if (present(ierr)) then
- if (ierr == ESMF_SUCCESS) ierr = 0
- end if
-
- if (startTime <= stopTime) then
- MPAS_isClockStopTime = (currTime >= stopTime)
- else
- MPAS_isClockStopTime = (currTime <= stopTime)
- end if
-
- end function MPAS_isClockStopTime
-
-
- subroutine MPAS_setClockDirection(clock, direction, ierr)
-
- implicit none
-
- type (MPAS_Clock_type), intent(inout) :: clock
- integer, intent(in) :: direction
- integer, intent(out), optional :: ierr
-
- type (MPAS_TimeInterval_type) :: timeStep
-
- if (direction == MPAS_FORWARD .and. clock % direction == MPAS_FORWARD) return
- if (direction == MPAS_BACKWARD .and. clock % direction == MPAS_BACKWARD) return
-
- clock % direction = direction
- call ESMF_ClockGet(clock % c, TimeStep=timeStep%ti, rc=ierr)
- timeStep = neg_ti(timeStep)
- call ESMF_ClockSet(clock % c, TimeStep=timeStep%ti, rc=ierr)
-
- ! specify a valid previousRingTime for each alarm
- call MPAS_CalibrateAlarms(clock, ierr);
-
- if (present(ierr)) then
- if (ierr == ESMF_SUCCESS) ierr = 0
- end if
-
- end subroutine MPAS_setClockDirection
-
-
-
- integer function MPAS_getClockDirection(clock, ierr)
-
- implicit none
-
- type (MPAS_Clock_type), intent(in) :: clock
- integer, intent(out), optional :: ierr
-
- if (present(ierr)) ierr = 0
-
- MPAS_getClockDirection = clock % direction
-
- end function MPAS_getClockDirection
-
-
- subroutine MPAS_setClockTimeStep(clock, timeStep, ierr)
-
- implicit none
-
- type (MPAS_Clock_type), intent(inout) :: clock
- type (MPAS_TimeInterval_type), intent(in) :: timeStep
- integer, intent(out), optional :: ierr
-
- call ESMF_ClockSet(clock % c, TimeStep=timeStep%ti, rc=ierr)
- if (present(ierr)) then
- if (ierr == ESMF_SUCCESS) ierr = 0
- end if
-
- end subroutine MPAS_setClockTimeStep
-
-
- type (MPAS_TimeInterval_type) function MPAS_getClockTimeStep(clock, ierr)
-
- implicit none
-
- type (MPAS_Clock_type), intent(in) :: clock
- integer, intent(out), optional :: ierr
-
- type (MPAS_TimeInterval_type) :: timeStep
-
- call ESMF_ClockGet(clock % c, TimeStep=timeStep%ti, rc=ierr)
- if (present(ierr)) then
- if (ierr == ESMF_SUCCESS) ierr = 0
- end if
-
- MPAS_getClockTimeStep = timeStep
-
- end function MPAS_getClockTimeStep
-
-
- subroutine MPAS_advanceClock(clock, timeStep, ierr)
-
- implicit none
-
- type (MPAS_Clock_type), intent(inout) :: clock
- type (MPAS_TimeInterval_type), intent(in), optional :: timeStep
- integer, intent(out), optional :: ierr
-
- type (ESMF_TimeInterval) :: time_step
-
- if (present(timeStep)) then
- call ESMF_ClockGet(clock % c, TimeStep=time_step, rc=ierr)
- call ESMF_ClockSet(clock % c, TimeStep=timeStep % ti, rc=ierr)
- call ESMF_ClockAdvance(clock % c, rc=ierr)
- call ESMF_ClockSet(clock % c, TimeStep=time_step, rc=ierr)
- else
- call ESMF_ClockAdvance(clock % c, rc=ierr)
- end if
- if (present(ierr)) then
- if (ierr == ESMF_SUCCESS) ierr = 0
- end if
-
- end subroutine MPAS_advanceClock
-
-
- subroutine MPAS_setClockTime(clock, clock_time, whichTime, ierr)
-
- implicit none
-
- type (MPAS_Clock_type), intent(inout) :: clock
- type (MPAS_Time_type), intent(in) :: clock_time
- integer, intent(in) :: whichTime
- integer, intent(out), optional :: ierr
-
- if (whichTime == MPAS_NOW) then
- call ESMF_ClockSet(clock % c, CurrTime=clock_time%t, rc=ierr)
- call MPAS_CalibrateAlarms(clock, ierr);
- else if (whichTime == MPAS_START_TIME) then
- call ESMF_ClockSet(clock % c, StartTime=clock_time%t, rc=ierr)
- else if (whichTime == MPAS_STOP_TIME) then
- call ESMF_ClockSet(clock % c, StopTime=clock_time%t, rc=ierr)
- else if (present(ierr)) then
- ierr = 1
- end if
- if (present(ierr)) then
- if (ierr == ESMF_SUCCESS) ierr = 0
- end if
-
- end subroutine MPAS_setClockTime
-
-
- type (MPAS_Time_type) function MPAS_getClockTime(clock, whichTime, ierr)
-
- implicit none
-
- type (MPAS_Clock_type), intent(in) :: clock
- integer, intent(in) :: whichTime
- integer, intent(out), optional :: ierr
-
- type (MPAS_Time_type) :: clock_time
-
- if (whichTime == MPAS_NOW) then
- call ESMF_ClockGet(clock % c, CurrTime=clock_time%t, rc=ierr)
- else if (whichTime == MPAS_START_TIME) then
- call ESMF_ClockGet(clock % c, StartTime=clock_time%t, rc=ierr)
- else if (whichTime == MPAS_STOP_TIME) then
- call ESMF_ClockGet(clock % c, StopTime=clock_time%t, rc=ierr)
- else if (present(ierr)) then
- ierr = 1
- end if
- if (present(ierr)) then
- if (ierr == ESMF_SUCCESS) ierr = 0
- end if
-
- MPAS_getClockTime = clock_time
-
- end function MPAS_getClockTime
-
-
- subroutine MPAS_addClockAlarm(clock, alarmID, alarmTime, alarmTimeInterval, ierr)
-! TODO: possibly add a stop time for recurring alarms
-
- implicit none
-
- type (MPAS_Clock_type), intent(inout) :: clock
- integer, intent(in) :: alarmID
- type (MPAS_Time_type), intent(in) :: alarmTime
- type (MPAS_TimeInterval_type), intent(in), optional :: alarmTimeInterval
- integer, intent(out), optional :: ierr
-
- type (MPAS_Alarm_type), pointer :: alarmPtr
-
- ! Add a new entry to the linked list of alarms for this clock
- if (.not. associated(clock % alarmListHead)) then
- allocate(clock % alarmListHead)
- nullify(clock % alarmListHead % next)
- alarmPtr => clock % alarmListHead
- else
- alarmPtr => clock % alarmListHead
- do while (associated(alarmPtr % next))
- if (alarmPtr % alarmID == alarmID) then
- write(0,*) 'OOPS -- we have a duplicate alarmID', alarmID
- if (present(ierr)) ierr = 1
- return
- end if
- alarmPtr => alarmPtr % next
- end do
- if (alarmPtr % alarmID == alarmID) then
- write(0,*) 'OOPS -- we have a duplicate alarmID', alarmID
- if (present(ierr)) ierr = 1
- return
- end if
- allocate(alarmPtr % next)
- alarmPtr => alarmPtr % next
- nullify(alarmPtr % next)
- end if
-
- alarmPtr % alarmID = alarmID
-
- clock % nAlarms = clock % nAlarms + 1
-
- alarmPtr % isSet = .true.
- alarmPtr % ringTime = alarmTime
-
-
- if (present(alarmTimeInterval)) then
- alarmPtr % isRecurring = .true.
- alarmPtr % ringTimeInterval = alarmTimeInterval
- if(clock % direction == MPAS_FORWARD) then
- alarmPtr % prevRingTime = alarmTime - alarmTimeInterval
- else
- alarmPtr % prevRingTime = alarmTime + alarmTimeInterval
- end if
- else
- alarmPtr % isRecurring = .false.
- alarmPtr % prevRingTime = alarmTime
- end if
- if (present(ierr)) then
- if (ierr == ESMF_SUCCESS) ierr = 0
- end if
-
- end subroutine MPAS_addClockAlarm
-
-
- subroutine MPAS_removeClockAlarm(clock, alarmID, ierr)
-
- implicit none
-
- type (MPAS_Clock_type), intent(inout) :: clock
- integer, intent(in) :: alarmID
- integer, intent(out), optional :: ierr
-
- type (MPAS_Alarm_type), pointer :: alarmPtr
- type (MPAS_Alarm_type), pointer :: alarmParentPtr
-
- if (present(ierr)) ierr = 0
-
- alarmPtr => clock % alarmListHead
- alarmParentPtr = alarmPtr
- do while (associated(alarmPtr))
- if (alarmPtr % alarmID == alarmID) then
- alarmParentPtr % next => alarmPtr % next
- deallocate(alarmPtr)
- exit
- end if
- alarmParentPtr = alarmPtr
- alarmPtr => alarmPtr % next
- end do
-
- end subroutine MPAS_removeClockAlarm
-
-
-
- subroutine MPAS_printAlarm(clock, alarmID, ierr)
-
- implicit none
-
- type (MPAS_Clock_type), intent(in) :: clock
- integer, intent(in) :: alarmID
- integer, intent(out) :: ierr
-
- type (MPAS_Alarm_type), pointer :: alarmPtr
-
- type (MPAS_TimeInterval_type) :: alarmTimeInterval
- type (MPAS_Time_type) :: alarmTime
- character (len=32) :: printString
-
- ierr = 0
-
- alarmPtr => clock % alarmListHead
- do while (associated(alarmPtr))
- if (alarmPtr % alarmID == alarmID) then
- write(0,*) 'ALARM ', alarmID
-
- write(0,*) 'isRecurring', alarmPtr % isRecurring
-
- write(0,*) 'isSet', alarmPtr % isSet
-
- call MPAS_getTime(alarmPtr % ringTime, dateTimeString=printString, ierr=ierr)
- write(0,*) 'ringTime', printString
-
- call MPAS_getTime(alarmPtr % prevRingTime, dateTimeString=printString, ierr=ierr)
- write(0,*) 'prevRingTime', printString
-
- call MPAS_getTimeInterval(alarmPtr % ringTimeInterval, timeString=printString, ierr=ierr)
- write(0,*) 'ringTimeInterval', printString
-
- exit
- end if
- alarmPtr => alarmPtr % next
- end do
-
- end subroutine MPAS_printAlarm
-
-
-
- logical function MPAS_isAlarmRinging(clock, alarmID, interval, ierr)
-
- implicit none
-
- type (MPAS_Clock_type), intent(in) :: clock
- integer, intent(in) :: alarmID
- type (MPAS_TimeInterval_type), intent(in), optional :: interval
- integer, intent(out), optional :: ierr
-
- type (MPAS_Alarm_type), pointer :: alarmPtr
-
- if (present(ierr)) ierr = 0
-
- MPAS_isAlarmRinging = .false.
-
- alarmPtr => clock % alarmListHead
- do while (associated(alarmPtr))
- if (alarmPtr % alarmID == alarmID) then
- if (alarmPtr % isSet) then
- if (MPAS_inRingingEnvelope(clock, alarmPtr, interval, ierr)) then
- MPAS_isAlarmRinging = .true.
- end if
- end if
- exit
- end if
- alarmPtr => alarmPtr % next
- end do
-
- end function MPAS_isAlarmRinging
-
-
-
- subroutine MPAS_getClockRingingAlarms(clock, nAlarms, alarmList, interval, ierr)
-
- implicit none
-
- type (MPAS_Clock_type), intent(in) :: clock
- integer, intent(out) :: nAlarms
- integer, dimension(MPAS_MAX_ALARMS), intent(out) :: alarmList
- type (MPAS_TimeInterval_type), intent(in), optional :: interval
- integer, intent(out), optional :: ierr
-
- type (MPAS_Alarm_type), pointer :: alarmPtr
-
- if (present(ierr)) ierr = 0
-
- nAlarms = 0
-
- alarmPtr => clock % alarmListHead
- do while (associated(alarmPtr))
- if (alarmPtr % isSet) then
- if (MPAS_inRingingEnvelope(clock, alarmPtr, interval, ierr)) then
- nAlarms = nAlarms + 1
- alarmList(nAlarms) = alarmPtr % alarmID
- end if
- end if
- alarmPtr => alarmPtr % next
- end do
-
- end subroutine MPAS_getClockRingingAlarms
-
-
- logical function MPAS_inRingingEnvelope(clock, alarmPtr, interval, ierr)
-
- implicit none
-
- type (MPAS_Clock_type), intent(in) :: clock
- type (MPAS_Alarm_type), pointer, intent(in) :: alarmPtr
- type (MPAS_TimeInterval_type), intent(in), optional :: interval
- integer, intent(out), optional :: ierr
-
- type (MPAS_Time_type) :: alarmNow
- type (MPAS_Time_type) :: alarmThreshold
-
- alarmNow = MPAS_getClockTime(clock, MPAS_NOW, ierr)
- alarmThreshold = alarmPtr % ringTime
-
- MPAS_inRingingEnvelope = .false.
-
- if(clock % direction == MPAS_FORWARD) then
-
- if (present(interval)) then
- alarmNow = alarmNow + interval;
- end if
-
- if (alarmPtr % isRecurring) then
- alarmThreshold = alarmPtr % prevRingTime + alarmPtr % ringTimeInterval
- end if
-
- if (alarmThreshold <= alarmNow) then
- MPAS_inRingingEnvelope = .true.
- end if
- else
-
- if (present(interval)) then
- alarmNow = alarmNow - interval;
- end if
-
- if (alarmPtr % isRecurring) then
- alarmThreshold = alarmPtr % prevRingTime - alarmPtr % ringTimeInterval
- end if
-
- if (alarmThreshold >= alarmNow) then
- MPAS_inRingingEnvelope = .true.
- end if
- end if
-
- end function MPAS_inRingingEnvelope
-
-
-
- subroutine MPAS_resetClockAlarm(clock, alarmID, interval, ierr)
-
- implicit none
-
- type (MPAS_Clock_type), intent(inout) :: clock
- integer, intent(in) :: alarmID
- type (MPAS_TimeInterval_type), intent(in), optional :: interval
- integer, intent(out), optional :: ierr
-
- type (MPAS_Time_type) :: alarmNow
- type (MPAS_Alarm_type), pointer :: alarmPtr
-
- if (present(ierr)) ierr = 0
-
- alarmPtr => clock % alarmListHead
- do while (associated(alarmPtr))
-
- if (alarmPtr % alarmID == alarmID) then
-
- if (MPAS_inRingingEnvelope(clock, alarmPtr, interval, ierr)) then
-
- if (.not. alarmPtr % isRecurring) then
- alarmPtr % isSet = .false.
- else
- alarmNow = MPAS_getClockTime(clock, MPAS_NOW, ierr)
-
- if(clock % direction == MPAS_FORWARD) then
- if (present(interval)) then
- alarmNow = alarmNow + interval
- end if
-
- do while(alarmPtr % prevRingTime <= alarmNow)
- alarmPtr % prevRingTime = alarmPtr % prevRingTime + alarmPtr % ringTimeInterval
- end do
- alarmPtr % prevRingTime = alarmPtr % prevRingTime - alarmPtr % ringTimeInterval
- else
- if (present(interval)) then
- alarmNow = alarmNow - interval
- end if
-
- do while(alarmPtr % prevRingTime >= alarmNow)
- alarmPtr % prevRingTime = alarmPtr % prevRingTime - alarmPtr % ringTimeInterval
- end do
- alarmPtr % prevRingTime = alarmPtr % prevRingTime + alarmPtr % ringTimeInterval
- end if
- end if
- end if
- exit
- end if
- alarmPtr => alarmPtr % next
- end do
-
- end subroutine MPAS_resetClockAlarm
-
-
-
- ! specify a valid previousRingTime for each alarm
- subroutine MPAS_CalibrateAlarms(clock, ierr)
-
- implicit none
-
- type (MPAS_Clock_type), intent(in) :: clock
- integer, intent(out), optional :: ierr
-
- type (MPAS_Time_type) :: now
- type (MPAS_Time_type) :: previousRingTime
- type (MPAS_Time_type) :: negativeNeighborRingTime
- type (MPAS_Time_type) :: positiveNeighborRingTime
- type (MPAS_TimeInterval_type) :: ringTimeInterval
- type (MPAS_Alarm_type), pointer :: alarmPtr
-
- now = MPAS_getClockTime(clock, MPAS_NOW, ierr)
-
- alarmPtr => clock % alarmListHead
- do while (associated(alarmPtr))
-
- if (.not. alarmPtr % isRecurring) then
- alarmPtr % isSet = .true.
- else
-
- previousRingTime = alarmPtr % prevRingTime
-
- if (previousRingTime <= now) then
-
- do while(previousRingTime <= now)
- previousRingTime = previousRingTime + alarmPtr % ringTimeInterval
- end do
- positiveNeighborRingTime = previousRingTime
-
- do while(previousRingTime >= now)
- previousRingTime = previousRingTime - alarmPtr % ringTimeInterval
- end do
- negativeNeighborRingTime = previousRingTime
-
- else
-
- do while(previousRingTime >= now)
- previousRingTime = previousRingTime - alarmPtr % ringTimeInterval
- end do
- negativeNeighborRingTime = previousRingTime
-
- do while(previousRingTime <= now)
- previousRingTime = previousRingTime + alarmPtr % ringTimeInterval
- end do
- positiveNeighborRingTime = previousRingTime
-
- end if
-
- if (clock % direction == MPAS_FORWARD) then
- alarmPtr % prevRingTime = negativeNeighborRingTime
- else
- alarmPtr % prevRingTime = positiveNeighborRingTime
- end if
-
- end if
-
- alarmPtr => alarmPtr % next
-
- end do
-
- if (present(ierr)) then
- if (ierr == ESMF_SUCCESS) ierr = 0
- end if
-
- end subroutine MPAS_CalibrateAlarms
-
-
- subroutine MPAS_setTime(curr_time, YYYY, MM, DD, DoY, H, M, S, S_n, S_d, dateTimeString, ierr)
-
- implicit none
-
- type (MPAS_Time_type), intent(out) :: curr_time
- integer, intent(in), optional :: YYYY
- integer, intent(in), optional :: MM
- integer, intent(in), optional :: DD
- integer, intent(in), optional :: DoY
- integer, intent(in), optional :: H
- integer, intent(in), optional :: M
- integer, intent(in), optional :: S
- integer, intent(in), optional :: S_n
- integer, intent(in), optional :: S_d
- character (len=*), intent(in), optional :: dateTimeString
- integer, intent(out), optional :: ierr
-
- integer, parameter :: integerMaxDigits = 8
- integer :: year, month, day, hour, min, sec
- integer :: numerator, denominator, denominatorPower
-
- character (len=50) :: dateTimeString_
- character (len=50) :: dateSubString
- character (len=50) :: timeSubString
- character (len=50) :: secDecSubString
- character(len=50), pointer, dimension(:) :: subStrings
-
- if (present(dateTimeString)) then
-
- dateTimeString_ = dateTimeString
- numerator = 0
- denominator = 1
-
- call SplitString(dateTimeString_, ".", subStrings)
- if (size(subStrings) == 2) then ! contains second decimals
- dateTimeString_ = subStrings(1)
- secDecSubString = subStrings(2)(:integerMaxDigits)
- deallocate(subStrings)
- denominatorPower = len_trim(secDecSubString)
- if(denominatorPower > 0) then
- read(secDecSubString,*) numerator
- if(numerator > 0) then
- denominator = 10**denominatorPower
- end if
- end if
- else if (size(subStrings) /= 1) then
- deallocate(subStrings)
- if (present(ierr)) ierr = 1
- write(0,*) 'ERROR: Invalid DateTime string', dateTimeString
- return
- end if
-
- call SplitString(dateTimeString_, "_", subStrings)
-
- if(size(subStrings) == 2) then ! contains a date and time
- dateSubString = subStrings(1)
- timeSubString = subStrings(2)
- deallocate(subStrings)
-
- call SplitString(timeSubString, ":", subStrings)
-
- if (size(subStrings) == 3) then
- read(subStrings(1),*) hour
- read(subStrings(2),*) min
- read(subStrings(3),*) sec
- deallocate(subStrings)
- else
- deallocate(subStrings)
- if (present(ierr)) ierr = 1
- write(0,*) 'ERROR: Invalid DateTime string (invalid time substring)', dateTimeString
- return
- end if
-
- else if(size(subStrings) == 1) then ! contains only a date- assume all time values are 0
- dateSubString = subStrings(1)
- deallocate(subStrings)
-
- hour = 0
- min = 0
- sec = 0
-
- else
- deallocate(subStrings)
- if (present(ierr)) ierr = 1
- write(0,*) 'ERROR: Invalid DateTime string', dateTimeString
- return
- end if
-
- call SplitString(dateSubString, "-", subStrings)
-
- if (size(subStrings) == 3) then
- read(subStrings(1),*) year
- read(subStrings(2),*) month
- read(subStrings(3),*) day
- deallocate(subStrings)
- else
- deallocate(subStrings)
- if (present(ierr)) ierr = 1
- write(0,*) 'ERROR: Invalid DateTime string (invalid date substring)', dateTimeString
- return
- end if
-
- call ESMF_TimeSet(curr_time % t, YY=year, MM=month, DD=day, H=hour, M=min, S=sec, Sn=numerator, Sd=denominator, rc=ierr)
-
- else
-
- if (present(DoY)) then
- call getMonthDay(YYYY, DoY, month, day)
-
- ! consistency check
- if (present(MM)) then
- if (MM /= month) then
- if (present(ierr)) ierr = 1
- write(0,*) 'ERROR: MPAS_setTime : DoY and MM are inconsistent - using DoY'
- end if
- end if
- if (present(DD)) then
- if (DD /= day) then
- if (present(ierr)) ierr = 1
- write(0,*) 'ERROR: MPAS_setTime : DoY and DD are inconsistent - using DoY'
- end if
- end if
- else
- if (present(MM)) then
- month = MM
- else
- if (present(ierr)) ierr = 1
- write(0,*) 'ERROR: MPAS_setTime : Neither DoY nor MM are specified'
- return
- end if
-
- if (present(DD)) then
- day = DD
- else
- if (present(ierr)) ierr = 1
- write(0,*) 'ERROR: MPAS_setTime : Neither DoY nor DD are specified'
- return
- end if
- end if
-
- if (.not. isValidDate(YYYY,month,day)) then
- write(0,*) 'ERROR: MPAS_setTime : Invalid date'
- return
- end if
-
- call ESMF_TimeSet(curr_time % t, YY=YYYY, MM=month, DD=day, H=H, M=M, S=S, Sn=S_n, Sd=S_d, rc=ierr)
-
- end if
-
- if (present(ierr)) then
- if (ierr == ESMF_SUCCESS) ierr = 0
- end if
-
- end subroutine MPAS_setTime
-
-
- subroutine MPAS_getTime(curr_time, YYYY, MM, DD, DoY, H, M, S, S_n, S_d, dateTimeString, ierr)
-
- implicit none
-
- type (MPAS_Time_type), intent(in) :: curr_time
- integer, intent(out), optional :: YYYY
- integer, intent(out), optional :: MM
- integer, intent(out), optional :: DD
- integer, intent(out), optional :: DoY
- integer, intent(out), optional :: H
- integer, intent(out), optional :: M
- integer, intent(out), optional :: S
- integer, intent(out), optional :: S_n
- integer, intent(out), optional :: S_d
- character (len=32), intent(out), optional :: dateTimeString
- integer, intent(out), optional :: ierr
-
- call ESMF_TimeGet(curr_time % t, YY=YYYY, MM=MM, DD=DD, H=H, M=M, S=S, Sn=S_n, Sd=S_d, rc=ierr)
- call ESMF_TimeGet(curr_time % t, dayOfYear=DoY, rc=ierr)
- call ESMF_TimeGet(curr_time % t, timeString=dateTimeString, rc=ierr)
- if (present(ierr)) then
- if (ierr == ESMF_SUCCESS) ierr = 0
- end if
-
- end subroutine MPAS_getTime
-
-
- subroutine MPAS_setTimeInterval(interval, DD, H, M, S, S_n, S_d, timeString, dt, ierr)
-
- implicit none
-
- type (MPAS_TimeInterval_type), intent(out) :: interval
- integer, intent(in), optional :: DD
- integer, intent(in), optional :: H
- integer, intent(in), optional :: M
- integer, intent(in), optional :: S
- integer, intent(in), optional :: S_n
- integer, intent(in), optional :: S_d
- character (len=*), intent(in), optional :: timeString
- real (kind=RKIND), intent(in), optional :: dt
- integer, intent(out), optional :: ierr
-
- integer, parameter :: integerMaxDigits = 8
- integer :: days, hours, minutes, seconds
- integer :: numerator, denominator, denominatorPower
- type (MPAS_TimeInterval_type) :: zeroInterval
-
- integer :: day, hour, min, sec
- character (len=50) :: timeString_
- character (len=50) :: daySubString
- character (len=50) :: timeSubString
- character (len=50) :: secDecSubString
- character(len=50), pointer, dimension(:) :: subStrings
-
-! if (present(DD)) then
-! days = DD
-! else
-! days = 0
-! end if
-
-! if (present(H)) then
-! hours = H
-! else
-! hours = 0
-! end if
-
-! if (present(M)) then
-! minutes = M
-! else
-! minutes = 0
-! end if
-
-! if (present(S)) then
-! seconds = S
-! else
-! seconds = 0
-! end if
-
-
- !
- ! Reduce minute count to something less than one hour
- !
-! do while (minutes > 1440)
-! days = days + 1
-! minutes = minutes - 1440
-! end do
-! do while (minutes > 60)
-! hours = hours + 1
-! minutes = minutes - 60
-! end do
-! do while (minutes < -1440)
-! days = days - 1
-! minutes = minutes + 1440
-! end do
-! do while (minutes < -60)
-! hours = hours - 1
-! minutes = minutes + 60
-! end do
-
- !
- ! Reduce hour count to something less than one day
- !
-! do while (hours > 24)
-! days = days + 1
-! hours = hours - 24
-! end do
-! do while (hours < -24)
-! days = days - 1
-! hours = hours + 24
-! end do
-
- !
- ! Any leftover minutes and hours are given to the second count
- !
-! seconds = seconds + hours*3600 + minutes*60
-
-! call ESMF_TimeIntervalSet(interval % ti, D=days, S=seconds, Sn=S_n, Sd=S_d, rc=ierr)
-
-
- if (present(timeString) .or. present(dt)) then
-
-
- if(present(dt)) then
- write (timeString_,*) "00:00:", dt
- else
- timeString_ = timeString
- end if
-
- numerator = 0
- denominator = 1
-
- call SplitString(timeString_, ".", subStrings)
-
- if (size(subStrings) == 2) then ! contains second decimals
- timeString_ = subStrings(1)
- secDecSubString = subStrings(2)(:integerMaxDigits)
- deallocate(subStrings)
-
- denominatorPower = len_trim(secDecSubString)
- if(denominatorPower > 0) then
- read(secDecSubString,*) numerator
- if(numerator > 0) then
- denominator = 10**denominatorPower
- end if
- end if
- else if (size(subStrings) /= 1) then
- deallocate(subStrings)
- if (present(ierr)) ierr = 1
- write(0,*) 'ERROR: Invalid TimeInterval string', timeString
- return
- end if
-
- call SplitString(timeString_, "_", subStrings)
-
- if(size(subStrings) == 2) then ! contains a day and time
- daySubString = subStrings(1)
- timeSubString = subStrings(2)
- deallocate(subStrings)
- read(daySubString,*) day
- else if(size(subStrings) == 1) then ! contains only a time- assume day is 0
- timeSubString = subStrings(1)
- deallocate(subStrings)
- day = 0
- else
- deallocate(subStrings)
- if (present(ierr)) ierr = 1
- write(0,*) 'ERROR: Invalid TimeInterval string', timeString
- return
- end if
-
- call SplitString(timeSubString, ":", subStrings)
-
- if (size(subStrings) == 3) then
- read(subStrings(1),*) hour
- read(subStrings(2),*) min
- read(subStrings(3),*) sec
- deallocate(subStrings)
- else
- deallocate(subStrings)
- if (present(ierr)) ierr = 1
- write(0,*) 'ERROR: Invalid TimeInterval string (invalid time substring)', timeString
- return
- end if
-
- call ESMF_TimeIntervalSet(interval % ti, D=day, H=hour, M=min, S=sec, Sn=numerator, Sd=denominator, rc=ierr)
-
- else
-
- call ESMF_TimeIntervalSet(interval % ti, D=DD, H=H, M=M, S=S, Sn=S_n, Sd=S_d, rc=ierr)
-
- end if
-
- ! verify that time interval is positive
- call ESMF_TimeIntervalSet(zeroInterval % ti, D=0, H=0, M=0, S=0, rc=ierr)
-
- if (present(ierr)) then
- if (ierr == ESMF_SUCCESS) ierr = 0
- end if
-
- if (interval <= zeroInterval) then
- if (present(ierr)) ierr = 1
- write(0,*) 'ERROR: TimeInterval must be greater than 0', timeString !'ERROR: TimeInterval cannot be negative'
- end if
-
-
-
- end subroutine MPAS_setTimeInterval
-
-
- subroutine MPAS_getTimeInterval(interval, DD, H, M, S, S_n, S_d, timeString, dt, ierr)
-! TODO: add double-precision seconds
-
- implicit none
-
- type (MPAS_TimeInterval_type), intent(in) :: interval
- integer, intent(out), optional :: DD
- integer, intent(out), optional :: H
- integer, intent(out), optional :: M
- integer, intent(out), optional :: S
- integer, intent(out), optional :: S_n
- integer, intent(out), optional :: S_d
- character (len=32), intent(out), optional :: timeString
- real (kind=RKIND), intent(out), optional :: dt
- integer, intent(out), optional :: ierr
-
- integer :: days, seconds, sn, sd
-
- call ESMF_TimeIntervalGet(interval % ti, D=days, S=seconds, Sn=sn, Sd=sd, rc=ierr)
-
- if (present(dt)) then
- dt = (days * 24 * 60 * 60) + seconds + (sn / sd)
- end if
-
- if (present(DD)) then
- DD = days
- days = 0
- end if
-
- if (present(H)) then
- H = (seconds - mod(seconds,3600)) / 3600
- seconds = seconds - H*3600
- H = H + days * 24
- days = 0
- end if
-
- if (present(M)) then
- M = (seconds - mod(seconds,60)) / 60
- seconds = seconds - M*60
- M = M + days * 1440
- days = 0
- end if
-
- if (present(S)) then
- S = seconds
- end if
-
- if (present(S_n)) then
- S_n = sn
- end if
-
- if (present(S_d)) then
- S_d = sd
- end if
-
- if (present(timeString)) then
- call ESMF_TimeIntervalGet(interval % ti, timeString=timeString, rc=ierr)
- end if
-
- if (present(ierr)) then
- if (ierr == ESMF_SUCCESS) ierr = 0
- end if
-
- end subroutine MPAS_getTimeInterval
-
-
- type (MPAS_Time_type) function add_t_ti(t, ti)
-
- implicit none
-
- type (MPAS_Time_type), intent(in) :: t
- type (MPAS_TimeInterval_type), intent(in) :: ti
-
- add_t_ti % t = t % t + ti % ti
-
- end function add_t_ti
-
-
- type (MPAS_TimeInterval_type) function add_ti_ti(ti1, ti2)
-
- implicit none
-
- type (MPAS_TimeInterval_type), intent(in) :: ti1, ti2
-
- add_ti_ti % ti = ti1 % ti + ti2 % ti
-
- end function add_ti_ti
-
-
- type (MPAS_TimeInterval_type) function sub_t_t(t1, t2)
-
- implicit none
-
- type (MPAS_Time_type), intent(in) :: t1, t2
-
- sub_t_t % ti = t1 % t - t2 % t
-
- end function sub_t_t
-
-
- type (MPAS_Time_type) function sub_t_ti(t, ti)
-
- implicit none
-
- type (MPAS_Time_type), intent(in) :: t
- type (MPAS_TimeInterval_type), intent(in) :: ti
-
- sub_t_ti % t = t % t - ti % ti
-
- end function sub_t_ti
-
-
- type (MPAS_TimeInterval_type) function sub_ti_ti(ti1, ti2)
-
- implicit none
-
- type (MPAS_TimeInterval_type), intent(in) :: ti1, ti2
-
- sub_ti_ti % ti = ti1 % ti - ti2 % ti
-
- end function sub_ti_ti
-
-
- type (MPAS_TimeInterval_type) function mul_ti_n(ti, n)
-
- implicit none
-
- type (MPAS_TimeInterval_type), intent(in) :: ti
- integer, intent(in) :: n
-
- mul_ti_n % ti = ti % ti * n
-
- end function mul_ti_n
-
-
- type (MPAS_TimeInterval_type) function div_ti_n(ti, n)
-
- implicit none
-
- type (MPAS_TimeInterval_type), intent(in) :: ti
- integer, intent(in) :: n
-
- div_ti_n % ti = ti % ti / n
-
- end function div_ti_n
-
-
- logical function eq_t_t(t1, t2)
-
- implicit none
-
- type (MPAS_Time_type), intent(in) :: t1, t2
-
- eq_t_t = (t1 % t == t2 % t)
-
- end function eq_t_t
-
-
- logical function ne_t_t(t1, t2)
-
- implicit none
-
- type (MPAS_Time_type), intent(in) :: t1, t2
-
- ne_t_t = (t1 % t /= t2 % t)
-
- end function ne_t_t
-
-
- logical function lt_t_t(t1, t2)
-
- implicit none
-
- type (MPAS_Time_type), intent(in) :: t1, t2
-
- lt_t_t = (t1 % t < t2 % t)
-
- end function lt_t_t
-
-
- logical function gt_t_t(t1, t2)
-
- implicit none
-
- type (MPAS_Time_type), intent(in) :: t1, t2
-
- gt_t_t = (t1 % t > t2 % t)
-
- end function gt_t_t
-
-
- logical function le_t_t(t1, t2)
-
- implicit none
-
- type (MPAS_Time_type), intent(in) :: t1, t2
-
- le_t_t = (t1 % t <= t2 % t)
-
- end function le_t_t
-
-
- logical function ge_t_t(t1, t2)
-
- implicit none
-
- type (MPAS_Time_type), intent(in) :: t1, t2
-
- ge_t_t = (t1 % t >= t2 % t)
-
- end function ge_t_t
-
-
- logical function eq_ti_ti(ti1, ti2)
-
- implicit none
-
- type (MPAS_TimeInterval_type), intent(in) :: ti1, ti2
-
- eq_ti_ti = (ti1 % ti == ti2 % ti)
-
- end function eq_ti_ti
-
-
- logical function ne_ti_ti(ti1, ti2)
-
- implicit none
-
- type (MPAS_TimeInterval_type), intent(in) :: ti1, ti2
-
- ne_ti_ti = (ti1 % ti /= ti2 % ti)
-
- end function ne_ti_ti
-
-
- logical function lt_ti_ti(ti1, ti2)
-
- implicit none
-
- type (MPAS_TimeInterval_type), intent(in) :: ti1, ti2
-
- lt_ti_ti = (ti1 % ti < ti2 % ti)
-
- end function lt_ti_ti
-
-
- logical function gt_ti_ti(ti1, ti2)
-
- implicit none
-
- type (MPAS_TimeInterval_type), intent(in) :: ti1, ti2
-
- gt_ti_ti = (ti1 % ti > ti2 % ti)
-
- end function gt_ti_ti
-
-
- logical function le_ti_ti(ti1, ti2)
-
- implicit none
-
- type (MPAS_TimeInterval_type), intent(in) :: ti1, ti2
-
- le_ti_ti = (ti1 % ti <= ti2 % ti)
-
- end function le_ti_ti
-
-
- logical function ge_ti_ti(ti1, ti2)
-
- implicit none
-
- type (MPAS_TimeInterval_type), intent(in) :: ti1, ti2
-
- ge_ti_ti = (ti1 % ti >= ti2 % ti)
-
- end function ge_ti_ti
-
-
- type (MPAS_TimeInterval_type) function neg_ti(ti)
-
- implicit none
-
- type (MPAS_TimeInterval_type), intent(in) :: ti
-
- integer :: rc
- integer :: D, S, Sn, Sd
-
- call ESMF_TimeIntervalGet(ti % ti, D=D, S=S, Sn=Sn, Sd=Sd, rc=rc)
- D = -D
- S = -S
- Sn = -Sn
- call ESMF_TimeIntervalSet(neg_ti % ti, D=D, S=S, Sn=Sn, Sd=Sd, rc=rc)
-
- end function neg_ti
-
-
- type (MPAS_TimeInterval_type) function abs_ti(ti)
-
- implicit none
-
- type (MPAS_TimeInterval_type), intent(in) :: ti
-
- type (MPAS_TimeInterval_type) :: zeroInterval
- integer :: rc
- integer :: D, S, Sn, Sd
-
- call ESMF_TimeIntervalSet(zeroInterval % ti, D=0, H=0, M=0, S=0, rc=rc)
-
- if(ti < zeroInterval) then
- call ESMF_TimeIntervalGet(ti % ti, D=D, S=S, Sn=Sn, Sd=Sd, rc=rc)
- D = -D
- S = -S
- Sn = -Sn
- call ESMF_TimeIntervalSet(abs_ti % ti, D=D, S=S, Sn=Sn, Sd=Sd, rc=rc)
- else
- abs_ti = ti
- end if
-
- end function abs_ti
-
-
-! TODO: Implement this function
-! type (MPAS_TimeInterval_type) function mod(ti1, ti2)
-!
-! implicit none
-!
-! type (MPAS_TimeInterval_type), intent(in) :: ti1, ti2
-!
-! mod % ti = mod(ti1 % ti, ti2 % ti)
-!
-! end function mod
-
-
- subroutine SplitString(string, delimiter, subStrings)
-
- implicit none
-
- character(len=*), intent(in) :: string
- character, intent(in) :: delimiter
- character(len=*), pointer, dimension(:) :: subStrings
-
- integer :: i, start, index
-
- index = 1
- do i = 1, len(string)
- if(string(i:i) == delimiter) then
- index = index + 1
- end if
- end do
-
- allocate(subStrings(1:index))
-
- start = 1
- index = 1
- do i = 1, len(string)
- if(string(i:i) == delimiter) then
- subStrings(index) = string(start:i-1)
- index = index + 1
- start = i + 1
- end if
- end do
- subStrings(index) = string(start:len(string))
-
- end subroutine SplitString
-
-
- subroutine getMonthDay(YYYY, DoY, month, day)
-
- implicit none
-
- integer, intent(in) :: YYYY, DoY
- integer, intent(out) :: month, day
-
- integer, dimension(12) :: dpm
-
- if (isLeapYear(YYYY)) then
- dpm(:) = daysInMonthLeap
- else
- dpm(:) = daysInMonth
- end if
-
- month = 1
- day = DoY
- do while (day > dpm(month))
- day = day - dpm(month)
- month = month + 1
- end do
-
- end subroutine getMonthDay
-
-
- logical function isValidDate(YYYY, MM, DD)
-
- integer, intent(in) :: YYYY, MM, DD
- integer :: daysInMM
-
- isValidDate = .true.
-
- ! TODO: ???? Gregorian calendar has no year zero, but perhaps 0 = 1 BC ???
- !if (YYYY == 0) then
- ! isValidDate = .false.
- ! return
- !end if
-
- if (MM < 1 .or. MM > 12) then
- isValidDate = .false.
- return
- end if
-
- if (DD < 1) then
- isValidDate = .false.
- return
- end if
-
- if(TheCalendar == MPAS_360DAY) then
- daysInMM = 30
- else
- if (TheCalendar == MPAS_GREGORIAN .and. isLeapYear(YYYY)) then
- daysInMM = daysInMonthLeap(MM)
- else
- daysInMM = daysInMonth(MM)
- end if
- end if
-
- if (DD > daysInMM) then
- isValidDate = .false.
- return
- end if
-
- end function
-
-
- logical function isLeapYear(year)
-
- implicit none
-
- integer, intent(in) :: year
-
- isLeapYear = .false.
-
- if (mod(year,4) == 0) then
- if (mod(year,100) == 0) then
- if (mod(year,400) == 0) then
- isLeapYear = .true.
- end if
- else
- isLeapYear = .true.
- end if
- end if
-
- end function isLeapYear
-
-
-
-
-
-end module mpas_timekeeping
-
-
-
-subroutine wrf_error_fatal(msg)
-
- implicit none
-
- character (len=*) :: msg
-
- write(0,*) 'MPAS_TIMEKEEPING: '//trim(msg)
-
- stop
-
-end subroutine wrf_error_fatal
Deleted: branches/source_renaming/src/framework/module_sort.F
===================================================================
--- branches/source_renaming/src/framework/module_sort.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/framework/module_sort.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,230 +0,0 @@
-module sort
-
- interface quicksort
- module procedure quicksort_int
- module procedure quicksort_real
- end interface
-
-
- contains
-
-
- recursive subroutine mergesort(array, d1, n1, n2)
-
- implicit none
-
- ! Arguments
- integer, intent(in) :: n1, n2, d1
- integer, dimension(1:d1,n1:n2), intent(inout) :: array
-
- ! Local variables
- integer :: i, j, k
- integer :: rtemp
- integer, dimension(1:d1,1:n2-n1+1) :: temp
-
- if (n1 >= n2) return
-
- if (n2 - n1 == 1) then
- if (array(1,n1) > array(1,n2)) then
- do i=1,d1
- rtemp = array(i,n1)
- array(i,n1) = array(i,n2)
- array(i,n2) = rtemp
- end do
- end if
- return
- end if
-
- call mergesort(array(1:d1,n1:n1+(n2-n1+1)/2), d1, n1, n1+(n2-n1+1)/2)
- call mergesort(array(1:d1,n1+((n2-n1+1)/2)+1:n2), d1, n1+((n2-n1+1)/2)+1, n2)
-
- i = n1
- j = n1 + ((n2-n1+1)/2) + 1
- k = 1
- do while (i <= n1+(n2-n1+1)/2 .and. j <= n2)
- if (array(1,i) < array(1,j)) then
- temp(1:d1,k) = array(1:d1,i)
- k = k + 1
- i = i + 1
- else
- temp(1:d1,k) = array(1:d1,j)
- k = k + 1
- j = j + 1
- end if
- end do
-
- if (i <= n1+(n2-n1+1)/2) then
- do while (i <= n1+(n2-n1+1)/2)
- temp(1:d1,k) = array(1:d1,i)
- i = i + 1
- k = k + 1
- end do
- else
- do while (j <= n2)
- temp(1:d1,k) = array(1:d1,j)
- j = j + 1
- k = k + 1
- end do
- end if
-
- array(1:d1,n1:n2) = temp(1:d1,1:k-1)
-
- end subroutine mergesort
-
-
- subroutine quicksort_int(nArray, array)
-
- implicit none
-
- integer, intent(in) :: nArray
- integer, dimension(2,nArray), intent(inout) :: array
-
- integer :: i, j, top, l, r, pivot, s
- integer :: pivot_value
- integer, dimension(2) :: temp
- integer, dimension(1000) :: lstack, rstack
-
- if (nArray < 1) return
-
- top = 1
- lstack(top) = 1
- rstack(top) = nArray
-
- do while (top > 0)
-
- l = lstack(top)
- r = rstack(top)
- top = top - 1
-
- pivot = (l+r)/2
-
- pivot_value = array(1,pivot)
- temp(:) = array(:,pivot)
- array(:,pivot) = array(:,r)
- array(:,r) = temp(:)
-
- s = l
- do i=l,r-1
- if (array(1,i) <= pivot_value) then
- temp(:) = array(:,s)
- array(:,s) = array(:,i)
- array(:,i) = temp(:)
- s = s + 1
- end if
- end do
-
- temp(:) = array(:,s)
- array(:,s) = array(:,r)
- array(:,r) = temp(:)
-
- if (s-1 > l) then
- top = top + 1
-if (top > 1000) write(0,*) 'Error: Quicksort exhausted its stack.'
- lstack(top) = l
- rstack(top) = s-1
- end if
-
- if (r > s+1) then
- top = top + 1
-if (top > 1000) write(0,*) 'Error: Quicksort exhausted its stack.'
- lstack(top) = s+1
- rstack(top) = r
- end if
- end do
-
- end subroutine quicksort_int
-
-
- subroutine quicksort_real(nArray, array)
-
- implicit none
-
- integer, intent(in) :: nArray
- real (kind=RKIND), dimension(2,nArray), intent(inout) :: array
-
- integer :: i, j, top, l, r, pivot, s
- real (kind=RKIND) :: pivot_value
- real (kind=RKIND), dimension(2) :: temp
- integer, dimension(1000) :: lstack, rstack
-
- if (nArray < 1) return
-
- top = 1
- lstack(top) = 1
- rstack(top) = nArray
-
- do while (top > 0)
-
- l = lstack(top)
- r = rstack(top)
- top = top - 1
-
- pivot = (l+r)/2
-
- pivot_value = array(1,pivot)
- temp(:) = array(:,pivot)
- array(:,pivot) = array(:,r)
- array(:,r) = temp(:)
-
- s = l
- do i=l,r-1
- if (array(1,i) <= pivot_value) then
- temp(:) = array(:,s)
- array(:,s) = array(:,i)
- array(:,i) = temp(:)
- s = s + 1
- end if
- end do
-
- temp(:) = array(:,s)
- array(:,s) = array(:,r)
- array(:,r) = temp(:)
-
- if (s-1 > l) then
- top = top + 1
-if (top > 1000) write(0,*) 'Error: Quicksort exhausted its stack.'
- lstack(top) = l
- rstack(top) = s-1
- end if
-
- if (r > s+1) then
- top = top + 1
-if (top > 1000) write(0,*) 'Error: Quicksort exhausted its stack.'
- lstack(top) = s+1
- rstack(top) = r
- end if
- end do
-
- end subroutine quicksort_real
-
-
- integer function binary_search(array, d1, n1, n2, key)
-
- implicit none
-
- integer, intent(in) :: d1, n1, n2, key
- integer, dimension(d1,n1:n2), intent(in) :: array
-
- integer :: l, u, k
-
- binary_search = n2+1
-
- l = n1
- u = n2
- k = (l+u)/2
- do while (u >= l)
- if (array(1,k) == key) then
- binary_search = k
- exit
- else if (array(1,k) < key) then
- l = k + 1
- k = (l+u)/2
- else
- u = k - 1
- k = (l+u)/2
- end if
- end do
-
- end function binary_search
-
-end module sort
Deleted: branches/source_renaming/src/framework/module_timer.F
===================================================================
--- branches/source_renaming/src/framework/module_timer.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/framework/module_timer.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,293 +0,0 @@
- module timer
-
- implicit none
- save
-! private
-
-#ifdef _PAPI
- include 'f90papi.h'
-#endif
-
-#ifdef _MPI
- include 'mpif.h'
-#endif
-
- type timer_node
- character (len=72) :: timer_name
- logical :: running, printable
- integer :: levels, calls
- real (kind=RKIND) :: start_time, end_time, total_time
- real (kind=RKIND) :: max_time, min_time, avg_time
- type (timer_node), pointer :: next
- end type timer_node
-
- type (timer_node), pointer :: all_timers
- integer :: levels
-
- public :: timer_start, &
- timer_stop, &
- timer_write
-
- contains
-
- subroutine timer_start(timer_name, clear_timer, timer_ptr)!{{{
- character (len=*), intent (in) :: timer_name !< Input: name of timer, stored as name of timer
- logical, optional, intent(in) :: clear_timer !< Input: flag to clear timer
- type (timer_node), optional, pointer, intent(out) :: timer_ptr !< Output: pointer to store timer in module
-
- logical :: timer_added, timer_found, string_equal, check_flag
- type (timer_node), pointer :: current, temp
-
- integer :: clock, hz, usecs
-
- timer_added = .false.
- timer_found = .false.
-
- if(.not.associated(all_timers)) then
- timer_added = .true.
- allocate(all_timers)
- allocate(all_timers%next)
- levels = 0
-
- all_timers%timer_name = ''
- current => all_timers%next
- nullify(current%next)
- else
- current => all_timers%next
- timer_search: do while ((.not.timer_found) .and. associated(current))
- string_equal = (trim(current%timer_name) == trim(timer_name))
- if(string_equal) then
- timer_found = .true.
- else
- current => current%next
- endif
- end do timer_search
- endif
-
- if(present(timer_ptr)) then
- timer_found = .true.
- if(.not.associated(timer_ptr)) then
- current => all_timers
- find_end_ptr: do while((.not.timer_added) .and. (associated(current%next)))
- current => current%next
- end do find_end_ptr
-
- allocate(timer_ptr)
-
- current%next => timer_ptr
- current => timer_ptr
- nullify(timer_ptr%next)
- current%levels = levels
- current%timer_name = timer_name
- current%running = .false.
- current%total_time = 0.0
- current%max_time = 0.0
- current%min_time = 100000000.0
- current%avg_time = 0.0
- current%calls = 0
- endif
- endif
-
- if(.not.timer_found) then
- current => all_timers
- find_end: do while((.not.timer_added) .and. (associated(current%next)))
- current => current%next
- end do find_end
-
- allocate(current%next)
- current => current%next
-
- nullify(current%next)
- timer_added = .true.
- endif
-
- if(timer_added .and. (.not.timer_found)) then
- current%levels = levels
- current%timer_name = timer_name
- current%running = .false.
- current%total_time = 0.0
- current%max_time = 0.0
- current%min_time = 100000000.0
- current%avg_time = 0.0
- current%calls = 0
- endif
-
- if((timer_added .or. timer_found) .and. (.not.current%running)) then
- current%running = .true.
- levels = levels + 1
-
-#ifdef _PAPI
- call PAPIF_get_real_usec(usecs, check_flag)
- current%start_time = usecs/1.0e6
-#elif _MPI
- current%start_time = MPI_Wtime()
-#else
- call system_clock (count=clock)
- call system_clock (count_rate=hz)
- current%start_time = real(clock,kind=RKIND)/real(hz,kind=RKIND)
-#endif
- endif
-
- if(present(clear_timer)) then
- if(clear_timer) then
- current%start_time = 0.0
- current%end_time = 0.0
- current%total_time = 0.0
- current%max_time = 0.0
- current%min_time = 0.0
- current%avg_time = 0.0
- current%calls = 0
- current%running = .false.
- endif
- endif
-
- if(present(timer_ptr)) then
- timer_ptr => current
- endif
-
- end subroutine timer_start!}}}
-
- subroutine timer_stop(timer_name, timer_ptr)!{{{
- character (len=*), intent(in) :: timer_name !< Input: name of timer to stop
- type (timer_node), pointer, intent(in), optional :: timer_ptr !< Input: pointer to timer, for stopping
-
- type (timer_node), pointer :: current
-
- real (kind=RKIND) :: time_temp
- logical :: timer_found, string_equal, check_flag
- integer :: clock, hz, usecs
-
- timer_found = .false.
-
- if(present(timer_ptr)) then
- timer_found = .true.
- current => timer_ptr
- endif
-
- if(.not.associated(all_timers)) then
- print *,' timer_stop :: timer_stop called with no timers initialized'
- else if(.not. timer_found) then
- current => all_timers
- timer_find: do while(.not.timer_found .and. associated(current))
- string_equal = (trim(current%timer_name) == trim(timer_name))
-
- if(string_equal) then
- timer_found = .true.
- else
- current => current%next
- endif
- end do timer_find
- endif
-
- if(.not.timer_found) then
- print *,' timer_stop :: timer_stop called with timer_name =', timer_name,' when timer has not been started.'
- stop
- endif
-
- if(current%running) then
- current%running = .false.
- levels = levels - 1
-
-#ifdef _PAPI
- call PAPIF_get_real_usec(usecs, check_flag)
- current%end_time = usecs/1.0e6
-#elif _MPI
- current%end_time = MPI_Wtime()
-#else
- call system_clock(count=clock)
- call system_clock(count_rate=hz)
- current%end_time = real(clock,kind=RKIND)/real(hz,kind=RKIND)
-#endif
-
- time_temp = current%end_time - current%start_time
- current%total_time = current%total_time + time_temp
-
- if(time_temp > current%max_time) then
- current%max_time = time_temp
- endif
-
- if(time_temp < current%min_time) then
- current%min_time = time_temp
- endif
-
- current%avg_time = current%avg_time + time_temp
- current%calls = current%calls + 1
- endif
-
- end subroutine timer_stop!}}}
-
- recursive subroutine timer_write(timer_ptr, total_ptr)!{{{
- type (timer_node), pointer, intent(inout), optional :: timer_ptr
- type (timer_node), pointer, intent(in), optional :: total_ptr
- character (len=10) :: tname
-
- logical :: total_found, string_equals
- type (timer_node), pointer :: current, total
- real (kind=RKIND) :: percent
- integer :: i
-
- total_found = .false.
-
- if(present(timer_ptr) .and. (.not.present(total_ptr))) then
- print *,'timer_write :: timer_ptr valid, but total_ptr is not assigned.'
- stop
- else if(present(timer_ptr)) then
- tname = ''
- do i=0,timer_ptr%levels+2
- tname = tname//' '
-! write(*,'(a,$)') ' '
- end do
-! tname = tname//timer_ptr%timer_name
-
- if(timer_ptr%total_time == 0.0d0) then
- timer_ptr%min_time = 0.0d0
- timer_ptr%max_time = 0.0d0
- timer_ptr%avg_time = 0.0d0
- percent = 0.0d0
- else
- timer_ptr%avg_time = timer_ptr%avg_time/timer_ptr%calls
- percent = timer_ptr%total_time/total_ptr%total_time
- endif
-
- write(*,'(i2, 1x, a35, f15.5, i10, 3f15.5, f8.2)') timer_ptr%levels, tname(1:timer_ptr%levels)//timer_ptr%timer_name, timer_ptr%total_time, timer_ptr%calls, timer_ptr%min_time, timer_ptr%max_time, timer_ptr%avg_time, percent
- return
- endif
-
- total => all_timers
-
- find_total: do while((.not.total_found) .and. associated(total))
- string_equals = (trim(total%timer_name) == trim("total time"))
- if(string_equals) then
- total_found = .true.
- else
- total => total%next
- endif
- end do find_total
-
- if(.not.total_found) then
- print *,' timer_write :: no timer named "total time" found.'
- stop
- end if
-
- write(*,'(3x, a10, 24x, a15, a10, a13, a15, a15, a15)') 'timer_name', 'total', 'calls', 'min', 'max', 'avg', 'percent'
- write(*,'(i2, 1x, a35, f15.5, i10, 3f15.5)') total%levels, total%timer_name, total%total_time, total%calls, total%min_time, total%max_time, total%avg_time
-
- current => all_timers
-
- print_timers: do while(associated(current))
- string_equals = (trim(current%timer_name) == trim("total time"))
- string_equals = string_equals .or. (trim(current%timer_name) == trim(" "))
-
- if(.not.string_equals) then
- call timer_write(current, total)
- current => current%next
- else
- current => current%next
- endif
- end do print_timers
-
- end subroutine timer_write!}}}
-
- end module timer
-
-! vim: foldmethod=marker et ts=2
Deleted: branches/source_renaming/src/framework/module_zoltan_interface.F
===================================================================
--- branches/source_renaming/src/framework/module_zoltan_interface.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/framework/module_zoltan_interface.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,581 +0,0 @@
-module zoltan_interface
- use zoltan
-
- implicit none
-
- include 'mpif.h'
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! Data for reordering cells
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- integer :: numCells
- integer, dimension(:), pointer :: cellIDs
- integer :: geomDim
- real (kind=RKIND), dimension(:), pointer :: cellCoordX, cellCoordY, cellCoordZ
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! Data for reordering edges
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- integer :: numEdges
- integer, dimension(:), pointer :: edgeIDs
- real (kind=RKIND), dimension(:), pointer :: edgeCoordX, edgeCoordY, edgeCoordZ
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! Data for reordering vertices
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- integer :: numVerts
- integer, dimension(:), pointer :: vertIDs
- real (kind=RKIND), dimension(:), pointer :: vertCoordX, vertCoordY, vertCoordZ
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
-
- contains
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! Perhaps not necessary, but implemented in case it helps
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- subroutine zoltanStart()
-
- integer(Zoltan_INT) :: error
- real(Zoltan_FLOAT) :: version
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! Body of subroutine
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- error = Zoltan_Initialize(version)
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- end subroutine
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- subroutine zoltanOrderLocHSFC_Cells(in_numcells,in_cellIDs,in_geomDim,in_cellX, &
- in_cellY, in_cellZ)
- implicit none
-
- integer :: in_numcells
- integer, dimension(:), pointer :: in_cellIDs
- integer :: in_geomDim
- real (kind=RKIND), dimension(:), pointer :: in_cellX, in_cellY, in_cellZ
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! local variables
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- type(Zoltan_Struct), pointer :: zz_obj
- integer(ZOLTAN_INT) :: ierr
-
- integer :: numGidEntries, i
- integer(ZOLTAN_INT), allocatable :: global_ids(:), permIndices(:),permGIDs(:)
- real(kind=RKIND), allocatable :: permXs(:),permYs(:),permZs(:)
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! Body of subroutine
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- numCells = in_numcells
- cellIDs => in_cellIDs
- geomDim = in_geomDim
- cellCoordX => in_cellX
- cellCoordY => in_cellY
- cellCoordZ => in_cellZ
-
- nullify(zz_obj)
- zz_obj => Zoltan_Create(MPI_COMM_SELF)
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! General Zoltan Parameters
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ierr = Zoltan_Set_Param(zz_obj, "ORDER_METHOD", "LOCAL_HSFC")
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! register query functions
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ierr = Zoltan_Set_Fn(zz_obj, ZOLTAN_NUM_OBJ_FN_TYPE,zqfNumCells)
- ierr = Zoltan_Set_Fn(zz_obj, ZOLTAN_OBJ_LIST_FN_TYPE,zqfGetCells)
- ierr = Zoltan_Set_Fn(zz_obj, ZOLTAN_NUM_GEOM_FN_TYPE,zqfGeomDim)
- ierr = Zoltan_Set_Fn(zz_obj, ZOLTAN_GEOM_FN_TYPE, zqfGetCellGeom)
-
- numGidEntries=1
-
- allocate(global_ids(numCells))
- allocate(permIndices(numCells))
- allocate(permGIDs(numCells))
- allocate(permXs(numCells))
- allocate(permYs(numCells))
- allocate(permZs(numCells))
-
- !! MMW: There might be a way to use cellIDs directly
- do i=1,numCells
- global_ids(i) = cellIDs(i)
- end do
-
- ierr = Zoltan_Order(zz_obj, numGidEntries, numCells, global_ids, permIndices);
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!
- !! This is necessary for now until we fix a small bug in Zoltan_Order
- !!!!!!!!!!!!!!!!!!!!!!!!!!
- do i=1,numCells
- permGIDs(i) = global_ids(permIndices(i)+1)
- permXs(i) = cellCoordX(permIndices(i)+1)
- permYs(i) = cellCoordY(permIndices(i)+1)
- permZs(i) = cellCoordZ(permIndices(i)+1)
- end do
-
- !!do i=1,numCells
- !! write(*,*) global_ids(i), permGIDs(i)
- !!end do
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!
- !! Actually change the ordering of the cells
- !!!!!!!!!!!!!!!!!!!!!!!!!!
- do i=1,numCells
- cellIDs(i) = permGIDs(i)
- cellCoordX(i) = permXs(i)
- cellCoordY(i) = permYs(i)
- cellCoordZ(i) = permZs(i)
- end do
- !!!!!!!!!!!!!!!!!!!!!!!!!!
-
- deallocate(global_ids)
- deallocate(permIndices)
- deallocate(permGIDs)
- deallocate(permXs)
- deallocate(permYs)
- deallocate(permZs)
-
- call Zoltan_Destroy(zz_obj)
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- end subroutine zoltanOrderLocHSFC_Cells
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! zoltan query function:
- !! Returns number of cells
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- integer function zqfNumCells(data, ierr)
-
- ! Local declarations
- integer(ZOLTAN_INT), intent(in) :: data(*)
- integer(ZOLTAN_INT), intent(out) :: ierr
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- zqfNumCells = numCells
- ierr = ZOLTAN_OK
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- end function zqfNumCells
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! zoltan query function:
- !! Returns lists of Cell IDs
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- subroutine zqfGetCells (data, num_gid_entries, num_lid_entries, global_ids, &
- local_ids, wgt_dim, obj_wgts, ierr)
- integer(ZOLTAN_INT), intent(in) :: data(*)
- integer(ZOLTAN_INT), intent(in) :: num_gid_entries, num_lid_entries
- integer(ZOLTAN_INT), intent(out) :: global_ids(*), local_ids(*)
- integer(ZOLTAN_INT), intent(in) :: wgt_dim
- real(ZOLTAN_FLOAT), intent(out) :: obj_wgts(*)
- integer(ZOLTAN_INT), intent(out) :: ierr
-
- ! local declarations
- integer :: i
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- do i= 1, numCells
- global_ids(i) = cellIDs(i)
- local_ids(i) = i
- end do
-
- ierr = ZOLTAN_OK
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- end subroutine zqfGetCells
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! Zoltan Query Function:
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- integer function zqfGeomDim(data, ierr)
- !use zoltan
- implicit none
- integer(ZOLTAN_INT), intent(in) :: data(*)
- integer(ZOLTAN_INT) :: ierr
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- zqfGeomDim = geomDim
- ierr = ZOLTAN_OK
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- end function zqfGeomDim
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! Zoltan Query Function:
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- subroutine zqfGetCellGeom(data, num_gid_entries, num_lid_entries, global_id, &
- local_id, geom_vec, ierr)
- !use zoltan
- implicit none
-
- integer(ZOLTAN_INT), intent(in) :: data(*)
- integer(ZOLTAN_INT), intent(in) :: num_gid_entries, num_lid_entries
- integer(ZOLTAN_INT), intent(in) :: global_id, local_id
- real(ZOLTAN_DOUBLE), intent(out) :: geom_vec(*)
- integer(ZOLTAN_INT), intent(out) :: ierr
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! Assuming geom_dim is 3
- geom_vec(1) = cellCoordX(local_id)
- geom_vec(2) = cellCoordY(local_id)
- geom_vec(3) = cellCoordZ(local_id)
-
- ierr = ZOLTAN_OK
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- end subroutine zqfGetCellGeom
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! The ordering functions should perhaps be refactored so that there
- !! are not separate functions for cells, edges, and vertices
- !! Not sure if this is worth it with the additional conditionals that would
- !! be required.
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- subroutine zoltanOrderLocHSFC_Edges(in_numedges,in_edgeIDs,in_geomDim,in_edgeX, &
- in_edgeY, in_edgeZ)
- implicit none
-
- integer :: in_numedges
- integer, dimension(:), pointer :: in_edgeIDs
- integer :: in_geomDim
- real (kind=RKIND), dimension(:), pointer :: in_edgeX, in_edgeY, in_edgeZ
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! local variables
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- type(Zoltan_Struct), pointer :: zz_obj
- integer(ZOLTAN_INT) :: ierr
-
- integer :: numGidEntries, i
- integer(ZOLTAN_INT), allocatable :: global_ids(:), permIndices(:),permGIDs(:)
- real(kind=RKIND), allocatable :: permXs(:),permYs(:),permZs(:)
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! Body of subroutine
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- numEdges = in_numedges
- edgeIDs => in_edgeIDs
- geomDim = in_geomDim
- edgeCoordX => in_edgeX
- edgeCoordY => in_edgeY
- edgeCoordZ => in_edgeZ
-
- nullify(zz_obj)
- zz_obj => Zoltan_Create(MPI_COMM_SELF)
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! General Zoltan Parameters
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ierr = Zoltan_Set_Param(zz_obj, "ORDER_METHOD", "LOCAL_HSFC")
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! register query functions
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ierr = Zoltan_Set_Fn(zz_obj, ZOLTAN_NUM_OBJ_FN_TYPE,zqfNumEdges)
- ierr = Zoltan_Set_Fn(zz_obj, ZOLTAN_OBJ_LIST_FN_TYPE,zqfGetEdges)
- ierr = Zoltan_Set_Fn(zz_obj, ZOLTAN_NUM_GEOM_FN_TYPE,zqfGeomDim)
- ierr = Zoltan_Set_Fn(zz_obj, ZOLTAN_GEOM_FN_TYPE, zqfGetEdgeGeom)
-
- numGidEntries=1
-
- allocate(global_ids(numEdges))
- allocate(permIndices(numEdges))
- allocate(permGIDs(numEdges))
- allocate(permXs(numEdges))
- allocate(permYs(numEdges))
- allocate(permZs(numEdges))
-
- !! MMW: There might be a way to use edgeIDs directly
- do i=1,numEdges
- global_ids(i) = edgeIDs(i)
- end do
-
- ierr = Zoltan_Order(zz_obj, numGidEntries, numEdges, global_ids, permIndices);
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!
- !! This is necessary for now until we fix a small bug in Zoltan_Order
- !!!!!!!!!!!!!!!!!!!!!!!!!!
- do i=1,numEdges
- permGIDs(i) = global_ids(permIndices(i)+1)
- permXs(i) = edgeCoordX(permIndices(i)+1)
- permYs(i) = edgeCoordY(permIndices(i)+1)
- permZs(i) = edgeCoordZ(permIndices(i)+1)
- end do
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!
- !! Actually change the ordering of the edges
- !!!!!!!!!!!!!!!!!!!!!!!!!!
- do i=1,numEdges
- edgeIDs(i) = permGIDs(i)
- edgeCoordX(i) = permXs(i)
- edgeCoordY(i) = permYs(i)
- edgeCoordZ(i) = permZs(i)
- end do
- !!!!!!!!!!!!!!!!!!!!!!!!!!
-
- deallocate(global_ids)
- deallocate(permIndices)
- deallocate(permGIDs)
- deallocate(permXs)
- deallocate(permYs)
- deallocate(permZs)
-
- call Zoltan_Destroy(zz_obj)
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- end subroutine zoltanOrderLocHSFC_Edges
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! zoltan query function:
- !! Returns number of edges
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- integer function zqfNumEdges(data, ierr)
- ! Local declarations
- integer(ZOLTAN_INT), intent(in) :: data(*)
- integer(ZOLTAN_INT), intent(out) :: ierr
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- zqfNumEdges = numEdges
- ierr = ZOLTAN_OK
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- end function zqfNumEdges
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! zoltan query function:
- !! Returns lists of Edge IDs
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- subroutine zqfGetEdges (data, num_gid_entries, num_lid_entries, global_ids, &
- local_ids, wgt_dim, obj_wgts, ierr)
- integer(ZOLTAN_INT), intent(in) :: data(*)
- integer(ZOLTAN_INT), intent(in) :: num_gid_entries, num_lid_entries
- integer(ZOLTAN_INT), intent(out) :: global_ids(*), local_ids(*)
- integer(ZOLTAN_INT), intent(in) :: wgt_dim
- real(ZOLTAN_FLOAT), intent(out) :: obj_wgts(*)
- integer(ZOLTAN_INT), intent(out) :: ierr
-
- ! local declarations
- integer :: i
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- do i= 1, numEdges
- global_ids(i) = edgeIDs(i)
- local_ids(i) = i
- end do
-
- ierr = ZOLTAN_OK
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- end subroutine zqfGetEdges
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! Zoltan Query Function:
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- subroutine zqfGetEdgeGeom(data, num_gid_entries, num_lid_entries, global_id, &
- local_id, geom_vec, ierr)
- !use zoltan
- implicit none
-
- integer(ZOLTAN_INT), intent(in) :: data(*)
- integer(ZOLTAN_INT), intent(in) :: num_gid_entries, num_lid_entries
- integer(ZOLTAN_INT), intent(in) :: global_id, local_id
- real(ZOLTAN_DOUBLE), intent(out) :: geom_vec(*)
- integer(ZOLTAN_INT), intent(out) :: ierr
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! Assuming geom_dim is 3
- geom_vec(1) = edgeCoordX(local_id)
- geom_vec(2) = edgeCoordY(local_id)
- geom_vec(3) = edgeCoordZ(local_id)
-
- ierr = ZOLTAN_OK
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- end subroutine zqfGetEdgeGeom
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- subroutine zoltanOrderLocHSFC_Verts(in_numverts,in_vertIDs,in_geomDim,in_vertX, &
- in_vertY, in_vertZ)
- implicit none
-
- integer :: in_numverts
- integer, dimension(:), pointer :: in_vertIDs
- integer :: in_geomDim
- real (kind=RKIND), dimension(:), pointer :: in_vertX, in_vertY, in_vertZ
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! local variables
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- type(Zoltan_Struct), pointer :: zz_obj
- integer(ZOLTAN_INT) :: ierr
-
- integer :: numGidEntries, i
- integer(ZOLTAN_INT), allocatable :: global_ids(:), permIndices(:),permGIDs(:)
- real(kind=RKIND), allocatable :: permXs(:),permYs(:),permZs(:)
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! Body of subroutine
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- numVerts = in_numverts
- vertIDs => in_vertIDs
- geomDim = in_geomDim
- vertCoordX => in_vertX
- vertCoordY => in_vertY
- vertCoordZ => in_vertZ
-
- nullify(zz_obj)
- zz_obj => Zoltan_Create(MPI_COMM_SELF)
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! General Zoltan Parameters
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ierr = Zoltan_Set_Param(zz_obj, "ORDER_METHOD", "LOCAL_HSFC")
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! register query functions
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ierr = Zoltan_Set_Fn(zz_obj, ZOLTAN_NUM_OBJ_FN_TYPE,zqfNumVerts)
- ierr = Zoltan_Set_Fn(zz_obj, ZOLTAN_OBJ_LIST_FN_TYPE,zqfGetVerts)
- ierr = Zoltan_Set_Fn(zz_obj, ZOLTAN_NUM_GEOM_FN_TYPE,zqfGeomDim)
- ierr = Zoltan_Set_Fn(zz_obj, ZOLTAN_GEOM_FN_TYPE, zqfGetVertGeom)
-
- numGidEntries=1
-
- allocate(global_ids(numVerts))
- allocate(permIndices(numVerts))
- allocate(permGIDs(numVerts))
- allocate(permXs(numVerts))
- allocate(permYs(numVerts))
- allocate(permZs(numVerts))
-
- !! MMW: There might be a way to use vertIDs directly
- do i=1,numVerts
- global_ids(i) = vertIDs(i)
- end do
-
- ierr = Zoltan_Order(zz_obj, numGidEntries, numVerts, global_ids, permIndices);
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!
- !! This is necessary for now until we fix a small bug in Zoltan_Order
- !!!!!!!!!!!!!!!!!!!!!!!!!!
- do i=1,numVerts
- permGIDs(i) = global_ids(permIndices(i)+1)
- permXs(i) = vertCoordX(permIndices(i)+1)
- permYs(i) = vertCoordY(permIndices(i)+1)
- permZs(i) = vertCoordZ(permIndices(i)+1)
- end do
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!
- !! Actually change the ordering of the verts
- !!!!!!!!!!!!!!!!!!!!!!!!!!
- do i=1,numVerts
- vertIDs(i) = permGIDs(i)
- vertCoordX(i) = permXs(i)
- vertCoordY(i) = permYs(i)
- vertCoordZ(i) = permZs(i)
- end do
- !!!!!!!!!!!!!!!!!!!!!!!!!!
-
- deallocate(global_ids)
- deallocate(permIndices)
- deallocate(permGIDs)
- deallocate(permXs)
- deallocate(permYs)
- deallocate(permZs)
-
- call Zoltan_Destroy(zz_obj)
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- end subroutine zoltanOrderLocHSFC_Verts
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! zoltan query function:
- !! Returns number of verts
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- integer function zqfNumVerts(data, ierr)
-
- ! Local declarations
- integer(ZOLTAN_INT), intent(in) :: data(*)
- integer(ZOLTAN_INT), intent(out) :: ierr
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- zqfNumVerts = numVerts
- ierr = ZOLTAN_OK
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- end function zqfNumVerts
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! zoltan query function:
- !! Returns lists of Vert IDs
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- subroutine zqfGetVerts (data, num_gid_entries, num_lid_entries, global_ids, &
- local_ids, wgt_dim, obj_wgts, ierr)
-
- integer(ZOLTAN_INT), intent(in) :: data(*)
- integer(ZOLTAN_INT), intent(in) :: num_gid_entries, num_lid_entries
- integer(ZOLTAN_INT), intent(out) :: global_ids(*), local_ids(*)
- integer(ZOLTAN_INT), intent(in) :: wgt_dim
- real(ZOLTAN_FLOAT), intent(out) :: obj_wgts(*)
- integer(ZOLTAN_INT), intent(out) :: ierr
-
- ! local declarations
- integer :: i
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- do i= 1, numVerts
- global_ids(i) = vertIDs(i)
- local_ids(i) = i
- end do
-
- ierr = ZOLTAN_OK
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- end subroutine zqfGetVerts
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! Zoltan Query Function:
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- subroutine zqfGetVertGeom(data, num_gid_entries, num_lid_entries, global_id, &
- local_id, geom_vec, ierr)
- !use zoltan
- implicit none
-
- integer(ZOLTAN_INT), intent(in) :: data(*)
- integer(ZOLTAN_INT), intent(in) :: num_gid_entries, num_lid_entries
- integer(ZOLTAN_INT), intent(in) :: global_id, local_id
- real(ZOLTAN_DOUBLE), intent(out) :: geom_vec(*)
- integer(ZOLTAN_INT), intent(out) :: ierr
-
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !! Assuming geom_dim is 3
- geom_vec(1) = vertCoordX(local_id)
- geom_vec(2) = vertCoordY(local_id)
- geom_vec(3) = vertCoordZ(local_id)
-
- ierr = ZOLTAN_OK
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- end subroutine zqfGetVertGeom
- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
-
-
-
-end module zoltan_interface
Copied: branches/source_renaming/src/framework/mpas_block_decomp.F (from rev 1089, branches/source_renaming/src/framework/module_block_decomp.F)
===================================================================
--- branches/source_renaming/src/framework/mpas_block_decomp.F         (rev 0)
+++ branches/source_renaming/src/framework/mpas_block_decomp.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -0,0 +1,306 @@
+module mpas_block_decomp
+
+ use mpas_dmpar
+ use mpas_hash
+
+ type graph
+ integer :: nVerticesTotal
+ integer :: nVertices, maxDegree
+ integer :: ghostStart
+ integer, dimension(:), pointer :: vertexID
+ integer, dimension(:), pointer :: nAdjacent
+ integer, dimension(:,:), pointer :: adjacencyList
+ end type graph
+
+
+ contains
+
+
+ subroutine mpas_block_decomp_cells_for_proc(dminfo, partial_global_graph_info, local_cell_list)
+
+ use mpas_configure
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ type (graph), intent(in) :: partial_global_graph_info
+ integer, dimension(:), pointer :: local_cell_list
+
+ integer, dimension(:), pointer :: global_cell_list
+ integer, dimension(:), pointer :: global_start
+
+ integer :: i, j, owner, iunit, istatus
+ integer, dimension(:), pointer :: local_nvertices
+ character (len=256) :: filename
+
+ if (dminfo % nprocs > 1) then
+
+ allocate(local_nvertices(dminfo % nprocs))
+ allocate(global_start(dminfo % nprocs))
+ allocate(global_cell_list(partial_global_graph_info % nVerticesTotal))
+
+ if (dminfo % my_proc_id == IO_NODE) then
+
+ iunit = 50 + dminfo % my_proc_id
+ if (dminfo % nprocs < 10) then
+ write(filename,'(a,i1)') trim(config_decomp_file_prefix), dminfo % nprocs
+ else if (dminfo % nprocs < 100) then
+ write(filename,'(a,i2)') trim(config_decomp_file_prefix), dminfo % nprocs
+ else if (dminfo % nprocs < 1000) then
+ write(filename,'(a,i3)') trim(config_decomp_file_prefix), dminfo % nprocs
+ else if (dminfo % nprocs < 10000) then
+ write(filename,'(a,i4)') trim(config_decomp_file_prefix), dminfo % nprocs
+ else if (dminfo % nprocs < 100000) then
+ write(filename,'(a,i5)') trim(config_decomp_file_prefix), dminfo % nprocs
+ end if
+
+ open(unit=iunit, file=trim(filename), form='formatted', status='old', iostat=istatus)
+
+ if (istatus /= 0) then
+ write(0,*) 'Could not open block decomposition file for ',dminfo % nprocs,' tasks.'
+ write(0,*) 'Filename: ',trim(filename)
+ call mpas_dmpar_abort(dminfo)
+ end if
+
+ local_nvertices(:) = 0
+ do i=1,partial_global_graph_info % nVerticesTotal
+ read(unit=iunit, fmt=*) owner
+ local_nvertices(owner+1) = local_nvertices(owner+1) + 1
+ end do
+
+! allocate(global_cell_list(partial_global_graph_info % nVerticesTotal))
+
+ global_start(1) = 1
+ do i=2,dminfo % nprocs
+ global_start(i) = global_start(i-1) + local_nvertices(i-1)
+ end do
+
+ rewind(unit=iunit)
+
+ do i=1,partial_global_graph_info % nVerticesTotal
+ read(unit=iunit, fmt=*) owner
+ global_cell_list(global_start(owner+1)) = i
+ global_start(owner+1) = global_start(owner+1) + 1
+ end do
+
+ global_start(1) = 0
+ do i=2,dminfo % nprocs
+ global_start(i) = global_start(i-1) + local_nvertices(i-1)
+ end do
+
+ close(unit=iunit)
+
+ call mpas_dmpar_bcast_ints(dminfo, dminfo % nprocs, local_nvertices)
+ allocate(local_cell_list(local_nvertices(dminfo % my_proc_id + 1)))
+
+ call mpas_dmpar_scatter_ints(dminfo, dminfo % nprocs, local_nvertices(dminfo % my_proc_id + 1), &
+ global_start, local_nvertices, global_cell_list, local_cell_list)
+
+ else
+
+ call mpas_dmpar_bcast_ints(dminfo, dminfo % nprocs, local_nvertices)
+ allocate(local_cell_list(local_nvertices(dminfo % my_proc_id + 1)))
+
+ call mpas_dmpar_scatter_ints(dminfo, dminfo % nprocs, local_nvertices(dminfo % my_proc_id + 1), &
+ global_start, local_nvertices, global_cell_list, local_cell_list)
+
+ end if
+
+ deallocate(local_nvertices)
+ deallocate(global_start)
+ deallocate(global_cell_list)
+ else
+ allocate(local_cell_list(partial_global_graph_info % nVerticesTotal))
+ do i=1,size(local_cell_list)
+ local_cell_list(i) = i
+ end do
+ endif
+
+ end subroutine mpas_block_decomp_cells_for_proc
+
+
+ subroutine mpas_block_decomp_partitioned_edge_list(nCells, cellIDList, maxCells, nEdges, cellsOnEdge, edgeIDList, ghostEdgeStart)
+
+ implicit none
+
+ integer, intent(in) :: nCells, maxCells, nEdges
+ integer, dimension(nCells), intent(in) :: cellIDList
+ integer, dimension(maxCells, nEdges), intent(in) :: cellsOnEdge
+ integer, dimension(nEdges), intent(inout) :: edgeIDList
+ integer, intent(inout) :: ghostEdgeStart
+
+ integer :: i, j, lastEdge
+ integer, dimension(nEdges) :: edgeIDListLocal
+ type (hashtable) :: h
+
+ call mpas_hash_init(h)
+
+ do i=1,nCells
+ ! OPTIMIZATION: Actually, if we can assume that all cellIDs are unique, the if-test is unnecessary
+ if (.not. mpas_hash_search(h, cellIDList(i))) call mpas_hash_insert(h, cellIDList(i))
+ end do
+
+ lastEdge = 0
+ ghostEdgeStart = nEdges+1
+
+ edgeIDListLocal(:) = edgeIDList(:)
+
+ do i=1,nEdges
+ do j=1,maxCells
+ if (cellsOnEdge(j,i) /= 0) exit
+ end do
+ if (j > maxCells) &
+ write(0,*) 'Error in block_decomp_partitioned_edge_list: ',&
+ 'edge/vertex is not adjacent to any valid cells'
+ if (mpas_hash_search(h, cellsOnEdge(j,i))) then
+ lastEdge = lastEdge + 1
+ edgeIDList(lastEdge) = edgeIDListLocal(i)
+ else
+ ghostEdgeStart = ghostEdgeStart - 1
+ edgeIDList(ghostEdgeStart) = edgeIDListLocal(i)
+ end if
+ if (ghostEdgeStart <= lastEdge) then
+ write(0,*) 'block_decomp_partitioned_edge_list: ',&
+ 'Somehow we have more edges than we thought we should.'
+ end if
+ end do
+
+ if (ghostEdgeStart /= lastEdge + 1) then
+ write(0,*) 'block_decomp_partitioned_edge_list:',&
+ ' Somehow we didn''t have enough edges to fill edgeIDList.'
+ end if
+
+ call mpas_hash_destroy(h)
+
+ end subroutine mpas_block_decomp_partitioned_edge_list
+
+
+ subroutine mpas_block_decomp_all_edges_in_block(maxEdges, nCells, nEdgesOnCell, edgesOnCell, nEdges, edgeList)
+
+ implicit none
+
+ integer, intent(in) :: maxEdges, nCells
+ integer, dimension(nCells), intent(in) :: nEdgesOnCell
+ integer, dimension(maxEdges, nCells), intent(in) :: edgesOnCell
+ integer, intent(out) :: nEdges
+ integer, dimension(:), pointer :: edgeList
+
+ integer :: i, j, k
+ type (hashtable) :: h
+
+ call mpas_hash_init(h)
+
+ do i=1,nCells
+ do j=1,nEdgesOnCell(i)
+ if (.not. mpas_hash_search(h, edgesOnCell(j,i))) call mpas_hash_insert(h, edgesOnCell(j,i))
+ end do
+ end do
+
+ nEdges = mpas_hash_size(h)
+ allocate(edgeList(nEdges))
+
+ call mpas_hash_destroy(h)
+
+ call mpas_hash_init(h)
+
+ k = 0
+ do i=1,nCells
+ do j=1,nEdgesOnCell(i)
+ if (.not. mpas_hash_search(h, edgesOnCell(j,i))) then
+ k = k + 1
+ if (k > nEdges) then
+ write(0,*) 'block_decomp_all_edges_in_block: ',&
+ 'Trying to add more edges than expected.'
+ return
+ end if
+ edgeList(k) = edgesOnCell(j,i)
+ call mpas_hash_insert(h, edgesOnCell(j,i))
+ end if
+ end do
+ end do
+
+ call mpas_hash_destroy(h)
+
+ if (k < nEdges) then
+ write(0,*) 'block_decomp_all_edges_in_block: ',&
+ 'Listed fewer edges than expected.'
+ end if
+
+ end subroutine mpas_block_decomp_all_edges_in_block
+
+
+ subroutine mpas_block_decomp_add_halo(dminfo, local_graph_info, local_graph_with_halo)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ type (graph), intent(in) :: local_graph_info
+ type (graph), intent(out) :: local_graph_with_halo
+
+ integer :: i, j, k
+ type (hashtable) :: h
+
+
+ call mpas_hash_init(h)
+
+ do i=1,local_graph_info % nVertices
+ call mpas_hash_insert(h, local_graph_info % vertexID(i))
+ end do
+
+ do i=1,local_graph_info % nVertices
+ do j=1,local_graph_info % nAdjacent(i)
+ if (local_graph_info % adjacencyList(j,i) /= 0) then
+ if (.not. mpas_hash_search(h, local_graph_info % adjacencyList(j,i))) then
+ call mpas_hash_insert(h, local_graph_info % adjacencyList(j,i))
+ end if
+ end if
+ end do
+ end do
+
+
+ local_graph_with_halo % nVertices = local_graph_info % nVertices
+ local_graph_with_halo % maxDegree = local_graph_info % maxDegree
+ local_graph_with_halo % nVerticesTotal = mpas_hash_size(h)
+ local_graph_with_halo % ghostStart = local_graph_with_halo % nVertices + 1
+ allocate(local_graph_with_halo % vertexID(local_graph_with_halo % nVerticesTotal))
+ allocate(local_graph_with_halo % nAdjacent(local_graph_with_halo % nVerticesTotal))
+ allocate(local_graph_with_halo % adjacencyList(local_graph_with_halo % maxDegree, local_graph_with_halo % nVerticesTotal))
+
+ call mpas_hash_destroy(h)
+
+ call mpas_hash_init(h)
+
+ do i=1,local_graph_info % nVertices
+ if (mpas_hash_search(h, local_graph_info % vertexID(i))) &
+ write(0,*) 'block_decomp_add_halo: ', &
+ 'There appear to be duplicates in vertexID list.'
+ call mpas_hash_insert(h, local_graph_info % vertexID(i))
+ local_graph_with_halo % vertexID(i) = local_graph_info % vertexID(i)
+ local_graph_with_halo % nAdjacent(i) = local_graph_info % nAdjacent(i)
+ local_graph_with_halo % adjacencyList(:,i) = local_graph_info % adjacencyList(:,i)
+ end do
+
+ k = local_graph_with_halo % ghostStart
+ if (mpas_hash_size(h) /= k-1) &
+ write(0,*) 'block_decomp_add_halo: ',&
+ 'Somehow we don''t have the right number of non-ghost cells.'
+ do i=1,local_graph_info % nVertices
+ do j=1,local_graph_info % nAdjacent(i)
+ if (local_graph_info % adjacencyList(j,i) /= 0) then
+ if (.not. mpas_hash_search(h, local_graph_info % adjacencyList(j,i))) then
+ call mpas_hash_insert(h, local_graph_info % adjacencyList(j,i))
+ local_graph_with_halo % vertexID(k) = local_graph_info % adjacencyList(j,i)
+ k = k + 1
+ end if
+ end if
+ end do
+ end do
+ if (local_graph_with_halo % nVerticesTotal /= k-1) &
+ write(0,*) 'block_decomp_add_halo: ',&
+ 'Somehow we don''t have the right number of total cells.'
+
+ call mpas_hash_destroy(h)
+
+ end subroutine mpas_block_decomp_add_halo
+
+end module mpas_block_decomp
Copied: branches/source_renaming/src/framework/mpas_configure.F (from rev 1089, branches/source_renaming/src/framework/module_configure.F)
===================================================================
--- branches/source_renaming/src/framework/mpas_configure.F         (rev 0)
+++ branches/source_renaming/src/framework/mpas_configure.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -0,0 +1,36 @@
+module mpas_configure
+
+ use mpas_dmpar
+
+#include "config_defs.inc"
+
+ contains
+
+
+ subroutine mpas_read_namelist(dminfo)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+
+ integer :: funit
+
+#include "config_namelist_defs.inc"
+
+ funit = 21
+
+ ! Set default values for namelist options
+#include "config_set_defaults.inc"
+
+ if (dminfo % my_proc_id == IO_NODE) then
+ open(funit,file='namelist.input',status='old',form='formatted')
+
+#include "config_namelist_reads.inc"
+ close(funit)
+ end if
+
+#include "config_bcast_namelist.inc"
+
+ end subroutine mpas_read_namelist
+
+end module mpas_configure
Copied: branches/source_renaming/src/framework/mpas_constants.F (from rev 1089, branches/source_renaming/src/framework/module_constants.F)
===================================================================
--- branches/source_renaming/src/framework/mpas_constants.F         (rev 0)
+++ branches/source_renaming/src/framework/mpas_constants.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -0,0 +1,20 @@
+module mpas_constants
+
+ real (kind=RKIND), parameter :: pii = 3.141592653589793
+ real (kind=RKIND), parameter :: a = 6371229.0
+ real (kind=RKIND), parameter :: omega = 7.29212e-5
+ real (kind=RKIND), parameter :: gravity = 9.80616
+ real (kind=RKIND), parameter :: rgas = 287.
+ real (kind=RKIND), parameter :: cp = 1003.
+ real (kind=RKIND), parameter :: cv = 716. ! cp - rgas
+ real (kind=RKIND), parameter :: cvpm = -.71385842 ! -cv/cp
+ real (kind=RKIND), parameter :: prandtl = 1.0
+
+
+ contains
+
+ subroutine dummy()
+
+ end subroutine dummy
+
+end module mpas_constants
Copied: branches/source_renaming/src/framework/mpas_dmpar.F (from rev 1089, branches/source_renaming/src/framework/module_dmpar.F)
===================================================================
--- branches/source_renaming/src/framework/mpas_dmpar.F         (rev 0)
+++ branches/source_renaming/src/framework/mpas_dmpar.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -0,0 +1,1928 @@
+module mpas_dmpar
+
+ use mpas_sort
+
+#ifdef _MPI
+include 'mpif.h'
+ integer, parameter :: MPI_INTEGERKIND = MPI_INTEGER
+
+#if (RKIND == 8)
+ integer, parameter :: MPI_REALKIND = MPI_DOUBLE_PRECISION
+#else
+ integer, parameter :: MPI_REALKIND = MPI_REAL
+#endif
+#endif
+
+ integer, parameter :: IO_NODE = 0
+ integer, parameter :: BUFSIZE = 6000
+
+
+ type dm_info
+ integer :: nprocs, my_proc_id, comm, info
+ end type dm_info
+
+
+ type exchange_list
+ integer :: procID
+ integer :: nlist
+ integer, dimension(:), pointer :: list
+ type (exchange_list), pointer :: next
+ real (kind=RKIND), dimension(:), pointer :: rbuffer
+ integer, dimension(:), pointer :: ibuffer
+ integer :: reqID
+ end type exchange_list
+
+
+ interface mpas_dmpar_alltoall_field
+ module procedure mpas_dmpar_alltoall_field1d_integer
+ module procedure mpas_dmpar_alltoall_field2d_integer
+ module procedure mpas_dmpar_alltoall_field1d_real
+ module procedure mpas_dmpar_alltoall_field2d_real
+ module procedure mpas_dmpar_alltoall_field3d_real
+ end interface
+
+
+ contains
+
+
+ subroutine mpas_dmpar_init(dminfo)
+
+ implicit none
+
+ type (dm_info), intent(inout) :: dminfo
+
+#ifdef _MPI
+ integer :: mpi_rank, mpi_size
+ integer :: mpi_ierr
+
+ ! Find out our rank and the total number of processors
+ call MPI_Init(mpi_ierr)
+ call MPI_Comm_rank(MPI_COMM_WORLD, mpi_rank, mpi_ierr)
+ call MPI_Comm_size(MPI_COMM_WORLD, mpi_size, mpi_ierr)
+
+ dminfo % comm = MPI_COMM_WORLD
+
+ dminfo % nprocs = mpi_size
+ dminfo % my_proc_id = mpi_rank
+
+ write(0,'(a,i5,a,i5,a)') 'task ', mpi_rank, ' of ', mpi_size, &
+ ' is running'
+
+ call open_streams(dminfo % my_proc_id)
+
+ dminfo % info = MPI_INFO_NULL
+#else
+ dminfo % comm = 0
+ dminfo % my_proc_id = IO_NODE
+ dminfo % nprocs = 1
+#endif
+
+ end subroutine mpas_dmpar_init
+
+
+ subroutine mpas_dmpar_finalize(dminfo)
+
+ implicit none
+
+ type (dm_info), intent(inout) :: dminfo
+
+#ifdef _MPI
+ integer :: mpi_ierr
+
+ call MPI_Finalize(mpi_ierr)
+#endif
+
+ end subroutine mpas_dmpar_finalize
+
+
+ subroutine mpas_dmpar_abort(dminfo)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+
+#ifdef _MPI
+ integer :: mpi_ierr, mpi_errcode
+
+ call MPI_Abort(dminfo % comm, mpi_errcode, mpi_ierr)
+#endif
+
+ stop
+
+ end subroutine mpas_dmpar_abort
+
+
+ subroutine mpas_dmpar_global_abort(mesg)
+
+ implicit none
+
+ character (len=*), intent(in) :: mesg
+
+#ifdef _MPI
+ integer :: mpi_ierr, mpi_errcode
+
+ write(0,*) trim(mesg)
+ call MPI_Abort(MPI_COMM_WORLD, mpi_errcode, mpi_ierr)
+#endif
+
+ write(0,*) trim(mesg)
+ stop
+
+ end subroutine mpas_dmpar_global_abort
+
+
+ subroutine mpas_dmpar_bcast_int(dminfo, i)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(inout) :: i
+
+#ifdef _MPI
+ integer :: mpi_ierr
+
+ call MPI_Bcast(i, 1, MPI_INTEGERKIND, IO_NODE, dminfo % comm, mpi_ierr)
+#endif
+
+ end subroutine mpas_dmpar_bcast_int
+
+
+ subroutine mpas_dmpar_bcast_ints(dminfo, n, iarray)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: n
+ integer, dimension(n), intent(inout) :: iarray
+
+#ifdef _MPI
+ integer :: mpi_ierr
+
+ call MPI_Bcast(iarray, n, MPI_INTEGERKIND, IO_NODE, dminfo % comm, mpi_ierr)
+#endif
+
+ end subroutine mpas_dmpar_bcast_ints
+
+
+ subroutine mpas_dmpar_bcast_real(dminfo, r)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ real (kind=RKIND), intent(inout) :: r
+
+#ifdef _MPI
+ integer :: mpi_ierr
+
+ call MPI_Bcast(r, 1, MPI_REALKIND, IO_NODE, dminfo % comm, mpi_ierr)
+#endif
+
+ end subroutine mpas_dmpar_bcast_real
+
+
+ subroutine mpas_dmpar_bcast_reals(dminfo, n, rarray)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: n
+ real (kind=RKIND), dimension(n), intent(inout) :: rarray
+
+#ifdef _MPI
+ integer :: mpi_ierr
+
+ call MPI_Bcast(rarray, n, MPI_REALKIND, IO_NODE, dminfo % comm, mpi_ierr)
+#endif
+
+ end subroutine mpas_dmpar_bcast_reals
+
+
+ subroutine mpas_dmpar_bcast_logical(dminfo, l)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ logical, intent(inout) :: l
+
+#ifdef _MPI
+ integer :: mpi_ierr
+ integer :: itemp
+
+ if (dminfo % my_proc_id == IO_NODE) then
+ if (l) then
+ itemp = 1
+ else
+ itemp = 0
+ end if
+ end if
+
+ call MPI_Bcast(itemp, 1, MPI_INTEGERKIND, IO_NODE, dminfo % comm, mpi_ierr)
+
+ if (itemp == 1) then
+ l = .true.
+ else
+ l = .false.
+ end if
+#endif
+
+ end subroutine mpas_dmpar_bcast_logical
+
+
+ subroutine mpas_dmpar_bcast_char(dminfo, c)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ character (len=*), intent(inout) :: c
+
+#ifdef _MPI
+ integer :: mpi_ierr
+
+ call MPI_Bcast(c, len(c), MPI_CHARACTER, IO_NODE, dminfo % comm, mpi_ierr)
+#endif
+
+ end subroutine mpas_dmpar_bcast_char
+
+
+ subroutine mpas_dmpar_sum_int(dminfo, i, isum)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: i
+ integer, intent(out) :: isum
+
+ integer :: mpi_ierr
+
+#ifdef _MPI
+ call MPI_Allreduce(i, isum, 1, MPI_INTEGERKIND, MPI_SUM, dminfo % comm, mpi_ierr)
+#else
+ isum = i
+#endif
+
+ end subroutine mpas_dmpar_sum_int
+
+
+ subroutine mpas_dmpar_sum_real(dminfo, r, rsum)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ real(kind=RKIND), intent(in) :: r
+ real(kind=RKIND), intent(out) :: rsum
+
+ integer :: mpi_ierr
+
+#ifdef _MPI
+ call MPI_Allreduce(r, rsum, 1, MPI_REALKIND, MPI_SUM, dminfo % comm, mpi_ierr)
+#else
+ rsum = r
+#endif
+
+ end subroutine mpas_dmpar_sum_real
+
+
+ subroutine mpas_dmpar_min_int(dminfo, i, imin)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: i
+ integer, intent(out) :: imin
+
+ integer :: mpi_ierr
+
+#ifdef _MPI
+ call MPI_Allreduce(i, imin, 1, MPI_INTEGERKIND, MPI_MIN, dminfo % comm, mpi_ierr)
+#else
+ imin = i
+#endif
+
+ end subroutine mpas_dmpar_min_int
+
+
+ subroutine mpas_dmpar_min_real(dminfo, r, rmin)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ real(kind=RKIND), intent(in) :: r
+ real(kind=RKIND), intent(out) :: rmin
+
+ integer :: mpi_ierr
+
+#ifdef _MPI
+ call MPI_Allreduce(r, rmin, 1, MPI_REALKIND, MPI_MIN, dminfo % comm, mpi_ierr)
+#else
+ rmin = r
+#endif
+
+ end subroutine mpas_dmpar_min_real
+
+
+ subroutine mpas_dmpar_max_int(dminfo, i, imax)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: i
+ integer, intent(out) :: imax
+
+ integer :: mpi_ierr
+
+#ifdef _MPI
+ call MPI_Allreduce(i, imax, 1, MPI_INTEGERKIND, MPI_MAX, dminfo % comm, mpi_ierr)
+#else
+ imax = i
+#endif
+
+ end subroutine mpas_dmpar_max_int
+
+
+ subroutine mpas_dmpar_max_real(dminfo, r, rmax)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ real(kind=RKIND), intent(in) :: r
+ real(kind=RKIND), intent(out) :: rmax
+
+ integer :: mpi_ierr
+
+#ifdef _MPI
+ call MPI_Allreduce(r, rmax, 1, MPI_REALKIND, MPI_MAX, dminfo % comm, mpi_ierr)
+#else
+ rmax = r
+#endif
+
+ end subroutine mpas_dmpar_max_real
+
+
+ subroutine mpas_dmpar_sum_int_array(dminfo, nElements, inArray, outArray)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: nElements
+ integer, dimension(nElements), intent(in) :: inArray
+ integer, dimension(nElements), intent(out) :: outArray
+
+ integer :: mpi_ierr
+
+#ifdef _MPI
+ call MPI_Allreduce(inArray, outArray, nElements, MPI_INTEGERKIND, MPI_SUM, dminfo % comm, mpi_ierr)
+#else
+ outArray = inArray
+#endif
+
+ end subroutine mpas_dmpar_sum_int_array
+
+
+ subroutine mpas_dmpar_min_int_array(dminfo, nElements, inArray, outArray)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: nElements
+ integer, dimension(nElements), intent(in) :: inArray
+ integer, dimension(nElements), intent(out) :: outArray
+
+ integer :: mpi_ierr
+
+#ifdef _MPI
+ call MPI_Allreduce(inArray, outArray, nElements, MPI_INTEGERKIND, MPI_MIN, dminfo % comm, mpi_ierr)
+#else
+ outArray = inArray
+#endif
+
+ end subroutine mpas_dmpar_min_int_array
+
+
+ subroutine mpas_dmpar_max_int_array(dminfo, nElements, inArray, outArray)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: nElements
+ integer, dimension(nElements), intent(in) :: inArray
+ integer, dimension(nElements), intent(out) :: outArray
+
+ integer :: mpi_ierr
+
+#ifdef _MPI
+ call MPI_Allreduce(inArray, outArray, nElements, MPI_INTEGERKIND, MPI_MAX, dminfo % comm, mpi_ierr)
+#else
+ outArray = inArray
+#endif
+
+ end subroutine mpas_dmpar_max_int_array
+
+
+ subroutine mpas_dmpar_sum_real_array(dminfo, nElements, inArray, outArray)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: nElements
+ real(kind=RKIND), dimension(nElements), intent(in) :: inArray
+ real(kind=RKIND), dimension(nElements), intent(out) :: outArray
+
+ integer :: mpi_ierr
+
+#ifdef _MPI
+ call MPI_Allreduce(inArray, outArray, nElements, MPI_REALKIND, MPI_SUM, dminfo % comm, mpi_ierr)
+#else
+ outArray = inArray
+#endif
+
+ end subroutine mpas_dmpar_sum_real_array
+
+
+ subroutine mpas_dmpar_min_real_array(dminfo, nElements, inArray, outArray)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: nElements
+ real(kind=RKIND), dimension(nElements), intent(in) :: inArray
+ real(kind=RKIND), dimension(nElements), intent(out) :: outArray
+
+ integer :: mpi_ierr
+
+#ifdef _MPI
+ call MPI_Allreduce(inArray, outArray, nElements, MPI_REALKIND, MPI_MIN, dminfo % comm, mpi_ierr)
+#else
+ outArray = inArray
+#endif
+
+ end subroutine mpas_dmpar_min_real_array
+
+
+ subroutine mpas_dmpar_max_real_array(dminfo, nElements, inArray, outArray)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: nElements
+ real(kind=RKIND), dimension(nElements), intent(in) :: inArray
+ real(kind=RKIND), dimension(nElements), intent(out) :: outArray
+
+ integer :: mpi_ierr
+
+#ifdef _MPI
+ call MPI_Allreduce(inArray, outArray, nElements, MPI_REALKIND, MPI_MAX, dminfo % comm, mpi_ierr)
+#else
+ outArray = inArray
+#endif
+
+ end subroutine mpas_dmpar_max_real_array
+
+
+ subroutine mpas_dmpar_scatter_ints(dminfo, nprocs, noutlist, displs, counts, inlist, outlist)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: nprocs, noutlist
+ integer, dimension(nprocs), intent(in) :: displs, counts
+ integer, dimension(:), pointer :: inlist
+ integer, dimension(noutlist), intent(inout) :: outlist
+
+#ifdef _MPI
+ integer :: mpi_ierr
+
+ call MPI_Scatterv(inlist, counts, displs, MPI_INTEGERKIND, outlist, noutlist, MPI_INTEGERKIND, IO_NODE, dminfo % comm, mpi_ierr)
+#endif
+
+ end subroutine mpas_dmpar_scatter_ints
+
+
+ subroutine mpas_dmpar_get_index_range(dminfo, &
+ global_start, global_end, &
+ local_start, local_end)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: global_start, global_end
+ integer, intent(out) :: local_start, local_end
+
+ local_start = nint(real(dminfo % my_proc_id) * real(global_end - global_start + 1) / real(dminfo % nprocs)) + 1
+ local_end = nint(real(dminfo % my_proc_id + 1) * real(global_end - global_start + 1) / real(dminfo % nprocs))
+
+ end subroutine mpas_dmpar_get_index_range
+
+
+ subroutine mpas_dmpar_compute_index_range(dminfo, &
+ local_start, local_end, &
+ global_start, global_end)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: local_start, local_end
+ integer, intent(inout) :: global_start, global_end
+
+ integer :: n
+ integer :: mpi_ierr
+
+ n = local_end - local_start + 1
+
+ if (dminfo % my_proc_id == 0) then
+ global_start = 1
+ global_end = global_start + n - 1
+
+#ifdef _MPI
+ else if (dminfo % my_proc_id == dminfo % nprocs - 1) then
+ call MPI_Recv(global_start, 1, MPI_INTEGERKIND, dminfo % my_proc_id - 1, 0, dminfo % comm, MPI_STATUS_IGNORE, mpi_ierr)
+ global_end = global_start + n - 1
+
+ else
+ call MPI_Recv(global_start, 1, MPI_INTEGERKIND, dminfo % my_proc_id - 1, 0, dminfo % comm, MPI_STATUS_IGNORE, mpi_ierr)
+ global_end = global_start + n
+ call MPI_Send(global_end, 1, MPI_INTEGERKIND, dminfo % my_proc_id + 1, 0, dminfo % comm, mpi_ierr)
+ global_end = global_end - 1
+#endif
+
+ end if
+
+
+ end subroutine mpas_dmpar_compute_index_range
+
+
+ ! Build send/recv exchange lists mapping this task's owned global indices
+ ! (ownedList) to the global indices it needs (neededList). The needed list is
+ ! circulated around all ranks in a ring (nprocs shift steps); at each step a
+ ! rank marks, in the in-transit message, the entries it owns with the negated
+ ! owner rank id, so that after a full revolution each rank can read back which
+ ! process owns each of its needed indices.
+ !
+ ! On exit sendList/recvList are freshly allocated linked lists of
+ ! exchange_list nodes (one node per partner rank with a non-empty exchange);
+ ! the dummy head node allocated here is stripped off and freed at the end.
+ subroutine mpas_dmpar_get_owner_list(dminfo, &
+ nOwnedList, nNeededList, &
+ ownedList, neededList, &
+ sendList, recvList)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: nOwnedList, nNeededList
+ integer, dimension(nOwnedList), intent(in) :: ownedList
+ integer, dimension(nNeededList), intent(in) :: neededList
+ type (exchange_list), pointer :: sendList
+ type (exchange_list), pointer :: recvList
+
+ integer :: i, j, k, kk
+ integer :: totalSize, nMesgRecv, nMesgSend, recvNeighbor, sendNeighbor, currentProc
+ integer :: numToSend, numToRecv
+ ! recipientList(j): position of owned item j within the message being built
+ ! for the current partner, or -1 if not requested by that partner
+ integer, dimension(nOwnedList) :: recipientList
+ ! ownedListSorted: (value, original index) pairs, sorted by value for the
+ ! binary search below
+ integer, dimension(2,nOwnedList) :: ownedListSorted
+ integer, allocatable, dimension(:) :: ownerListIn, ownerListOut
+ type (exchange_list), pointer :: sendListPtr, recvListPtr
+ integer :: mpi_ierr, mpi_rreq, mpi_sreq
+
+#ifdef _MPI
+ ! Dummy head nodes; real entries are appended after them and the heads are
+ ! removed before returning.
+ allocate(sendList)
+ allocate(recvList)
+ nullify(sendList % next)
+ nullify(recvList % next)
+ sendListPtr => sendList
+ recvListPtr => recvList
+
+ do i=1,nOwnedList
+ ownedListSorted(1,i) = ownedList(i)
+ ownedListSorted(2,i) = i
+ end do
+ call quicksort(nOwnedList, ownedListSorted)
+
+ ! Every rank's message buffer must hold the largest needed list in the ring.
+ call MPI_Allreduce(nNeededList, totalSize, 1, MPI_INTEGERKIND, MPI_MAX, dminfo % comm, mpi_ierr)
+
+ allocate(ownerListIn(totalSize))
+ allocate(ownerListOut(totalSize))
+
+ nMesgRecv = nNeededList
+ ownerListIn(1:nNeededList) = neededList(1:nNeededList)
+
+ ! Ring neighbors: receive from the left, send to the right.
+ recvNeighbor = mod(dminfo % my_proc_id + dminfo % nprocs - 1, dminfo % nprocs)
+ sendNeighbor = mod(dminfo % my_proc_id + 1, dminfo % nprocs)
+
+ do i=1, dminfo % nprocs
+
+ recipientList(:) = -1
+ numToSend = 0
+
+ ! Rank whose needed list is currently held in ownerListIn at step i.
+ currentProc = mod(dminfo % my_proc_id + dminfo % nprocs - i + 1, dminfo % nprocs)
+ do j=1,nMesgRecv
+ ! Positive entries are still-unclaimed global indices; claimed entries
+ ! have been overwritten with -ownerRank and are passed through.
+ if (ownerListIn(j) > 0) then
+ k = mpas_binary_search(ownedListSorted, 2, 1, nOwnedList, ownerListIn(j))
+ ! Presumably mpas_binary_search returns nOwnedList+1 when the value is
+ ! not found — TODO confirm against its definition.
+ if (k <= nOwnedList) then
+ ownerListOut(j) = -1 * dminfo % my_proc_id
+ numToSend = numToSend + 1
+ recipientList(ownedListSorted(2,k)) = numToSend
+ else
+ ownerListOut(j) = ownerListIn(j)
+ end if
+ else
+ ownerListOut(j) = ownerListIn(j)
+ end if
+ end do
+
+ ! Record the owned items that currentProc requested, ordered by their
+ ! position in currentProc's message so both sides agree on ordering.
+ if (numToSend > 0) then
+ allocate(sendListPtr % next)
+ sendListPtr => sendListPtr % next
+ sendListPtr % procID = currentProc
+ sendListPtr % nlist = numToSend
+ allocate(sendListPtr % list(numToSend))
+ nullify(sendListPtr % next)
+ kk = 1
+ do j=1,nOwnedList
+ if (recipientList(j) /= -1) then
+ sendListPtr % list(recipientList(j)) = j
+ ! NOTE(review): kk is incremented but never used in this loop.
+ kk = kk + 1
+ end if
+ end do
+ end if
+
+ ! Shift the (partially claimed) message one step around the ring:
+ ! first exchange lengths, then the payload.
+ nMesgSend = nMesgRecv
+ call MPI_Irecv(nMesgRecv, 1, MPI_INTEGERKIND, recvNeighbor, i, dminfo % comm, mpi_rreq, mpi_ierr)
+ call MPI_Isend(nMesgSend, 1, MPI_INTEGERKIND, sendNeighbor, i, dminfo % comm, mpi_sreq, mpi_ierr)
+ call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr)
+ call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr)
+ call MPI_Irecv(ownerListIn, nMesgRecv, MPI_INTEGERKIND, recvNeighbor, i, dminfo % comm, mpi_rreq, mpi_ierr)
+ call MPI_Isend(ownerListOut, nMesgSend, MPI_INTEGERKIND, sendNeighbor, i, dminfo % comm, mpi_sreq, mpi_ierr)
+ call MPI_Wait(mpi_rreq, MPI_STATUS_IGNORE, mpi_ierr)
+ call MPI_Wait(mpi_sreq, MPI_STATUS_IGNORE, mpi_ierr)
+ end do
+
+ ! After a full revolution ownerListIn is our own needed list again, now
+ ! annotated with -ownerRank per entry; build one recv node per owner.
+ do i=0, dminfo % nprocs - 1
+
+ numToRecv = 0
+ do j=1,nNeededList
+ if (ownerListIn(j) == -i) numToRecv = numToRecv + 1
+ end do
+ if (numToRecv > 0) then
+ allocate(recvListPtr % next)
+ recvListPtr => recvListPtr % next
+ recvListPtr % procID = i
+ recvListPtr % nlist = numToRecv
+ allocate(recvListPtr % list(numToRecv))
+ nullify(recvListPtr % next)
+ kk = 1
+ do j=1,nNeededList
+ if (ownerListIn(j) == -i) then
+ recvListPtr % list(kk) = j
+ kk = kk + 1
+ end if
+ end do
+ end if
+
+ end do
+
+ deallocate(ownerListIn)
+ deallocate(ownerListOut)
+
+ ! Drop the dummy head nodes allocated above.
+ sendListPtr => sendList
+ sendList => sendList % next
+ deallocate(sendListPtr)
+
+ recvListPtr => recvList
+ recvList => recvList % next
+ deallocate(recvListPtr)
+
+#else
+ ! Serial build: single identity exchange with ourselves.
+ allocate(recvList)
+ recvList % procID = dminfo % my_proc_id
+ recvList % nlist = nNeededList
+ allocate(recvList % list(nNeededList))
+ nullify(recvList % next)
+ do j=1,nNeededList
+ recvList % list(j) = j
+ end do
+
+ allocate(sendList)
+ sendList % procID = dminfo % my_proc_id
+ sendList % nlist = nOwnedList
+ allocate(sendList % list(nOwnedList))
+ nullify(sendList % next)
+ do j=1,nOwnedList
+ sendList % list(j) = j
+ end do
+#endif
+
+ end subroutine mpas_dmpar_get_owner_list
+
+
+ ! Redistribute a 1-d integer field: arrayIn (indexed by this task's owned
+ ! list) is scattered into arrayOut (indexed by the needed list) using the
+ ! sendList/recvList produced by mpas_dmpar_get_owner_list. Self-exchange is
+ ! done by direct copy; remote exchanges use nonblocking MPI with per-node
+ ! integer buffers (ibuffer) allocated and freed here.
+ subroutine mpas_dmpar_alltoall_field1d_integer(dminfo, arrayIn, arrayOut, nOwnedList, nNeededList, sendList, recvList)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, dimension(*), intent(in) :: arrayIn
+ integer, dimension(*), intent(inout) :: arrayOut
+ integer, intent(in) :: nOwnedList, nNeededList
+ type (exchange_list), pointer :: sendList, recvList
+
+ type (exchange_list), pointer :: sendListPtr, recvListPtr
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: mpi_ierr
+ integer :: i
+
+#ifdef _MPI
+
+ ! Locate the self-exchange nodes (procID == my rank), if present.
+ sendListPtr => sendList
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID == dminfo % my_proc_id) exit
+ sendListPtr => sendListPtr % next
+ end do
+
+ recvListPtr => recvList
+ do while (associated(recvListPtr))
+ if (recvListPtr % procID == dminfo % my_proc_id) exit
+ recvListPtr => recvListPtr % next
+ end do
+
+ ! Local data is copied directly, no MPI involved.
+ if (associated(recvListPtr) .and. associated(sendListPtr)) then
+ do i=1,recvListPtr % nlist
+ arrayOut(recvListPtr % list(i)) = arrayIn(sendListPtr % list(i))
+ end do
+ end if
+
+ ! Post all receives first (tag = sender's rank).
+ recvListPtr => recvList
+ do while (associated(recvListPtr))
+ if (recvListPtr % procID /= dminfo % my_proc_id) then
+ allocate(recvListPtr % ibuffer(recvListPtr % nlist))
+ call MPI_Irecv(recvListPtr % ibuffer, recvListPtr % nlist, MPI_INTEGERKIND, &
+ recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
+ end if
+ recvListPtr => recvListPtr % next
+ end do
+
+ ! Pack and send to each remote partner.
+ sendListPtr => sendList
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ allocate(sendListPtr % ibuffer(sendListPtr % nlist))
+ call mpas_pack_send_buf1d_integer(nOwnedList, arrayIn, sendListPtr, 1, sendListPtr % nlist, &
+ sendListPtr % ibuffer, nPacked, lastPackedIdx)
+ call MPI_Isend(sendListPtr % ibuffer, sendListPtr % nlist, MPI_INTEGERKIND, &
+ sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+
+ ! Complete receives and unpack into arrayOut.
+ recvListPtr => recvList
+ do while (associated(recvListPtr))
+ if (recvListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ call mpas_unpack_recv_buf1d_integer(nNeededList, arrayOut, recvListPtr, 1, recvListPtr % nlist, &
+ recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
+ deallocate(recvListPtr % ibuffer)
+ end if
+ recvListPtr => recvListPtr % next
+ end do
+
+ ! Complete sends before freeing their buffers.
+ sendListPtr => sendList
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ deallocate(sendListPtr % ibuffer)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+
+#else
+ ! Serial build: only an identity redistribution is meaningful.
+ if (nOwnedList /= nNeededList) then
+ write(0,*) 'Error in dmpar_alltoall_field: For non-dmpar, ', &
+ 'arrayIn and arrayOut dims must match.'
+ call mpas_dmpar_abort(dminfo)
+ else
+ arrayOut(1:nNeededList) = arrayIn(1:nOwnedList)
+ end if
+#endif
+
+ end subroutine mpas_dmpar_alltoall_field1d_integer
+
+
+ ! 2-d integer variant of mpas_dmpar_alltoall_field1d_integer: redistributes
+ ! whole first-dimension slices (dim1 values per exchanged index). Message
+ ! sizes are dim1 * nlist elements.
+ subroutine mpas_dmpar_alltoall_field2d_integer(dminfo, arrayIn, arrayOut, dim1, nOwnedList, nNeededList, sendList, recvList)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: dim1, nOwnedList, nNeededList
+ integer, dimension(dim1,*), intent(in) :: arrayIn
+ integer, dimension(dim1,*), intent(inout) :: arrayOut
+ type (exchange_list), pointer :: sendList, recvList
+
+ type (exchange_list), pointer :: sendListPtr, recvListPtr
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: mpi_ierr
+ ! d2: flattened message length (dim1 * list length) for the current partner
+ integer :: i, d2
+
+#ifdef _MPI
+
+ ! Locate the self-exchange nodes (procID == my rank), if present.
+ sendListPtr => sendList
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID == dminfo % my_proc_id) exit
+ sendListPtr => sendListPtr % next
+ end do
+
+ recvListPtr => recvList
+ do while (associated(recvListPtr))
+ if (recvListPtr % procID == dminfo % my_proc_id) exit
+ recvListPtr => recvListPtr % next
+ end do
+
+ ! Local slices are copied directly.
+ if (associated(recvListPtr) .and. associated(sendListPtr)) then
+ do i=1,recvListPtr % nlist
+ arrayOut(:,recvListPtr % list(i)) = arrayIn(:,sendListPtr % list(i))
+ end do
+ end if
+
+ ! Post all receives first (tag = sender's rank).
+ recvListPtr => recvList
+ do while (associated(recvListPtr))
+ if (recvListPtr % procID /= dminfo % my_proc_id) then
+ d2 = dim1 * recvListPtr % nlist
+ allocate(recvListPtr % ibuffer(d2))
+ call MPI_Irecv(recvListPtr % ibuffer, d2, MPI_INTEGERKIND, &
+ recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
+ end if
+ recvListPtr => recvListPtr % next
+ end do
+
+ ! Pack slices and send to each remote partner.
+ sendListPtr => sendList
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ d2 = dim1 * sendListPtr % nlist
+ allocate(sendListPtr % ibuffer(d2))
+ call mpas_pack_send_buf2d_integer(1, dim1, nOwnedList, arrayIn, sendListPtr, 1, d2, &
+ sendListPtr % ibuffer, nPacked, lastPackedIdx)
+ call MPI_Isend(sendListPtr % ibuffer, d2, MPI_INTEGERKIND, &
+ sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+
+ ! Complete receives and unpack into arrayOut.
+ recvListPtr => recvList
+ do while (associated(recvListPtr))
+ if (recvListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ d2 = dim1 * recvListPtr % nlist
+ call mpas_unpack_recv_buf2d_integer(1, dim1, nNeededList, arrayOut, recvListPtr, 1, d2, &
+ recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
+ deallocate(recvListPtr % ibuffer)
+ end if
+ recvListPtr => recvListPtr % next
+ end do
+
+ ! Complete sends before freeing their buffers.
+ sendListPtr => sendList
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ deallocate(sendListPtr % ibuffer)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+
+#else
+ ! Serial build: only an identity redistribution is meaningful.
+ if (nOwnedList /= nNeededList) then
+ write(0,*) 'Error in dmpar_alltoall_field: For non-dmpar, ', &
+ 'arrayIn and arrayOut dims must match.'
+ call mpas_dmpar_abort(dminfo)
+ else
+ arrayOut(:,1:nNeededList) = arrayIn(:,1:nOwnedList)
+ end if
+#endif
+
+ end subroutine mpas_dmpar_alltoall_field2d_integer
+
+
+ ! Real-kind counterpart of mpas_dmpar_alltoall_field1d_integer: scatters a
+ ! 1-d real(RKIND) field from owned-list order to needed-list order, using
+ ! per-node rbuffer workspace and MPI_REALKIND messages.
+ subroutine mpas_dmpar_alltoall_field1d_real(dminfo, arrayIn, arrayOut, nOwnedList, nNeededList, sendList, recvList)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ real (kind=RKIND), dimension(*), intent(in) :: arrayIn
+ real (kind=RKIND), dimension(*), intent(inout) :: arrayOut
+ integer, intent(in) :: nOwnedList, nNeededList
+ type (exchange_list), pointer :: sendList, recvList
+
+ type (exchange_list), pointer :: sendListPtr, recvListPtr
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: mpi_ierr
+ integer :: i
+
+#ifdef _MPI
+
+ ! Locate the self-exchange nodes (procID == my rank), if present.
+ sendListPtr => sendList
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID == dminfo % my_proc_id) exit
+ sendListPtr => sendListPtr % next
+ end do
+
+ recvListPtr => recvList
+ do while (associated(recvListPtr))
+ if (recvListPtr % procID == dminfo % my_proc_id) exit
+ recvListPtr => recvListPtr % next
+ end do
+
+ ! Local data is copied directly, no MPI involved.
+ if (associated(recvListPtr) .and. associated(sendListPtr)) then
+ do i=1,recvListPtr % nlist
+ arrayOut(recvListPtr % list(i)) = arrayIn(sendListPtr % list(i))
+ end do
+ end if
+
+ ! Post all receives first (tag = sender's rank).
+ recvListPtr => recvList
+ do while (associated(recvListPtr))
+ if (recvListPtr % procID /= dminfo % my_proc_id) then
+ allocate(recvListPtr % rbuffer(recvListPtr % nlist))
+ call MPI_Irecv(recvListPtr % rbuffer, recvListPtr % nlist, MPI_REALKIND, &
+ recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
+ end if
+ recvListPtr => recvListPtr % next
+ end do
+
+ ! Pack and send to each remote partner.
+ sendListPtr => sendList
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ allocate(sendListPtr % rbuffer(sendListPtr % nlist))
+ call mpas_pack_send_buf1d_real(nOwnedList, arrayIn, sendListPtr, 1, sendListPtr % nlist, &
+ sendListPtr % rbuffer, nPacked, lastPackedIdx)
+ call MPI_Isend(sendListPtr % rbuffer, sendListPtr % nlist, MPI_REALKIND, &
+ sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+
+ ! Complete receives and unpack into arrayOut.
+ recvListPtr => recvList
+ do while (associated(recvListPtr))
+ if (recvListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ call mpas_unpack_recv_buf1d_real(nNeededList, arrayOut, recvListPtr, 1, recvListPtr % nlist, &
+ recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
+ deallocate(recvListPtr % rbuffer)
+ end if
+ recvListPtr => recvListPtr % next
+ end do
+
+ ! Complete sends before freeing their buffers.
+ sendListPtr => sendList
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ deallocate(sendListPtr % rbuffer)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+
+#else
+ ! Serial build: only an identity redistribution is meaningful.
+ if (nOwnedList /= nNeededList) then
+ write(0,*) 'Error in dmpar_alltoall_field: For non-dmpar, ', &
+ 'arrayIn and arrayOut dims must match.'
+ call mpas_dmpar_abort(dminfo)
+ else
+ arrayOut(1:nNeededList) = arrayIn(1:nOwnedList)
+ end if
+#endif
+
+ end subroutine mpas_dmpar_alltoall_field1d_real
+
+
+ ! 2-d real(RKIND) variant: redistributes whole first-dimension slices
+ ! (dim1 values per exchanged index); message sizes are dim1 * nlist.
+ subroutine mpas_dmpar_alltoall_field2d_real(dminfo, arrayIn, arrayOut, dim1, nOwnedList, nNeededList, sendList, recvList)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: dim1, nOwnedList, nNeededList
+ real (kind=RKIND), dimension(dim1,*), intent(in) :: arrayIn
+ real (kind=RKIND), dimension(dim1,*), intent(inout) :: arrayOut
+ type (exchange_list), pointer :: sendList, recvList
+
+ type (exchange_list), pointer :: sendListPtr, recvListPtr
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: mpi_ierr
+ ! d2: flattened message length (dim1 * list length) for the current partner
+ integer :: i, d2
+
+#ifdef _MPI
+
+ ! Locate the self-exchange nodes (procID == my rank), if present.
+ sendListPtr => sendList
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID == dminfo % my_proc_id) exit
+ sendListPtr => sendListPtr % next
+ end do
+
+ recvListPtr => recvList
+ do while (associated(recvListPtr))
+ if (recvListPtr % procID == dminfo % my_proc_id) exit
+ recvListPtr => recvListPtr % next
+ end do
+
+ ! Local slices are copied directly.
+ if (associated(recvListPtr) .and. associated(sendListPtr)) then
+ do i=1,recvListPtr % nlist
+ arrayOut(:,recvListPtr % list(i)) = arrayIn(:,sendListPtr % list(i))
+ end do
+ end if
+
+ ! Post all receives first (tag = sender's rank).
+ recvListPtr => recvList
+ do while (associated(recvListPtr))
+ if (recvListPtr % procID /= dminfo % my_proc_id) then
+ d2 = dim1 * recvListPtr % nlist
+ allocate(recvListPtr % rbuffer(d2))
+ call MPI_Irecv(recvListPtr % rbuffer, d2, MPI_REALKIND, &
+ recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
+ end if
+ recvListPtr => recvListPtr % next
+ end do
+
+ ! Pack slices and send to each remote partner.
+ sendListPtr => sendList
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ d2 = dim1 * sendListPtr % nlist
+ allocate(sendListPtr % rbuffer(d2))
+ call mpas_pack_send_buf2d_real(1, dim1, nOwnedList, arrayIn, sendListPtr, 1, d2, &
+ sendListPtr % rbuffer, nPacked, lastPackedIdx)
+ call MPI_Isend(sendListPtr % rbuffer, d2, MPI_REALKIND, &
+ sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+
+ ! Complete receives and unpack into arrayOut.
+ recvListPtr => recvList
+ do while (associated(recvListPtr))
+ if (recvListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ d2 = dim1 * recvListPtr % nlist
+ call mpas_unpack_recv_buf2d_real(1, dim1, nNeededList, arrayOut, recvListPtr, 1, d2, &
+ recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
+ deallocate(recvListPtr % rbuffer)
+ end if
+ recvListPtr => recvListPtr % next
+ end do
+
+ ! Complete sends before freeing their buffers.
+ sendListPtr => sendList
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ deallocate(sendListPtr % rbuffer)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+
+#else
+ ! Serial build: only an identity redistribution is meaningful.
+ if (nOwnedList /= nNeededList) then
+ write(0,*) 'Error in dmpar_alltoall_field: For non-dmpar, ', &
+ 'arrayIn and arrayOut dims must match.'
+ call mpas_dmpar_abort(dminfo)
+ else
+ arrayOut(:,1:nNeededList) = arrayIn(:,1:nOwnedList)
+ end if
+#endif
+
+ end subroutine mpas_dmpar_alltoall_field2d_real
+
+
+ ! 3-d real(RKIND) variant: redistributes dim1 x dim2 slabs per exchanged
+ ! index; message sizes are dim1 * dim2 * nlist.
+ subroutine mpas_dmpar_alltoall_field3d_real(dminfo, arrayIn, arrayOut, dim1, dim2, nOwnedList, nNeededList, sendList, recvList)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: dim1, dim2, nOwnedList, nNeededList
+ real (kind=RKIND), dimension(dim1,dim2,*), intent(in) :: arrayIn
+ real (kind=RKIND), dimension(dim1,dim2,*), intent(inout) :: arrayOut
+ type (exchange_list), pointer :: sendList, recvList
+
+ type (exchange_list), pointer :: sendListPtr, recvListPtr
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: mpi_ierr
+ ! d3: flattened message length (dim1 * dim2 * list length) for the partner
+ integer :: i, d3
+
+#ifdef _MPI
+
+ ! Locate the self-exchange nodes (procID == my rank), if present.
+ sendListPtr => sendList
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID == dminfo % my_proc_id) exit
+ sendListPtr => sendListPtr % next
+ end do
+
+ recvListPtr => recvList
+ do while (associated(recvListPtr))
+ if (recvListPtr % procID == dminfo % my_proc_id) exit
+ recvListPtr => recvListPtr % next
+ end do
+
+ ! Local slabs are copied directly.
+ if (associated(recvListPtr) .and. associated(sendListPtr)) then
+ do i=1,recvListPtr % nlist
+ arrayOut(:,:,recvListPtr % list(i)) = arrayIn(:,:,sendListPtr % list(i))
+ end do
+ end if
+
+ ! Post all receives first (tag = sender's rank).
+ recvListPtr => recvList
+ do while (associated(recvListPtr))
+ if (recvListPtr % procID /= dminfo % my_proc_id) then
+ d3 = dim1 * dim2 * recvListPtr % nlist
+ allocate(recvListPtr % rbuffer(d3))
+ call MPI_Irecv(recvListPtr % rbuffer, d3, MPI_REALKIND, &
+ recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
+ end if
+ recvListPtr => recvListPtr % next
+ end do
+
+ ! Pack slabs and send to each remote partner.
+ sendListPtr => sendList
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ d3 = dim1 * dim2 * sendListPtr % nlist
+ allocate(sendListPtr % rbuffer(d3))
+ call mpas_pack_send_buf3d_real(1, dim1, 1, dim2, nOwnedList, arrayIn, sendListPtr, 1, d3, &
+ sendListPtr % rbuffer, nPacked, lastPackedIdx)
+ call MPI_Isend(sendListPtr % rbuffer, d3, MPI_REALKIND, &
+ sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+
+ ! Complete receives and unpack into arrayOut.
+ recvListPtr => recvList
+ do while (associated(recvListPtr))
+ if (recvListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ d3 = dim1 * dim2 * recvListPtr % nlist
+ call mpas_unpack_recv_buf3d_real(1, dim1, 1, dim2, nNeededList, arrayOut, recvListPtr, 1, d3, &
+ recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
+ deallocate(recvListPtr % rbuffer)
+ end if
+ recvListPtr => recvListPtr % next
+ end do
+
+ ! Complete sends before freeing their buffers.
+ sendListPtr => sendList
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ deallocate(sendListPtr % rbuffer)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+
+#else
+ ! Serial build: only an identity redistribution is meaningful.
+ if (nOwnedList /= nNeededList) then
+ write(0,*) 'Error in dmpar_alltoall_field: For non-dmpar, ', &
+ 'arrayIn and arrayOut dims must match.'
+ call mpas_dmpar_abort(dminfo)
+ else
+ arrayOut(:,:,1:nNeededList) = arrayIn(:,:,1:nOwnedList)
+ end if
+#endif
+
+ end subroutine mpas_dmpar_alltoall_field3d_real
+
+
+ ! Pack field values selected by sendList%list (starting at list entry
+ ! startPackIdx) into buffer. Supports partial packing: if buffer fills,
+ ! returns with nPacked = values written and lastPackedIdx = last list entry
+ ! fully packed, so the caller can resume from lastPackedIdx+1.
+ subroutine mpas_pack_send_buf1d_integer(nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
+
+ implicit none
+
+ integer, intent(in) :: nField, nBuffer, startPackIdx
+ integer, dimension(*), intent(in) :: field
+ type (exchange_list), intent(in) :: sendList
+ integer, dimension(nBuffer), intent(out) :: buffer
+ integer, intent(inout) :: nPacked, lastPackedIdx
+
+ integer :: i
+
+ nPacked = 0
+ do i=startPackIdx, sendList % nlist
+ nPacked = nPacked + 1
+ ! Buffer full: back out the last increment and report progress.
+ if (nPacked > nBuffer) then
+ nPacked = nPacked - 1
+ lastPackedIdx = i - 1
+ return
+ end if
+ buffer(nPacked) = field(sendList % list(i))
+ end do
+ lastPackedIdx = sendList % nlist
+
+ end subroutine mpas_pack_send_buf1d_integer
+
+
+ ! 2-d integer packer: copies the full (ds:de) slice of each selected column
+ ! into buffer, n = de-ds+1 values per list entry. Partial-pack protocol as in
+ ! mpas_pack_send_buf1d_integer; a buffer too small for even one slice is
+ ! reported to stderr and the routine returns without packing.
+ subroutine mpas_pack_send_buf2d_integer(ds, de, nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
+
+ implicit none
+
+ integer, intent(in) :: ds, de, nField, nBuffer, startPackIdx
+ integer, dimension(ds:de,*), intent(in) :: field
+ type (exchange_list), intent(in) :: sendList
+ integer, dimension(nBuffer), intent(out) :: buffer
+ integer, intent(inout) :: nPacked, lastPackedIdx
+
+ integer :: i, n
+
+ n = de-ds+1
+
+ ! NOTE(review): on this early return nPacked/lastPackedIdx are left
+ ! unassigned for the caller — verify callers never hit this path.
+ if (n > nBuffer) then
+ write(0,*) 'packSendBuf2dInteger: Not enough space in buffer', &
+ ' to fit a single slice.'
+ return
+ end if
+
+ nPacked = 0
+ do i=startPackIdx, sendList % nlist
+ nPacked = nPacked + n
+ ! Buffer cannot hold another whole slice: back out and report progress.
+ if (nPacked > nBuffer) then
+ nPacked = nPacked - n
+ lastPackedIdx = i - 1
+ return
+ end if
+ buffer(nPacked-n+1:nPacked) = field(ds:de,sendList % list(i))
+ end do
+ lastPackedIdx = sendList % nlist
+
+ end subroutine mpas_pack_send_buf2d_integer
+
+
+ ! 3-d integer packer: copies the full (d1s:d1e, d2s:d2e) slab of each
+ ! selected index into buffer, n = (d1e-d1s+1)*(d2e-d2s+1) values per list
+ ! entry, flattened with the first dimension fastest. Same partial-pack
+ ! protocol as the 1-d/2-d packers.
+ subroutine mpas_pack_send_buf3d_integer(d1s, d1e, d2s, d2e, nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
+
+ implicit none
+
+ integer, intent(in) :: d1s, d1e, d2s, d2e, nField, nBuffer, startPackIdx
+ integer, dimension(d1s:d1e,d2s:d2e,*), intent(in) :: field
+ type (exchange_list), intent(in) :: sendList
+ integer, dimension(nBuffer), intent(out) :: buffer
+ integer, intent(inout) :: nPacked, lastPackedIdx
+
+ integer :: i, j, k, n
+
+ n = (d1e-d1s+1) * (d2e-d2s+1)
+
+ ! Buffer cannot fit even one slab: report and bail out.
+ if (n > nBuffer) then
+ write(0,*) 'packSendBuf3dInteger: Not enough space in buffer', &
+ ' to fit a single slice.'
+ return
+ end if
+
+ nPacked = 0
+ do i=startPackIdx, sendList % nlist
+ nPacked = nPacked + n
+ if (nPacked > nBuffer) then
+ nPacked = nPacked - n
+ lastPackedIdx = i - 1
+ return
+ end if
+ ! Flatten the slab one d2-row at a time, d1 varying fastest.
+ k = nPacked-n+1
+ do j=d2s,d2e
+ buffer(k:k+d1e-d1s) = field(d1s:d1e,j,sendList % list(i))
+ k = k + d1e-d1s+1
+ end do
+ end do
+ lastPackedIdx = sendList % nlist
+
+ end subroutine mpas_pack_send_buf3d_integer
+
+
+ ! Real(RKIND) counterpart of mpas_pack_send_buf1d_integer: packs field values
+ ! selected by sendList%list into buffer with the same partial-pack protocol
+ ! (nPacked = values written, lastPackedIdx = last list entry fully packed).
+ subroutine mpas_pack_send_buf1d_real(nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
+
+ implicit none
+
+ integer, intent(in) :: nField, nBuffer, startPackIdx
+ real (kind=RKIND), dimension(*), intent(in) :: field
+ type (exchange_list), intent(in) :: sendList
+ real (kind=RKIND), dimension(nBuffer), intent(out) :: buffer
+ integer, intent(inout) :: nPacked, lastPackedIdx
+
+ integer :: i
+
+ nPacked = 0
+ do i=startPackIdx, sendList % nlist
+ nPacked = nPacked + 1
+ ! Buffer full: back out the last increment and report progress.
+ if (nPacked > nBuffer) then
+ nPacked = nPacked - 1
+ lastPackedIdx = i - 1
+ return
+ end if
+ buffer(nPacked) = field(sendList % list(i))
+ end do
+ lastPackedIdx = sendList % nlist
+
+ end subroutine mpas_pack_send_buf1d_real
+
+
+ ! Real(RKIND) counterpart of mpas_pack_send_buf2d_integer: packs the full
+ ! (ds:de) slice of each selected column, n = de-ds+1 values per list entry,
+ ! with the same partial-pack protocol.
+ subroutine mpas_pack_send_buf2d_real(ds, de, nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
+
+ implicit none
+
+ integer, intent(in) :: ds, de, nField, nBuffer, startPackIdx
+ real (kind=RKIND), dimension(ds:de,*), intent(in) :: field
+ type (exchange_list), intent(in) :: sendList
+ real (kind=RKIND), dimension(nBuffer), intent(out) :: buffer
+ integer, intent(inout) :: nPacked, lastPackedIdx
+
+ integer :: i, n
+
+ n = de-ds+1
+
+ ! Buffer cannot fit even one slice: report and bail out.
+ if (n > nBuffer) then
+ write(0,*) 'packSendBuf2dReal: Not enough space in buffer', &
+ ' to fit a single slice.'
+ return
+ end if
+
+ nPacked = 0
+ do i=startPackIdx, sendList % nlist
+ nPacked = nPacked + n
+ if (nPacked > nBuffer) then
+ nPacked = nPacked - n
+ lastPackedIdx = i - 1
+ return
+ end if
+ buffer(nPacked-n+1:nPacked) = field(ds:de,sendList % list(i))
+ end do
+ lastPackedIdx = sendList % nlist
+
+ end subroutine mpas_pack_send_buf2d_real
+
+
+ ! Real(RKIND) counterpart of mpas_pack_send_buf3d_integer: packs the full
+ ! (d1s:d1e, d2s:d2e) slab of each selected index, flattened first dimension
+ ! fastest, with the same partial-pack protocol.
+ subroutine mpas_pack_send_buf3d_real(d1s, d1e, d2s, d2e, nField, field, sendList, startPackIdx, nBuffer, buffer, nPacked, lastPackedIdx)
+
+ implicit none
+
+ integer, intent(in) :: d1s, d1e, d2s, d2e, nField, nBuffer, startPackIdx
+ real (kind=RKIND), dimension(d1s:d1e,d2s:d2e,*), intent(in) :: field
+ type (exchange_list), intent(in) :: sendList
+ real (kind=RKIND), dimension(nBuffer), intent(out) :: buffer
+ integer, intent(inout) :: nPacked, lastPackedIdx
+
+ integer :: i, j, k, n
+
+ n = (d1e-d1s+1) * (d2e-d2s+1)
+
+ ! Buffer cannot fit even one slab: report and bail out.
+ if (n > nBuffer) then
+ write(0,*) 'packSendBuf3dReal: Not enough space in buffer', &
+ ' to fit a single slice.'
+ return
+ end if
+
+ nPacked = 0
+ do i=startPackIdx, sendList % nlist
+ nPacked = nPacked + n
+ if (nPacked > nBuffer) then
+ nPacked = nPacked - n
+ lastPackedIdx = i - 1
+ return
+ end if
+ ! Flatten the slab one d2-row at a time, d1 varying fastest.
+ k = nPacked-n+1
+ do j=d2s,d2e
+ buffer(k:k+d1e-d1s) = field(d1s:d1e,j,sendList % list(i))
+ k = k + d1e-d1s+1
+ end do
+ end do
+ lastPackedIdx = sendList % nlist
+
+ end subroutine mpas_pack_send_buf3d_real
+
+
+ ! Inverse of mpas_pack_send_buf1d_integer: scatter buffer values into field
+ ! at the positions given by recvList%list, starting at list entry
+ ! startUnpackIdx. Partial-unpack protocol mirrors the packer (nUnpacked /
+ ! lastUnpackedIdx report progress when the buffer is exhausted).
+ subroutine mpas_unpack_recv_buf1d_integer(nField, field, recvList, startUnpackIdx, nBuffer, buffer, nUnpacked, lastUnpackedIdx)
+
+ implicit none
+
+ integer, intent(in) :: nField, nBuffer, startUnpackIdx
+ integer, dimension(*), intent(inout) :: field
+ type (exchange_list), intent(in) :: recvList
+ integer, dimension(nBuffer), intent(in) :: buffer
+ integer, intent(inout) :: nUnpacked, lastUnpackedIdx
+
+ integer :: i
+
+ nUnpacked = 0
+ do i=startUnpackIdx, recvList % nlist
+ nUnpacked = nUnpacked + 1
+ ! Buffer exhausted: back out the last increment and report progress.
+ if (nUnpacked > nBuffer) then
+ nUnpacked = nUnpacked - 1
+ lastUnpackedIdx = i - 1
+ return
+ end if
+ field(recvList % list(i)) = buffer(nUnpacked)
+ end do
+ lastUnpackedIdx = recvList % nlist
+
+ end subroutine mpas_unpack_recv_buf1d_integer
+
+
+ ! Inverse of mpas_pack_send_buf2d_integer: scatter n = de-ds+1 contiguous
+ ! buffer values into the (ds:de) slice of each listed column. Same
+ ! partial-unpack protocol as the 1-d unpacker.
+ subroutine mpas_unpack_recv_buf2d_integer(ds, de, nField, field, recvList, startUnpackIdx, nBuffer, buffer, nUnpacked, lastUnpackedIdx)
+
+ implicit none
+
+ integer, intent(in) :: ds, de, nField, nBuffer, startUnpackIdx
+ integer, dimension(ds:de,*), intent(inout) :: field
+ type (exchange_list), intent(in) :: recvList
+ integer, dimension(nBuffer), intent(in) :: buffer
+ integer, intent(inout) :: nUnpacked, lastUnpackedIdx
+
+ integer :: i, n
+
+ n = de-ds+1
+
+ nUnpacked = 0
+ do i=startUnpackIdx, recvList % nlist
+ nUnpacked = nUnpacked + n
+ ! Not enough data left for a whole slice: back out and report progress.
+ if (nUnpacked > nBuffer) then
+ nUnpacked = nUnpacked - n
+ lastUnpackedIdx = i - 1
+ return
+ end if
+ field(ds:de,recvList % list(i)) = buffer(nUnpacked-n+1:nUnpacked)
+ end do
+ lastUnpackedIdx = recvList % nlist
+
+ end subroutine mpas_unpack_recv_buf2d_integer
+
+
+ ! Inverse of mpas_pack_send_buf3d_integer: scatter n = (d1e-d1s+1)*(d2e-d2s+1)
+ ! buffer values into the (d1s:d1e, d2s:d2e) slab of each listed index,
+ ! first dimension varying fastest. Same partial-unpack protocol.
+ subroutine mpas_unpack_recv_buf3d_integer(d1s, d1e, d2s, d2e, nField, field, recvList, startUnpackIdx, nBuffer, buffer, &
+ nUnpacked, lastUnpackedIdx)
+
+ implicit none
+
+ integer, intent(in) :: d1s, d1e, d2s, d2e, nField, nBuffer, startUnpackIdx
+ integer, dimension(d1s:d1e,d2s:d2e,*), intent(inout) :: field
+ type (exchange_list), intent(in) :: recvList
+ integer, dimension(nBuffer), intent(in) :: buffer
+ integer, intent(inout) :: nUnpacked, lastUnpackedIdx
+
+ integer :: i, j, k, n
+
+ n = (d1e-d1s+1) * (d2e-d2s+1)
+
+ nUnpacked = 0
+ do i=startUnpackIdx, recvList % nlist
+ nUnpacked = nUnpacked + n
+ ! Not enough data left for a whole slab: back out and report progress.
+ if (nUnpacked > nBuffer) then
+ nUnpacked = nUnpacked - n
+ lastUnpackedIdx = i - 1
+ return
+ end if
+ ! Restore the slab one d2-row at a time, d1 varying fastest.
+ k = nUnpacked-n+1
+ do j=d2s,d2e
+ field(d1s:d1e,j,recvList % list(i)) = buffer(k:k+d1e-d1s)
+ k = k + d1e-d1s+1
+ end do
+ end do
+ lastUnpackedIdx = recvList % nlist
+
+ end subroutine mpas_unpack_recv_buf3d_integer
+
+
+ ! Halo exchange for a 1-d integer field updated in place: post Irecv per
+ ! remote partner, pack+Isend own halo entries, then wait/unpack receives and
+ ! finally wait sends. Unlike the alltoall routines there is no self-exchange
+ ! copy (halo lists exclude this rank). No-op in serial builds.
+ subroutine mpas_dmpar_exch_halo_field1d_integer(dminfo, array, dim1, sendList, recvList)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: dim1
+ integer, dimension(*), intent(inout) :: array
+ type (exchange_list), pointer :: sendList, recvList
+
+ type (exchange_list), pointer :: sendListPtr, recvListPtr
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: mpi_ierr
+
+#ifdef _MPI
+
+ ! Post all receives first (tag = sender's rank).
+ recvListPtr => recvList
+ do while (associated(recvListPtr))
+ if (recvListPtr % procID /= dminfo % my_proc_id) then
+ allocate(recvListPtr % ibuffer(recvListPtr % nlist))
+ call MPI_Irecv(recvListPtr % ibuffer, recvListPtr % nlist, MPI_INTEGERKIND, &
+ recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
+ end if
+ recvListPtr => recvListPtr % next
+ end do
+
+ ! Pack owned halo values and send to each partner.
+ sendListPtr => sendList
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ allocate(sendListPtr % ibuffer(sendListPtr % nlist))
+ call mpas_pack_send_buf1d_integer(dim1, array, sendListPtr, 1, sendListPtr % nlist, sendListPtr % ibuffer, nPacked, lastPackedIdx)
+ call MPI_Isend(sendListPtr % ibuffer, sendListPtr % nlist, MPI_INTEGERKIND, &
+ sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+
+ ! Complete receives and unpack into the halo region of array.
+ recvListPtr => recvList
+ do while (associated(recvListPtr))
+ if (recvListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ call mpas_unpack_recv_buf1d_integer(dim1, array, recvListPtr, 1, recvListPtr % nlist, recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
+ deallocate(recvListPtr % ibuffer)
+ end if
+ recvListPtr => recvListPtr % next
+ end do
+
+ ! Complete sends before freeing their buffers.
+ sendListPtr => sendList
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ deallocate(sendListPtr % ibuffer)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+
+#endif
+
+ end subroutine mpas_dmpar_exch_halo_field1d_integer
+
+
+ ! Halo exchange for a 2-d integer field (dim1 values per halo index) updated
+ ! in place; message sizes are dim1 * nlist. Same Irecv / pack+Isend /
+ ! wait+unpack / wait-send pattern as the 1-d routine. No-op in serial builds.
+ subroutine mpas_dmpar_exch_halo_field2d_integer(dminfo, array, dim1, dim2, sendList, recvList)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: dim1, dim2
+ integer, dimension(dim1,*), intent(inout) :: array
+ type (exchange_list), pointer :: sendList, recvList
+
+ type (exchange_list), pointer :: sendListPtr, recvListPtr
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: mpi_ierr
+ ! d2: flattened message length (dim1 * list length) for the current partner
+ integer :: d2
+
+#ifdef _MPI
+
+ ! Post all receives first (tag = sender's rank).
+ recvListPtr => recvList
+ do while (associated(recvListPtr))
+ if (recvListPtr % procID /= dminfo % my_proc_id) then
+ d2 = dim1 * recvListPtr % nlist
+ allocate(recvListPtr % ibuffer(d2))
+ call MPI_Irecv(recvListPtr % ibuffer, d2, MPI_INTEGERKIND, &
+ recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
+ end if
+ recvListPtr => recvListPtr % next
+ end do
+
+ ! Pack owned halo slices and send to each partner.
+ sendListPtr => sendList
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ d2 = dim1 * sendListPtr % nlist
+ allocate(sendListPtr % ibuffer(d2))
+ call mpas_pack_send_buf2d_integer(1, dim1, dim2, array, sendListPtr, 1, d2, sendListPtr % ibuffer, nPacked, lastPackedIdx)
+ call MPI_Isend(sendListPtr % ibuffer, d2, MPI_INTEGERKIND, &
+ sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+
+ ! Complete receives and unpack into the halo region of array.
+ recvListPtr => recvList
+ do while (associated(recvListPtr))
+ if (recvListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ d2 = dim1 * recvListPtr % nlist
+ call mpas_unpack_recv_buf2d_integer(1, dim1, dim2, array, recvListPtr, 1, d2, recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
+ deallocate(recvListPtr % ibuffer)
+ end if
+ recvListPtr => recvListPtr % next
+ end do
+
+ ! Complete sends before freeing their buffers.
+ sendListPtr => sendList
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ deallocate(sendListPtr % ibuffer)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+
+#endif
+
+ end subroutine mpas_dmpar_exch_halo_field2d_integer
+
+
+ ! Halo exchange for a 3-d integer field (dim1 x dim2 slab per halo index)
+ ! updated in place; message sizes are dim1 * dim2 * nlist. Same nonblocking
+ ! pattern as the 1-d/2-d routines. No-op in serial builds.
+ subroutine mpas_dmpar_exch_halo_field3d_integer(dminfo, array, dim1, dim2, dim3, sendList, recvList)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: dim1, dim2, dim3
+ integer, dimension(dim1,dim2,*), intent(inout) :: array
+ type (exchange_list), pointer :: sendList, recvList
+
+ type (exchange_list), pointer :: sendListPtr, recvListPtr
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: mpi_ierr
+ ! d3: flattened message length (dim1 * dim2 * list length) for the partner
+ integer :: d3
+
+#ifdef _MPI
+
+ ! Post all receives first (tag = sender's rank).
+ recvListPtr => recvList
+ do while (associated(recvListPtr))
+ if (recvListPtr % procID /= dminfo % my_proc_id) then
+ d3 = dim1 * dim2 * recvListPtr % nlist
+ allocate(recvListPtr % ibuffer(d3))
+ call MPI_Irecv(recvListPtr % ibuffer, d3, MPI_INTEGERKIND, &
+ recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
+ end if
+ recvListPtr => recvListPtr % next
+ end do
+
+ ! Pack owned halo slabs and send to each partner.
+ sendListPtr => sendList
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ d3 = dim1 * dim2 * sendListPtr % nlist
+ allocate(sendListPtr % ibuffer(d3))
+ call mpas_pack_send_buf3d_integer(1, dim1, 1, dim2, dim3, array, sendListPtr, 1, d3, &
+ sendListPtr % ibuffer, nPacked, lastPackedIdx)
+ call MPI_Isend(sendListPtr % ibuffer, d3, MPI_INTEGERKIND, &
+ sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+
+ ! Complete receives and unpack into the halo region of array.
+ recvListPtr => recvList
+ do while (associated(recvListPtr))
+ if (recvListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ d3 = dim1 * dim2 * recvListPtr % nlist
+ call mpas_unpack_recv_buf3d_integer(1, dim1, 1, dim2, dim3, array, recvListPtr, 1, d3, &
+ recvListPtr % ibuffer, nUnpacked, lastUnpackedIdx)
+ deallocate(recvListPtr % ibuffer)
+ end if
+ recvListPtr => recvListPtr % next
+ end do
+
+ ! Complete sends before freeing their buffers.
+ sendListPtr => sendList
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ deallocate(sendListPtr % ibuffer)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+
+#endif
+
+ end subroutine mpas_dmpar_exch_halo_field3d_integer
+
+
+ subroutine mpas_unpack_recv_buf1d_real(nField, field, recvList, startUnpackIdx, nBuffer, buffer, nUnpacked, lastUnpackedIdx)
+
+ ! Scatter a contiguous receive buffer into a 1-d real field.
+ ! Field elements recvList%list(startUnpackIdx:) are assigned consecutive
+ ! buffer entries until either the exchange list or the buffer is exhausted.
+ ! On return, nUnpacked is the number of buffer values consumed and
+ ! lastUnpackedIdx is the last list index that was unpacked.
+
+ implicit none
+
+ integer, intent(in) :: nField, nBuffer, startUnpackIdx
+ real (kind=RKIND), dimension(*), intent(inout) :: field
+ type (exchange_list), intent(in) :: recvList
+ real (kind=RKIND), dimension(nBuffer), intent(in) :: buffer
+ integer, intent(inout) :: nUnpacked, lastUnpackedIdx
+
+ integer :: iElem
+
+ nUnpacked = 0
+ do iElem = startUnpackIdx, recvList % nlist
+ ! Stop (without consuming) as soon as the next element would overrun buffer
+ if (nUnpacked + 1 > nBuffer) then
+ lastUnpackedIdx = iElem - 1
+ return
+ end if
+ nUnpacked = nUnpacked + 1
+ field(recvList % list(iElem)) = buffer(nUnpacked)
+ end do
+ lastUnpackedIdx = recvList % nlist
+
+ end subroutine mpas_unpack_recv_buf1d_real
+
+
+ subroutine mpas_unpack_recv_buf2d_real(ds, de, nField, field, recvList, startUnpackIdx, nBuffer, buffer, nUnpacked, lastUnpackedIdx)
+
+ ! Scatter a contiguous receive buffer into a 2-d real field. For each
+ ! exchange-list entry, the full first-dimension slice field(ds:de,elem) is
+ ! filled from the next (de-ds+1) consecutive buffer values. On return,
+ ! nUnpacked is the number of buffer values consumed and lastUnpackedIdx is
+ ! the last list index that was unpacked.
+
+ implicit none
+
+ integer, intent(in) :: ds, de, nField, nBuffer, startUnpackIdx
+ real (kind=RKIND), dimension(ds:de,*), intent(inout) :: field
+ type (exchange_list), intent(in) :: recvList
+ real (kind=RKIND), dimension(nBuffer), intent(in) :: buffer
+ integer, intent(inout) :: nUnpacked, lastUnpackedIdx
+
+ integer :: iElem, sliceLen
+
+ sliceLen = de - ds + 1
+
+ nUnpacked = 0
+ do iElem = startUnpackIdx, recvList % nlist
+ ! Stop before consuming a partial slice
+ if (nUnpacked + sliceLen > nBuffer) then
+ lastUnpackedIdx = iElem - 1
+ return
+ end if
+ field(ds:de, recvList % list(iElem)) = buffer(nUnpacked+1:nUnpacked+sliceLen)
+ nUnpacked = nUnpacked + sliceLen
+ end do
+ lastUnpackedIdx = recvList % nlist
+
+ end subroutine mpas_unpack_recv_buf2d_real
+
+
+ subroutine mpas_unpack_recv_buf3d_real(d1s, d1e, d2s, d2e, nField, field, recvList, startUnpackIdx, nBuffer, buffer, &
+ nUnpacked, lastUnpackedIdx)
+
+ ! Scatter a contiguous receive buffer into a 3-d real field. For each
+ ! exchange-list entry, a full (d1s:d1e, d2s:d2e) slab of field is filled
+ ! from the next n = (d1e-d1s+1)*(d2e-d2s+1) buffer values, first dimension
+ ! varying fastest. On return, nUnpacked is the number of buffer values
+ ! consumed and lastUnpackedIdx the last list index unpacked.
+
+ implicit none
+
+ integer, intent(in) :: d1s, d1e, d2s, d2e, nField, nBuffer, startUnpackIdx
+ real (kind=RKIND), dimension(d1s:d1e,d2s:d2e,*), intent(inout) :: field
+ type (exchange_list), intent(in) :: recvList
+ real (kind=RKIND), dimension(nBuffer), intent(in) :: buffer
+ integer, intent(inout) :: nUnpacked, lastUnpackedIdx
+
+ integer :: i, j, k, n
+
+ ! Number of buffer values per unpacked element (one full 2-d slab)
+ n = (d1e-d1s+1) * (d2e-d2s+1)
+
+ nUnpacked = 0
+ do i=startUnpackIdx, recvList % nlist
+ nUnpacked = nUnpacked + n
+ ! If a whole slab no longer fits, back out the tentative count and stop
+ if (nUnpacked > nBuffer) then
+ nUnpacked = nUnpacked - n
+ lastUnpackedIdx = i - 1
+ return
+ end if
+ ! k walks the buffer one first-dimension row at a time
+ k = nUnpacked-n+1
+ do j=d2s,d2e
+ field(d1s:d1e,j,recvList % list(i)) = buffer(k:k+d1e-d1s)
+ k = k + d1e-d1s+1
+ end do
+ end do
+ lastUnpackedIdx = recvList % nlist
+
+ end subroutine mpas_unpack_recv_buf3d_real
+
+
+ subroutine mpas_dmpar_exch_halo_field1d_real(dminfo, array, dim1, sendList, recvList)
+
+ ! Update halo (ghost) elements of a 1-d real field across processes.
+ ! Non-blocking MPI exchange: post all receives, then all sends, then wait.
+ ! Do not reorder the phases. No-op when compiled without MPI.
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: dim1
+ real (kind=RKIND), dimension(*), intent(inout) :: array
+ type (exchange_list), pointer :: sendList, recvList
+
+ type (exchange_list), pointer :: sendListPtr, recvListPtr
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: mpi_ierr
+
+#ifdef _MPI
+
+ ! Phase 1: post receives from every true neighbor (local entries skipped);
+ ! message tag is the sender's rank.
+ recvListPtr => recvList
+ do while (associated(recvListPtr))
+ if (recvListPtr % procID /= dminfo % my_proc_id) then
+ allocate(recvListPtr % rbuffer(recvListPtr % nlist))
+ call MPI_Irecv(recvListPtr % rbuffer, recvListPtr % nlist, MPI_REALKIND, &
+ recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
+ end if
+ recvListPtr => recvListPtr % next
+ end do
+
+ ! Phase 2: pack and post the sends, tagged with this process's rank.
+ sendListPtr => sendList
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ allocate(sendListPtr % rbuffer(sendListPtr % nlist))
+ call mpas_pack_send_buf1d_real(dim1, array, sendListPtr, 1, sendListPtr % nlist, sendListPtr % rbuffer, nPacked, lastPackedIdx)
+ call MPI_Isend(sendListPtr % rbuffer, sendListPtr % nlist, MPI_REALKIND, &
+ sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+
+ ! Phase 3: complete receives, unpack into array's halo slots, free buffers.
+ recvListPtr => recvList
+ do while (associated(recvListPtr))
+ if (recvListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ call mpas_unpack_recv_buf1d_real(dim1, array, recvListPtr, 1, recvListPtr % nlist, recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
+ deallocate(recvListPtr % rbuffer)
+ end if
+ recvListPtr => recvListPtr % next
+ end do
+
+ ! Phase 4: complete sends so their buffers can be safely freed.
+ sendListPtr => sendList
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ deallocate(sendListPtr % rbuffer)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+
+#endif
+
+ end subroutine mpas_dmpar_exch_halo_field1d_real
+
+
+ subroutine mpas_dmpar_exch_halo_field2d_real(dminfo, array, dim1, dim2, sendList, recvList)
+
+ ! Update halo (ghost) elements of a 2-d real field across processes.
+ ! array is dimensioned (dim1,nElems); each exchanged element carries a
+ ! full dim1 slice. Non-blocking MPI exchange: post all receives, then all
+ ! sends, then wait. Do not reorder the phases. No-op without MPI.
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: dim1, dim2
+ real (kind=RKIND), dimension(dim1,*), intent(inout) :: array
+ type (exchange_list), pointer :: sendList, recvList
+
+ type (exchange_list), pointer :: sendListPtr, recvListPtr
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: mpi_ierr
+ integer :: d2
+
+#ifdef _MPI
+
+ ! Phase 1: post receives from true neighbors; tag is the sender's rank.
+ recvListPtr => recvList
+ do while (associated(recvListPtr))
+ if (recvListPtr % procID /= dminfo % my_proc_id) then
+ d2 = dim1 * recvListPtr % nlist
+ allocate(recvListPtr % rbuffer(d2))
+ call MPI_Irecv(recvListPtr % rbuffer, d2, MPI_REALKIND, &
+ recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
+ end if
+ recvListPtr => recvListPtr % next
+ end do
+
+ ! Phase 2: pack and post sends, tagged with this process's rank.
+ sendListPtr => sendList
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ d2 = dim1 * sendListPtr % nlist
+ allocate(sendListPtr % rbuffer(d2))
+ call mpas_pack_send_buf2d_real(1, dim1, dim2, array, sendListPtr, 1, d2, sendListPtr % rbuffer, nPacked, lastPackedIdx)
+ call MPI_Isend(sendListPtr % rbuffer, d2, MPI_REALKIND, &
+ sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+
+ ! Phase 3: complete receives, unpack into the halo slots, free buffers.
+ recvListPtr => recvList
+ do while (associated(recvListPtr))
+ if (recvListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ d2 = dim1 * recvListPtr % nlist
+ call mpas_unpack_recv_buf2d_real(1, dim1, dim2, array, recvListPtr, 1, d2, recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
+ deallocate(recvListPtr % rbuffer)
+ end if
+ recvListPtr => recvListPtr % next
+ end do
+
+ ! Phase 4: complete sends so their buffers can be safely freed.
+ sendListPtr => sendList
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ deallocate(sendListPtr % rbuffer)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+
+#endif
+
+ end subroutine mpas_dmpar_exch_halo_field2d_real
+
+
+ subroutine mpas_dmpar_exch_halo_field3d_real(dminfo, array, dim1, dim2, dim3, sendList, recvList)
+
+ ! Update halo (ghost) elements of a 3-d real field across processes.
+ ! array is dimensioned (dim1,dim2,nElems); each exchanged element carries
+ ! a full dim1 x dim2 slab. Non-blocking MPI exchange: post all receives,
+ ! then all sends, then wait. Do not reorder the phases. No-op without MPI.
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ integer, intent(in) :: dim1, dim2, dim3
+ real (kind=RKIND), dimension(dim1,dim2,*), intent(inout) :: array
+ type (exchange_list), pointer :: sendList, recvList
+
+ type (exchange_list), pointer :: sendListPtr, recvListPtr
+ integer :: lastPackedIdx, lastUnpackedIdx, nPacked, nUnpacked
+ integer :: mpi_ierr
+ integer :: d3
+
+#ifdef _MPI
+
+ ! Phase 1: post receives from true neighbors; tag is the sender's rank.
+ recvListPtr => recvList
+ do while (associated(recvListPtr))
+ if (recvListPtr % procID /= dminfo % my_proc_id) then
+ d3 = dim1 * dim2 * recvListPtr % nlist
+ allocate(recvListPtr % rbuffer(d3))
+ call MPI_Irecv(recvListPtr % rbuffer, d3, MPI_REALKIND, &
+ recvListPtr % procID, recvListPtr % procID, dminfo % comm, recvListPtr % reqID, mpi_ierr)
+ end if
+ recvListPtr => recvListPtr % next
+ end do
+
+ ! Phase 2: pack and post sends, tagged with this process's rank.
+ sendListPtr => sendList
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ d3 = dim1 * dim2 * sendListPtr % nlist
+ allocate(sendListPtr % rbuffer(d3))
+ call mpas_pack_send_buf3d_real(1, dim1, 1, dim2, dim3, array, sendListPtr, 1, d3, &
+ sendListPtr % rbuffer, nPacked, lastPackedIdx)
+ call MPI_Isend(sendListPtr % rbuffer, d3, MPI_REALKIND, &
+ sendListPtr % procID, dminfo % my_proc_id, dminfo % comm, sendListPtr % reqID, mpi_ierr)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+
+ ! Phase 3: complete receives, unpack into the halo slots, free buffers.
+ recvListPtr => recvList
+ do while (associated(recvListPtr))
+ if (recvListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(recvListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ d3 = dim1 * dim2 * recvListPtr % nlist
+ call mpas_unpack_recv_buf3d_real(1, dim1, 1, dim2, dim3, array, recvListPtr, 1, d3, &
+ recvListPtr % rbuffer, nUnpacked, lastUnpackedIdx)
+ deallocate(recvListPtr % rbuffer)
+ end if
+ recvListPtr => recvListPtr % next
+ end do
+
+ ! Phase 4: complete sends so their buffers can be safely freed.
+ sendListPtr => sendList
+ do while (associated(sendListPtr))
+ if (sendListPtr % procID /= dminfo % my_proc_id) then
+ call MPI_Wait(sendListPtr % reqID, MPI_STATUS_IGNORE, mpi_ierr)
+ deallocate(sendListPtr % rbuffer)
+ end if
+ sendListPtr => sendListPtr % next
+ end do
+
+#endif
+
+ end subroutine mpas_dmpar_exch_halo_field3d_real
+
+
+end module mpas_dmpar
Copied: branches/source_renaming/src/framework/mpas_framework.F (from rev 1089, branches/source_renaming/src/framework/module_mpas_framework.F)
===================================================================
--- branches/source_renaming/src/framework/mpas_framework.F         (rev 0)
+++ branches/source_renaming/src/framework/mpas_framework.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -0,0 +1,49 @@
+module mpas_framework
+
+ use mpas_dmpar
+ use mpas_grid_types
+ use mpas_io_input
+ use mpas_io_output
+ use mpas_configure
+ use mpas_timer
+ use mpas_timekeeping
+
+
+ contains
+
+
+ subroutine mpas_framework_init(dminfo, domain)
+
+ ! Bring up the MPAS framework: allocate and initialize the parallel
+ ! decomposition info, read the run-time namelist, create the (empty)
+ ! domain, and initialize timekeeping. Pairs with mpas_framework_finalize.
+
+ implicit none
+
+ type (dm_info), pointer :: dminfo
+ type (domain_type), pointer :: domain
+
+ ! dminfo is allocated here and owned by the framework thereafter
+ allocate(dminfo)
+ call mpas_dmpar_init(dminfo)
+
+ call mpas_read_namelist(dminfo)
+
+ call mpas_allocate_domain(domain, dminfo)
+
+ ! config_calendar_type is a namelist option read just above
+ call mpas_timekeeping_init(config_calendar_type)
+
+ end subroutine mpas_framework_init
+
+
+ subroutine mpas_framework_finalize(dminfo, domain)
+
+ ! Tear down the MPAS framework: free the domain, shut down the parallel
+ ! layer, release the dm_info allocated by mpas_framework_init, and
+ ! finalize timekeeping.
+
+ implicit none
+
+ type (dm_info), pointer :: dminfo
+ type (domain_type), pointer :: domain
+
+ call mpas_deallocate_domain(domain)
+
+ call mpas_dmpar_finalize(dminfo)
+
+ ! mpas_framework_init allocated dminfo; free it here (after dmpar has
+ ! finished with it) so the structure does not leak at shutdown.
+ deallocate(dminfo)
+
+ call mpas_timekeeping_finalize()
+
+ end subroutine mpas_framework_finalize
+
+end module mpas_framework
Copied: branches/source_renaming/src/framework/mpas_grid_types.F (from rev 1089, branches/source_renaming/src/framework/module_grid_types.F)
===================================================================
--- branches/source_renaming/src/framework/mpas_grid_types.F         (rev 0)
+++ branches/source_renaming/src/framework/mpas_grid_types.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -0,0 +1,219 @@
+module mpas_grid_types
+
+ use mpas_dmpar
+
+ integer, parameter :: nTimeLevs = 2
+
+
+ ! Derived type describing info for doing I/O specific to a field
+ type io_info
+ character (len=1024) :: fieldName
+ integer, dimension(4) :: start
+ integer, dimension(4) :: count
+ logical :: input
+ logical :: sfc
+ logical :: restart
+ logical :: output
+ end type io_info
+
+
+ ! Derived type for storing fields
+ type field3DReal
+ type (block_type), pointer :: block
+ real (kind=RKIND), dimension(:,:,:), pointer :: array
+ type (io_info), pointer :: ioinfo
+ end type field3DReal
+
+
+ ! Derived type for storing fields
+ type field2DReal
+ type (block_type), pointer :: block
+ real (kind=RKIND), dimension(:,:), pointer :: array
+ type (io_info), pointer :: ioinfo
+ end type field2DReal
+
+
+ ! Derived type for storing fields
+ type field1DReal
+ type (block_type), pointer :: block
+ real (kind=RKIND), dimension(:), pointer :: array
+ type (io_info), pointer :: ioinfo
+ end type field1DReal
+
+
+ ! Derived type for storing fields
+ type field0DReal
+ type (block_type), pointer :: block
+ real (kind=RKIND) :: scalar
+ type (io_info), pointer :: ioinfo
+ end type field0DReal
+
+
+ ! Derived type for storing fields
+ type field2DInteger
+ type (block_type), pointer :: block
+ integer, dimension(:,:), pointer :: array
+ type (io_info), pointer :: ioinfo
+ end type field2DInteger
+
+
+ ! Derived type for storing fields
+ type field1DInteger
+ type (block_type), pointer :: block
+ integer, dimension(:), pointer :: array
+ type (io_info), pointer :: ioinfo
+ end type field1DInteger
+
+
+ ! Derived type for storing fields
+ type field1DChar
+ type (block_type), pointer :: block
+ character (len=64), dimension(:), pointer :: array
+ type (io_info), pointer :: ioinfo
+ end type field1DChar
+
+
+ ! Derived type for storing fields
+ type field0DChar
+ type (block_type), pointer :: block
+ character (len=64) :: scalar
+ type (io_info), pointer :: ioinfo
+ end type field0DChar
+
+
+ ! Derived type for storing grid meta-data
+ type mesh_type
+
+#include "field_dimensions.inc"
+
+ logical :: on_a_sphere
+ real (kind=RKIND) :: sphere_radius
+
+#include "time_invariant_fields.inc"
+
+ end type mesh_type
+
+
+#include "variable_groups.inc"
+
+
+ ! Type for storing (possibly architecture specific) information concerning to parallelism
+ type parallel_info
+ type (exchange_list), pointer :: cellsToSend ! List of types describing which cells to send to other blocks
+ type (exchange_list), pointer :: cellsToRecv ! List of types describing which cells to receive from other blocks
+ type (exchange_list), pointer :: edgesToSend ! List of types describing which edges to send to other blocks
+ type (exchange_list), pointer :: edgesToRecv ! List of types describing which edges to receive from other blocks
+ type (exchange_list), pointer :: verticesToSend ! List of types describing which vertices to send to other blocks
+ type (exchange_list), pointer :: verticesToRecv ! List of types describing which vertices to receive from other blocks
+ end type parallel_info
+
+
+ ! Derived type for storing part of a domain; used as a basic unit of work for a process
+ type block_type
+
+#include "block_group_members.inc"
+
+ type (domain_type), pointer :: domain
+
+ type (parallel_info), pointer :: parinfo
+
+ type (block_type), pointer :: prev, next
+ end type block_type
+
+
+ ! Derived type for storing list of blocks from a domain to be handled by a process
+ type domain_type
+ type (block_type), pointer :: blocklist
+
+ ! Also store parallelization info here
+ type (dm_info), pointer :: dminfo
+ end type domain_type
+
+
+ contains
+
+
+ subroutine mpas_allocate_domain(dom, dminfo)
+
+ ! Create a new, empty domain: allocate the domain object, attach the
+ ! caller-supplied parallel-decomposition info, and leave the block list
+ ! unassociated until blocks are added.
+
+ implicit none
+
+ type (domain_type), pointer :: dom
+ type (dm_info), pointer :: dminfo
+
+ allocate(dom)
+ dom % dminfo => dminfo
+ nullify(dom % blocklist)
+
+ end subroutine mpas_allocate_domain
+
+
+ subroutine mpas_allocate_block(b, dom, &
+#include "dim_dummy_args.inc"
+ )
+
+ ! Initialize a single block within domain dom: detach it from any list,
+ ! allocate its parallel-exchange info, and allocate its field groups via
+ ! generated include code. The dimension arguments and the per-group
+ ! allocations come from registry-generated .inc files.
+ ! NOTE(review): b itself appears to be allocated by the caller -- this
+ ! routine only fills it in; confirm against the allocation site.
+
+ implicit none
+
+ type (block_type), pointer :: b
+ type (domain_type), pointer :: dom
+#include "dim_dummy_decls.inc"
+
+ integer :: i
+
+ ! Not yet linked into a block list
+ nullify(b % prev)
+ nullify(b % next)
+
+ allocate(b % parinfo)
+
+ b % domain => dom
+
+#include "block_allocs.inc"
+
+ end subroutine mpas_allocate_block
+
+
+#include "group_alloc_routines.inc"
+
+
+ subroutine mpas_deallocate_domain(dom)
+
+ ! Free a domain: release the contents of every block in its list, then
+ ! free the domain object itself. Inverse of mpas_allocate_domain.
+
+ implicit none
+
+ type (domain_type), pointer :: dom
+
+ type (block_type), pointer :: block_ptr
+
+ ! mpas_deallocate_block frees a block's contents (parinfo and field
+ ! groups) but not the node itself, so advancing to % next afterward is
+ ! safe. NOTE(review): the block_type nodes appear never to be
+ ! deallocated here -- possible leak; confirm against where blocks are
+ ! allocated before changing this.
+ block_ptr => dom % blocklist
+ do while (associated(block_ptr))
+ call mpas_deallocate_block(block_ptr)
+ block_ptr => block_ptr % next
+ end do
+
+ deallocate(dom)
+
+ end subroutine mpas_deallocate_domain
+
+
+ subroutine mpas_deallocate_block(b)
+
+ ! Free the contents of one block: its parallel-exchange info plus the
+ ! registry-generated field-group deallocations. Does NOT free the
+ ! block_type object itself (b is intent(inout), not a pointer).
+
+ implicit none
+
+ type (block_type), intent(inout) :: b
+
+ integer :: i
+
+ deallocate(b % parinfo)
+
+#include "block_deallocs.inc"
+
+ end subroutine mpas_deallocate_block
+
+
+#include "group_dealloc_routines.inc"
+
+
+#include "group_copy_routines.inc"
+
+
+#include "group_shift_level_routines.inc"
+
+end module mpas_grid_types
Copied: branches/source_renaming/src/framework/mpas_hash.F (from rev 1089, branches/source_renaming/src/framework/module_hash.F)
===================================================================
--- branches/source_renaming/src/framework/mpas_hash.F         (rev 0)
+++ branches/source_renaming/src/framework/mpas_hash.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -0,0 +1,175 @@
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+! MODULE MPAS_HASH
+!
+! Purpose: This module provides a dictionary/hashtable with insert, search, and
+! remove routines.
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+module mpas_hash
+
+ ! Parameters
+ integer, parameter :: TABLESIZE=27183 ! Number of spaces in the table (the
+ ! number of linked lists)
+
+ type hashnode
+ integer :: key
+ type (hashnode), pointer :: next
+ end type hashnode
+
+ type hashnode_ptr
+ type (hashnode), pointer :: p ! Pointer to a list of entries
+ end type hashnode_ptr
+
+ type hashtable
+ integer :: size
+ type (hashnode_ptr), dimension(TABLESIZE) :: table ! The hashtable array
+ end type hashtable
+
+
+ contains
+
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ! Name: mpas_hash_init
+ !
+ ! Purpose: To initialize a hashtable
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ subroutine mpas_hash_init(h)
+
+ implicit none
+
+ ! Arguments
+ type (hashtable), intent(inout) :: h
+
+ ! Local variables
+ integer :: i
+
+ h%size = 0
+
+ ! Every bucket starts as an empty (unassociated) chain
+ do i=1,TABLESIZE
+ nullify(h%table(i)%p)
+ end do
+
+ end subroutine mpas_hash_init
+
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ! Name: mpas_hash_insert
+ !
+ ! Purpose: Given a hashtable h and a key to be inserted into the hashtable,
+ ! this routine adds key to the table.
+ !
+ ! NOTE: If the key already exists in the table, a second copy of the
+ ! key is added to the table
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ subroutine mpas_hash_insert(h, key)
+
+ implicit none
+
+ ! Arguments
+ integer, intent(in) :: key
+ type (hashtable), intent(inout) :: h
+
+ ! Local variables
+ integer :: hashval, i
+ type (hashnode), pointer :: hn
+
+ ! Bucket index in 1..TABLESIZE (keys are assumed non-negative)
+ hashval = mod(key, TABLESIZE) + 1
+
+ ! Prepend the new node to the bucket's chain -- O(1) insert
+ allocate(hn)
+ hn%key = key
+ hn%next => h%table(hashval)%p
+ h%table(hashval)%p => hn
+
+ h%size = h%size + 1
+
+ end subroutine mpas_hash_insert
+
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ! Name: mpas_hash_search
+ !
+ ! Purpose: Returns .true. if key is present in hashtable h, and .false.
+ ! otherwise.
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ logical function mpas_hash_search(h, key)
+
+ implicit none
+
+ ! Arguments
+ integer, intent(in) :: key
+ type (hashtable), intent(inout) :: h
+
+ ! Local variables
+ integer :: bucket
+ type (hashnode), pointer :: node
+
+ mpas_hash_search = .false.
+
+ ! Walk the chain for this key's bucket until a match is found or the
+ ! chain ends
+ bucket = mod(key, TABLESIZE) + 1
+ node => h%table(bucket)%p
+ do while (associated(node))
+ if (node%key == key) then
+ mpas_hash_search = .true.
+ return
+ end if
+ node => node%next
+ end do
+
+ end function mpas_hash_search
+
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ! Name: mpas_hash_size
+ !
+ ! Purpose: Returns the number of items stored in the hash table h.
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ integer function mpas_hash_size(h)
+
+ implicit none
+
+ ! Arguments
+ type (hashtable) :: h
+
+ ! The count is maintained incrementally by insert, so this is O(1)
+ mpas_hash_size = h%size
+
+ end function mpas_hash_size
+
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ! Name: mpas_hash_destroy
+ !
+ ! Purpose: Frees all memory associated with hashtable h. This routine may be
+ ! used to remove all entries from a hashtable.
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ subroutine mpas_hash_destroy(h)
+
+ implicit none
+
+ ! Arguments
+ type (hashtable), intent(inout) :: h
+
+ ! Local variables
+ integer :: i
+ type (hashnode), pointer :: cursor, cursor_prev
+
+ ! Free every node in every bucket's chain; cursor is advanced before
+ ! cursor_prev is deallocated so the traversal never touches freed memory
+ do i=1,TABLESIZE
+ cursor => h%table(i)%p
+ do while(associated(cursor))
+ cursor_prev => cursor
+ cursor => cursor%next
+ deallocate(cursor_prev)
+ end do
+ nullify(h%table(i)%p)
+ end do
+
+ h%size = 0
+
+ end subroutine mpas_hash_destroy
+
+end module mpas_hash
Copied: branches/source_renaming/src/framework/mpas_io_input.F (from rev 1089, branches/source_renaming/src/framework/module_io_input.F)
===================================================================
--- branches/source_renaming/src/framework/mpas_io_input.F         (rev 0)
+++ branches/source_renaming/src/framework/mpas_io_input.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -0,0 +1,1614 @@
+module mpas_io_input
+
+ use mpas_grid_types
+ use mpas_dmpar
+ use mpas_block_decomp
+ use mpas_sort
+ use mpas_configure
+ use mpas_timekeeping
+
+
+#ifdef HAVE_ZOLTAN
+ use mpas_zoltan_interface
+#endif
+
+ integer, parameter :: STREAM_INPUT=1, STREAM_SFC=2, STREAM_RESTART=3
+
+ type io_input_object
+ character (len=1024) :: filename
+ integer :: rd_ncid
+ integer :: stream
+
+ integer :: time
+
+#include "io_input_obj_decls.inc"
+ end type io_input_object
+
+
+ interface mpas_io_input_field
+ module procedure mpas_io_input_field0d_real
+ module procedure mpas_io_input_field1d_real
+ module procedure mpas_io_input_field2d_real
+ module procedure mpas_io_input_field3d_real
+ module procedure mpas_io_input_field1d_integer
+ module procedure mpas_io_input_field2d_integer
+ module procedure mpas_io_input_field0d_char
+ module procedure mpas_io_input_field1d_char
+ end interface mpas_io_input_field
+
+ interface mpas_io_input_field_time
+ module procedure mpas_io_input_field0d_real_time
+ module procedure mpas_io_input_field1d_real_time
+ module procedure mpas_io_input_field2d_real_time
+ module procedure mpas_io_input_field3d_real_time
+ module procedure mpas_io_input_field1d_integer_time
+ module procedure mpas_io_input_field0d_char_time
+ module procedure mpas_io_input_field1d_char_time
+ end interface mpas_io_input_field_time
+
+ type (exchange_list), pointer :: sendCellList, recvCellList
+ type (exchange_list), pointer :: sendEdgeList, recvEdgeList
+ type (exchange_list), pointer :: sendVertexList, recvVertexList
+ type (exchange_list), pointer :: sendVertLevelList, recvVertLevelList
+
+ integer :: readCellStart, readCellEnd, nReadCells
+ integer :: readEdgeStart, readEdgeEnd, nReadEdges
+ integer :: readVertexStart, readVertexEnd, nReadVertices
+ integer :: readVertLevelStart, readVertLevelEnd, nReadVertLevels
+
+
+ contains
+
+
+ subroutine mpas_input_state_for_domain(domain)
+
+ implicit none
+
+ type (domain_type), pointer :: domain
+
+ integer :: i, j, k
+ type (io_input_object) :: input_obj
+#include "dim_decls.inc"
+
+ character (len=16) :: c_on_a_sphere
+ real (kind=RKIND) :: r_sphere_radius
+
+ type (field1dInteger) :: indexToCellIDField
+ type (field1dInteger) :: indexToEdgeIDField
+ type (field1dInteger) :: indexToVertexIDField
+ type (field1dInteger) :: nEdgesOnCellField
+ type (field2dInteger) :: cellsOnCellField
+ type (field2dInteger) :: edgesOnCellField
+ type (field2dInteger) :: verticesOnCellField
+ type (field2dInteger) :: cellsOnEdgeField
+ type (field2dInteger) :: cellsOnVertexField
+
+#ifdef HAVE_ZOLTAN
+#ifdef _MPI
+ type (field1dReal) :: xCellField, yCellField, zCellField
+ type (field1dReal) :: xEdgeField, yEdgeField, zEdgeField
+ type (field1dReal) :: xVertexField, yVertexField, zVertexField
+#endif
+#endif
+
+ type (field1DChar) :: xtime
+
+ integer, dimension(:), pointer :: indexToCellID_0Halo
+ integer, dimension(:), pointer :: nEdgesOnCell_0Halo
+ integer, dimension(:,:), pointer :: cellsOnCell_0Halo
+
+ integer, dimension(:,:), pointer :: edgesOnCell_2Halo
+ integer, dimension(:,:), pointer :: verticesOnCell_2Halo
+ integer, dimension(:,:), pointer :: cellsOnEdge_2Halo
+ integer, dimension(:,:), pointer :: cellsOnVertex_2Halo
+
+ integer, dimension(:,:), pointer :: cellIDSorted
+ integer, dimension(:,:), pointer :: edgeIDSorted
+ integer, dimension(:,:), pointer :: vertexIDSorted
+
+#ifdef HAVE_ZOLTAN
+#ifdef _MPI
+ real (kind=RKIND), dimension(:), pointer :: xCell, yCell, zCell
+ real (kind=RKIND), dimension(:), pointer :: xEdge, yEdge, zEdge
+ real (kind=RKIND), dimension(:), pointer :: xVertex, yVertex, zVertex
+#endif
+#endif
+
+ integer, dimension(:), pointer :: local_cell_list, local_edge_list, local_vertex_list
+ integer, dimension(:), pointer :: local_vertlevel_list, needed_vertlevel_list
+ integer :: nlocal_edges, nlocal_vertices
+ type (exchange_list), pointer :: send1Halo, recv1Halo
+ type (exchange_list), pointer :: send2Halo, recv2Halo
+ type (graph) :: partial_global_graph_info
+ type (graph) :: block_graph_0Halo, block_graph_1Halo, block_graph_2Halo
+ integer :: ghostEdgeStart, ghostVertexStart
+
+ type (MPAS_Time_type) :: startTime
+ type (MPAS_Time_type) :: sliceTime
+ type (MPAS_TimeInterval_type) :: timeDiff
+ type (MPAS_TimeInterval_type) :: minTimeDiff
+ character(len=32) :: timeStamp
+
+ if (config_do_restart) then
+ input_obj % filename = trim(config_restart_name)
+ input_obj % stream = STREAM_RESTART
+ else
+ input_obj % filename = trim(config_input_name)
+ input_obj % stream = STREAM_INPUT
+ end if
+ call mpas_io_input_init(input_obj, domain % dminfo)
+
+
+ !
+ ! Read global number of cells/edges/vertices
+ !
+#include "read_dims.inc"
+
+ !
+ ! Determine the range of cells/edges/vertices that a processor will initially read
+ ! from the input file
+ !
+ call mpas_dmpar_get_index_range(domain % dminfo, 1, nCells, readCellStart, readCellEnd)
+ nReadCells = readCellEnd - readCellStart + 1
+
+ call mpas_dmpar_get_index_range(domain % dminfo, 1, nEdges, readEdgeStart, readEdgeEnd)
+ nReadEdges = readEdgeEnd - readEdgeStart + 1
+
+ call mpas_dmpar_get_index_range(domain % dminfo, 1, nVertices, readVertexStart, readVertexEnd)
+ nReadVertices = readVertexEnd - readVertexStart + 1
+
+ readVertLevelStart = 1
+ readVertLevelEnd = nVertLevels
+ nReadVertLevels = nVertLevels
+
+
+ !
+ ! Allocate and read fields that we will need in order to ultimately work out
+ ! which cells/edges/vertices are owned by each block, and which are ghost
+ !
+
+ ! Global cell indices
+ allocate(indexToCellIDField % ioinfo)
+ indexToCellIDField % ioinfo % fieldName = 'indexToCellID'
+ indexToCellIDField % ioinfo % start(1) = readCellStart
+ indexToCellIDField % ioinfo % count(1) = nReadCells
+ allocate(indexToCellIDField % array(nReadCells))
+ call mpas_io_input_field(input_obj, indexToCellIDField)
+
+#ifdef HAVE_ZOLTAN
+#ifdef _MPI
+ ! Cell x-coordinates (in 3d Cartesian space)
+ allocate(xCellField % ioinfo)
+ xCellField % ioinfo % fieldName = 'xCell'
+ xCellField % ioinfo % start(1) = readCellStart
+ xCellField % ioinfo % count(1) = nReadCells
+ allocate(xCellField % array(nReadCells))
+ call mpas_io_input_field(input_obj, xCellField)
+
+ ! Cell y-coordinates (in 3d Cartesian space)
+ allocate(yCellField % ioinfo)
+ yCellField % ioinfo % fieldName = 'yCell'
+ yCellField % ioinfo % start(1) = readCellStart
+ yCellField % ioinfo % count(1) = nReadCells
+ allocate(yCellField % array(nReadCells))
+ call mpas_io_input_field(input_obj, yCellField)
+
+ ! Cell z-coordinates (in 3d Cartesian space)
+ allocate(zCellField % ioinfo)
+ zCellField % ioinfo % fieldName = 'zCell'
+ zCellField % ioinfo % start(1) = readCellStart
+ zCellField % ioinfo % count(1) = nReadCells
+ allocate(zCellField % array(nReadCells))
+ call mpas_io_input_field(input_obj, zCellField)
+#endif
+#endif
+
+
+ ! Global edge indices
+ allocate(indexToEdgeIDField % ioinfo)
+ indexToEdgeIDField % ioinfo % fieldName = 'indexToEdgeID'
+ indexToEdgeIDField % ioinfo % start(1) = readEdgeStart
+ indexToEdgeIDField % ioinfo % count(1) = nReadEdges
+ allocate(indexToEdgeIDField % array(nReadEdges))
+ call mpas_io_input_field(input_obj, indexToEdgeIDField)
+
+#ifdef HAVE_ZOLTAN
+#ifdef _MPI
+ ! Edge x-coordinates (in 3d Cartesian space)
+ allocate(xEdgeField % ioinfo)
+ xEdgeField % ioinfo % fieldName = 'xEdge'
+ xEdgeField % ioinfo % start(1) = readEdgeStart
+ xEdgeField % ioinfo % count(1) = nReadEdges
+ allocate(xEdgeField % array(nReadEdges))
+ call mpas_io_input_field(input_obj, xEdgeField)
+
+ ! Edge y-coordinates (in 3d Cartesian space)
+ allocate(yEdgeField % ioinfo)
+ yEdgeField % ioinfo % fieldName = 'yEdge'
+ yEdgeField % ioinfo % start(1) = readEdgeStart
+ yEdgeField % ioinfo % count(1) = nReadEdges
+ allocate(yEdgeField % array(nReadEdges))
+ call mpas_io_input_field(input_obj, yEdgeField)
+
+ ! Edge z-coordinates (in 3d Cartesian space)
+ allocate(zEdgeField % ioinfo)
+ zEdgeField % ioinfo % fieldName = 'zEdge'
+ zEdgeField % ioinfo % start(1) = readEdgeStart
+ zEdgeField % ioinfo % count(1) = nReadEdges
+ allocate(zEdgeField % array(nReadEdges))
+ call mpas_io_input_field(input_obj, zEdgeField)
+#endif
+#endif
+
+ ! Global vertex indices
+ allocate(indexToVertexIDField % ioinfo)
+ indexToVertexIDField % ioinfo % fieldName = 'indexToVertexID'
+ indexToVertexIDField % ioinfo % start(1) = readVertexStart
+ indexToVertexIDField % ioinfo % count(1) = nReadVertices
+ allocate(indexToVertexIDField % array(nReadVertices))
+ call mpas_io_input_field(input_obj, indexToVertexIDField)
+
+#ifdef HAVE_ZOLTAN
+#ifdef _MPI
+ ! Vertex x-coordinates (in 3d Cartesian space)
+ allocate(xVertexField % ioinfo)
+ xVertexField % ioinfo % fieldName = 'xVertex'
+ xVertexField % ioinfo % start(1) = readVertexStart
+ xVertexField % ioinfo % count(1) = nReadVertices
+ allocate(xVertexField % array(nReadVertices))
+ call mpas_io_input_field(input_obj, xVertexField)
+
+ ! Vertex y-coordinates (in 3d Cartesian space)
+ allocate(yVertexField % ioinfo)
+ yVertexField % ioinfo % fieldName = 'yVertex'
+ yVertexField % ioinfo % start(1) = readVertexStart
+ yVertexField % ioinfo % count(1) = nReadVertices
+ allocate(yVertexField % array(nReadVertices))
+ call mpas_io_input_field(input_obj, yVertexField)
+
+ ! Vertex z-coordinates (in 3d Cartesian space)
+ allocate(zVertexField % ioinfo)
+ zVertexField % ioinfo % fieldName = 'zVertex'
+ zVertexField % ioinfo % start(1) = readVertexStart
+ zVertexField % ioinfo % count(1) = nReadVertices
+ allocate(zVertexField % array(nReadVertices))
+ call mpas_io_input_field(input_obj, zVertexField)
+#endif
+#endif
+
+ ! Number of cells/edges/vertices adjacent to each cell
+ allocate(nEdgesOnCellField % ioinfo)
+ nEdgesOnCellField % ioinfo % fieldName = 'nEdgesOnCell'
+ nEdgesOnCellField % ioinfo % start(1) = readCellStart
+ nEdgesOnCellField % ioinfo % count(1) = nReadCells
+ allocate(nEdgesOnCellField % array(nReadCells))
+ call mpas_io_input_field(input_obj, nEdgesOnCellField)
+
+ ! Global indices of cells adjacent to each cell
+ allocate(cellsOnCellField % ioinfo)
+ cellsOnCellField % ioinfo % fieldName = 'cellsOnCell'
+ cellsOnCellField % ioinfo % start(1) = 1
+ cellsOnCellField % ioinfo % start(2) = readCellStart
+ cellsOnCellField % ioinfo % count(1) = maxEdges
+ cellsOnCellField % ioinfo % count(2) = nReadCells
+ allocate(cellsOnCellField % array(maxEdges,nReadCells))
+ call mpas_io_input_field(input_obj, cellsOnCellField)
+
+ ! Global indices of edges adjacent to each cell
+ allocate(edgesOnCellField % ioinfo)
+ edgesOnCellField % ioinfo % fieldName = 'edgesOnCell'
+ edgesOnCellField % ioinfo % start(1) = 1
+ edgesOnCellField % ioinfo % start(2) = readCellStart
+ edgesOnCellField % ioinfo % count(1) = maxEdges
+ edgesOnCellField % ioinfo % count(2) = nReadCells
+ allocate(edgesOnCellField % array(maxEdges,nReadCells))
+ call mpas_io_input_field(input_obj, edgesOnCellField)
+
+ ! Global indices of vertices adjacent to each cell
+ allocate(verticesOnCellField % ioinfo)
+ verticesOnCellField % ioinfo % fieldName = 'verticesOnCell'
+ verticesOnCellField % ioinfo % start(1) = 1
+ verticesOnCellField % ioinfo % start(2) = readCellStart
+ verticesOnCellField % ioinfo % count(1) = maxEdges
+ verticesOnCellField % ioinfo % count(2) = nReadCells
+ allocate(verticesOnCellField % array(maxEdges,nReadCells))
+ call mpas_io_input_field(input_obj, verticesOnCellField)
+
+ ! Global indices of cells adjacent to each edge
+ ! used for determining which edges are owned by a block, where
+ ! iEdge is owned iff cellsOnEdge(1,iEdge) is an owned cell
+ allocate(cellsOnEdgeField % ioinfo)
+ cellsOnEdgeField % ioinfo % fieldName = 'cellsOnEdge'
+ cellsOnEdgeField % ioinfo % start(1) = 1
+ cellsOnEdgeField % ioinfo % start(2) = readEdgeStart
+ cellsOnEdgeField % ioinfo % count(1) = 2
+ cellsOnEdgeField % ioinfo % count(2) = nReadEdges
+ allocate(cellsOnEdgeField % array(2,nReadEdges))
+ call mpas_io_input_field(input_obj, cellsOnEdgeField)
+
+ ! Global indices of cells adjacent to each vertex
+ ! used for determining which vertices are owned by a block, where
+ ! iVtx is owned iff cellsOnVertex(1,iVtx) is an owned cell
+ allocate(cellsOnVertexField % ioinfo)
+ cellsOnVertexField % ioinfo % fieldName = 'cellsOnVertex'
+ cellsOnVertexField % ioinfo % start(1) = 1
+ cellsOnVertexField % ioinfo % start(2) = readVertexStart
+ cellsOnVertexField % ioinfo % count(1) = vertexDegree
+ cellsOnVertexField % ioinfo % count(2) = nReadVertices
+ allocate(cellsOnVertexField % array(vertexDegree,nReadVertices))
+ call mpas_io_input_field(input_obj, cellsOnVertexField)
+
+
+ !
+ ! Set up a graph derived data type describing the connectivity for the cells
+ ! that were read by this process
+ ! A partial description is passed to the block decomp module by each process,
+ ! and the block decomp module returns with a list of global cell indices
+ ! that belong to the block on this process
+ !
+ partial_global_graph_info % nVertices = nReadCells
+ partial_global_graph_info % nVerticesTotal = nCells
+ partial_global_graph_info % maxDegree = maxEdges
+ partial_global_graph_info % ghostStart = nVertices+1
+ allocate(partial_global_graph_info % vertexID(nReadCells))
+ allocate(partial_global_graph_info % nAdjacent(nReadCells))
+ allocate(partial_global_graph_info % adjacencyList(maxEdges, nReadCells))
+
+ partial_global_graph_info % vertexID(:) = indexToCellIDField % array(:)
+ partial_global_graph_info % nAdjacent(:) = nEdgesOnCellField % array(:)
+ partial_global_graph_info % adjacencyList(:,:) = cellsOnCellField % array(:,:)
+
+
+ ! TODO: Ensure (by renaming or exchanging) that initial cell range on each proc is contiguous
+ ! This situation may occur when reading a restart file with cells/edges/vertices written
+ ! in a scrambled order
+
+
+ ! Determine which cells are owned by this process
+ call mpas_block_decomp_cells_for_proc(domain % dminfo, partial_global_graph_info, local_cell_list)
+
+ deallocate(partial_global_graph_info % vertexID)
+ deallocate(partial_global_graph_info % nAdjacent)
+ deallocate(partial_global_graph_info % adjacencyList)
+
+
+ allocate(indexToCellID_0Halo(size(local_cell_list)))
+ allocate(nEdgesOnCell_0Halo(size(local_cell_list)))
+ allocate(cellsOnCell_0Halo(maxEdges, size(local_cell_list)))
+
+#ifdef HAVE_ZOLTAN
+#ifdef _MPI
+ allocate(xCell(size(local_cell_list)))
+ allocate(yCell(size(local_cell_list)))
+ allocate(zCell(size(local_cell_list)))
+#endif
+#endif
+
+ !
+ ! Now that each process has a list of cells that it owns, exchange cell connectivity
+ ! information between the processes that read info for a cell and those that own that cell
+ !
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ size(indexToCellIDField % array), size(local_cell_list), &
+ indexToCellIDField % array, local_cell_list, &
+ sendCellList, recvCellList)
+
+ call mpas_dmpar_alltoall_field(domain % dminfo, indexToCellIDField % array, indexToCellID_0Halo, &
+ size(indexToCellIDField % array), size(local_cell_list), &
+ sendCellList, recvCellList)
+
+ call mpas_dmpar_alltoall_field(domain % dminfo, nEdgesOnCellField % array, nEdgesOnCell_0Halo, &
+ size(indexToCellIDField % array), size(local_cell_list), &
+ sendCellList, recvCellList)
+
+ call mpas_dmpar_alltoall_field(domain % dminfo, cellsOnCellField % array, cellsOnCell_0Halo, &
+ size(cellsOnCellField % array, 1), size(indexToCellIDField % array), size(local_cell_list), &
+ sendCellList, recvCellList)
+
+#ifdef HAVE_ZOLTAN
+#ifdef _MPI
+ call mpas_dmpar_alltoall_field(domain % dminfo, xCellField % array, xCell, &
+ size(xCellField % array), size(local_cell_list), &
+ sendCellList, recvCellList)
+
+ call mpas_dmpar_alltoall_field(domain % dminfo, yCellField % array, yCell, &
+ size(yCellField % array), size(local_cell_list), &
+ sendCellList, recvCellList)
+
+ call mpas_dmpar_alltoall_field(domain % dminfo, zCellField % array, zCell, &
+ size(zCellField % array), size(local_cell_list), &
+ sendCellList, recvCellList)
+#endif
+#endif
+
+
+ deallocate(sendCellList % list)
+ deallocate(sendCellList)
+ deallocate(recvCellList % list)
+ deallocate(recvCellList)
+
+
+
+ !
+ ! Build a graph of cell connectivity based on cells owned by this process
+ !
+ block_graph_0Halo % nVerticesTotal = size(local_cell_list)
+ block_graph_0Halo % nVertices = size(local_cell_list)
+ block_graph_0Halo % maxDegree = maxEdges
+ block_graph_0Halo % ghostStart = size(local_cell_list) + 1
+ allocate(block_graph_0Halo % vertexID(size(local_cell_list)))
+ allocate(block_graph_0Halo % nAdjacent(size(local_cell_list)))
+ allocate(block_graph_0Halo % adjacencyList(maxEdges, size(local_cell_list)))
+
+ block_graph_0Halo % vertexID(:) = indexToCellID_0Halo(:)
+ block_graph_0Halo % nAdjacent(:) = nEdgesOnCell_0Halo(:)
+ block_graph_0Halo % adjacencyList(:,:) = cellsOnCell_0Halo(:,:)
+
+ ! Get back a graph describing the owned cells plus the cells in the 1-halo
+ call mpas_block_decomp_add_halo(domain % dminfo, block_graph_0Halo, block_graph_1Halo)
+
+
+ !
+ ! Work out exchange lists for 1-halo and exchange cell information for 1-halo
+ !
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ block_graph_0Halo % nVertices, block_graph_1Halo % nVerticesTotal, &
+ block_graph_0Halo % vertexID, block_graph_1Halo % vertexID, &
+ send1Halo, recv1Halo)
+
+ call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % vertexID, block_graph_1Halo % vertexID, &
+ block_graph_0Halo % nVertices, block_graph_1Halo % nVerticesTotal, &
+ send1Halo, recv1Halo)
+
+ call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % nAdjacent, block_graph_1Halo % nAdjacent, &
+ block_graph_0Halo % nVertices, block_graph_1Halo % nVerticesTotal, &
+ send1Halo, recv1Halo)
+
+ call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % adjacencyList, block_graph_1Halo % adjacencyList, &
+ block_graph_0Halo % maxDegree, block_graph_0Halo % nVertices, block_graph_1Halo % nVerticesTotal, &
+ send1Halo, recv1Halo)
+
+
+ !
+ ! Work out exchange lists for 2-halo and exchange cell information for 2-halo
+ !
+ block_graph_1Halo % nVertices = block_graph_1Halo % nVerticesTotal
+ block_graph_1Halo % ghostStart = block_graph_1Halo % nVerticesTotal + 1
+
+ ! Get back a graph describing the owned and 1-halo cells plus the cells in the 2-halo
+ call mpas_block_decomp_add_halo(domain % dminfo, block_graph_1Halo, block_graph_2Halo)
+
+ block_graph_2Halo % nVertices = block_graph_0Halo % nVertices
+ block_graph_2Halo % ghostStart = block_graph_2Halo % nVertices + 1
+
+#ifdef HAVE_ZOLTAN
+#ifdef _MPI
+ !! For now, only use Zoltan with MPI
+ !! Zoltan initialization
+ call mpas_zoltan_start()
+
+ !! Zoltan hook for cells
+ call mpas_zoltan_order_loc_hsfc_cells(block_graph_2Halo%nVertices,block_graph_2Halo%VertexID,3,xCell,yCell,zCell)
+#endif
+#endif
+
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ block_graph_0Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
+ block_graph_0Halo % vertexID, block_graph_2Halo % vertexID, &
+ send2Halo, recv2Halo)
+
+ call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % vertexID, block_graph_2Halo % vertexID, &
+ block_graph_0Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
+ send2Halo, recv2Halo)
+
+ call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % nAdjacent, block_graph_2Halo % nAdjacent, &
+ block_graph_0Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
+ send2Halo, recv2Halo)
+
+ call mpas_dmpar_alltoall_field(domain % dminfo, block_graph_0Halo % adjacencyList, block_graph_2Halo % adjacencyList, &
+ block_graph_0Halo % maxDegree, block_graph_0Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
+ send2Halo, recv2Halo)
+
+
+
+ !
+ ! Knowing which cells are in block and the 2-halo, we can exchange lists of which edges are
+ ! on each cell and which vertices are on each cell from the processes that read these
+ ! fields for each cell to the processes that own the cells
+ !
+ allocate(edgesOnCell_2Halo(maxEdges, block_graph_2Halo % nVerticesTotal))
+ allocate(verticesOnCell_2Halo(maxEdges, block_graph_2Halo % nVerticesTotal))
+
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ size(indexToCellIDField % array), block_graph_2Halo % nVerticesTotal, &
+ indexToCellIDField % array, block_graph_2Halo % vertexID, &
+ sendCellList, recvCellList)
+
+ call mpas_dmpar_alltoall_field(domain % dminfo, edgesOnCellField % array, edgesOnCell_2Halo, &
+ maxEdges, nReadCells, block_graph_2Halo % nVerticesTotal, &
+ sendCellList, recvCellList)
+
+ call mpas_dmpar_alltoall_field(domain % dminfo, verticesOnCellField % array, verticesOnCell_2Halo, &
+ maxEdges, nReadCells, block_graph_2Halo % nVerticesTotal, &
+ sendCellList, recvCellList)
+
+
+ !
+ ! Get a list of which edges and vertices are adjacent to cells (including halo cells) in block
+ !
+ call mpas_block_decomp_all_edges_in_block(maxEdges, block_graph_2Halo % nVerticesTotal, block_graph_2Halo % nAdjacent, &
+ edgesOnCell_2Halo, nlocal_edges, local_edge_list)
+ call mpas_block_decomp_all_edges_in_block(maxEdges, block_graph_2Halo % nVerticesTotal, block_graph_2Halo % nAdjacent, &
+ verticesOnCell_2Halo, nlocal_vertices, local_vertex_list)
+
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ size(indexToEdgeIDField % array), nlocal_edges, &
+ indexToEdgeIDField % array, local_edge_list, &
+ sendEdgeList, recvEdgeList)
+
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ size(indexToVertexIDField % array), nlocal_vertices, &
+ indexToVertexIDField % array, local_vertex_list, &
+ sendVertexList, recvVertexList)
+
+
+
+ !
+ ! Work out which edges and vertices are owned by this process, and which are ghost
+ !
+ allocate(cellsOnEdge_2Halo(2,nlocal_edges))
+ allocate(cellsOnVertex_2Halo(vertexDegree,nlocal_vertices))
+
+ call mpas_dmpar_alltoall_field(domain % dminfo, cellsOnEdgeField % array, cellsOnEdge_2Halo, &
+ 2, size(cellsOnEdgeField % array, 2), nlocal_edges, &
+ sendEdgeList, recvEdgeList)
+
+ call mpas_dmpar_alltoall_field(domain % dminfo, cellsOnVertexField % array, cellsOnVertex_2Halo, &
+ vertexDegree, size(cellsOnVertexField % array, 2), nlocal_vertices, &
+ sendVertexList, recvVertexList)
+
+
+ call mpas_block_decomp_partitioned_edge_list(block_graph_2Halo % nVertices, &
+ block_graph_2Halo % vertexID(1:block_graph_2Halo % nVertices), &
+ 2, nlocal_edges, cellsOnEdge_2Halo, local_edge_list, ghostEdgeStart)
+ call mpas_block_decomp_partitioned_edge_list(block_graph_2Halo % nVertices, &
+ block_graph_2Halo % vertexID(1:block_graph_2Halo % nVertices), &
+ vertexDegree, nlocal_vertices, cellsOnVertex_2Halo, local_vertex_list, ghostVertexStart)
+
+
+ ! At this point, local_edge_list(1:ghostEdgeStart-1) contains all of the owned edges for this block
+ ! and local_edge_list(ghostEdgeStart:nlocal_edges) contains all of the ghost edges
+
+ ! At this point, local_vertex_list(1:ghostVertexStart-1) contains all of the owned vertices for this block
+ ! and local_vertex_list(ghostVertexStart:nlocal_vertices) contains all of the ghost vertices
+
+ ! Also, at this point, block_graph_2Halo % vertexID(1:block_graph_2Halo%nVertices) contains all of the owned
+ ! cells for this block, and block_graph_2Halo % vertexID(block_graph_2Halo%nVertices+1:block_graph_2Halo%nVerticesTotal)
+ ! contains all of the ghost cells
+
+
+ deallocate(sendEdgeList % list)
+ deallocate(sendEdgeList)
+ deallocate(recvEdgeList % list)
+ deallocate(recvEdgeList)
+
+ deallocate(sendVertexList % list)
+ deallocate(sendVertexList)
+ deallocate(recvVertexList % list)
+ deallocate(recvVertexList)
+
+#ifdef HAVE_ZOLTAN
+#ifdef _MPI
+ allocate(xEdge(nlocal_edges))
+ allocate(yEdge(nlocal_edges))
+ allocate(zEdge(nlocal_edges))
+ allocate(xVertex(nlocal_vertices))
+ allocate(yVertex(nlocal_vertices))
+ allocate(zVertex(nlocal_vertices))
+#endif
+#endif
+
+ !
+ ! Knowing which edges/vertices are owned by this block and which are actually read
+ ! from the input or restart file, we can build exchange lists to perform
+ ! all-to-all field exchanges from process that reads a field to the processes that
+ ! need them
+ !
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ size(indexToEdgeIDField % array), nlocal_edges, &
+ indexToEdgeIDField % array, local_edge_list, &
+ sendEdgeList, recvEdgeList)
+
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ size(indexToVertexIDField % array), nlocal_vertices, &
+ indexToVertexIDField % array, local_vertex_list, &
+ sendVertexList, recvVertexList)
+
+#ifdef HAVE_ZOLTAN
+#ifdef _MPI
+ call mpas_dmpar_alltoall_field(domain % dminfo, xEdgeField % array, xEdge, &
+ size(xEdgeField % array), nlocal_edges, &
+ sendEdgeList, recvEdgeList)
+ call mpas_dmpar_alltoall_field(domain % dminfo, yEdgeField % array, yEdge, &
+ size(yEdgeField % array), nlocal_edges, &
+ sendEdgeList, recvEdgeList)
+ call mpas_dmpar_alltoall_field(domain % dminfo, zEdgeField % array, zEdge, &
+ size(zEdgeField % array), nlocal_edges, &
+ sendEdgeList, recvEdgeList)
+
+ call mpas_dmpar_alltoall_field(domain % dminfo, xVertexField % array, xVertex, &
+ size(xVertexField % array), nlocal_vertices, &
+ sendVertexList, recvVertexList)
+ call mpas_dmpar_alltoall_field(domain % dminfo, yVertexField % array, yVertex, &
+ size(yVertexField % array), nlocal_vertices, &
+ sendVertexList, recvVertexList)
+ call mpas_dmpar_alltoall_field(domain % dminfo, zVertexField % array, zVertex, &
+ size(zVertexField % array), nlocal_vertices, &
+ sendVertexList, recvVertexList)
+ !!!!!!!!!!!!!!!!!!
+ !! Reorder edges
+ !!!!!!!!!!!!!!!!!!
+ call mpas_zoltan_order_loc_hsfc_edges(ghostEdgeStart-1,local_edge_list,3,xEdge,yEdge,zEdge)
+ !!!!!!!!!!!!!!!!!!
+
+ !!!!!!!!!!!!!!!!!!
+ !! Reorder vertices
+ !!!!!!!!!!!!!!!!!!
+ call mpas_zoltan_order_loc_hsfc_verts(ghostVertexStart-1,local_vertex_list,3,xVertex,yVertex,zVertex)
+ !!!!!!!!!!!!!!!!!!
+
+ deallocate(sendEdgeList % list)
+ deallocate(sendEdgeList)
+ deallocate(recvEdgeList % list)
+ deallocate(recvEdgeList)
+
+ deallocate(sendVertexList % list)
+ deallocate(sendVertexList)
+ deallocate(recvVertexList % list)
+ deallocate(recvVertexList)
+
+ !
+ ! Knowing which edges/vertices are owned by this block and which are actually read
+ ! from the input or restart file, we can build exchange lists to perform
+ ! all-to-all field exchanges from process that reads a field to the processes that
+ ! need them
+ !
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ size(indexToEdgeIDField % array), nlocal_edges, &
+ indexToEdgeIDField % array, local_edge_list, &
+ sendEdgeList, recvEdgeList)
+
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ size(indexToVertexIDField % array), nlocal_vertices, &
+ indexToVertexIDField % array, local_vertex_list, &
+ sendVertexList, recvVertexList)
+
+#endif
+#endif
+
+ !
+ ! Build ownership and exchange lists for vertical levels
+ ! Essentially, process 0 owns all vertical levels when reading and writing,
+ ! and it distributes them or gathers them to/from all other processes
+ !
+ if (domain % dminfo % my_proc_id == 0) then
+ allocate(local_vertlevel_list(nVertLevels))
+ do i=1,nVertLevels
+ local_vertlevel_list(i) = i
+ end do
+ else
+ allocate(local_vertlevel_list(0))
+ end if
+ allocate(needed_vertlevel_list(nVertLevels))
+ do i=1,nVertLevels
+ needed_vertlevel_list(i) = i
+ end do
+
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ size(local_vertlevel_list), size(needed_vertlevel_list), &
+ local_vertlevel_list, needed_vertlevel_list, &
+ sendVertLevelList, recvVertLevelList)
+
+ deallocate(local_vertlevel_list)
+ deallocate(needed_vertlevel_list)
+
+
+ !
+ ! Read and distribute all fields given ownership lists and exchange lists (maybe already in block?)
+ !
+ allocate(domain % blocklist)
+
+ nCells = block_graph_2Halo % nVerticesTotal
+ nEdges = nlocal_edges
+ nVertices = nlocal_vertices
+
+ call mpas_allocate_block(domain % blocklist, domain, &
+#include "dim_dummy_args.inc"
+ )
+
+ !
+ ! Read attributes
+ !
+ call mpas_io_input_get_att_text(input_obj, 'on_a_sphere', c_on_a_sphere)
+ call mpas_io_input_get_att_real(input_obj, 'sphere_radius', r_sphere_radius)
+ if (index(c_on_a_sphere, 'YES') /= 0) then
+ domain % blocklist % mesh % on_a_sphere = .true.
+ else
+ domain % blocklist % mesh % on_a_sphere = .false.
+ end if
+ domain % blocklist % mesh % sphere_radius = r_sphere_radius
+
+ if (.not. config_do_restart) then
+ input_obj % time = 1
+ else
+ input_obj % time = 1
+
+ !
+ ! If doing a restart, we need to decide which time slice to read from the
+ ! restart file
+ !
+ if (input_obj % rdLocalTime <= 0) then
+ write(0,*) 'Error: Couldn''t find any times in restart file.'
+ call mpas_dmpar_abort(domain % dminfo)
+ end if
+ if (domain % dminfo % my_proc_id == IO_NODE) then
+ allocate(xtime % ioinfo)
+ xtime % ioinfo % start(1) = 1
+ xtime % ioinfo % count(1) = input_obj % rdLocalTime
+ allocate(xtime % array(input_obj % rdLocalTime))
+
+ xtime % ioinfo % fieldName = 'xtime'
+ call mpas_io_input_field(input_obj, xtime)
+
+ call mpas_set_timeInterval(interval=minTimeDiff, DD=10000)
+ call mpas_set_time(curr_time=startTime, dateTimeString=config_start_time)
+
+ do i=1,input_obj % rdLocalTime
+ call mpas_set_time(curr_time=sliceTime, dateTimeString=xtime % array(i))
+ timeDiff = abs(sliceTime - startTime)
+ if (timeDiff < minTimeDiff) then
+ minTimeDiff = timeDiff
+ input_obj % time = i
+ end if
+ end do
+
+ timeStamp = xtime % array(input_obj % time)
+
+ deallocate(xtime % ioinfo)
+ deallocate(xtime % array)
+ end if
+
+ call mpas_dmpar_bcast_int(domain % dminfo, input_obj % time)
+ call mpas_dmpar_bcast_char(domain % dminfo, timeStamp)
+
+ write(0,*) 'Restarting model from time ', timeStamp
+
+ end if
+
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ! Do the actual work of reading all fields in from the input or restart file
+ ! For each field:
+ ! 1) Each process reads a contiguous range of cell/edge/vertex indices, which
+ ! may not correspond with the cells/edges/vertices that are owned by the
+ ! process
+ ! 2) All processes then send the global indices that were read to the
+ ! processes that own those indices based on
+ ! {send,recv}{Cell,Edge,Vertex,VertLevel}List
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ call mpas_read_and_distribute_fields(domain % dminfo, input_obj, domain % blocklist, &
+ readCellStart, nReadCells, readEdgeStart, nReadEdges, readVertexStart, nReadVertices, &
+ readVertLevelStart, nReadVertLevels, &
+ sendCellList, recvCellList, sendEdgeList, recvEdgeList, sendVertexList, recvVertexList, &
+ sendVertLevelList, recvVertLevelList)
+
+
+ call mpas_io_input_finalize(input_obj, domain % dminfo)
+
+
+ !
+ ! Rename vertices in cellsOnCell, edgesOnCell, etc. to local indices
+ !
+ allocate(cellIDSorted(2,domain % blocklist % mesh % nCells))
+ allocate(edgeIDSorted(2,domain % blocklist % mesh % nEdges))
+ allocate(vertexIDSorted(2,domain % blocklist % mesh % nVertices))
+
+ do i=1,domain % blocklist % mesh % nCells
+ cellIDSorted(1,i) = domain % blocklist % mesh % indexToCellID % array(i)
+ cellIDSorted(2,i) = i
+ end do
+ call quicksort(block_graph_2Halo % nVerticesTotal, cellIDSorted)
+
+ do i=1,domain % blocklist % mesh % nEdges
+ edgeIDSorted(1,i) = domain % blocklist % mesh % indexToEdgeID % array(i)
+ edgeIDSorted(2,i) = i
+ end do
+ call quicksort(nlocal_edges, edgeIDSorted)
+
+ do i=1,domain % blocklist % mesh % nVertices
+ vertexIDSorted(1,i) = domain % blocklist % mesh % indexToVertexID % array(i)
+ vertexIDSorted(2,i) = i
+ end do
+ call quicksort(nlocal_vertices, vertexIDSorted)
+
+
+ do i=1,domain % blocklist % mesh % nCells
+ do j=1,domain % blocklist % mesh % nEdgesOnCell % array(i)
+
+ k = mpas_binary_search(cellIDSorted, 2, 1, domain % blocklist % mesh % nCells, &
+ domain % blocklist % mesh % cellsOnCell % array(j,i))
+ if (k <= domain % blocklist % mesh % nCells) then
+ domain % blocklist % mesh % cellsOnCell % array(j,i) = cellIDSorted(2,k)
+ else
+ domain % blocklist % mesh % cellsOnCell % array(j,i) = domain % blocklist % mesh % nCells + 1
+! domain % blocklist % mesh % cellsOnCell % array(j,i) = 0
+ end if
+
+ k = mpas_binary_search(edgeIDSorted, 2, 1, domain % blocklist % mesh % nEdges, &
+ domain % blocklist % mesh % edgesOnCell % array(j,i))
+ if (k <= domain % blocklist % mesh % nEdges) then
+ domain % blocklist % mesh % edgesOnCell % array(j,i) = edgeIDSorted(2,k)
+ else
+ domain % blocklist % mesh % edgesOnCell % array(j,i) = domain % blocklist % mesh % nEdges + 1
+! domain % blocklist % mesh % edgesOnCell % array(j,i) = 0
+ end if
+
+ k = mpas_binary_search(vertexIDSorted, 2, 1, domain % blocklist % mesh % nVertices, &
+ domain % blocklist % mesh % verticesOnCell % array(j,i))
+ if (k <= domain % blocklist % mesh % nVertices) then
+ domain % blocklist % mesh % verticesOnCell % array(j,i) = vertexIDSorted(2,k)
+ else
+ domain % blocklist % mesh % verticesOnCell % array(j,i) = domain % blocklist % mesh % nVertices + 1
+! domain % blocklist % mesh % verticesOnCell % array(j,i) = 0
+ end if
+
+ end do
+ end do
+
+ do i=1,domain % blocklist % mesh % nEdges
+ do j=1,2
+
+ k = mpas_binary_search(cellIDSorted, 2, 1, domain % blocklist % mesh % nCells, &
+ domain % blocklist % mesh % cellsOnEdge % array(j,i))
+ if (k <= domain % blocklist % mesh % nCells) then
+ domain % blocklist % mesh % cellsOnEdge % array(j,i) = cellIDSorted(2,k)
+ else
+ domain % blocklist % mesh % cellsOnEdge % array(j,i) = domain % blocklist % mesh % nCells + 1
+! domain % blocklist % mesh % cellsOnEdge % array(j,i) = 0
+ end if
+
+ k = mpas_binary_search(vertexIDSorted, 2, 1, domain % blocklist % mesh % nVertices, &
+ domain % blocklist % mesh % verticesOnEdge % array(j,i))
+ if (k <= domain % blocklist % mesh % nVertices) then
+ domain % blocklist % mesh % verticesOnEdge % array(j,i) = vertexIDSorted(2,k)
+ else
+ domain % blocklist % mesh % verticesOnEdge % array(j,i) = domain % blocklist % mesh % nVertices + 1
+! domain % blocklist % mesh % verticesOnEdge % array(j,i) = 0
+ end if
+
+ end do
+
+ do j=1,domain % blocklist % mesh % nEdgesOnEdge % array(i)
+
+ k = mpas_binary_search(edgeIDSorted, 2, 1, domain % blocklist % mesh % nEdges, &
+ domain % blocklist % mesh % edgesOnEdge % array(j,i))
+ if (k <= domain % blocklist % mesh % nEdges) then
+ domain % blocklist % mesh % edgesOnEdge % array(j,i) = edgeIDSorted(2,k)
+ else
+ domain % blocklist % mesh % edgesOnEdge % array(j,i) = domain % blocklist % mesh % nEdges + 1
+! domain % blocklist % mesh % edgesOnEdge % array(j,i) = 0
+ end if
+
+ end do
+ end do
+
+ do i=1,domain % blocklist % mesh % nVertices
+ do j=1,vertexDegree
+
+ k = mpas_binary_search(cellIDSorted, 2, 1, domain % blocklist % mesh % nCells, &
+ domain % blocklist % mesh % cellsOnVertex % array(j,i))
+ if (k <= domain % blocklist % mesh % nCells) then
+ domain % blocklist % mesh % cellsOnVertex % array(j,i) = cellIDSorted(2,k)
+ else
+ domain % blocklist % mesh % cellsOnVertex % array(j,i) = domain % blocklist % mesh % nCells + 1
+! domain % blocklist % mesh % cellsOnVertex % array(j,i) = 0
+ end if
+
+ k = mpas_binary_search(edgeIDSorted, 2, 1, domain % blocklist % mesh % nEdges, &
+ domain % blocklist % mesh % edgesOnVertex % array(j,i))
+ if (k <= domain % blocklist % mesh % nEdges) then
+ domain % blocklist % mesh % edgesOnVertex % array(j,i) = edgeIDSorted(2,k)
+ else
+ domain % blocklist % mesh % edgesOnVertex % array(j,i) = domain % blocklist % mesh % nEdges + 1
+! domain % blocklist % mesh % edgesOnVertex % array(j,i) = 0
+ end if
+
+ end do
+ end do
+
+ deallocate(cellIDSorted)
+ deallocate(edgeIDSorted)
+ deallocate(vertexIDSorted)
+
+
+ !
+ ! Work out halo exchange lists for cells, edges, and vertices
+ !
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ block_graph_2Halo % nVertices, block_graph_2Halo % nVerticesTotal, &
+ block_graph_2Halo % vertexID(1:block_graph_2Halo % nVertices), block_graph_2Halo % vertexID, &
+ domain % blocklist % parinfo % cellsToSend, domain % blocklist % parinfo % cellsToRecv)
+
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ ghostEdgeStart-1, nlocal_edges, &
+ local_edge_list(1:ghostEdgeStart-1), local_edge_list, &
+ domain % blocklist % parinfo % edgesToSend, domain % blocklist % parinfo % edgesToRecv)
+
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ ghostVertexStart-1, nlocal_vertices, &
+ local_vertex_list(1:ghostVertexStart-1), local_vertex_list, &
+ domain % blocklist % parinfo % verticesToSend, domain % blocklist % parinfo % verticesToRecv)
+
+ domain % blocklist % mesh % nCellsSolve = block_graph_2Halo % nVertices
+ domain % blocklist % mesh % nEdgesSolve = ghostEdgeStart-1
+ domain % blocklist % mesh % nVerticesSolve = ghostVertexStart-1
+ domain % blocklist % mesh % nVertLevelsSolve = domain % blocklist % mesh % nVertLevels ! No vertical decomp yet...
+
+
+ !
+ ! Deallocate fields, graphs, and other memory
+ !
+ deallocate(indexToCellIDField % ioinfo)
+ deallocate(indexToCellIDField % array)
+#ifdef HAVE_ZOLTAN
+#ifdef _MPI
+ deallocate(xCellField % ioinfo)
+ deallocate(xCellField % array)
+ deallocate(yCellField % ioinfo)
+ deallocate(yCellField % array)
+ deallocate(zCellField % ioinfo)
+ deallocate(zCellField % array)
+#endif
+#endif
+ deallocate(indexToEdgeIDField % ioinfo)
+ deallocate(indexToEdgeIDField % array)
+ deallocate(indexToVertexIDField % ioinfo)
+ deallocate(indexToVertexIDField % array)
+ deallocate(cellsOnCellField % ioinfo)
+ deallocate(cellsOnCellField % array)
+ deallocate(edgesOnCellField % ioinfo)
+ deallocate(edgesOnCellField % array)
+ deallocate(verticesOnCellField % ioinfo)
+ deallocate(verticesOnCellField % array)
+ deallocate(cellsOnEdgeField % ioinfo)
+ deallocate(cellsOnEdgeField % array)
+ deallocate(cellsOnVertexField % ioinfo)
+ deallocate(cellsOnVertexField % array)
+ deallocate(cellsOnCell_0Halo)
+ deallocate(nEdgesOnCell_0Halo)
+ deallocate(indexToCellID_0Halo)
+ deallocate(cellsOnEdge_2Halo)
+ deallocate(cellsOnVertex_2Halo)
+ deallocate(edgesOnCell_2Halo)
+ deallocate(verticesOnCell_2Halo)
+ deallocate(block_graph_0Halo % vertexID)
+ deallocate(block_graph_0Halo % nAdjacent)
+ deallocate(block_graph_0Halo % adjacencyList)
+#ifdef HAVE_ZOLTAN
+#ifdef _MPI
+ deallocate(xCell)
+ deallocate(yCell)
+ deallocate(zCell)
+#endif
+#endif
+ end subroutine mpas_input_state_for_domain
+
+
+ !
+ ! Read every registered field from the input or restart file and route the
+ ! data to the processes that own it.  Each process reads a contiguous range
+ ! of cell/edge/vertex/vertlevel indices (read*Start/read*Count), and the
+ ! send/recv exchange lists carry the values to the owning processes.
+ ! The per-field work itself is textually included below.
+ !
+ subroutine mpas_read_and_distribute_fields(dminfo, input_obj, block, &
+ readCellsStart, readCellsCount, &
+ readEdgesStart, readEdgesCount, &
+ readVerticesStart, readVerticesCount, &
+ readVertLevelsStart, readVertLevelsCount, &
+ sendCellsList, recvCellsList, &
+ sendEdgesList, recvEdgesList, &
+ sendVerticesList, recvVerticesList, &
+ sendVertLevelsList, recvVertLevelsList)
+
+ implicit none
+
+ type (dm_info), intent(in) :: dminfo
+ type (io_input_object), intent(in) :: input_obj
+ type (block_type), intent(inout) :: block
+ integer, intent(in) :: readCellsStart, readCellsCount, readEdgesStart, readEdgesCount, readVerticesStart, readVerticesCount
+ integer, intent(in) :: readVertLevelsStart, readVertLevelsCount
+ type (exchange_list), pointer :: sendCellsList, recvCellsList
+ type (exchange_list), pointer :: sendEdgesList, recvEdgesList
+ type (exchange_list), pointer :: sendVerticesList, recvVerticesList
+ type (exchange_list), pointer :: sendVertLevelsList, recvVertLevelsList
+
+ ! Scratch fields covering every supported type/rank; the included code
+ ! below references these names directly, so they must not be renamed
+ type (field1dInteger) :: int1d
+ type (field2dInteger) :: int2d
+ type (field0dReal) :: real0d
+ type (field1dReal) :: real1d
+ type (field2dReal) :: real2d
+ type (field3dReal) :: real3d
+ type (field0dChar) :: char0d
+ type (field1dChar) :: char1d
+
+ integer :: i1, i2, i3, i4
+
+ ! Whole-field staging buffers, one per type/rank; their exact usage is
+ ! defined in the included per-field code
+ integer, dimension(:), pointer :: super_int1d
+ integer, dimension(:,:), pointer :: super_int2d
+ real (kind=RKIND) :: super_real0d
+ real (kind=RKIND), dimension(:), pointer :: super_real1d
+ real (kind=RKIND), dimension(:,:), pointer :: super_real2d
+ real (kind=RKIND), dimension(:,:,:), pointer :: super_real3d
+ character (len=64) :: super_char0d
+ character (len=64), dimension(:), pointer :: super_char1d
+
+ integer :: i, k
+
+ ! Declarations for the non-decomposed dimensions (included code)
+#include "nondecomp_dims.inc"
+
+ ! The ioinfo descriptors of the scratch fields are filled in per field
+ ! by the included read/distribute code
+ allocate(int1d % ioinfo)
+ allocate(int2d % ioinfo)
+ allocate(real0d % ioinfo)
+ allocate(real1d % ioinfo)
+ allocate(real2d % ioinfo)
+ allocate(real3d % ioinfo)
+ allocate(char0d % ioinfo)
+ allocate(char1d % ioinfo)
+
+
+ ! Per-field read-and-exchange logic for every field in the model
+#include "io_input_fields.inc"
+
+#include "nondecomp_dims_dealloc.inc"
+
+ end subroutine mpas_read_and_distribute_fields
+
+
+
+ !
+ ! Open the netCDF file named in input_obj % filename for reading, storing
+ ! the dataset id in input_obj % rd_ncid.  On failure, an error message
+ ! naming the stream type (restart/input/sfc) is written to unit 0 and the
+ ! run is aborted via mpas_dmpar_abort.
+ !
+ subroutine mpas_io_input_init(input_obj, dminfo)
+
+ implicit none
+
+ type (io_input_object), intent(inout) :: input_obj
+ type (dm_info), intent(in) :: dminfo
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+
+
+ ! Open in netCDF 'share' mode; when OFFSET64BIT is defined, also request
+ ! the 64-bit-offset variant so large-format files can be read
+#ifdef OFFSET64BIT
+ nferr = nf_open(trim(input_obj % filename), ior(NF_SHARE,NF_64BIT_OFFSET), input_obj % rd_ncid)
+#else
+ nferr = nf_open(trim(input_obj % filename), NF_SHARE, input_obj % rd_ncid)
+#endif
+
+ if (nferr /= NF_NOERR) then
+ write(0,*) ' '
+ if (input_obj % stream == STREAM_RESTART) then
+ write(0,*) 'Error opening restart file ''', trim(input_obj % filename), ''''
+ else if (input_obj % stream == STREAM_INPUT) then
+ write(0,*) 'Error opening input file ''', trim(input_obj % filename), ''''
+ else if (input_obj % stream == STREAM_SFC) then
+ write(0,*) 'Error opening sfc file ''', trim(input_obj % filename), ''''
+ end if
+ write(0,*) ' '
+ call mpas_dmpar_abort(dminfo)
+ end if
+
+ ! Included code that looks up the netCDF ids needed for later reads
+#include "netcdf_read_ids.inc"
+
+ end subroutine mpas_io_input_init
+
+
+ ! Query the size of the named dimension in the open input file; the lookup
+ ! itself is generated code in the include below.
+ subroutine mpas_io_input_get_dimension(input_obj, dimname, dimsize)
+
+ implicit none
+
+ type (io_input_object), intent(in) :: input_obj
+ character (len=*), intent(in) :: dimname
+ integer, intent(out) :: dimsize
+
+#include "get_dimension_by_name.inc"
+
+ end subroutine mpas_io_input_get_dimension
+
+
+ ! Read a global real attribute from the open input file. If the attribute is
+ ! missing, a warning is printed and, for 'sphere_radius'-like names only,
+ ! attvalue is defaulted to 1.0; for other names attvalue is left unset.
+ subroutine mpas_io_input_get_att_real(input_obj, attname, attvalue)
+
+ implicit none
+
+ type (io_input_object), intent(in) :: input_obj
+ character (len=*), intent(in) :: attname
+ real (kind=RKIND), intent(out) :: attvalue
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+
+ ! Choose the netCDF accessor matching the configured real kind.
+ if (RKIND == 8) then
+ nferr = nf_get_att_double(input_obj % rd_ncid, NF_GLOBAL, attname, attvalue)
+ else
+ nferr = nf_get_att_real(input_obj % rd_ncid, NF_GLOBAL, attname, attvalue)
+ end if
+ if (nferr /= NF_NOERR) then
+ write(0,*) 'Warning: Attribute '//trim(attname)//&
+ ' not found in '//trim(input_obj % filename)
+ if (index(attname, 'sphere_radius') /= 0) then
+ write(0,*) ' Setting '//trim(attname)//' to 1.0'
+ attvalue = 1.0
+ end if
+ end if
+
+ end subroutine mpas_io_input_get_att_real
+
+
+ ! Read a global text attribute from the open input file. If missing, warns
+ ! and defaults 'on_a_sphere'-like attributes to 'YES'; other names are left
+ ! unset on failure.
+ subroutine mpas_io_input_get_att_text(input_obj, attname, attvalue)
+
+ implicit none
+
+ type (io_input_object), intent(in) :: input_obj
+ character (len=*), intent(in) :: attname
+ character (len=*), intent(out) :: attvalue
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+
+ nferr = nf_get_att_text(input_obj % rd_ncid, NF_GLOBAL, attname, attvalue)
+ if (nferr /= NF_NOERR) then
+ write(0,*) 'Warning: Attribute '//trim(attname)//&
+ ' not found in '//trim(input_obj % filename)
+ if (index(attname, 'on_a_sphere') /= 0) then
+ write(0,*) ' Setting '//trim(attname)//' to ''YES'''
+ attvalue = 'YES'
+ end if
+ end if
+
+ end subroutine mpas_io_input_get_att_text
+
+
+ ! Read a non-time-varying scalar (0-d) real field. varID is assigned by the
+ ! generated include below before the netCDF read.
+ subroutine mpas_io_input_field0d_real(input_obj, field)
+
+ implicit none
+
+ type (io_input_object), intent(in) :: input_obj
+ type (field0dReal), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(1) :: start1, count1
+
+ start1(1) = 1
+ count1(1) = 1
+
+#include "input_field0dreal.inc"
+
+#if (RKIND == 8)
+ nferr = nf_get_vara_double(input_obj % rd_ncid, varID, start1, count1, field % scalar)
+#else
+ nferr = nf_get_vara_real(input_obj % rd_ncid, varID, start1, count1, field % scalar)
+#endif
+
+ end subroutine mpas_io_input_field0d_real
+
+
+ ! Read a non-time-varying 1-d real field using the start/count decomposition
+ ! recorded in field%ioinfo.
+ subroutine mpas_io_input_field1d_real(input_obj, field)
+
+ implicit none
+
+ type (io_input_object), intent(in) :: input_obj
+ type (field1dReal), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(1) :: start1, count1
+
+ start1(1) = field % ioinfo % start(1)
+ count1(1) = field % ioinfo % count(1)
+
+ !
+ ! Special case: we may want to read the xtime variable across the
+ ! time dimension as a 1d array.
+ !
+ if (trim(field % ioinfo % fieldName) == 'xtime') then
+ varID = input_obj % rdVarIDxtime
+ end if
+
+ ! NOTE(review): for non-xtime fields varID is presumably set by this
+ ! generated include -- confirm it does not clobber the xtime case above.
+#include "input_field1dreal.inc"
+
+#if (RKIND == 8)
+ nferr = nf_get_vara_double(input_obj % rd_ncid, varID, start1, count1, field % array)
+#else
+ nferr = nf_get_vara_real(input_obj % rd_ncid, varID, start1, count1, field % array)
+#endif
+
+ end subroutine mpas_io_input_field1d_real
+
+
+ ! Read a non-time-varying 2-d real field; varID is resolved by the generated
+ ! include below.
+ subroutine mpas_io_input_field2d_real(input_obj, field)
+
+ implicit none
+
+ type (io_input_object), intent(in) :: input_obj
+ type (field2dReal), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(2) :: start2, count2
+
+ start2(1) = field % ioinfo % start(1)
+ start2(2) = field % ioinfo % start(2)
+ count2(1) = field % ioinfo % count(1)
+ count2(2) = field % ioinfo % count(2)
+
+#include "input_field2dreal.inc"
+
+#if (RKIND == 8)
+ nferr = nf_get_vara_double(input_obj % rd_ncid, varID, start2, count2, field % array)
+#else
+ nferr = nf_get_vara_real(input_obj % rd_ncid, varID, start2, count2, field % array)
+#endif
+
+ end subroutine mpas_io_input_field2d_real
+
+
+ ! Read a non-time-varying 3-d real field; varID is resolved by the generated
+ ! include below.
+ subroutine mpas_io_input_field3d_real(input_obj, field)
+
+ implicit none
+
+ type (io_input_object), intent(in) :: input_obj
+ type (field3dReal), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(3) :: start3, count3
+
+ start3(1) = field % ioinfo % start(1)
+ start3(2) = field % ioinfo % start(2)
+ start3(3) = field % ioinfo % start(3)
+ count3(1) = field % ioinfo % count(1)
+ count3(2) = field % ioinfo % count(2)
+ count3(3) = field % ioinfo % count(3)
+
+#include "input_field3dreal.inc"
+
+#if (RKIND == 8)
+ nferr = nf_get_vara_double(input_obj % rd_ncid, varID, start3, count3, field % array)
+#else
+ nferr = nf_get_vara_real(input_obj % rd_ncid, varID, start3, count3, field % array)
+#endif
+
+ end subroutine mpas_io_input_field3d_real
+
+
+ ! Read one time level (input_obj%time) of a time-varying scalar real field.
+ subroutine mpas_io_input_field0d_real_time(input_obj, field)
+
+ implicit none
+
+ type (io_input_object), intent(in) :: input_obj
+ type (field0dReal), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(1) :: start1, count1
+
+ start1(1) = input_obj % time
+ count1(1) = 1
+
+#include "input_field0dreal_time.inc"
+
+#if (RKIND == 8)
+ nferr = nf_get_vara_double(input_obj % rd_ncid, varID, start1, count1, field % scalar)
+#else
+ nferr = nf_get_vara_real(input_obj % rd_ncid, varID, start1, count1, field % scalar)
+#endif
+
+ end subroutine mpas_io_input_field0d_real_time
+
+
+ ! Read one time level of a time-varying 1-d real field; the record (time)
+ ! dimension is the slowest-varying netCDF index.
+ subroutine mpas_io_input_field1d_real_time(input_obj, field)
+
+ implicit none
+
+ type (io_input_object), intent(in) :: input_obj
+ type (field1dReal), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(2) :: start2, count2
+
+ start2(1) = field % ioinfo % start(1)
+ start2(2) = input_obj % time
+ count2(1) = field % ioinfo % count(1)
+ count2(2) = 1
+
+#include "input_field1dreal_time.inc"
+
+#if (RKIND == 8)
+ nferr = nf_get_vara_double(input_obj % rd_ncid, varID, start2, count2, field % array)
+#else
+ nferr = nf_get_vara_real(input_obj % rd_ncid, varID, start2, count2, field % array)
+#endif
+
+ end subroutine mpas_io_input_field1d_real_time
+
+
+ ! Read one time level of a time-varying 2-d real field.
+ subroutine mpas_io_input_field2d_real_time(input_obj, field)
+
+ implicit none
+
+ type (io_input_object), intent(in) :: input_obj
+ type (field2dReal), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(3) :: start3, count3
+
+ start3(1) = field % ioinfo % start(1)
+ start3(2) = field % ioinfo % start(2)
+ start3(3) = input_obj % time
+ count3(1) = field % ioinfo % count(1)
+ count3(2) = field % ioinfo % count(2)
+ count3(3) = 1
+
+#include "input_field2dreal_time.inc"
+
+#if (RKIND == 8)
+ nferr = nf_get_vara_double(input_obj % rd_ncid, varID, start3, count3, field % array)
+#else
+ nferr = nf_get_vara_real(input_obj % rd_ncid, varID, start3, count3, field % array)
+#endif
+
+ end subroutine mpas_io_input_field2d_real_time
+
+
+ ! Read one time level of a time-varying 3-d real field.
+ subroutine mpas_io_input_field3d_real_time(input_obj, field)
+
+ implicit none
+
+ type (io_input_object), intent(in) :: input_obj
+ type (field3dReal), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(4) :: start4, count4
+
+ start4(1) = field % ioinfo % start(1)
+ start4(2) = field % ioinfo % start(2)
+ start4(3) = field % ioinfo % start(3)
+ start4(4) = input_obj % time
+ count4(1) = field % ioinfo % count(1)
+ count4(2) = field % ioinfo % count(2)
+ count4(3) = field % ioinfo % count(3)
+ count4(4) = 1
+
+#include "input_field3dreal_time.inc"
+
+#if (RKIND == 8)
+ nferr = nf_get_vara_double(input_obj % rd_ncid, varID, start4, count4, field % array)
+#else
+ nferr = nf_get_vara_real(input_obj % rd_ncid, varID, start4, count4, field % array)
+#endif
+
+ end subroutine mpas_io_input_field3d_real_time
+
+
+ ! Read a non-time-varying 1-d integer field; varID is resolved by the
+ ! generated include below.
+ subroutine mpas_io_input_field1d_integer(input_obj, field)
+
+ implicit none
+
+ type (io_input_object), intent(in) :: input_obj
+ type (field1dInteger), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(1) :: start1, count1
+
+ start1(1) = field % ioinfo % start(1)
+ count1(1) = field % ioinfo % count(1)
+
+#include "input_field1dinteger.inc"
+
+ nferr = nf_get_vara_int(input_obj % rd_ncid, varID, start1, count1, field % array)
+
+ end subroutine mpas_io_input_field1d_integer
+
+
+ ! Read a non-time-varying 2-d integer field.
+ subroutine mpas_io_input_field2d_integer(input_obj, field)
+
+ implicit none
+
+ type (io_input_object), intent(in) :: input_obj
+ type (field2dInteger), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(2) :: start2, count2
+
+ start2(1) = field % ioinfo % start(1)
+ start2(2) = field % ioinfo % start(2)
+ count2(1) = field % ioinfo % count(1)
+ count2(2) = field % ioinfo % count(2)
+
+#include "input_field2dinteger.inc"
+
+ nferr = nf_get_vara_int(input_obj % rd_ncid, varID, start2, count2, field % array)
+
+ end subroutine mpas_io_input_field2d_integer
+
+
+ ! Read one time level of a time-varying 1-d integer field.
+ subroutine mpas_io_input_field1d_integer_time(input_obj, field)
+
+ implicit none
+
+ type (io_input_object), intent(in) :: input_obj
+ type (field1dInteger), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(2) :: start2, count2
+
+ start2(1) = field % ioinfo % start(1)
+ start2(2) = input_obj % time
+ count2(1) = field % ioinfo % count(1)
+ count2(2) = 1
+
+#include "input_field1dinteger_time.inc"
+
+ nferr = nf_get_vara_int(input_obj % rd_ncid, varID, start2, count2, field % array)
+
+ end subroutine mpas_io_input_field1d_integer_time
+
+
+ ! Read one time level of a time-varying scalar character field. The first
+ ! netCDF dimension is the 64-byte string length (StrLen).
+ subroutine mpas_io_input_field0d_char_time(input_obj, field)
+
+ implicit none
+
+ type (io_input_object), intent(in) :: input_obj
+ type (field0dChar), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(2) :: start1, count1
+
+ start1(1) = 1
+ count1(1) = 64
+ start1(2) = input_obj % time
+ count1(2) = 1
+
+#include "input_field0dchar_time.inc"
+
+ nferr = nf_get_vara_text(input_obj % rd_ncid, varID, start1, count1, field % scalar)
+
+ end subroutine mpas_io_input_field0d_char_time
+
+
+ ! Read one time level of a time-varying 1-d character field (string length
+ ! fixed at 64 bytes).
+ subroutine mpas_io_input_field1d_char_time(input_obj, field)
+
+ implicit none
+
+ type (io_input_object), intent(in) :: input_obj
+ type (field1dChar), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(3) :: start2, count2
+
+ start2(1) = 1
+ start2(2) = field % ioinfo % start(1)
+ start2(3) = input_obj % time
+ count2(1) = 64
+ count2(2) = field % ioinfo % count(1)
+ count2(3) = 1
+
+#include "input_field1dchar_time.inc"
+
+ nferr = nf_get_vara_text(input_obj % rd_ncid, varID, start2, count2, field % array)
+
+ end subroutine mpas_io_input_field1d_char_time
+
+
+ ! Read a non-time-varying scalar character field (first 64 bytes of the
+ ! first record).
+ subroutine mpas_io_input_field0d_char(input_obj, field)
+
+ implicit none
+
+ type (io_input_object), intent(in) :: input_obj
+ type (field0dChar), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(2) :: start1, count1
+
+ start1(1) = 1
+ count1(1) = 64
+ start1(2) = 1
+ count1(2) = 1
+
+#include "input_field0dchar.inc"
+
+ nferr = nf_get_vara_text(input_obj % rd_ncid, varID, start1, count1, field % scalar)
+
+ end subroutine mpas_io_input_field0d_char
+
+
+ ! Read a non-time-varying 1-d character field, with a special case for
+ ! reading 'xtime' across the time dimension as a 1-d array.
+ subroutine mpas_io_input_field1d_char(input_obj, field)
+
+ implicit none
+
+ type (io_input_object), intent(in) :: input_obj
+ type (field1dChar), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(2) :: start1, count1
+
+ start1(1) = 1
+ count1(1) = 64
+ start1(2) = field % ioinfo % start(1)
+ count1(2) = field % ioinfo % count(1)
+
+ !
+ ! Special case: we may want to read the xtime variable across the
+ ! time dimension as a 1d array.
+ !
+ if (trim(field % ioinfo % fieldName) == 'xtime') then
+ varID = input_obj % rdVarIDxtime
+ end if
+
+#include "input_field1dchar.inc"
+
+ nferr = nf_get_vara_text(input_obj % rd_ncid, varID, start1, count1, field % array)
+
+ end subroutine mpas_io_input_field1d_char
+
+
+ ! Close the netCDF input file. dminfo is currently unused but kept for
+ ! interface symmetry with the other io_input routines.
+ subroutine mpas_io_input_finalize(input_obj, dminfo)
+
+ implicit none
+
+ type (io_input_object), intent(inout) :: input_obj
+ type (dm_info), intent(in) :: dminfo
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+
+ nferr = nf_close(input_obj % rd_ncid)
+
+ end subroutine mpas_io_input_finalize
+
+end module mpas_io_input
Copied: branches/source_renaming/src/framework/mpas_io_output.F (from rev 1089, branches/source_renaming/src/framework/module_io_output.F)
===================================================================
--- branches/source_renaming/src/framework/mpas_io_output.F         (rev 0)
+++ branches/source_renaming/src/framework/mpas_io_output.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -0,0 +1,865 @@
+module mpas_io_output
+
+ use mpas_grid_types
+ use mpas_dmpar
+ use mpas_sort
+ use mpas_configure
+
+ integer, parameter :: OUTPUT = 1
+ integer, parameter :: RESTART = 2
+ integer, parameter :: SFC = 3
+
+ ! State for one open netCDF output stream (one file).
+ type io_output_object
+ integer :: wr_ncid ! netCDF id of the open file
+ character (len=1024) :: filename
+
+ integer :: time ! current output time frame (record index)
+
+ integer :: stream ! one of OUTPUT, RESTART, SFC
+
+ integer :: wrDimIDStrLen ! dimension id of the 64-byte string length
+#include "io_output_obj_decls.inc"
+
+ ! Cached dmpar exchange lists for gathering fields onto the writer task;
+ ! built lazily on the first call to mpas_output_state_for_domain.
+ logical :: validExchangeLists
+ type (exchange_list), pointer :: sendCellsList, recvCellsList
+ type (exchange_list), pointer :: sendEdgesList, recvEdgesList
+ type (exchange_list), pointer :: sendVerticesList, recvVerticesList
+ type (exchange_list), pointer :: sendVertLevelsList, recvVertLevelsList
+ end type io_output_object
+
+
+ ! Generic writer for non-time-varying fields; resolved on field type/rank.
+ interface mpas_io_output_field
+ module procedure mpas_io_output_field0d_real
+ module procedure mpas_io_output_field1d_real
+ module procedure mpas_io_output_field2d_real
+ module procedure mpas_io_output_field3d_real
+ module procedure mpas_io_output_field1d_integer
+ module procedure mpas_io_output_field2d_integer
+ module procedure mpas_io_output_field0d_char
+ module procedure mpas_io_output_field1d_char
+ end interface mpas_io_output_field
+
+ ! Generic writer for one time level of time-varying fields.
+ interface mpas_io_output_field_time
+ module procedure mpas_io_output_field0d_real_time
+ module procedure mpas_io_output_field1d_real_time
+ module procedure mpas_io_output_field2d_real_time
+ module procedure mpas_io_output_field3d_real_time
+ module procedure mpas_io_output_field1d_integer_time
+ module procedure mpas_io_output_field0d_char_time
+ module procedure mpas_io_output_field1d_char_time
+ end interface mpas_io_output_field_time
+
+
+ contains
+
+
+ ! Set up an output stream: choose the filename from the stream name
+ ! ('OUTPUT'/'RESTART'/'SFC'), reset the cached exchange lists, compute global
+ ! dimension sizes, and create the netCDF file via mpas_io_output_init.
+ ! NOTE(review): stream and outputSuffix carry no intent attribute -- both are
+ ! only read here; consider intent(in).
+ subroutine mpas_output_state_init(output_obj, domain, stream, outputSuffix)
+
+ implicit none
+
+ type (io_output_object), intent(inout) :: output_obj
+ type (domain_type), intent(in) :: domain
+ character (len=*) :: stream
+ character (len=*), optional :: outputSuffix
+
+ character (len=128) :: tempfilename
+
+ type (block_type), pointer :: block_ptr
+#include "output_dim_actual_decls.inc"
+
+ block_ptr => domain % blocklist
+ nullify(output_obj % sendCellsList)
+ nullify(output_obj % recvCellsList)
+ nullify(output_obj % sendEdgesList)
+ nullify(output_obj % recvEdgesList)
+ nullify(output_obj % sendVerticesList)
+ nullify(output_obj % recvVerticesList)
+ nullify(output_obj % sendVertLevelsList)
+ nullify(output_obj % recvVertLevelsList)
+ output_obj % validExchangeLists = .false.
+
+#include "output_dim_inits.inc"
+
+ ! Global sizes are sums of per-task "solve" counts; nVertLevels is replicated.
+ call mpas_dmpar_sum_int(domain % dminfo, block_ptr % mesh % nCellsSolve, nCellsGlobal)
+ call mpas_dmpar_sum_int(domain % dminfo, block_ptr % mesh % nEdgesSolve, nEdgesGlobal)
+ call mpas_dmpar_sum_int(domain % dminfo, block_ptr % mesh % nVerticesSolve, nVerticesGlobal)
+ nVertLevelsGlobal = block_ptr % mesh % nVertLevels
+
+ if (trim(stream) == 'OUTPUT') then
+ if(present(outputSuffix)) then
+ call mpas_insert_string_suffix(config_output_name, outputSuffix, tempfilename)
+ else
+ tempfilename = config_output_name
+ end if
+ output_obj % filename = trim(tempfilename)
+ output_obj % stream = OUTPUT
+ else if (trim(stream) == 'RESTART') then
+ output_obj % filename = trim(config_restart_name)
+ output_obj % stream = RESTART
+ else if (trim(stream) == 'SFC') then
+ ! Keep filename as whatever was set by the user
+ output_obj % stream = SFC
+ end if
+
+ ! For now, we assume that a domain consists only of one block,
+ ! although in future, work needs to be done to write model state
+ ! from many distributed blocks
+ call mpas_io_output_init(output_obj, domain % dminfo, &
+ block_ptr % mesh, &
+#include "output_dim_actual_args.inc"
+ )
+
+ end subroutine mpas_output_state_init
+
+
+ ! Build filename by splicing suffix into stream just before its extension:
+ ! the rightmost '.' that is not the final character marks the extension.
+ ! If no such '.' exists, the suffix is appended after a new '.'.
+ ! Example: stream='restart.nc', suffix='0001' -> 'restart.0001.nc'.
+ subroutine mpas_insert_string_suffix(stream, suffix, filename)
+
+ implicit none
+
+ character (len=*), intent(in) :: stream
+ character (len=*), intent(in) :: suffix
+ character (len=*), intent(out) :: filename
+ integer :: length, i
+
+ length = len_trim(stream)
+
+ ! Scan backwards for the rightmost interior '.' (the final character is
+ ! deliberately excluded from the search).
+ do i = length-1, 1, -1
+ if (stream(i:i) == '.') then
+ filename = trim(stream(:i)) // trim(suffix) // trim(stream(i:))
+ return
+ end if
+ end do
+
+ ! No interior '.' found: append the suffix instead.
+ filename = trim(stream) // '.' // trim(suffix)
+
+ end subroutine mpas_insert_string_suffix
+
+
+ ! Gather the distributed model state and write time frame itime to the
+ ! output file. Connectivity arrays are temporarily rewritten from local to
+ ! global index space (originals saved and restored around the write).
+ ! Fix: neededVertLevelList was allocated but never deallocated, leaking
+ ! nVertLevelsGlobal integers on task 0 every call; now freed with the others.
+ subroutine mpas_output_state_for_domain(output_obj, domain, itime)
+
+ implicit none
+
+ type (io_output_object), intent(inout) :: output_obj
+ type (domain_type), intent(inout) :: domain
+ integer, intent(in) :: itime
+
+ integer :: i, j
+ integer :: nCellsGlobal
+ integer :: nEdgesGlobal
+ integer :: nVerticesGlobal
+ integer :: nVertLevelsGlobal
+ integer, dimension(:), pointer :: neededCellList
+ integer, dimension(:), pointer :: neededEdgeList
+ integer, dimension(:), pointer :: neededVertexList
+ integer, dimension(:), pointer :: neededVertLevelList
+ integer, dimension(:,:), pointer :: cellsOnCell, edgesOnCell, verticesOnCell, &
+ cellsOnEdge, verticesOnEdge, edgesOnEdge, cellsOnVertex, edgesOnVertex
+ integer, dimension(:,:), pointer :: cellsOnCell_save, edgesOnCell_save, verticesOnCell_save, &
+ cellsOnEdge_save, verticesOnEdge_save, edgesOnEdge_save, &
+ cellsOnVertex_save, edgesOnVertex_save
+ type (field1dInteger) :: int1d
+ type (field2dInteger) :: int2d
+ type (field0dReal) :: real0d
+ type (field1dReal) :: real1d
+ type (field2dReal) :: real2d
+ type (field3dReal) :: real3d
+ type (field0dChar) :: char0d
+ type (field1dChar) :: char1d
+
+ integer :: i1, i2, i3, i4
+
+ integer, dimension(:), pointer :: super_int1d
+ integer, dimension(:,:), pointer :: super_int2d
+ real (kind=RKIND) :: super_real0d
+ real (kind=RKIND), dimension(:), pointer :: super_real1d
+ real (kind=RKIND), dimension(:,:), pointer :: super_real2d
+ real (kind=RKIND), dimension(:,:,:), pointer :: super_real3d
+ character (len=64) :: super_char0d
+ character (len=64), dimension(:), pointer :: super_char1d
+
+#include "nondecomp_outputs.inc"
+
+ output_obj % time = itime
+
+ allocate(int1d % ioinfo)
+ allocate(int2d % ioinfo)
+ allocate(real0d % ioinfo)
+ allocate(real1d % ioinfo)
+ allocate(real2d % ioinfo)
+ allocate(real3d % ioinfo)
+ allocate(char0d % ioinfo)
+ allocate(char1d % ioinfo)
+
+ call mpas_dmpar_sum_int(domain % dminfo, domain % blocklist % mesh % nCellsSolve, nCellsGlobal)
+ call mpas_dmpar_sum_int(domain % dminfo, domain % blocklist % mesh % nEdgesSolve, nEdgesGlobal)
+ call mpas_dmpar_sum_int(domain % dminfo, domain % blocklist % mesh % nVerticesSolve, nVerticesGlobal)
+ nVertLevelsGlobal = domain % blocklist % mesh % nVertLevels
+
+ allocate(cellsOnCell(domain % blocklist % mesh % maxEdges, domain % blocklist % mesh % nCellsSolve))
+ allocate(edgesOnCell(domain % blocklist % mesh % maxEdges, domain % blocklist % mesh % nCellsSolve))
+ allocate(verticesOnCell(domain % blocklist % mesh % maxEdges, domain % blocklist % mesh % nCellsSolve))
+ allocate(cellsOnEdge(2, domain % blocklist % mesh % nEdgesSolve))
+ allocate(verticesOnEdge(2, domain % blocklist % mesh % nEdgesSolve))
+ allocate(edgesOnEdge(2 * domain % blocklist % mesh % maxEdges, domain % blocklist % mesh % nEdgesSolve))
+ allocate(cellsOnVertex(domain % blocklist % mesh % vertexDegree, domain % blocklist % mesh % nVerticesSolve))
+ allocate(edgesOnVertex(domain % blocklist % mesh % vertexDegree, domain % blocklist % mesh % nVerticesSolve))
+
+
+ !
+ ! Convert connectivity information from local to global indices
+ !
+ do i=1,domain % blocklist % mesh % nCellsSolve
+ do j=1,domain % blocklist % mesh % nEdgesOnCell % array(i)
+ cellsOnCell(j,i) = domain % blocklist % mesh % indexToCellID % array( &
+ domain % blocklist % mesh % cellsOnCell % array(j,i))
+ edgesOnCell(j,i) = domain % blocklist % mesh % indexToEdgeID % array( &
+ domain % blocklist % mesh % edgesOnCell % array(j,i))
+ verticesOnCell(j,i) = domain % blocklist % mesh % indexToVertexID % array( &
+ domain % blocklist % mesh % verticesOnCell % array(j,i))
+ end do
+ ! NOTE(review): the padding below indexes the indexTo*ID arrays by
+ ! nEdgesOnCell(i), which is a count rather than a local element index.
+ ! Preserved as-is; confirm against upstream intent.
+ do j=domain % blocklist % mesh % nEdgesOnCell % array(i)+1,domain % blocklist % mesh % maxEdges
+ cellsOnCell(j,i) = domain % blocklist % mesh % indexToCellID % array( &
+ domain % blocklist % mesh % nEdgesOnCell % array(i))
+ edgesOnCell(j,i) = domain % blocklist % mesh % indexToEdgeID % array( &
+ domain % blocklist % mesh % nEdgesOnCell % array(i))
+ verticesOnCell(j,i) = domain % blocklist % mesh % indexToVertexID % array( &
+ domain % blocklist % mesh % nEdgesOnCell % array(i))
+ end do
+ end do
+ do i=1,domain % blocklist % mesh % nEdgesSolve
+ cellsOnEdge(1,i) = domain % blocklist % mesh % indexToCellID % array(domain % blocklist % mesh % cellsOnEdge % array(1,i))
+ cellsOnEdge(2,i) = domain % blocklist % mesh % indexToCellID % array(domain % blocklist % mesh % cellsOnEdge % array(2,i))
+ verticesOnEdge(1,i) = domain % blocklist % mesh % indexToVertexID % array( &
+ domain % blocklist % mesh % verticesOnEdge % array(1,i))
+ verticesOnEdge(2,i) = domain % blocklist % mesh % indexToVertexID % array( &
+ domain % blocklist % mesh % verticesOnEdge % array(2,i))
+ do j=1,domain % blocklist % mesh % nEdgesOnEdge % array(i)
+ edgesOnEdge(j,i) = domain % blocklist % mesh % indexToEdgeID % array( &
+ domain % blocklist % mesh % edgesOnEdge % array(j,i))
+ end do
+ do j=domain % blocklist % mesh % nEdgesOnEdge % array(i)+1,2*domain % blocklist % mesh % maxEdges
+ if(domain % blocklist % mesh % nEdgesOnEdge % array(i) .eq. 0) then
+ edgesOnEdge(j,i) = domain % blocklist % mesh % nEdgesSolve + 1
+ else
+ edgesOnEdge(j,i) = domain % blocklist % mesh % indexToEdgeID % array( &
+ domain % blocklist % mesh % nEdgesOnEdge % array(i))
+ endif
+ end do
+ end do
+ do i=1,domain % blocklist % mesh % nVerticesSolve
+ do j=1,domain % blocklist % mesh % vertexDegree
+ cellsOnVertex(j,i) = domain % blocklist % mesh % indexToCellID % array( &
+ domain % blocklist % mesh % cellsOnVertex % array(j,i))
+ edgesOnVertex(j,i) = domain % blocklist % mesh % indexToEdgeID % array( &
+ domain % blocklist % mesh % edgesOnVertex % array(j,i))
+ end do
+ end do
+
+ ! Task 0 requests every global element; other tasks request none.
+ if (domain % dminfo % my_proc_id == 0) then
+ allocate(neededCellList(nCellsGlobal))
+ allocate(neededEdgeList(nEdgesGlobal))
+ allocate(neededVertexList(nVerticesGlobal))
+ allocate(neededVertLevelList(nVertLevelsGlobal))
+ do i=1,nCellsGlobal
+ neededCellList(i) = i
+ end do
+ do i=1,nEdgesGlobal
+ neededEdgeList(i) = i
+ end do
+ do i=1,nVerticesGlobal
+ neededVertexList(i) = i
+ end do
+ do i=1,nVertLevelsGlobal
+ neededVertLevelList(i) = i
+ end do
+ else
+ allocate(neededCellList(0))
+ allocate(neededEdgeList(0))
+ allocate(neededVertexList(0))
+ allocate(neededVertLevelList(0))
+ end if
+
+ if (.not. output_obj % validExchangeLists) then
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ domain % blocklist % mesh % nCellsSolve, size(neededCellList), &
+ domain % blocklist % mesh % indexToCellID % array, neededCellList, &
+ output_obj % sendCellsList, output_obj % recvCellsList)
+
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ domain % blocklist % mesh % nEdgesSolve, size(neededEdgeList), &
+ domain % blocklist % mesh % indexToEdgeID % array, neededEdgeList, &
+ output_obj % sendEdgesList, output_obj % recvEdgesList)
+
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ domain % blocklist % mesh % nVerticesSolve, size(neededVertexList), &
+ domain % blocklist % mesh % indexToVertexID % array, neededVertexList, &
+ output_obj % sendVerticesList, output_obj % recvVerticesList)
+
+ call mpas_dmpar_get_owner_list(domain % dminfo, &
+ size(neededVertLevelList), size(neededVertLevelList), &
+ neededVertLevelList, neededVertLevelList, &
+ output_obj % sendVertLevelsList, output_obj % recvVertLevelsList)
+
+ output_obj % validExchangeLists = .true.
+ end if
+
+ deallocate(neededCellList)
+ deallocate(neededEdgeList)
+ deallocate(neededVertexList)
+ deallocate(neededVertLevelList) ! was previously leaked
+
+ cellsOnCell_save => domain % blocklist % mesh % cellsOnCell % array
+ edgesOnCell_save => domain % blocklist % mesh % edgesOnCell % array
+ verticesOnCell_save => domain % blocklist % mesh % verticesOnCell % array
+ cellsOnEdge_save => domain % blocklist % mesh % cellsOnEdge % array
+ verticesOnEdge_save => domain % blocklist % mesh % verticesOnEdge % array
+ edgesOnEdge_save => domain % blocklist % mesh % edgesOnEdge % array
+ cellsOnVertex_save => domain % blocklist % mesh % cellsOnVertex % array
+ edgesOnVertex_save => domain % blocklist % mesh % edgesOnVertex % array
+
+ ! Swap in the globally-indexed connectivity for the duration of the write.
+ domain % blocklist % mesh % cellsOnCell % array => cellsOnCell
+ domain % blocklist % mesh % edgesOnCell % array => edgesOnCell
+ domain % blocklist % mesh % verticesOnCell % array => verticesOnCell
+ domain % blocklist % mesh % cellsOnEdge % array => cellsOnEdge
+ domain % blocklist % mesh % verticesOnEdge % array => verticesOnEdge
+ domain % blocklist % mesh % edgesOnEdge % array => edgesOnEdge
+ domain % blocklist % mesh % cellsOnVertex % array => cellsOnVertex
+ domain % blocklist % mesh % edgesOnVertex % array => edgesOnVertex
+
+#include "io_output_fields.inc"
+
+ ! Restore the original locally-indexed connectivity.
+ domain % blocklist % mesh % cellsOnCell % array => cellsOnCell_save
+ domain % blocklist % mesh % edgesOnCell % array => edgesOnCell_save
+ domain % blocklist % mesh % verticesOnCell % array => verticesOnCell_save
+ domain % blocklist % mesh % cellsOnEdge % array => cellsOnEdge_save
+ domain % blocklist % mesh % verticesOnEdge % array => verticesOnEdge_save
+ domain % blocklist % mesh % edgesOnEdge % array => edgesOnEdge_save
+ domain % blocklist % mesh % cellsOnVertex % array => cellsOnVertex_save
+ domain % blocklist % mesh % edgesOnVertex % array => edgesOnVertex_save
+
+ deallocate(cellsOnCell)
+ deallocate(edgesOnCell)
+ deallocate(verticesOnCell)
+ deallocate(cellsOnEdge)
+ deallocate(verticesOnEdge)
+ deallocate(edgesOnEdge)
+ deallocate(cellsOnVertex)
+ deallocate(edgesOnVertex)
+
+#include "nondecomp_outputs_dealloc.inc"
+
+ end subroutine mpas_output_state_for_domain
+
+
+ ! Close the output stream; thin public wrapper over mpas_io_output_finalize.
+ subroutine mpas_output_state_finalize(output_obj, dminfo)
+
+ implicit none
+
+ type (io_output_object), intent(inout) :: output_obj
+ type (dm_info), intent(in) :: dminfo
+
+ call mpas_io_output_finalize(output_obj, dminfo)
+
+ end subroutine mpas_output_state_finalize
+
+
+ ! Create the netCDF output file and define all dimensions, variables, and
+ ! global attributes. Only task 0 touches the file; other tasks return
+ ! without doing any netCDF work.
+ subroutine mpas_io_output_init( output_obj, &
+ dminfo, &
+ mesh, &
+#include "dim_dummy_args.inc"
+ )
+
+ implicit none
+
+ include 'netcdf.inc'
+
+ type (io_output_object), intent(inout) :: output_obj
+ type (dm_info), intent(in) :: dminfo
+ type (mesh_type), intent(in) :: mesh
+#include "dim_dummy_decls.inc"
+
+ integer :: nferr
+ integer, dimension(10) :: dimlist
+
+ if (dminfo % my_proc_id == 0) then
+#ifdef OFFSET64BIT
+ nferr = nf_create(trim(output_obj % filename), ior(NF_CLOBBER,NF_64BIT_OFFSET), output_obj % wr_ncid)
+#else
+ nferr = nf_create(trim(output_obj % filename), NF_CLOBBER, output_obj % wr_ncid)
+#endif
+
+ nferr = nf_def_dim(output_obj % wr_ncid, 'StrLen', 64, output_obj % wrDimIDStrLen)
+#include "netcdf_def_dims_vars.inc"
+
+ ! 'on_a_sphere' is written as a fixed 16-byte text attribute.
+ if (mesh % on_a_sphere) then
+ nferr = nf_put_att_text(output_obj % wr_ncid, NF_GLOBAL, 'on_a_sphere', 16, 'YES ')
+ else
+ nferr = nf_put_att_text(output_obj % wr_ncid, NF_GLOBAL, 'on_a_sphere', 16, 'NO ')
+ end if
+ if (RKIND == 8) then
+ nferr = nf_put_att_double(output_obj % wr_ncid, NF_GLOBAL, 'sphere_radius', NF_DOUBLE, 1, mesh % sphere_radius)
+ else
+ nferr = nf_put_att_real(output_obj % wr_ncid, NF_GLOBAL, 'sphere_radius', NF_FLOAT, 1, mesh % sphere_radius)
+ end if
+
+ nferr = nf_enddef(output_obj % wr_ncid)
+ end if
+
+ end subroutine mpas_io_output_init
+
+
+ ! Write a non-time-varying scalar real field; varID is resolved by the
+ ! generated include. The file is synced after every write.
+ subroutine mpas_io_output_field0d_real(output_obj, field)
+
+ implicit none
+
+ type (io_output_object), intent(in) :: output_obj
+ type (field0dReal), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(1) :: start1, count1
+
+ start1(1) = 1
+ count1(1) = 1
+
+#include "output_field0dreal.inc"
+
+#if (RKIND == 8)
+ nferr = nf_put_vara_double(output_obj % wr_ncid, varID, start1, count1, field % scalar)
+#else
+ nferr = nf_put_vara_real(output_obj % wr_ncid, varID, start1, count1, field % scalar)
+#endif
+
+ nferr = nf_sync(output_obj % wr_ncid)
+
+ end subroutine mpas_io_output_field0d_real
+
+
+ ! Write a non-time-varying 1-d real field. (VarID/varID differ only in
+ ! case, which Fortran treats as the same identifier.)
+ subroutine mpas_io_output_field1d_real(output_obj, field)
+
+ implicit none
+
+ type (io_output_object), intent(in) :: output_obj
+ type (field1dReal), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(1) :: start1, count1
+
+ start1(1) = field % ioinfo % start(1)
+ count1(1) = field % ioinfo % count(1)
+
+#include "output_field1dreal.inc"
+
+#if (RKIND == 8)
+ nferr = nf_put_vara_double(output_obj % wr_ncid, VarID, start1, count1, field % array)
+#else
+ nferr = nf_put_vara_real(output_obj % wr_ncid, VarID, start1, count1, field % array)
+#endif
+
+ nferr = nf_sync(output_obj % wr_ncid)
+
+ end subroutine mpas_io_output_field1d_real
+
+
+ ! Write a non-time-varying 2-d real field.
+ subroutine mpas_io_output_field2d_real(output_obj, field)
+
+ implicit none
+
+ type (io_output_object), intent(in) :: output_obj
+ type (field2dReal), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(2) :: start2, count2
+
+ start2(1) = field % ioinfo % start(1)
+ start2(2) = field % ioinfo % start(2)
+ count2(1) = field % ioinfo % count(1)
+ count2(2) = field % ioinfo % count(2)
+
+#include "output_field2dreal.inc"
+
+#if (RKIND == 8)
+ nferr = nf_put_vara_double(output_obj % wr_ncid, varID, start2, count2, field % array)
+#else
+ nferr = nf_put_vara_real(output_obj % wr_ncid, varID, start2, count2, field % array)
+#endif
+
+ nferr = nf_sync(output_obj % wr_ncid)
+
+ end subroutine mpas_io_output_field2d_real
+
+
+ ! Write a non-time-varying 3-d real field.
+ subroutine mpas_io_output_field3d_real(output_obj, field)
+
+ implicit none
+
+ type (io_output_object), intent(in) :: output_obj
+ type (field3dReal), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(3) :: start3, count3
+
+ start3(1) = field % ioinfo % start(1)
+ start3(2) = field % ioinfo % start(2)
+ start3(3) = field % ioinfo % start(3)
+ count3(1) = field % ioinfo % count(1)
+ count3(2) = field % ioinfo % count(2)
+ count3(3) = field % ioinfo % count(3)
+
+#include "output_field3dreal.inc"
+
+#if (RKIND == 8)
+ nferr = nf_put_vara_double(output_obj % wr_ncid, varID, start3, count3, field % array)
+#else
+ nferr = nf_put_vara_real(output_obj % wr_ncid, varID, start3, count3, field % array)
+#endif
+
+ nferr = nf_sync(output_obj % wr_ncid)
+
+ end subroutine mpas_io_output_field3d_real
+
+
+ ! Write scalar real field data at time frame output_obj%time.
+ subroutine mpas_io_output_field0d_real_time(output_obj, field)
+
+ implicit none
+
+ type (io_output_object), intent(in) :: output_obj
+ type (field0dReal), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(1) :: start1, count1
+
+ start1(1) = output_obj % time
+ count1(1) = 1
+
+#include "output_field0dreal_time.inc"
+
+#if (RKIND == 8)
+ nferr = nf_put_vara_double(output_obj % wr_ncid, varID, start1, count1, field % scalar)
+#else
+ nferr = nf_put_vara_real(output_obj % wr_ncid, varID, start1, count1, field % scalar)
+#endif
+
+ nferr = nf_sync(output_obj % wr_ncid)
+
+ end subroutine mpas_io_output_field0d_real_time
+
+
+ !> Write a 1-d real field into record output_obj % time; the last index of
+ !> the NetCDF variable is the time dimension (count 1).
+ subroutine mpas_io_output_field1d_real_time(output_obj, field)
+
+ implicit none
+
+ type (io_output_object), intent(in) :: output_obj
+ type (field1dReal), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(2) :: start2, count2
+
+ start2(1) = field % ioinfo % start(1)
+ start2(2) = output_obj % time
+ count2(1) = field % ioinfo % count(1)
+ count2(2) = 1
+
+#include "output_field1dreal_time.inc"
+
+#if (RKIND == 8)
+ nferr = nf_put_vara_double(output_obj % wr_ncid, varID, start2, count2, field % array)
+#else
+ nferr = nf_put_vara_real(output_obj % wr_ncid, varID, start2, count2, field % array)
+#endif
+
+ nferr = nf_sync(output_obj % wr_ncid)
+
+ end subroutine mpas_io_output_field1d_real_time
+
+
+ !> Write a 2-d real field into record output_obj % time (time is the third,
+ !> slowest-varying NetCDF index, written with count 1).
+ subroutine mpas_io_output_field2d_real_time(output_obj, field)
+
+ implicit none
+
+ type (io_output_object), intent(in) :: output_obj
+ type (field2dReal), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(3) :: start3, count3
+
+ start3(1) = field % ioinfo % start(1)
+ start3(2) = field % ioinfo % start(2)
+ start3(3) = output_obj % time
+ count3(1) = field % ioinfo % count(1)
+ count3(2) = field % ioinfo % count(2)
+ count3(3) = 1
+
+#include "output_field2dreal_time.inc"
+
+#if (RKIND == 8)
+ nferr = nf_put_vara_double(output_obj % wr_ncid, varID, start3, count3, field % array)
+#else
+ nferr = nf_put_vara_real(output_obj % wr_ncid, varID, start3, count3, field % array)
+#endif
+
+ nferr = nf_sync(output_obj % wr_ncid)
+
+ end subroutine mpas_io_output_field2d_real_time
+
+
+ !> Write a 3-d real field into record output_obj % time (time is the fourth
+ !> NetCDF index, written with count 1).
+ subroutine mpas_io_output_field3d_real_time(output_obj, field)
+
+ implicit none
+
+ type (io_output_object), intent(in) :: output_obj
+ type (field3dReal), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(4) :: start4, count4
+
+ start4(1) = field % ioinfo % start(1)
+ start4(2) = field % ioinfo % start(2)
+ start4(3) = field % ioinfo % start(3)
+ start4(4) = output_obj % time
+ count4(1) = field % ioinfo % count(1)
+ count4(2) = field % ioinfo % count(2)
+ count4(3) = field % ioinfo % count(3)
+ count4(4) = 1
+
+#include "output_field3dreal_time.inc"
+
+#if (RKIND == 8)
+ nferr = nf_put_vara_double(output_obj % wr_ncid, varID, start4, count4, field % array)
+#else
+ nferr = nf_put_vara_real(output_obj % wr_ncid, varID, start4, count4, field % array)
+#endif
+
+ nferr = nf_sync(output_obj % wr_ncid)
+
+ end subroutine mpas_io_output_field3d_real_time
+
+
+ !> Write a non-time-varying 1-d integer field using nf_put_vara_int.
+ subroutine mpas_io_output_field1d_integer(output_obj, field)
+
+ implicit none
+
+ type (io_output_object), intent(in) :: output_obj
+ type (field1dInteger), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(1) :: start1, count1
+
+ start1(1) = field % ioinfo % start(1)
+ count1(1) = field % ioinfo % count(1)
+
+#include "output_field1dinteger.inc"
+
+ nferr = nf_put_vara_int(output_obj % wr_ncid, varID, start1, count1, field % array)
+
+ nferr = nf_sync(output_obj % wr_ncid)
+
+ end subroutine mpas_io_output_field1d_integer
+
+
+ !> Write a non-time-varying 2-d integer field using nf_put_vara_int.
+ subroutine mpas_io_output_field2d_integer(output_obj, field)
+
+ implicit none
+
+ type (io_output_object), intent(in) :: output_obj
+ type (field2dInteger), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(2) :: start2, count2
+
+ start2(1) = field % ioinfo % start(1)
+ start2(2) = field % ioinfo % start(2)
+ count2(1) = field % ioinfo % count(1)
+ count2(2) = field % ioinfo % count(2)
+
+#include "output_field2dinteger.inc"
+
+ nferr = nf_put_vara_int(output_obj % wr_ncid, varID, start2, count2, field % array)
+
+ nferr = nf_sync(output_obj % wr_ncid)
+
+ end subroutine mpas_io_output_field2d_integer
+
+
+ !> Write a 1-d integer field into record output_obj % time (second NetCDF
+ !> index is time, count 1).
+ subroutine mpas_io_output_field1d_integer_time(output_obj, field)
+
+ implicit none
+
+ type (io_output_object), intent(in) :: output_obj
+ type (field1dInteger), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(2) :: start2, count2
+
+ start2(1) = field % ioinfo % start(1)
+ start2(2) = output_obj % time
+ count2(1) = field % ioinfo % count(1)
+ count2(2) = 1
+
+#include "output_field1dinteger_time.inc"
+
+ nferr = nf_put_vara_int(output_obj % wr_ncid, varID, start2, count2, field % array)
+
+ nferr = nf_sync(output_obj % wr_ncid)
+
+ end subroutine mpas_io_output_field1d_integer_time
+
+
+ !> Write a 0-d character field into record output_obj % time.
+ !> NOTE(review): the string width is hard-coded to 64 characters here and in
+ !> the other char writers — presumably the framework's fixed StrKIND; confirm.
+ subroutine mpas_io_output_field0d_char_time(output_obj, field)
+
+ implicit none
+
+ type (io_output_object), intent(in) :: output_obj
+ type (field0dChar), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(2) :: start1, count1
+
+ start1(1) = 1
+ count1(1) = 64
+ start1(2) = output_obj % time
+ count1(2) = 1
+
+#include "output_field0dchar_time.inc"
+
+ nferr = nf_put_vara_text(output_obj % wr_ncid, varID, start1, count1, field % scalar)
+
+ nferr = nf_sync(output_obj % wr_ncid)
+
+ end subroutine mpas_io_output_field0d_char_time
+
+
+ !> Write a 1-d character field into record output_obj % time (first index is
+ !> the 64-char string length, third is time with count 1).
+ subroutine mpas_io_output_field1d_char_time(output_obj, field)
+
+ implicit none
+
+ type (io_output_object), intent(in) :: output_obj
+ type (field1dChar), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(3) :: start2, count2
+
+ start2(1) = 1
+ start2(2) = field % ioinfo % start(1)
+ start2(3) = output_obj % time
+ count2(1) = 64
+ count2(2) = field % ioinfo % count(1)
+ count2(3) = 1
+
+#include "output_field1dchar_time.inc"
+
+ nferr = nf_put_vara_text(output_obj % wr_ncid, varID, start2, count2, field % array)
+
+ nferr = nf_sync(output_obj % wr_ncid)
+
+ end subroutine mpas_io_output_field1d_char_time
+
+
+ !> Write a non-time-varying 0-d character field (64-char string at record 1).
+ subroutine mpas_io_output_field0d_char(output_obj, field)
+
+ implicit none
+
+ type (io_output_object), intent(in) :: output_obj
+ type (field0dChar), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(2) :: start1, count1
+
+ start1(1) = 1
+ count1(1) = 64
+ start1(2) = 1
+ count1(2) = 1
+
+#include "output_field0dchar.inc"
+
+ nferr = nf_put_vara_text(output_obj % wr_ncid, varID, start1, count1, field % scalar)
+
+ nferr = nf_sync(output_obj % wr_ncid)
+
+ end subroutine mpas_io_output_field0d_char
+
+
+ !> Write a non-time-varying 1-d character field (first index is the 64-char
+ !> string length).
+ subroutine mpas_io_output_field1d_char(output_obj, field)
+
+ implicit none
+
+ type (io_output_object), intent(in) :: output_obj
+ type (field1dChar), intent(inout) :: field
+
+ include 'netcdf.inc'
+
+ integer :: nferr
+ integer :: varID
+ integer, dimension(2) :: start1, count1
+
+ start1(1) = 1
+ count1(1) = 64
+ start1(2) = field % ioinfo % start(1)
+ count1(2) = field % ioinfo % count(1)
+
+#include "output_field1dchar.inc"
+
+ ! NOTE(review): 'VarID' differs in case from 'varID' used elsewhere;
+ ! harmless (Fortran is case-insensitive) but inconsistent.
+ nferr = nf_put_vara_text(output_obj % wr_ncid, VarID, start1, count1, field % array)
+
+ nferr = nf_sync(output_obj % wr_ncid)
+
+ end subroutine mpas_io_output_field1d_char
+
+
+ !> Close the output file. Only the master task (proc 0) holds the open
+ !> NetCDF id, so only it calls nf_close.
+ subroutine mpas_io_output_finalize(output_obj, dminfo)
+
+ implicit none
+
+ include 'netcdf.inc'
+
+ type (io_output_object), intent(inout) :: output_obj
+ type (dm_info), intent(in) :: dminfo
+
+ integer :: nferr
+
+ if (dminfo % my_proc_id == 0) then
+ nferr = nf_close(output_obj % wr_ncid)
+ end if
+
+ end subroutine mpas_io_output_finalize
+
+end module mpas_io_output
Copied: branches/source_renaming/src/framework/mpas_sort.F (from rev 1089, branches/source_renaming/src/framework/module_sort.F)
===================================================================
--- branches/source_renaming/src/framework/mpas_sort.F         (rev 0)
+++ branches/source_renaming/src/framework/mpas_sort.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -0,0 +1,230 @@
+!> Sorting utilities for MPAS: a recursive merge sort and iterative quicksorts
+!> (integer/real generics via the 'quicksort' interface), plus a binary search.
+!> Records are stored column-wise: array(1,:) is the sort key, remaining rows
+!> are payload carried along with each key.
+module mpas_sort
+
+ ! Generic name dispatching on the element type of the 2 x nArray record array
+ interface quicksort
+ module procedure mpas_quicksort_int
+ module procedure mpas_quicksort_real
+ end interface
+
+
+ contains
+
+
+ !> Stable merge sort of array(1:d1, n1:n2) on row 1 (ascending).
+ !> d1 is the record width; rows 2..d1 move with their key.
+ !> NOTE(review): 'temp' is an automatic array sized n2-n1+1 records; very
+ !> large inputs may overflow the stack.
+ recursive subroutine mpas_mergesort(array, d1, n1, n2)
+
+ implicit none
+
+ ! Arguments
+ integer, intent(in) :: n1, n2, d1
+ integer, dimension(1:d1,n1:n2), intent(inout) :: array
+
+ ! Local variables
+ integer :: i, j, k
+ integer :: rtemp
+ integer, dimension(1:d1,1:n2-n1+1) :: temp
+
+ ! Zero- or one-element range: already sorted
+ if (n1 >= n2) return
+
+ ! Two-element range: swap in place if out of order
+ if (n2 - n1 == 1) then
+ if (array(1,n1) > array(1,n2)) then
+ do i=1,d1
+ rtemp = array(i,n1)
+ array(i,n1) = array(i,n2)
+ array(i,n2) = rtemp
+ end do
+ end if
+ return
+ end if
+
+ ! Sort the two halves, then merge them through 'temp'
+ call mpas_mergesort(array(1:d1,n1:n1+(n2-n1+1)/2), d1, n1, n1+(n2-n1+1)/2)
+ call mpas_mergesort(array(1:d1,n1+((n2-n1+1)/2)+1:n2), d1, n1+((n2-n1+1)/2)+1, n2)
+
+ i = n1
+ j = n1 + ((n2-n1+1)/2) + 1
+ k = 1
+ do while (i <= n1+(n2-n1+1)/2 .and. j <= n2)
+ if (array(1,i) < array(1,j)) then
+ temp(1:d1,k) = array(1:d1,i)
+ k = k + 1
+ i = i + 1
+ else
+ temp(1:d1,k) = array(1:d1,j)
+ k = k + 1
+ j = j + 1
+ end if
+ end do
+
+ ! Drain whichever half still has records
+ if (i <= n1+(n2-n1+1)/2) then
+ do while (i <= n1+(n2-n1+1)/2)
+ temp(1:d1,k) = array(1:d1,i)
+ i = i + 1
+ k = k + 1
+ end do
+ else
+ do while (j <= n2)
+ temp(1:d1,k) = array(1:d1,j)
+ j = j + 1
+ k = k + 1
+ end do
+ end if
+
+ array(1:d1,n1:n2) = temp(1:d1,1:k-1)
+
+ end subroutine mpas_mergesort
+
+
+ !> In-place iterative quicksort of array(1:2, 1:nArray) on row 1 (ascending),
+ !> using an explicit stack of subranges and middle-element pivot.
+ !> NOTE(review): stack depth is fixed at 1000; on exhaustion only a message
+ !> is written and the indexing then overruns lstack/rstack. Local 'j' is unused.
+ subroutine mpas_quicksort_int(nArray, array)
+
+ implicit none
+
+ integer, intent(in) :: nArray
+ integer, dimension(2,nArray), intent(inout) :: array
+
+ integer :: i, j, top, l, r, pivot, s
+ integer :: pivot_value
+ integer, dimension(2) :: temp
+ integer, dimension(1000) :: lstack, rstack
+
+ if (nArray < 1) return
+
+ top = 1
+ lstack(top) = 1
+ rstack(top) = nArray
+
+ do while (top > 0)
+
+ l = lstack(top)
+ r = rstack(top)
+ top = top - 1
+
+ ! Move the middle element to position r to serve as the pivot
+ pivot = (l+r)/2
+
+ pivot_value = array(1,pivot)
+ temp(:) = array(:,pivot)
+ array(:,pivot) = array(:,r)
+ array(:,r) = temp(:)
+
+ ! Lomuto-style partition: s marks the insertion point for <= pivot
+ s = l
+ do i=l,r-1
+ if (array(1,i) <= pivot_value) then
+ temp(:) = array(:,s)
+ array(:,s) = array(:,i)
+ array(:,i) = temp(:)
+ s = s + 1
+ end if
+ end do
+
+ ! Put the pivot into its final slot s
+ temp(:) = array(:,s)
+ array(:,s) = array(:,r)
+ array(:,r) = temp(:)
+
+ ! Push the two subranges that still contain more than one element
+ if (s-1 > l) then
+ top = top + 1
+if (top > 1000) write(0,*) 'Error: Quicksort exhausted its stack.'
+ lstack(top) = l
+ rstack(top) = s-1
+ end if
+
+ if (r > s+1) then
+ top = top + 1
+if (top > 1000) write(0,*) 'Error: Quicksort exhausted its stack.'
+ lstack(top) = s+1
+ rstack(top) = r
+ end if
+ end do
+
+ end subroutine mpas_quicksort_int
+
+
+ !> Real-keyed counterpart of mpas_quicksort_int; identical algorithm with
+ !> real(RKIND) keys and payload. Same fixed-stack caveat applies.
+ subroutine mpas_quicksort_real(nArray, array)
+
+ implicit none
+
+ integer, intent(in) :: nArray
+ real (kind=RKIND), dimension(2,nArray), intent(inout) :: array
+
+ integer :: i, j, top, l, r, pivot, s
+ real (kind=RKIND) :: pivot_value
+ real (kind=RKIND), dimension(2) :: temp
+ integer, dimension(1000) :: lstack, rstack
+
+ if (nArray < 1) return
+
+ top = 1
+ lstack(top) = 1
+ rstack(top) = nArray
+
+ do while (top > 0)
+
+ l = lstack(top)
+ r = rstack(top)
+ top = top - 1
+
+ pivot = (l+r)/2
+
+ pivot_value = array(1,pivot)
+ temp(:) = array(:,pivot)
+ array(:,pivot) = array(:,r)
+ array(:,r) = temp(:)
+
+ s = l
+ do i=l,r-1
+ if (array(1,i) <= pivot_value) then
+ temp(:) = array(:,s)
+ array(:,s) = array(:,i)
+ array(:,i) = temp(:)
+ s = s + 1
+ end if
+ end do
+
+ temp(:) = array(:,s)
+ array(:,s) = array(:,r)
+ array(:,r) = temp(:)
+
+ if (s-1 > l) then
+ top = top + 1
+if (top > 1000) write(0,*) 'Error: Quicksort exhausted its stack.'
+ lstack(top) = l
+ rstack(top) = s-1
+ end if
+
+ if (r > s+1) then
+ top = top + 1
+if (top > 1000) write(0,*) 'Error: Quicksort exhausted its stack.'
+ lstack(top) = s+1
+ rstack(top) = r
+ end if
+ end do
+
+ end subroutine mpas_quicksort_real
+
+
+ !> Binary search for 'key' in row 1 of array(:, n1:n2), which must be sorted
+ !> ascending on that row. Returns the index of a match, or n2+1 if absent.
+ integer function mpas_binary_search(array, d1, n1, n2, key)
+
+ implicit none
+
+ integer, intent(in) :: d1, n1, n2, key
+ integer, dimension(d1,n1:n2), intent(in) :: array
+
+ integer :: l, u, k
+
+ mpas_binary_search = n2+1
+
+ l = n1
+ u = n2
+ k = (l+u)/2
+ do while (u >= l)
+ if (array(1,k) == key) then
+ mpas_binary_search = k
+ exit
+ else if (array(1,k) < key) then
+ l = k + 1
+ k = (l+u)/2
+ else
+ u = k - 1
+ k = (l+u)/2
+ end if
+ end do
+
+ end function mpas_binary_search
+
+end module mpas_sort
Copied: branches/source_renaming/src/framework/mpas_timekeeping.F (from rev 1089, branches/source_renaming/src/framework/module_mpas_timekeeping.F)
===================================================================
--- branches/source_renaming/src/framework/mpas_timekeeping.F         (rev 0)
+++ branches/source_renaming/src/framework/mpas_timekeeping.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -0,0 +1,1625 @@
+!> MPAS timekeeping: thin wrappers around the ESMF time manager providing
+!> clocks, times, time intervals, and a linked list of (possibly recurring)
+!> alarms per clock, usable with the clock running forward or backward.
+module mpas_timekeeping
+
+ use ESMF_BaseMod
+ use ESMF_Stubs
+ use ESMF_CalendarMod
+ use ESMF_ClockMod
+ use ESMF_TimeMod
+ use ESMF_TimeIntervalMod
+
+ private :: mpas_calibrate_alarms
+ private :: mpas_in_ringing_envelope
+
+ ! Upper bound on alarms reported by mpas_get_clock_ringing_alarms
+ integer, parameter :: MPAS_MAX_ALARMS = 20
+ ! Selector values for set/get of a clock's current/start/stop time
+ integer, parameter :: MPAS_NOW = 0, &
+ MPAS_START_TIME = 1, &
+ MPAS_STOP_TIME = 2
+ ! Clock integration direction
+ integer, parameter :: MPAS_FORWARD = 1, &
+ MPAS_BACKWARD = -1
+ ! Supported calendars (mapped onto ESMF calendars at init)
+ integer, parameter :: MPAS_GREGORIAN = 0, &
+ MPAS_GREGORIAN_NOLEAP = 1, &
+ MPAS_360DAY = 2
+
+ ! Module-wide calendar selection, set by mpas_timekeeping_init
+ integer :: TheCalendar
+
+ integer, dimension(12), parameter :: daysInMonth = (/31,28,31,30,31,30,31,31,30,31,30,31/)
+ integer, dimension(12), parameter :: daysInMonthLeap = (/31,29,31,30,31,30,31,31,30,31,30,31/)
+
+
+ ! Opaque wrappers around the corresponding ESMF types
+ type MPAS_Time_type
+ type (ESMF_Time) :: t
+ end type
+
+ type MPAS_TimeInterval_type
+ type (ESMF_TimeInterval) :: ti
+ end type
+
+ ! One node in a clock's singly-linked alarm list
+ type MPAS_Alarm_type
+ integer :: alarmID
+ logical :: isRecurring
+ logical :: isSet
+ type (MPAS_Time_type) :: ringTime
+ type (MPAS_Time_type) :: prevRingTime
+ type (MPAS_TimeInterval_type) :: ringTimeInterval
+ type (MPAS_Alarm_type), pointer :: next
+ end type
+
+ type MPAS_Clock_type
+ integer :: direction
+ integer :: nAlarms
+ type (ESMF_Clock) :: c
+ type (MPAS_Alarm_type), pointer :: alarmListHead
+ end type
+
+ ! Arithmetic and comparison operators on the wrapper types, implemented by
+ ! module procedures defined later in this module
+ interface operator (+)
+ module procedure add_t_ti
+ module procedure add_ti_ti
+ end interface
+
+ interface operator (-)
+ module procedure sub_t_t
+ module procedure sub_t_ti
+ module procedure sub_ti_ti
+ module procedure neg_ti
+ end interface
+
+ interface operator (*)
+ module procedure mul_ti_n
+ end interface
+
+ interface operator (/)
+ module procedure div_ti_n
+ end interface
+
+ interface operator (.EQ.)
+ module procedure eq_t_t
+ module procedure eq_ti_ti
+ end interface
+
+ interface operator (.NE.)
+ module procedure ne_t_t
+ module procedure ne_ti_ti
+ end interface
+
+ interface operator (.LT.)
+ module procedure lt_t_t
+ module procedure lt_ti_ti
+ end interface
+
+ interface operator (.GT.)
+ module procedure gt_t_t
+ module procedure gt_ti_ti
+ end interface
+
+ interface operator (.LE.)
+ module procedure le_t_t
+ module procedure le_ti_ti
+ end interface
+
+ interface operator (.GE.)
+ module procedure ge_t_t
+ module procedure ge_ti_ti
+ end interface
+
+ interface abs
+ module procedure abs_ti
+ end interface
+
+
+ contains
+
+
+ !> Initialize ESMF with the requested calendar and record the choice in
+ !> the module variable TheCalendar. Invalid values only produce a message.
+ subroutine mpas_timekeeping_init(calendar)
+
+ implicit none
+
+ integer, intent(in) :: calendar
+
+ TheCalendar = calendar
+
+ if (TheCalendar == MPAS_GREGORIAN) then
+ call ESMF_Initialize(defaultCalendar=ESMF_CAL_GREGORIAN)
+ else if (TheCalendar == MPAS_GREGORIAN_NOLEAP) then
+ call ESMF_Initialize(defaultCalendar=ESMF_CAL_NOLEAP)
+ else if (TheCalendar == MPAS_360DAY) then
+ call ESMF_Initialize(defaultCalendar=ESMF_CAL_360DAY)
+ else
+ write(0,*) 'ERROR: mpas_timekeeping_init: Invalid calendar type'
+ end if
+
+ end subroutine mpas_timekeeping_init
+
+
+ !> Shut down the ESMF time-manager layer.
+ subroutine mpas_timekeeping_finalize()
+
+ implicit none
+
+ call ESMF_Finalize()
+
+ end subroutine mpas_timekeeping_finalize
+
+
+ !> Create a forward-running clock from startTime and timeStep. The stop time
+ !> may be given directly (stopTime), as a duration (runDuration), or both —
+ !> in which case they must agree. ierr = 1 on inconsistent/missing stop info.
+ !> NOTE(review): error text still says 'MPAS_createClock' (pre-rename name).
+ subroutine mpas_create_clock(clock, startTime, timeStep, stopTime, runDuration, ierr)
+
+ implicit none
+
+ type (MPAS_Clock_type), intent(out) :: clock
+ type (MPAS_Time_type), intent(in) :: startTime
+ type (MPAS_TimeInterval_type), intent(in) :: timeStep
+ type (MPAS_Time_type), intent(in), optional :: stopTime
+ type (MPAS_TimeInterval_type), intent(in), optional :: runDuration
+ integer, intent(out), optional :: ierr
+
+ type (MPAS_Time_type) :: stop_time
+
+ if (present(runDuration)) then
+ stop_time = startTime + runDuration
+ if (present(stopTime)) then
+ if (stopTime /= stop_time) then
+ if (present(ierr)) ierr = 1 ! stopTime and runDuration are inconsistent
+ write(0,*) 'ERROR: MPAS_createClock: stopTime and runDuration are inconsistent'
+ return
+ end if
+ end if
+ else if (present(stopTime)) then
+ stop_time = stopTime
+ else
+ if (present(ierr)) ierr = 1 ! neither stopTime nor runDuration are specified
+ write(0,*) 'ERROR: MPAS_createClock: neither stopTime nor runDuration are specified'
+ return
+ end if
+
+ ! Absent optional ierr is legally forwarded to ESMF's optional rc argument
+ clock % c = ESMF_ClockCreate(TimeStep=timeStep%ti, StartTime=startTime%t, StopTime=stop_time%t, rc=ierr)
+ if (present(ierr)) then
+ if (ierr == ESMF_SUCCESS) ierr = 0
+ end if
+ clock % direction = MPAS_FORWARD
+ clock % nAlarms = 0
+ nullify(clock % alarmListHead)
+
+ end subroutine mpas_create_clock
+
+
+ !> Destroy a clock: deallocate every node of its alarm list, then destroy
+ !> the underlying ESMF clock.
+ subroutine mpas_destroy_clock(clock, ierr)
+
+ implicit none
+
+ type (MPAS_Clock_type), intent(inout) :: clock
+ integer, intent(out), optional :: ierr
+
+ type (MPAS_Alarm_type), pointer :: alarmPtr
+
+ alarmPtr => clock % alarmListHead
+ do while (associated(alarmPtr))
+ clock % alarmListHead => alarmPtr % next
+ deallocate(alarmPtr)
+ alarmPtr => clock % alarmListHead
+ end do
+
+ call ESMF_ClockDestroy(clock % c, rc=ierr)
+ if (present(ierr)) then
+ if (ierr == ESMF_SUCCESS) ierr = 0
+ end if
+
+ end subroutine mpas_destroy_clock
+
+
+ !> True when the clock's current time is at (or before, in its direction of
+ !> travel) its start time. The start<=stop test handles backward-running
+ !> clocks, whose start time is later than their stop time.
+ logical function mpas_is_clock_start_time(clock, ierr)
+
+ implicit none
+
+ type (MPAS_Clock_type), intent(in) :: clock
+ integer, intent(out), optional :: ierr
+
+ type (ESMF_Time) :: currTime, startTime, stopTime
+
+ call ESMF_ClockGet(clock % c, CurrTime=currTime, rc=ierr)
+ call ESMF_ClockGet(clock % c, StartTime=startTime, rc=ierr)
+ call ESMF_ClockGet(clock % c, StopTime=stopTime, rc=ierr)
+ if (present(ierr)) then
+ if (ierr == ESMF_SUCCESS) ierr = 0
+ end if
+
+ if (startTime <= stopTime) then
+ mpas_is_clock_start_time = (currTime <= startTime)
+ else
+ mpas_is_clock_start_time = (currTime >= startTime)
+ end if
+
+ end function mpas_is_clock_start_time
+
+
+ !> True when the clock's current time has reached (or passed, in its
+ !> direction of travel) its stop time; mirror image of the start-time test.
+ logical function mpas_is_clock_stop_time(clock, ierr)
+
+ implicit none
+
+ type (MPAS_Clock_type), intent(in) :: clock
+ integer, intent(out), optional :: ierr
+
+ type (ESMF_Time) :: currTime, startTime, stopTime
+
+ call ESMF_ClockGet(clock % c, CurrTime=currTime, rc=ierr)
+ call ESMF_ClockGet(clock % c, StartTime=startTime, rc=ierr)
+ call ESMF_ClockGet(clock % c, StopTime=stopTime, rc=ierr)
+ if (present(ierr)) then
+ if (ierr == ESMF_SUCCESS) ierr = 0
+ end if
+
+ if (startTime <= stopTime) then
+ mpas_is_clock_stop_time = (currTime >= stopTime)
+ else
+ mpas_is_clock_stop_time = (currTime <= stopTime)
+ end if
+
+ end function mpas_is_clock_stop_time
+
+
+ !> Reverse the clock's direction of travel (no-op when already travelling the
+ !> requested way): negate the time step and recalibrate each recurring
+ !> alarm's prevRingTime for the new direction.
+ subroutine mpas_set_clock_direction(clock, direction, ierr)
+
+ implicit none
+
+ type (MPAS_Clock_type), intent(inout) :: clock
+ integer, intent(in) :: direction
+ integer, intent(out), optional :: ierr
+
+ type (MPAS_TimeInterval_type) :: timeStep
+
+ if (direction == MPAS_FORWARD .and. clock % direction == MPAS_FORWARD) return
+ if (direction == MPAS_BACKWARD .and. clock % direction == MPAS_BACKWARD) return
+
+ clock % direction = direction
+ call ESMF_ClockGet(clock % c, TimeStep=timeStep%ti, rc=ierr)
+ timeStep = neg_ti(timeStep)
+ call ESMF_ClockSet(clock % c, TimeStep=timeStep%ti, rc=ierr)
+
+ ! specify a valid previousRingTime for each alarm
+ call mpas_calibrate_alarms(clock, ierr);
+
+ if (present(ierr)) then
+ if (ierr == ESMF_SUCCESS) ierr = 0
+ end if
+
+ end subroutine mpas_set_clock_direction
+
+
+
+ !> Return the clock's direction (MPAS_FORWARD or MPAS_BACKWARD).
+ integer function mpas_get_clock_direction(clock, ierr)
+
+ implicit none
+
+ type (MPAS_Clock_type), intent(in) :: clock
+ integer, intent(out), optional :: ierr
+
+ if (present(ierr)) ierr = 0
+
+ mpas_get_clock_direction = clock % direction
+
+ end function mpas_get_clock_direction
+
+
+ !> Replace the clock's time step with the given interval.
+ subroutine mpas_set_clock_timestep(clock, timeStep, ierr)
+
+ implicit none
+
+ type (MPAS_Clock_type), intent(inout) :: clock
+ type (MPAS_TimeInterval_type), intent(in) :: timeStep
+ integer, intent(out), optional :: ierr
+
+ call ESMF_ClockSet(clock % c, TimeStep=timeStep%ti, rc=ierr)
+ if (present(ierr)) then
+ if (ierr == ESMF_SUCCESS) ierr = 0
+ end if
+
+ end subroutine mpas_set_clock_timestep
+
+
+ !> Return the clock's current time step.
+ type (MPAS_TimeInterval_type) function mpas_get_clock_timestep(clock, ierr)
+
+ implicit none
+
+ type (MPAS_Clock_type), intent(in) :: clock
+ integer, intent(out), optional :: ierr
+
+ type (MPAS_TimeInterval_type) :: timeStep
+
+ call ESMF_ClockGet(clock % c, TimeStep=timeStep%ti, rc=ierr)
+ if (present(ierr)) then
+ if (ierr == ESMF_SUCCESS) ierr = 0
+ end if
+
+ mpas_get_clock_timestep = timeStep
+
+ end function mpas_get_clock_timestep
+
+
+ !> Advance the clock by one step. If timeStep is given, it is installed only
+ !> for this single advance and the clock's own step is restored afterwards.
+ subroutine mpas_advance_clock(clock, timeStep, ierr)
+
+ implicit none
+
+ type (MPAS_Clock_type), intent(inout) :: clock
+ type (MPAS_TimeInterval_type), intent(in), optional :: timeStep
+ integer, intent(out), optional :: ierr
+
+ type (ESMF_TimeInterval) :: time_step
+
+ if (present(timeStep)) then
+ ! Save, swap in, advance, restore
+ call ESMF_ClockGet(clock % c, TimeStep=time_step, rc=ierr)
+ call ESMF_ClockSet(clock % c, TimeStep=timeStep % ti, rc=ierr)
+ call ESMF_ClockAdvance(clock % c, rc=ierr)
+ call ESMF_ClockSet(clock % c, TimeStep=time_step, rc=ierr)
+ else
+ call ESMF_ClockAdvance(clock % c, rc=ierr)
+ end if
+ if (present(ierr)) then
+ if (ierr == ESMF_SUCCESS) ierr = 0
+ end if
+
+ end subroutine mpas_advance_clock
+
+
+ !> Set the clock's current, start, or stop time, selected by whichTime
+ !> (MPAS_NOW / MPAS_START_TIME / MPAS_STOP_TIME). Changing the current time
+ !> recalibrates the alarms; an unknown selector yields ierr = 1.
+ subroutine mpas_set_clock_time(clock, clock_time, whichTime, ierr)
+
+ implicit none
+
+ type (MPAS_Clock_type), intent(inout) :: clock
+ type (MPAS_Time_type), intent(in) :: clock_time
+ integer, intent(in) :: whichTime
+ integer, intent(out), optional :: ierr
+
+ if (whichTime == MPAS_NOW) then
+ call ESMF_ClockSet(clock % c, CurrTime=clock_time%t, rc=ierr)
+ call mpas_calibrate_alarms(clock, ierr);
+ else if (whichTime == MPAS_START_TIME) then
+ call ESMF_ClockSet(clock % c, StartTime=clock_time%t, rc=ierr)
+ else if (whichTime == MPAS_STOP_TIME) then
+ call ESMF_ClockSet(clock % c, StopTime=clock_time%t, rc=ierr)
+ else if (present(ierr)) then
+ ierr = 1
+ end if
+ if (present(ierr)) then
+ if (ierr == ESMF_SUCCESS) ierr = 0
+ end if
+
+ end subroutine mpas_set_clock_time
+
+
+ !> Return the clock's current, start, or stop time, selected by whichTime;
+ !> an unknown selector yields ierr = 1 (and an undefined result).
+ type (MPAS_Time_type) function mpas_get_clock_time(clock, whichTime, ierr)
+
+ implicit none
+
+ type (MPAS_Clock_type), intent(in) :: clock
+ integer, intent(in) :: whichTime
+ integer, intent(out), optional :: ierr
+
+ type (MPAS_Time_type) :: clock_time
+
+ if (whichTime == MPAS_NOW) then
+ call ESMF_ClockGet(clock % c, CurrTime=clock_time%t, rc=ierr)
+ else if (whichTime == MPAS_START_TIME) then
+ call ESMF_ClockGet(clock % c, StartTime=clock_time%t, rc=ierr)
+ else if (whichTime == MPAS_STOP_TIME) then
+ call ESMF_ClockGet(clock % c, StopTime=clock_time%t, rc=ierr)
+ else if (present(ierr)) then
+ ierr = 1
+ end if
+ if (present(ierr)) then
+ if (ierr == ESMF_SUCCESS) ierr = 0
+ end if
+
+ mpas_get_clock_time = clock_time
+
+ end function mpas_get_clock_time
+
+
+ !> Append a new alarm to the clock's alarm list. With alarmTimeInterval the
+ !> alarm recurs at that interval starting from alarmTime; without it the
+ !> alarm is one-shot. Duplicate alarmIDs are rejected with ierr = 1.
+ subroutine mpas_add_clock_alarm(clock, alarmID, alarmTime, alarmTimeInterval, ierr)
+! TODO: possibly add a stop time for recurring alarms
+
+ implicit none
+
+ type (MPAS_Clock_type), intent(inout) :: clock
+ integer, intent(in) :: alarmID
+ type (MPAS_Time_type), intent(in) :: alarmTime
+ type (MPAS_TimeInterval_type), intent(in), optional :: alarmTimeInterval
+ integer, intent(out), optional :: ierr
+
+ type (MPAS_Alarm_type), pointer :: alarmPtr
+
+ ! Add a new entry to the linked list of alarms for this clock
+ if (.not. associated(clock % alarmListHead)) then
+ allocate(clock % alarmListHead)
+ nullify(clock % alarmListHead % next)
+ alarmPtr => clock % alarmListHead
+ else
+ ! Walk to the tail, checking every node (including the tail itself,
+ ! via the post-loop test) for a duplicate ID
+ alarmPtr => clock % alarmListHead
+ do while (associated(alarmPtr % next))
+ if (alarmPtr % alarmID == alarmID) then
+ write(0,*) 'OOPS -- we have a duplicate alarmID', alarmID
+ if (present(ierr)) ierr = 1
+ return
+ end if
+ alarmPtr => alarmPtr % next
+ end do
+ if (alarmPtr % alarmID == alarmID) then
+ write(0,*) 'OOPS -- we have a duplicate alarmID', alarmID
+ if (present(ierr)) ierr = 1
+ return
+ end if
+ allocate(alarmPtr % next)
+ alarmPtr => alarmPtr % next
+ nullify(alarmPtr % next)
+ end if
+
+ alarmPtr % alarmID = alarmID
+
+ clock % nAlarms = clock % nAlarms + 1
+
+ alarmPtr % isSet = .true.
+ alarmPtr % ringTime = alarmTime
+
+
+ if (present(alarmTimeInterval)) then
+ ! Recurring: seed prevRingTime one interval before (after, if running
+ ! backward) the first ring time
+ alarmPtr % isRecurring = .true.
+ alarmPtr % ringTimeInterval = alarmTimeInterval
+ if(clock % direction == MPAS_FORWARD) then
+ alarmPtr % prevRingTime = alarmTime - alarmTimeInterval
+ else
+ alarmPtr % prevRingTime = alarmTime + alarmTimeInterval
+ end if
+ else
+ alarmPtr % isRecurring = .false.
+ alarmPtr % prevRingTime = alarmTime
+ end if
+ ! NOTE(review): ierr may be referenced here without ever being assigned
+ ! on the success path — confirm intended semantics.
+ if (present(ierr)) then
+ if (ierr == ESMF_SUCCESS) ierr = 0
+ end if
+
+ end subroutine mpas_add_clock_alarm
+
+
+ !> Remove the alarm with the given ID from the clock's alarm list.
+ !> NOTE(review): 'alarmParentPtr = alarmPtr' uses derived-type (deep)
+ !> assignment, not pointer assignment '=>', so the list links are not
+ !> re-targeted as intended; removing the head node also never updates
+ !> clock % alarmListHead (it is left dangling after deallocate), and
+ !> clock % nAlarms is not decremented. Flagging rather than fixing, since
+ !> this is a recorded diff.
+ subroutine mpas_remove_clock_alarm(clock, alarmID, ierr)
+
+ implicit none
+
+ type (MPAS_Clock_type), intent(inout) :: clock
+ integer, intent(in) :: alarmID
+ integer, intent(out), optional :: ierr
+
+ type (MPAS_Alarm_type), pointer :: alarmPtr
+ type (MPAS_Alarm_type), pointer :: alarmParentPtr
+
+ if (present(ierr)) ierr = 0
+
+ alarmPtr => clock % alarmListHead
+ alarmParentPtr = alarmPtr
+ do while (associated(alarmPtr))
+ if (alarmPtr % alarmID == alarmID) then
+ alarmParentPtr % next => alarmPtr % next
+ deallocate(alarmPtr)
+ exit
+ end if
+ alarmParentPtr = alarmPtr
+ alarmPtr => alarmPtr % next
+ end do
+
+ end subroutine mpas_remove_clock_alarm
+
+
+
+ !> Debug dump of one alarm's state (flags, ring times, interval) to stderr.
+ !> Silently does nothing if the ID is not found.
+ subroutine mpas_print_alarm(clock, alarmID, ierr)
+
+ implicit none
+
+ type (MPAS_Clock_type), intent(in) :: clock
+ integer, intent(in) :: alarmID
+ integer, intent(out) :: ierr
+
+ type (MPAS_Alarm_type), pointer :: alarmPtr
+
+ type (MPAS_TimeInterval_type) :: alarmTimeInterval
+ type (MPAS_Time_type) :: alarmTime
+ character (len=32) :: printString
+
+ ierr = 0
+
+ alarmPtr => clock % alarmListHead
+ do while (associated(alarmPtr))
+ if (alarmPtr % alarmID == alarmID) then
+ write(0,*) 'ALARM ', alarmID
+
+ write(0,*) 'isRecurring', alarmPtr % isRecurring
+
+ write(0,*) 'isSet', alarmPtr % isSet
+
+ call mpas_get_time(alarmPtr % ringTime, dateTimeString=printString, ierr=ierr)
+ write(0,*) 'ringTime', printString
+
+ call mpas_get_time(alarmPtr % prevRingTime, dateTimeString=printString, ierr=ierr)
+ write(0,*) 'prevRingTime', printString
+
+ call mpas_get_timeInterval(alarmPtr % ringTimeInterval, timeString=printString, ierr=ierr)
+ write(0,*) 'ringTimeInterval', printString
+
+ exit
+ end if
+ alarmPtr => alarmPtr % next
+ end do
+
+ end subroutine mpas_print_alarm
+
+
+
+ !> True when the alarm with the given ID is set and currently inside its
+ !> ringing envelope; 'interval' optionally looks ahead by that much.
+ logical function mpas_is_alarm_ringing(clock, alarmID, interval, ierr)
+
+ implicit none
+
+ type (MPAS_Clock_type), intent(in) :: clock
+ integer, intent(in) :: alarmID
+ type (MPAS_TimeInterval_type), intent(in), optional :: interval
+ integer, intent(out), optional :: ierr
+
+ type (MPAS_Alarm_type), pointer :: alarmPtr
+
+ if (present(ierr)) ierr = 0
+
+ mpas_is_alarm_ringing = .false.
+
+ alarmPtr => clock % alarmListHead
+ do while (associated(alarmPtr))
+ if (alarmPtr % alarmID == alarmID) then
+ if (alarmPtr % isSet) then
+ if (mpas_in_ringing_envelope(clock, alarmPtr, interval, ierr)) then
+ mpas_is_alarm_ringing = .true.
+ end if
+ end if
+ exit
+ end if
+ alarmPtr => alarmPtr % next
+ end do
+
+ end function mpas_is_alarm_ringing
+
+
+
+ !> Collect the IDs of every set alarm currently ringing (optionally looking
+ !> ahead by 'interval') into alarmList(1:nAlarms).
+ !> NOTE(review): more than MPAS_MAX_ALARMS ringing alarms would overrun
+ !> alarmList — bounded in practice by how many alarms callers add.
+ subroutine mpas_get_clock_ringing_alarms(clock, nAlarms, alarmList, interval, ierr)
+
+ implicit none
+
+ type (MPAS_Clock_type), intent(in) :: clock
+ integer, intent(out) :: nAlarms
+ integer, dimension(MPAS_MAX_ALARMS), intent(out) :: alarmList
+ type (MPAS_TimeInterval_type), intent(in), optional :: interval
+ integer, intent(out), optional :: ierr
+
+ type (MPAS_Alarm_type), pointer :: alarmPtr
+
+ if (present(ierr)) ierr = 0
+
+ nAlarms = 0
+
+ alarmPtr => clock % alarmListHead
+ do while (associated(alarmPtr))
+ if (alarmPtr % isSet) then
+ if (mpas_in_ringing_envelope(clock, alarmPtr, interval, ierr)) then
+ nAlarms = nAlarms + 1
+ alarmList(nAlarms) = alarmPtr % alarmID
+ end if
+ end if
+ alarmPtr => alarmPtr % next
+ end do
+
+ end subroutine mpas_get_clock_ringing_alarms
+
+
+ !> Private helper: decide whether 'now' (optionally advanced by 'interval')
+ !> has reached the alarm's next ring threshold. For recurring alarms the
+ !> threshold is prevRingTime +/- ringTimeInterval depending on direction;
+ !> for one-shot alarms it is the fixed ringTime.
+ logical function mpas_in_ringing_envelope(clock, alarmPtr, interval, ierr)
+
+ implicit none
+
+ type (MPAS_Clock_type), intent(in) :: clock
+ type (MPAS_Alarm_type), pointer, intent(in) :: alarmPtr
+ type (MPAS_TimeInterval_type), intent(in), optional :: interval
+ integer, intent(out), optional :: ierr
+
+ type (MPAS_Time_type) :: alarmNow
+ type (MPAS_Time_type) :: alarmThreshold
+
+ alarmNow = mpas_get_clock_time(clock, MPAS_NOW, ierr)
+ alarmThreshold = alarmPtr % ringTime
+
+ mpas_in_ringing_envelope = .false.
+
+ if(clock % direction == MPAS_FORWARD) then
+
+ if (present(interval)) then
+ alarmNow = alarmNow + interval;
+ end if
+
+ if (alarmPtr % isRecurring) then
+ alarmThreshold = alarmPtr % prevRingTime + alarmPtr % ringTimeInterval
+ end if
+
+ if (alarmThreshold <= alarmNow) then
+ mpas_in_ringing_envelope = .true.
+ end if
+ else
+
+ if (present(interval)) then
+ alarmNow = alarmNow - interval;
+ end if
+
+ if (alarmPtr % isRecurring) then
+ alarmThreshold = alarmPtr % prevRingTime - alarmPtr % ringTimeInterval
+ end if
+
+ if (alarmThreshold >= alarmNow) then
+ mpas_in_ringing_envelope = .true.
+ end if
+ end if
+
+ end function mpas_in_ringing_envelope
+
+
+
+ !> Acknowledge a ringing alarm. One-shot alarms are simply unset; recurring
+ !> alarms have prevRingTime stepped to the last ring time not beyond 'now'
+ !> (optionally shifted by 'interval'), so the next envelope test is correct.
+ subroutine mpas_reset_clock_alarm(clock, alarmID, interval, ierr)
+
+ implicit none
+
+ type (MPAS_Clock_type), intent(inout) :: clock
+ integer, intent(in) :: alarmID
+ type (MPAS_TimeInterval_type), intent(in), optional :: interval
+ integer, intent(out), optional :: ierr
+
+ type (MPAS_Time_type) :: alarmNow
+ type (MPAS_Alarm_type), pointer :: alarmPtr
+
+ if (present(ierr)) ierr = 0
+
+ alarmPtr => clock % alarmListHead
+ do while (associated(alarmPtr))
+
+ if (alarmPtr % alarmID == alarmID) then
+
+ ! Only reset alarms that are actually ringing
+ if (mpas_in_ringing_envelope(clock, alarmPtr, interval, ierr)) then
+
+ if (.not. alarmPtr % isRecurring) then
+ alarmPtr % isSet = .false.
+ else
+ alarmNow = mpas_get_clock_time(clock, MPAS_NOW, ierr)
+
+ if(clock % direction == MPAS_FORWARD) then
+ if (present(interval)) then
+ alarmNow = alarmNow + interval
+ end if
+
+ ! Step past 'now', then back one interval to land on the
+ ! most recent ring time at or before 'now'
+ do while(alarmPtr % prevRingTime <= alarmNow)
+ alarmPtr % prevRingTime = alarmPtr % prevRingTime + alarmPtr % ringTimeInterval
+ end do
+ alarmPtr % prevRingTime = alarmPtr % prevRingTime - alarmPtr % ringTimeInterval
+ else
+ if (present(interval)) then
+ alarmNow = alarmNow - interval
+ end if
+
+ do while(alarmPtr % prevRingTime >= alarmNow)
+ alarmPtr % prevRingTime = alarmPtr % prevRingTime - alarmPtr % ringTimeInterval
+ end do
+ alarmPtr % prevRingTime = alarmPtr % prevRingTime + alarmPtr % ringTimeInterval
+ end if
+ end if
+ end if
+ exit
+ end if
+ alarmPtr => alarmPtr % next
+ end do
+
+ end subroutine mpas_reset_clock_alarm
+
+
+
+ ! specify a valid previousRingTime for each alarm
+ !> Private helper invoked after the clock's current time or direction
+ !> changes: for every recurring alarm, find the ring times straddling 'now'
+ !> and pick the one appropriate to the travel direction as prevRingTime.
+ !> One-shot alarms are simply re-armed (isSet = .true.).
+ subroutine mpas_calibrate_alarms(clock, ierr)
+
+ implicit none
+
+ type (MPAS_Clock_type), intent(in) :: clock
+ integer, intent(out), optional :: ierr
+
+ type (MPAS_Time_type) :: now
+ type (MPAS_Time_type) :: previousRingTime
+ type (MPAS_Time_type) :: negativeNeighborRingTime
+ type (MPAS_Time_type) :: positiveNeighborRingTime
+ type (MPAS_TimeInterval_type) :: ringTimeInterval
+ type (MPAS_Alarm_type), pointer :: alarmPtr
+
+ now = mpas_get_clock_time(clock, MPAS_NOW, ierr)
+
+ alarmPtr => clock % alarmListHead
+ do while (associated(alarmPtr))
+
+ if (.not. alarmPtr % isRecurring) then
+ alarmPtr % isSet = .true.
+ else
+
+ previousRingTime = alarmPtr % prevRingTime
+
+ ! Step by ringTimeInterval to bracket 'now' from both sides,
+ ! starting from whichever side prevRingTime currently lies on
+ if (previousRingTime <= now) then
+
+ do while(previousRingTime <= now)
+ previousRingTime = previousRingTime + alarmPtr % ringTimeInterval
+ end do
+ positiveNeighborRingTime = previousRingTime
+
+ do while(previousRingTime >= now)
+ previousRingTime = previousRingTime - alarmPtr % ringTimeInterval
+ end do
+ negativeNeighborRingTime = previousRingTime
+
+ else
+
+ do while(previousRingTime >= now)
+ previousRingTime = previousRingTime - alarmPtr % ringTimeInterval
+ end do
+ negativeNeighborRingTime = previousRingTime
+
+ do while(previousRingTime <= now)
+ previousRingTime = previousRingTime + alarmPtr % ringTimeInterval
+ end do
+ positiveNeighborRingTime = previousRingTime
+
+ end if
+
+ if (clock % direction == MPAS_FORWARD) then
+ alarmPtr % prevRingTime = negativeNeighborRingTime
+ else
+ alarmPtr % prevRingTime = positiveNeighborRingTime
+ end if
+
+ end if
+
+ alarmPtr => alarmPtr % next
+
+ end do
+
+ if (present(ierr)) then
+ if (ierr == ESMF_SUCCESS) ierr = 0
+ end if
+
+ end subroutine mpas_calibrate_alarms
+
+
+ subroutine mpas_set_time(curr_time, YYYY, MM, DD, DoY, H, M, S, S_n, S_d, dateTimeString, ierr)
+
+ implicit none
+
+ type (MPAS_Time_type), intent(out) :: curr_time
+ integer, intent(in), optional :: YYYY
+ integer, intent(in), optional :: MM
+ integer, intent(in), optional :: DD
+ integer, intent(in), optional :: DoY
+ integer, intent(in), optional :: H
+ integer, intent(in), optional :: M
+ integer, intent(in), optional :: S
+ integer, intent(in), optional :: S_n
+ integer, intent(in), optional :: S_d
+ character (len=*), intent(in), optional :: dateTimeString
+ integer, intent(out), optional :: ierr
+
+ integer, parameter :: integerMaxDigits = 8
+ integer :: year, month, day, hour, min, sec
+ integer :: numerator, denominator, denominatorPower
+
+ character (len=50) :: dateTimeString_
+ character (len=50) :: dateSubString
+ character (len=50) :: timeSubString
+ character (len=50) :: secDecSubString
+ character(len=50), pointer, dimension(:) :: subStrings
+
+ if (present(dateTimeString)) then
+
+ dateTimeString_ = dateTimeString
+ numerator = 0
+ denominator = 1
+
+ call mpas_split_string(dateTimeString_, ".", subStrings)
+ if (size(subStrings) == 2) then ! contains second decimals
+ dateTimeString_ = subStrings(1)
+ secDecSubString = subStrings(2)(:integerMaxDigits)
+ deallocate(subStrings)
+ denominatorPower = len_trim(secDecSubString)
+ if(denominatorPower > 0) then
+ read(secDecSubString,*) numerator
+ if(numerator > 0) then
+ denominator = 10**denominatorPower
+ end if
+ end if
+ else if (size(subStrings) /= 1) then
+ deallocate(subStrings)
+ if (present(ierr)) ierr = 1
+ write(0,*) 'ERROR: Invalid DateTime string', dateTimeString
+ return
+ end if
+
+ call mpas_split_string(dateTimeString_, "_", subStrings)
+
+ if(size(subStrings) == 2) then ! contains a date and time
+ dateSubString = subStrings(1)
+ timeSubString = subStrings(2)
+ deallocate(subStrings)
+
+ call mpas_split_string(timeSubString, ":", subStrings)
+
+ if (size(subStrings) == 3) then
+ read(subStrings(1),*) hour
+ read(subStrings(2),*) min
+ read(subStrings(3),*) sec
+ deallocate(subStrings)
+ else
+ deallocate(subStrings)
+ if (present(ierr)) ierr = 1
+ write(0,*) 'ERROR: Invalid DateTime string (invalid time substring)', dateTimeString
+ return
+ end if
+
+ else if(size(subStrings) == 1) then ! contains only a date- assume all time values are 0
+ dateSubString = subStrings(1)
+ deallocate(subStrings)
+
+ hour = 0
+ min = 0
+ sec = 0
+
+ else
+ deallocate(subStrings)
+ if (present(ierr)) ierr = 1
+ write(0,*) 'ERROR: Invalid DateTime string', dateTimeString
+ return
+ end if
+
+ call mpas_split_string(dateSubString, "-", subStrings)
+
+ if (size(subStrings) == 3) then
+ read(subStrings(1),*) year
+ read(subStrings(2),*) month
+ read(subStrings(3),*) day
+ deallocate(subStrings)
+ else
+ deallocate(subStrings)
+ if (present(ierr)) ierr = 1
+ write(0,*) 'ERROR: Invalid DateTime string (invalid date substring)', dateTimeString
+ return
+ end if
+
+ call ESMF_TimeSet(curr_time % t, YY=year, MM=month, DD=day, H=hour, M=min, S=sec, Sn=numerator, Sd=denominator, rc=ierr)
+
+ else
+
+ if (present(DoY)) then
+ call mpas_get_month_day(YYYY, DoY, month, day)
+
+ ! consistency check
+ if (present(MM)) then
+ if (MM /= month) then
+ if (present(ierr)) ierr = 1
+ write(0,*) 'ERROR: MPAS_setTime : DoY and MM are inconsistent - using DoY'
+ end if
+ end if
+ if (present(DD)) then
+ if (DD /= day) then
+ if (present(ierr)) ierr = 1
+ write(0,*) 'ERROR: MPAS_setTime : DoY and DD are inconsistent - using DoY'
+ end if
+ end if
+ else
+ if (present(MM)) then
+ month = MM
+ else
+ if (present(ierr)) ierr = 1
+ write(0,*) 'ERROR: MPAS_setTime : Neither DoY nor MM are specified'
+ return
+ end if
+
+ if (present(DD)) then
+ day = DD
+ else
+ if (present(ierr)) ierr = 1
+ write(0,*) 'ERROR: MPAS_setTime : Neither DoY nor DD are specified'
+ return
+ end if
+ end if
+
+ if (.not. isValidDate(YYYY,month,day)) then
+ if (present(ierr)) ierr = 1 ; write(0,*) 'ERROR: MPAS_setTime : Invalid date'
+ return
+ end if
+
+ call ESMF_TimeSet(curr_time % t, YY=YYYY, MM=month, DD=day, H=H, M=M, S=S, Sn=S_n, Sd=S_d, rc=ierr)
+
+ end if
+
+ if (present(ierr)) then
+ if (ierr == ESMF_SUCCESS) ierr = 0
+ end if
+
+ end subroutine mpas_set_time
+
+
+ subroutine mpas_get_time(curr_time, YYYY, MM, DD, DoY, H, M, S, S_n, S_d, dateTimeString, ierr)
+
+ implicit none
+
+ type (MPAS_Time_type), intent(in) :: curr_time
+ integer, intent(out), optional :: YYYY
+ integer, intent(out), optional :: MM
+ integer, intent(out), optional :: DD
+ integer, intent(out), optional :: DoY
+ integer, intent(out), optional :: H
+ integer, intent(out), optional :: M
+ integer, intent(out), optional :: S
+ integer, intent(out), optional :: S_n
+ integer, intent(out), optional :: S_d
+ character (len=32), intent(out), optional :: dateTimeString
+ integer, intent(out), optional :: ierr
+
+ call ESMF_TimeGet(curr_time % t, YY=YYYY, MM=MM, DD=DD, H=H, M=M, S=S, Sn=S_n, Sd=S_d, rc=ierr)
+ call ESMF_TimeGet(curr_time % t, dayOfYear=DoY, rc=ierr)
+ call ESMF_TimeGet(curr_time % t, timeString=dateTimeString, rc=ierr)
+ if (present(ierr)) then
+ if (ierr == ESMF_SUCCESS) ierr = 0
+ end if
+
+ end subroutine mpas_get_time
+
+
+ subroutine mpas_set_timeInterval(interval, DD, H, M, S, S_n, S_d, timeString, dt, ierr)
+
+ implicit none
+
+ type (MPAS_TimeInterval_type), intent(out) :: interval
+ integer, intent(in), optional :: DD
+ integer, intent(in), optional :: H
+ integer, intent(in), optional :: M
+ integer, intent(in), optional :: S
+ integer, intent(in), optional :: S_n
+ integer, intent(in), optional :: S_d
+ character (len=*), intent(in), optional :: timeString
+ real (kind=RKIND), intent(in), optional :: dt
+ integer, intent(out), optional :: ierr
+
+ integer, parameter :: integerMaxDigits = 8
+ integer :: days, hours, minutes, seconds
+ integer :: numerator, denominator, denominatorPower
+ type (MPAS_TimeInterval_type) :: zeroInterval
+
+ integer :: day, hour, min, sec
+ character (len=50) :: timeString_
+ character (len=50) :: daySubString
+ character (len=50) :: timeSubString
+ character (len=50) :: secDecSubString
+ character(len=50), pointer, dimension(:) :: subStrings
+
+! if (present(DD)) then
+! days = DD
+! else
+! days = 0
+! end if
+
+! if (present(H)) then
+! hours = H
+! else
+! hours = 0
+! end if
+
+! if (present(M)) then
+! minutes = M
+! else
+! minutes = 0
+! end if
+
+! if (present(S)) then
+! seconds = S
+! else
+! seconds = 0
+! end if
+
+
+ !
+ ! Reduce minute count to something less than one hour
+ !
+! do while (minutes > 1440)
+! days = days + 1
+! minutes = minutes - 1440
+! end do
+! do while (minutes > 60)
+! hours = hours + 1
+! minutes = minutes - 60
+! end do
+! do while (minutes < -1440)
+! days = days - 1
+! minutes = minutes + 1440
+! end do
+! do while (minutes < -60)
+! hours = hours - 1
+! minutes = minutes + 60
+! end do
+
+ !
+ ! Reduce hour count to something less than one day
+ !
+! do while (hours > 24)
+! days = days + 1
+! hours = hours - 24
+! end do
+! do while (hours < -24)
+! days = days - 1
+! hours = hours + 24
+! end do
+
+ !
+ ! Any leftover minutes and hours are given to the second count
+ !
+! seconds = seconds + hours*3600 + minutes*60
+
+! call ESMF_TimeIntervalSet(interval % ti, D=days, S=seconds, Sn=S_n, Sd=S_d, rc=ierr)
+
+
+ if (present(timeString) .or. present(dt)) then
+
+
+ if(present(dt)) then
+ write (timeString_,*) "00:00:", dt
+ else
+ timeString_ = timeString
+ end if
+
+ numerator = 0
+ denominator = 1
+
+ call mpas_split_string(timeString_, ".", subStrings)
+
+ if (size(subStrings) == 2) then ! contains second decimals
+ timeString_ = subStrings(1)
+ secDecSubString = subStrings(2)(:integerMaxDigits)
+ deallocate(subStrings)
+
+ denominatorPower = len_trim(secDecSubString)
+ if(denominatorPower > 0) then
+ read(secDecSubString,*) numerator
+ if(numerator > 0) then
+ denominator = 10**denominatorPower
+ end if
+ end if
+ else if (size(subStrings) /= 1) then
+ deallocate(subStrings)
+ if (present(ierr)) ierr = 1
+ write(0,*) 'ERROR: Invalid TimeInterval string', timeString
+ return
+ end if
+
+ call mpas_split_string(timeString_, "_", subStrings)
+
+ if(size(subStrings) == 2) then ! contains a day and time
+ daySubString = subStrings(1)
+ timeSubString = subStrings(2)
+ deallocate(subStrings)
+ read(daySubString,*) day
+ else if(size(subStrings) == 1) then ! contains only a time- assume day is 0
+ timeSubString = subStrings(1)
+ deallocate(subStrings)
+ day = 0
+ else
+ deallocate(subStrings)
+ if (present(ierr)) ierr = 1
+ write(0,*) 'ERROR: Invalid TimeInterval string', timeString
+ return
+ end if
+
+ call mpas_split_string(timeSubString, ":", subStrings)
+
+ if (size(subStrings) == 3) then
+ read(subStrings(1),*) hour
+ read(subStrings(2),*) min
+ read(subStrings(3),*) sec
+ deallocate(subStrings)
+ else
+ deallocate(subStrings)
+ if (present(ierr)) ierr = 1
+ write(0,*) 'ERROR: Invalid TimeInterval string (invalid time substring)', timeString
+ return
+ end if
+
+ call ESMF_TimeIntervalSet(interval % ti, D=day, H=hour, M=min, S=sec, Sn=numerator, Sd=denominator, rc=ierr)
+
+ else
+
+ call ESMF_TimeIntervalSet(interval % ti, D=DD, H=H, M=M, S=S, Sn=S_n, Sd=S_d, rc=ierr)
+
+ end if
+
+ ! verify that time interval is positive
+ call ESMF_TimeIntervalSet(zeroInterval % ti, D=0, H=0, M=0, S=0, rc=ierr)
+
+ if (present(ierr)) then
+ if (ierr == ESMF_SUCCESS) ierr = 0
+ end if
+
+ if (interval <= zeroInterval) then
+ if (present(ierr)) ierr = 1
+ write(0,*) 'ERROR: TimeInterval must be greater than 0', timeString !'ERROR: TimeInterval cannot be negative'
+ end if
+
+
+
+ end subroutine mpas_set_timeInterval
+
+
+ subroutine mpas_get_timeInterval(interval, DD, H, M, S, S_n, S_d, timeString, dt, ierr)
+! TODO: add double-precision seconds
+
+ implicit none
+
+ type (MPAS_TimeInterval_type), intent(in) :: interval
+ integer, intent(out), optional :: DD
+ integer, intent(out), optional :: H
+ integer, intent(out), optional :: M
+ integer, intent(out), optional :: S
+ integer, intent(out), optional :: S_n
+ integer, intent(out), optional :: S_d
+ character (len=32), intent(out), optional :: timeString
+ real (kind=RKIND), intent(out), optional :: dt
+ integer, intent(out), optional :: ierr
+
+ integer :: days, seconds, sn, sd
+
+ call ESMF_TimeIntervalGet(interval % ti, D=days, S=seconds, Sn=sn, Sd=sd, rc=ierr)
+
+ if (present(dt)) then
+ dt = (days * 24 * 60 * 60) + seconds + (real(sn,RKIND) / real(sd,RKIND))
+ end if
+
+ if (present(DD)) then
+ DD = days
+ days = 0
+ end if
+
+ if (present(H)) then
+ H = (seconds - mod(seconds,3600)) / 3600
+ seconds = seconds - H*3600
+ H = H + days * 24
+ days = 0
+ end if
+
+ if (present(M)) then
+ M = (seconds - mod(seconds,60)) / 60
+ seconds = seconds - M*60
+ M = M + days * 1440
+ days = 0
+ end if
+
+ if (present(S)) then
+ S = seconds
+ end if
+
+ if (present(S_n)) then
+ S_n = sn
+ end if
+
+ if (present(S_d)) then
+ S_d = sd
+ end if
+
+ if (present(timeString)) then
+ call ESMF_TimeIntervalGet(interval % ti, timeString=timeString, rc=ierr)
+ end if
+
+ if (present(ierr)) then
+ if (ierr == ESMF_SUCCESS) ierr = 0
+ end if
+
+ end subroutine mpas_get_timeInterval
+
+
+ type (MPAS_Time_type) function add_t_ti(t, ti)
+
+ implicit none
+
+ type (MPAS_Time_type), intent(in) :: t
+ type (MPAS_TimeInterval_type), intent(in) :: ti
+
+ add_t_ti % t = t % t + ti % ti
+
+ end function add_t_ti
+
+
+ type (MPAS_TimeInterval_type) function add_ti_ti(ti1, ti2)
+
+ implicit none
+
+ type (MPAS_TimeInterval_type), intent(in) :: ti1, ti2
+
+ add_ti_ti % ti = ti1 % ti + ti2 % ti
+
+ end function add_ti_ti
+
+
+ type (MPAS_TimeInterval_type) function sub_t_t(t1, t2)
+
+ implicit none
+
+ type (MPAS_Time_type), intent(in) :: t1, t2
+
+ sub_t_t % ti = t1 % t - t2 % t
+
+ end function sub_t_t
+
+
+ type (MPAS_Time_type) function sub_t_ti(t, ti)
+
+ implicit none
+
+ type (MPAS_Time_type), intent(in) :: t
+ type (MPAS_TimeInterval_type), intent(in) :: ti
+
+ sub_t_ti % t = t % t - ti % ti
+
+ end function sub_t_ti
+
+
+ type (MPAS_TimeInterval_type) function sub_ti_ti(ti1, ti2)
+
+ implicit none
+
+ type (MPAS_TimeInterval_type), intent(in) :: ti1, ti2
+
+ sub_ti_ti % ti = ti1 % ti - ti2 % ti
+
+ end function sub_ti_ti
+
+
+ type (MPAS_TimeInterval_type) function mul_ti_n(ti, n)
+
+ implicit none
+
+ type (MPAS_TimeInterval_type), intent(in) :: ti
+ integer, intent(in) :: n
+
+ mul_ti_n % ti = ti % ti * n
+
+ end function mul_ti_n
+
+
+ type (MPAS_TimeInterval_type) function div_ti_n(ti, n)
+
+ implicit none
+
+ type (MPAS_TimeInterval_type), intent(in) :: ti
+ integer, intent(in) :: n
+
+ div_ti_n % ti = ti % ti / n
+
+ end function div_ti_n
+
+
+ logical function eq_t_t(t1, t2)
+
+ implicit none
+
+ type (MPAS_Time_type), intent(in) :: t1, t2
+
+ eq_t_t = (t1 % t == t2 % t)
+
+ end function eq_t_t
+
+
+ logical function ne_t_t(t1, t2)
+
+ implicit none
+
+ type (MPAS_Time_type), intent(in) :: t1, t2
+
+ ne_t_t = (t1 % t /= t2 % t)
+
+ end function ne_t_t
+
+
+ logical function lt_t_t(t1, t2)
+
+ implicit none
+
+ type (MPAS_Time_type), intent(in) :: t1, t2
+
+ lt_t_t = (t1 % t < t2 % t)
+
+ end function lt_t_t
+
+
+ logical function gt_t_t(t1, t2)
+
+ implicit none
+
+ type (MPAS_Time_type), intent(in) :: t1, t2
+
+ gt_t_t = (t1 % t > t2 % t)
+
+ end function gt_t_t
+
+
+ logical function le_t_t(t1, t2)
+
+ implicit none
+
+ type (MPAS_Time_type), intent(in) :: t1, t2
+
+ le_t_t = (t1 % t <= t2 % t)
+
+ end function le_t_t
+
+
+ logical function ge_t_t(t1, t2)
+
+ implicit none
+
+ type (MPAS_Time_type), intent(in) :: t1, t2
+
+ ge_t_t = (t1 % t >= t2 % t)
+
+ end function ge_t_t
+
+
+ logical function eq_ti_ti(ti1, ti2)
+
+ implicit none
+
+ type (MPAS_TimeInterval_type), intent(in) :: ti1, ti2
+
+ eq_ti_ti = (ti1 % ti == ti2 % ti)
+
+ end function eq_ti_ti
+
+
+ logical function ne_ti_ti(ti1, ti2)
+
+ implicit none
+
+ type (MPAS_TimeInterval_type), intent(in) :: ti1, ti2
+
+ ne_ti_ti = (ti1 % ti /= ti2 % ti)
+
+ end function ne_ti_ti
+
+
+ logical function lt_ti_ti(ti1, ti2)
+
+ implicit none
+
+ type (MPAS_TimeInterval_type), intent(in) :: ti1, ti2
+
+ lt_ti_ti = (ti1 % ti < ti2 % ti)
+
+ end function lt_ti_ti
+
+
+ logical function gt_ti_ti(ti1, ti2)
+
+ implicit none
+
+ type (MPAS_TimeInterval_type), intent(in) :: ti1, ti2
+
+ gt_ti_ti = (ti1 % ti > ti2 % ti)
+
+ end function gt_ti_ti
+
+
+ logical function le_ti_ti(ti1, ti2)
+
+ implicit none
+
+ type (MPAS_TimeInterval_type), intent(in) :: ti1, ti2
+
+ le_ti_ti = (ti1 % ti <= ti2 % ti)
+
+ end function le_ti_ti
+
+
+ logical function ge_ti_ti(ti1, ti2)
+
+ implicit none
+
+ type (MPAS_TimeInterval_type), intent(in) :: ti1, ti2
+
+ ge_ti_ti = (ti1 % ti >= ti2 % ti)
+
+ end function ge_ti_ti
+
+
+ type (MPAS_TimeInterval_type) function neg_ti(ti)
+
+ implicit none
+
+ type (MPAS_TimeInterval_type), intent(in) :: ti
+
+ integer :: rc
+ integer :: D, S, Sn, Sd
+
+ call ESMF_TimeIntervalGet(ti % ti, D=D, S=S, Sn=Sn, Sd=Sd, rc=rc)
+ D = -D
+ S = -S
+ Sn = -Sn
+ call ESMF_TimeIntervalSet(neg_ti % ti, D=D, S=S, Sn=Sn, Sd=Sd, rc=rc)
+
+ end function neg_ti
+
+
+ type (MPAS_TimeInterval_type) function abs_ti(ti)
+
+ implicit none
+
+ type (MPAS_TimeInterval_type), intent(in) :: ti
+
+ type (MPAS_TimeInterval_type) :: zeroInterval
+ integer :: rc
+ integer :: D, S, Sn, Sd
+
+ call ESMF_TimeIntervalSet(zeroInterval % ti, D=0, H=0, M=0, S=0, rc=rc)
+
+ if(ti < zeroInterval) then
+ call ESMF_TimeIntervalGet(ti % ti, D=D, S=S, Sn=Sn, Sd=Sd, rc=rc)
+ D = -D
+ S = -S
+ Sn = -Sn
+ call ESMF_TimeIntervalSet(abs_ti % ti, D=D, S=S, Sn=Sn, Sd=Sd, rc=rc)
+ else
+ abs_ti = ti
+ end if
+
+ end function abs_ti
+
+
+! TODO: Implement this function
+! type (MPAS_TimeInterval_type) function mod(ti1, ti2)
+!
+! implicit none
+!
+! type (MPAS_TimeInterval_type), intent(in) :: ti1, ti2
+!
+! mod % ti = mod(ti1 % ti, ti2 % ti)
+!
+! end function mod
+
+
+ subroutine mpas_split_string(string, delimiter, subStrings)
+
+ implicit none
+
+ character(len=*), intent(in) :: string
+ character, intent(in) :: delimiter
+ character(len=*), pointer, dimension(:) :: subStrings
+
+ integer :: i, start, index
+
+ index = 1
+ do i = 1, len(string)
+ if(string(i:i) == delimiter) then
+ index = index + 1
+ end if
+ end do
+
+ allocate(subStrings(1:index))
+
+ start = 1
+ index = 1
+ do i = 1, len(string)
+ if(string(i:i) == delimiter) then
+ subStrings(index) = string(start:i-1)
+ index = index + 1
+ start = i + 1
+ end if
+ end do
+ subStrings(index) = string(start:len(string))
+
+ end subroutine mpas_split_string
+
+
+ subroutine mpas_get_month_day(YYYY, DoY, month, day)
+
+ implicit none
+
+ integer, intent(in) :: YYYY, DoY
+ integer, intent(out) :: month, day
+
+ integer, dimension(12) :: dpm
+
+ if (isLeapYear(YYYY)) then
+ dpm(:) = daysInMonthLeap
+ else
+ dpm(:) = daysInMonth
+ end if
+
+ month = 1
+ day = DoY
+ do while (day > dpm(month))
+ day = day - dpm(month)
+ month = month + 1
+ end do
+
+ end subroutine mpas_get_month_day
+
+
+ logical function isValidDate(YYYY, MM, DD)
+
+ integer, intent(in) :: YYYY, MM, DD
+ integer :: daysInMM
+
+ isValidDate = .true.
+
+ ! TODO: ???? Gregorian calendar has no year zero, but perhaps 0 = 1 BC ???
+ !if (YYYY == 0) then
+ ! isValidDate = .false.
+ ! return
+ !end if
+
+ if (MM < 1 .or. MM > 12) then
+ isValidDate = .false.
+ return
+ end if
+
+ if (DD < 1) then
+ isValidDate = .false.
+ return
+ end if
+
+ if(TheCalendar == MPAS_360DAY) then
+ daysInMM = 30
+ else
+ if (TheCalendar == MPAS_GREGORIAN .and. isLeapYear(YYYY)) then
+ daysInMM = daysInMonthLeap(MM)
+ else
+ daysInMM = daysInMonth(MM)
+ end if
+ end if
+
+ if (DD > daysInMM) then
+ isValidDate = .false.
+ return
+ end if
+
+ end function
+
+
+ logical function isLeapYear(year)
+
+ implicit none
+
+ integer, intent(in) :: year
+
+ isLeapYear = .false.
+
+ if (mod(year,4) == 0) then
+ if (mod(year,100) == 0) then
+ if (mod(year,400) == 0) then
+ isLeapYear = .true.
+ end if
+ else
+ isLeapYear = .true.
+ end if
+ end if
+
+ end function isLeapYear
+
+
+
+
+
+end module mpas_timekeeping
+
+
+
+subroutine wrf_error_fatal(msg)
+
+ implicit none
+
+ character (len=*) :: msg
+
+ write(0,*) 'MPAS_TIMEKEEPING: '//trim(msg)
+
+ stop
+
+end subroutine wrf_error_fatal
Copied: branches/source_renaming/src/framework/mpas_timer.F (from rev 1089, branches/source_renaming/src/framework/module_timer.F)
===================================================================
--- branches/source_renaming/src/framework/mpas_timer.F         (rev 0)
+++ branches/source_renaming/src/framework/mpas_timer.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -0,0 +1,293 @@
+ module mpas_timer
+
+ implicit none
+ save
+! private
+
+#ifdef _PAPI
+ include 'f90papi.h'
+#endif
+
+#ifdef _MPI
+ include 'mpif.h'
+#endif
+
+ type timer_node
+ character (len=72) :: timer_name
+ logical :: running, printable
+ integer :: levels, calls
+ real (kind=RKIND) :: start_time, end_time, total_time
+ real (kind=RKIND) :: max_time, min_time, avg_time
+ type (timer_node), pointer :: next
+ end type timer_node
+
+ type (timer_node), pointer :: all_timers
+ integer :: levels
+
+ public :: mpas_timer_start, &
+ mpas_timer_stop, &
+ mpas_timer_write
+
+ contains
+
+ subroutine mpas_timer_start(timer_name, clear_timer, timer_ptr)!{{{
+ character (len=*), intent (in) :: timer_name !< Input: name of timer, stored as name of timer
+ logical, optional, intent(in) :: clear_timer !< Input: flag to clear timer
+ type (timer_node), optional, pointer, intent(out) :: timer_ptr !< Output: pointer to store timer in module
+
+ logical :: timer_added, timer_found, string_equal, check_flag
+ type (timer_node), pointer :: current, temp
+
+ integer :: clock, hz, usecs
+
+ timer_added = .false.
+ timer_found = .false.
+
+ if(.not.associated(all_timers)) then
+ timer_added = .true.
+ allocate(all_timers)
+ allocate(all_timers%next)
+ levels = 0
+
+ all_timers%timer_name = ''
+ current => all_timers%next
+ nullify(current%next)
+ else
+ current => all_timers%next
+ timer_search: do while ((.not.timer_found) .and. associated(current))
+ string_equal = (trim(current%timer_name) == trim(timer_name))
+ if(string_equal) then
+ timer_found = .true.
+ else
+ current => current%next
+ endif
+ end do timer_search
+ endif
+
+ if(present(timer_ptr)) then
+ timer_found = .true.
+ if(.not.associated(timer_ptr)) then
+ current => all_timers
+ find_end_ptr: do while((.not.timer_added) .and. (associated(current%next)))
+ current => current%next
+ end do find_end_ptr
+
+ allocate(timer_ptr)
+
+ current%next => timer_ptr
+ current => timer_ptr
+ nullify(timer_ptr%next)
+ current%levels = levels
+ current%timer_name = timer_name
+ current%running = .false.
+ current%total_time = 0.0
+ current%max_time = 0.0
+ current%min_time = 100000000.0
+ current%avg_time = 0.0
+ current%calls = 0
+ endif
+ endif
+
+ if(.not.timer_found) then
+ current => all_timers
+ find_end: do while((.not.timer_added) .and. (associated(current%next)))
+ current => current%next
+ end do find_end
+
+ allocate(current%next)
+ current => current%next
+
+ nullify(current%next)
+ timer_added = .true.
+ endif
+
+ if(timer_added .and. (.not.timer_found)) then
+ current%levels = levels
+ current%timer_name = timer_name
+ current%running = .false.
+ current%total_time = 0.0
+ current%max_time = 0.0
+ current%min_time = 100000000.0
+ current%avg_time = 0.0
+ current%calls = 0
+ endif
+
+ if((timer_added .or. timer_found) .and. (.not.current%running)) then
+ current%running = .true.
+ levels = levels + 1
+
+#ifdef _PAPI
+ call PAPIF_get_real_usec(usecs, check_flag)
+ current%start_time = usecs/1.0e6
+#elif defined(_MPI)
+ current%start_time = MPI_Wtime()
+#else
+ call system_clock (count=clock)
+ call system_clock (count_rate=hz)
+ current%start_time = real(clock,kind=RKIND)/real(hz,kind=RKIND)
+#endif
+ endif
+
+ if(present(clear_timer)) then
+ if(clear_timer) then
+ current%start_time = 0.0
+ current%end_time = 0.0
+ current%total_time = 0.0
+ current%max_time = 0.0
+ current%min_time = 0.0
+ current%avg_time = 0.0
+ current%calls = 0
+ current%running = .false.
+ endif
+ endif
+
+ if(present(timer_ptr)) then
+ timer_ptr => current
+ endif
+
+ end subroutine mpas_timer_start!}}}
+
+ subroutine mpas_timer_stop(timer_name, timer_ptr)!{{{
+ character (len=*), intent(in) :: timer_name !< Input: name of timer to stop
+ type (timer_node), pointer, intent(in), optional :: timer_ptr !< Input: pointer to timer, for stopping
+
+ type (timer_node), pointer :: current
+
+ real (kind=RKIND) :: time_temp
+ logical :: timer_found, string_equal, check_flag
+ integer :: clock, hz, usecs
+
+ timer_found = .false.
+
+ if(present(timer_ptr)) then
+ timer_found = .true.
+ current => timer_ptr
+ endif
+
+ if(.not.associated(all_timers)) then
+ print *,' timer_stop :: timer_stop called with no timers initialized'
+ else if(.not. timer_found) then
+ current => all_timers
+ timer_find: do while(.not.timer_found .and. associated(current))
+ string_equal = (trim(current%timer_name) == trim(timer_name))
+
+ if(string_equal) then
+ timer_found = .true.
+ else
+ current => current%next
+ endif
+ end do timer_find
+ endif
+
+ if(.not.timer_found) then
+ print *,' timer_stop :: timer_stop called with timer_name =', timer_name,' when timer has not been started.'
+ stop
+ endif
+
+ if(current%running) then
+ current%running = .false.
+ levels = levels - 1
+
+#ifdef _PAPI
+ call PAPIF_get_real_usec(usecs, check_flag)
+ current%end_time = usecs/1.0e6
+#elif defined(_MPI)
+ current%end_time = MPI_Wtime()
+#else
+ call system_clock(count=clock)
+ call system_clock(count_rate=hz)
+ current%end_time = real(clock,kind=RKIND)/real(hz,kind=RKIND)
+#endif
+
+ time_temp = current%end_time - current%start_time
+ current%total_time = current%total_time + time_temp
+
+ if(time_temp > current%max_time) then
+ current%max_time = time_temp
+ endif
+
+ if(time_temp < current%min_time) then
+ current%min_time = time_temp
+ endif
+
+ current%avg_time = current%avg_time + time_temp
+ current%calls = current%calls + 1
+ endif
+
+ end subroutine mpas_timer_stop!}}}
+
+ recursive subroutine mpas_timer_write(timer_ptr, total_ptr)!{{{
+ type (timer_node), pointer, intent(inout), optional :: timer_ptr
+ type (timer_node), pointer, intent(in), optional :: total_ptr
+ character (len=10) :: tname
+
+ logical :: total_found, string_equals
+ type (timer_node), pointer :: current, total
+ real (kind=RKIND) :: percent
+ integer :: i
+
+ total_found = .false.
+
+ if(present(timer_ptr) .and. (.not.present(total_ptr))) then
+ print *,'timer_write :: timer_ptr valid, but total_ptr is not assigned.'
+ stop
+ else if(present(timer_ptr)) then
+ tname = ''
+ do i=0,timer_ptr%levels+2
+ tname = tname//' '
+! write(*,'(a,$)') ' '
+ end do
+! tname = tname//timer_ptr%timer_name
+
+ if(timer_ptr%total_time == 0.0d0) then
+ timer_ptr%min_time = 0.0d0
+ timer_ptr%max_time = 0.0d0
+ timer_ptr%avg_time = 0.0d0
+ percent = 0.0d0
+ else
+ timer_ptr%avg_time = timer_ptr%avg_time/timer_ptr%calls
+ percent = timer_ptr%total_time/total_ptr%total_time
+ endif
+
+ write(*,'(i2, 1x, a35, f15.5, i10, 3f15.5, f8.2)') timer_ptr%levels, tname(1:timer_ptr%levels)//timer_ptr%timer_name, timer_ptr%total_time, timer_ptr%calls, timer_ptr%min_time, timer_ptr%max_time, timer_ptr%avg_time, percent
+ return
+ endif
+
+ total => all_timers
+
+ find_total: do while((.not.total_found) .and. associated(total))
+ string_equals = (trim(total%timer_name) == trim("total time"))
+ if(string_equals) then
+ total_found = .true.
+ else
+ total => total%next
+ endif
+ end do find_total
+
+ if(.not.total_found) then
+ print *,' timer_write :: no timer named "total time" found.'
+ stop
+ end if
+
+ write(*,'(3x, a10, 24x, a15, a10, a13, a15, a15, a15)') 'timer_name', 'total', 'calls', 'min', 'max', 'avg', 'percent'
+ write(*,'(i2, 1x, a35, f15.5, i10, 3f15.5)') total%levels, total%timer_name, total%total_time, total%calls, total%min_time, total%max_time, total%avg_time
+
+ current => all_timers
+
+ print_timers: do while(associated(current))
+ string_equals = (trim(current%timer_name) == trim("total time"))
+ string_equals = string_equals .or. (trim(current%timer_name) == trim(" "))
+
+ if(.not.string_equals) then
+ call mpas_timer_write(current, total)
+ current => current%next
+ else
+ current => current%next
+ endif
+ end do print_timers
+
+ end subroutine mpas_timer_write!}}}
+
+ end module mpas_timer
+
+! vim: foldmethod=marker et ts=2
Copied: branches/source_renaming/src/framework/mpas_zoltan_interface.F (from rev 1089, branches/source_renaming/src/framework/module_zoltan_interface.F)
===================================================================
--- branches/source_renaming/src/framework/mpas_zoltan_interface.F         (rev 0)
+++ branches/source_renaming/src/framework/mpas_zoltan_interface.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -0,0 +1,581 @@
+module mpas_zoltan_interface
+ use zoltan
+
+ implicit none
+
+ include 'mpif.h'
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! Data for reordering cells
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ integer :: numCells
+ integer, dimension(:), pointer :: cellIDs
+ integer :: geomDim
+ real (kind=RKIND), dimension(:), pointer :: cellCoordX, cellCoordY, cellCoordZ
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! Data for reordering edges
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ integer :: numEdges
+ integer, dimension(:), pointer :: edgeIDs
+ real (kind=RKIND), dimension(:), pointer :: edgeCoordX, edgeCoordY, edgeCoordZ
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! Data for reordering vertices
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ integer :: numVerts
+ integer, dimension(:), pointer :: vertIDs
+ real (kind=RKIND), dimension(:), pointer :: vertCoordX, vertCoordY, vertCoordZ
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+
+ contains
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! Perhaps not necessary, but implemented in case it helps
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ subroutine mpas_zoltan_start()
+
+ integer(Zoltan_INT) :: error
+ real(Zoltan_FLOAT) :: version
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! Body of subroutine
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ error = Zoltan_Initialize(version)
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ end subroutine
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ subroutine mpas_zoltan_order_loc_hsfc_cells(in_numcells,in_cellIDs,in_geomDim,in_cellX, &
+ in_cellY, in_cellZ)
+ implicit none
+
+ integer :: in_numcells
+ integer, dimension(:), pointer :: in_cellIDs
+ integer :: in_geomDim
+ real (kind=RKIND), dimension(:), pointer :: in_cellX, in_cellY, in_cellZ
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! local variables
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ type(Zoltan_Struct), pointer :: zz_obj
+ integer(ZOLTAN_INT) :: ierr
+
+ integer :: numGidEntries, i
+ integer(ZOLTAN_INT), allocatable :: global_ids(:), permIndices(:),permGIDs(:)
+ real(kind=RKIND), allocatable :: permXs(:),permYs(:),permZs(:)
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! Body of subroutine
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ numCells = in_numcells
+ cellIDs => in_cellIDs
+ geomDim = in_geomDim
+ cellCoordX => in_cellX
+ cellCoordY => in_cellY
+ cellCoordZ => in_cellZ
+
+ nullify(zz_obj)
+ zz_obj => Zoltan_Create(MPI_COMM_SELF)
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! General Zoltan Parameters
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ierr = Zoltan_Set_Param(zz_obj, "ORDER_METHOD", "LOCAL_HSFC")
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! register query functions (renamed mpas_zqf_* handlers)
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ierr = Zoltan_Set_Fn(zz_obj, ZOLTAN_NUM_OBJ_FN_TYPE,zqfNumCells)
+ ierr = Zoltan_Set_Fn(zz_obj, ZOLTAN_OBJ_LIST_FN_TYPE,mpas_zqf_get_cells)
+ ierr = Zoltan_Set_Fn(zz_obj, ZOLTAN_NUM_GEOM_FN_TYPE,zqfGeomDim)
+ ierr = Zoltan_Set_Fn(zz_obj, ZOLTAN_GEOM_FN_TYPE, mpas_zqf_get_cell_geom)
+
+ numGidEntries=1
+
+ allocate(global_ids(numCells))
+ allocate(permIndices(numCells))
+ allocate(permGIDs(numCells))
+ allocate(permXs(numCells))
+ allocate(permYs(numCells))
+ allocate(permZs(numCells))
+
+ !! MMW: There might be a way to use cellIDs directly
+ do i=1,numCells
+ global_ids(i) = cellIDs(i)
+ end do
+
+ ierr = Zoltan_Order(zz_obj, numGidEntries, numCells, global_ids, permIndices);
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! This is necessary for now until we fix a small bug in Zoltan_Order
+ !!!!!!!!!!!!!!!!!!!!!!!!!!
+ do i=1,numCells
+ permGIDs(i) = global_ids(permIndices(i)+1)
+ permXs(i) = cellCoordX(permIndices(i)+1)
+ permYs(i) = cellCoordY(permIndices(i)+1)
+ permZs(i) = cellCoordZ(permIndices(i)+1)
+ end do
+
+ !!do i=1,numCells
+ !! write(*,*) global_ids(i), permGIDs(i)
+ !!end do
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! Actually change the ordering of the cells
+ !!!!!!!!!!!!!!!!!!!!!!!!!!
+ do i=1,numCells
+ cellIDs(i) = permGIDs(i)
+ cellCoordX(i) = permXs(i)
+ cellCoordY(i) = permYs(i)
+ cellCoordZ(i) = permZs(i)
+ end do
+ !!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ deallocate(global_ids)
+ deallocate(permIndices)
+ deallocate(permGIDs)
+ deallocate(permXs)
+ deallocate(permYs)
+ deallocate(permZs)
+
+ call Zoltan_Destroy(zz_obj)
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ end subroutine mpas_zoltan_order_loc_hsfc_cells
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! zoltan query function:
+ !! Returns number of cells
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ integer function zqfNumCells(data, ierr)
+
+ ! Local declarations
+ integer(ZOLTAN_INT), intent(in) :: data(*)
+ integer(ZOLTAN_INT), intent(out) :: ierr
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ zqfNumCells = numCells
+ ierr = ZOLTAN_OK
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ end function zqfNumCells
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! zoltan query function:
+ !! Returns lists of Cell IDs
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ subroutine mpas_zqf_get_cells (data, num_gid_entries, num_lid_entries, global_ids, &
+ local_ids, wgt_dim, obj_wgts, ierr)
+ integer(ZOLTAN_INT), intent(in) :: data(*)
+ integer(ZOLTAN_INT), intent(in) :: num_gid_entries, num_lid_entries
+ integer(ZOLTAN_INT), intent(out) :: global_ids(*), local_ids(*)
+ integer(ZOLTAN_INT), intent(in) :: wgt_dim
+ real(ZOLTAN_FLOAT), intent(out) :: obj_wgts(*)
+ integer(ZOLTAN_INT), intent(out) :: ierr
+
+ ! local declarations
+ integer :: i
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ do i= 1, numCells
+ global_ids(i) = cellIDs(i)
+ local_ids(i) = i
+ end do
+
+ ierr = ZOLTAN_OK
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ end subroutine mpas_zqf_get_cells
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! Zoltan Query Function:
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ integer function zqfGeomDim(data, ierr)
+ !use zoltan
+ implicit none
+ integer(ZOLTAN_INT), intent(in) :: data(*)
+ integer(ZOLTAN_INT) :: ierr
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ zqfGeomDim = geomDim
+ ierr = ZOLTAN_OK
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ end function zqfGeomDim
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! Zoltan Query Function:
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ subroutine mpas_zqf_get_cell_geom(data, num_gid_entries, num_lid_entries, global_id, &
+ local_id, geom_vec, ierr)
+ !use zoltan
+ implicit none
+
+ integer(ZOLTAN_INT), intent(in) :: data(*)
+ integer(ZOLTAN_INT), intent(in) :: num_gid_entries, num_lid_entries
+ integer(ZOLTAN_INT), intent(in) :: global_id, local_id
+ real(ZOLTAN_DOUBLE), intent(out) :: geom_vec(*)
+ integer(ZOLTAN_INT), intent(out) :: ierr
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! Assuming geom_dim is 3
+ geom_vec(1) = cellCoordX(local_id)
+ geom_vec(2) = cellCoordY(local_id)
+ geom_vec(3) = cellCoordZ(local_id)
+
+ ierr = ZOLTAN_OK
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ end subroutine mpas_zqf_get_cell_geom
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! The ordering functions should perhaps be refactored so that there
+ !! are not separate functions for cells, edges, and vertices
+ !! Not sure if this is worth it with the additional conditionals that would
+ !! be required.
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ subroutine mpas_zoltan_order_loc_hsfc_edges(in_numedges,in_edgeIDs,in_geomDim,in_edgeX, &
+ in_edgeY, in_edgeZ)
+ implicit none
+
+ integer :: in_numedges
+ integer, dimension(:), pointer :: in_edgeIDs
+ integer :: in_geomDim
+ real (kind=RKIND), dimension(:), pointer :: in_edgeX, in_edgeY, in_edgeZ
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! local variables
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ type(Zoltan_Struct), pointer :: zz_obj
+ integer(ZOLTAN_INT) :: ierr
+
+ integer :: numGidEntries, i
+ integer(ZOLTAN_INT), allocatable :: global_ids(:), permIndices(:),permGIDs(:)
+ real(kind=RKIND), allocatable :: permXs(:),permYs(:),permZs(:)
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! Body of subroutine
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ numEdges = in_numedges
+ edgeIDs => in_edgeIDs
+ geomDim = in_geomDim
+ edgeCoordX => in_edgeX
+ edgeCoordY => in_edgeY
+ edgeCoordZ => in_edgeZ
+
+ nullify(zz_obj)
+ zz_obj => Zoltan_Create(MPI_COMM_SELF)
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! General Zoltan Parameters
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ierr = Zoltan_Set_Param(zz_obj, "ORDER_METHOD", "LOCAL_HSFC")
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! register query functions (renamed mpas_zqf_* handlers)
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ierr = Zoltan_Set_Fn(zz_obj, ZOLTAN_NUM_OBJ_FN_TYPE,zqfNumEdges)
+ ierr = Zoltan_Set_Fn(zz_obj, ZOLTAN_OBJ_LIST_FN_TYPE,mpas_zqf_get_edges)
+ ierr = Zoltan_Set_Fn(zz_obj, ZOLTAN_NUM_GEOM_FN_TYPE,zqfGeomDim)
+ ierr = Zoltan_Set_Fn(zz_obj, ZOLTAN_GEOM_FN_TYPE, mpas_zqf_get_edge_geom)
+
+ numGidEntries=1
+
+ allocate(global_ids(numEdges))
+ allocate(permIndices(numEdges))
+ allocate(permGIDs(numEdges))
+ allocate(permXs(numEdges))
+ allocate(permYs(numEdges))
+ allocate(permZs(numEdges))
+
+ !! MMW: There might be a way to use edgeIDs directly
+ do i=1,numEdges
+ global_ids(i) = edgeIDs(i)
+ end do
+
+ ierr = Zoltan_Order(zz_obj, numGidEntries, numEdges, global_ids, permIndices);
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! This is necessary for now until we fix a small bug in Zoltan_Order
+ !!!!!!!!!!!!!!!!!!!!!!!!!!
+ do i=1,numEdges
+ permGIDs(i) = global_ids(permIndices(i)+1)
+ permXs(i) = edgeCoordX(permIndices(i)+1)
+ permYs(i) = edgeCoordY(permIndices(i)+1)
+ permZs(i) = edgeCoordZ(permIndices(i)+1)
+ end do
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! Actually change the ordering of the edges
+ !!!!!!!!!!!!!!!!!!!!!!!!!!
+ do i=1,numEdges
+ edgeIDs(i) = permGIDs(i)
+ edgeCoordX(i) = permXs(i)
+ edgeCoordY(i) = permYs(i)
+ edgeCoordZ(i) = permZs(i)
+ end do
+ !!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ deallocate(global_ids)
+ deallocate(permIndices)
+ deallocate(permGIDs)
+ deallocate(permXs)
+ deallocate(permYs)
+ deallocate(permZs)
+
+ call Zoltan_Destroy(zz_obj)
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ end subroutine mpas_zoltan_order_loc_hsfc_edges
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! zoltan query function:
+ !! Returns number of edges
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ integer function zqfNumEdges(data, ierr)
+ ! Local declarations
+ integer(ZOLTAN_INT), intent(in) :: data(*)
+ integer(ZOLTAN_INT), intent(out) :: ierr
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ zqfNumEdges = numEdges
+ ierr = ZOLTAN_OK
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ end function zqfNumEdges
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! zoltan query function:
+ !! Returns lists of Edge IDs
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ subroutine mpas_zqf_get_edges (data, num_gid_entries, num_lid_entries, global_ids, &
+ local_ids, wgt_dim, obj_wgts, ierr)
+ integer(ZOLTAN_INT), intent(in) :: data(*)
+ integer(ZOLTAN_INT), intent(in) :: num_gid_entries, num_lid_entries
+ integer(ZOLTAN_INT), intent(out) :: global_ids(*), local_ids(*)
+ integer(ZOLTAN_INT), intent(in) :: wgt_dim
+ real(ZOLTAN_FLOAT), intent(out) :: obj_wgts(*)
+ integer(ZOLTAN_INT), intent(out) :: ierr
+
+ ! local declarations
+ integer :: i
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ do i= 1, numEdges
+ global_ids(i) = edgeIDs(i)
+ local_ids(i) = i
+ end do
+
+ ierr = ZOLTAN_OK
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ end subroutine mpas_zqf_get_edges
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! Zoltan Query Function:
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ subroutine mpas_zqf_get_edge_geom(data, num_gid_entries, num_lid_entries, global_id, &
+ local_id, geom_vec, ierr)
+ !use zoltan
+ implicit none
+
+ integer(ZOLTAN_INT), intent(in) :: data(*)
+ integer(ZOLTAN_INT), intent(in) :: num_gid_entries, num_lid_entries
+ integer(ZOLTAN_INT), intent(in) :: global_id, local_id
+ real(ZOLTAN_DOUBLE), intent(out) :: geom_vec(*)
+ integer(ZOLTAN_INT), intent(out) :: ierr
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! Assuming geom_dim is 3
+ geom_vec(1) = edgeCoordX(local_id)
+ geom_vec(2) = edgeCoordY(local_id)
+ geom_vec(3) = edgeCoordZ(local_id)
+
+ ierr = ZOLTAN_OK
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ end subroutine mpas_zqf_get_edge_geom
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ subroutine mpas_zoltan_order_loc_hsfc_verts(in_numverts,in_vertIDs,in_geomDim,in_vertX, &
+ in_vertY, in_vertZ)
+ implicit none
+
+ integer :: in_numverts
+ integer, dimension(:), pointer :: in_vertIDs
+ integer :: in_geomDim
+ real (kind=RKIND), dimension(:), pointer :: in_vertX, in_vertY, in_vertZ
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! local variables
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ type(Zoltan_Struct), pointer :: zz_obj
+ integer(ZOLTAN_INT) :: ierr
+
+ integer :: numGidEntries, i
+ integer(ZOLTAN_INT), allocatable :: global_ids(:), permIndices(:),permGIDs(:)
+ real(kind=RKIND), allocatable :: permXs(:),permYs(:),permZs(:)
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! Body of subroutine
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ numVerts = in_numverts
+ vertIDs => in_vertIDs
+ geomDim = in_geomDim
+ vertCoordX => in_vertX
+ vertCoordY => in_vertY
+ vertCoordZ => in_vertZ
+
+ nullify(zz_obj)
+ zz_obj => Zoltan_Create(MPI_COMM_SELF)
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! General Zoltan Parameters
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ierr = Zoltan_Set_Param(zz_obj, "ORDER_METHOD", "LOCAL_HSFC")
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! register query functions (renamed mpas_zqf_* handlers)
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ierr = Zoltan_Set_Fn(zz_obj, ZOLTAN_NUM_OBJ_FN_TYPE,zqfNumVerts)
+ ierr = Zoltan_Set_Fn(zz_obj, ZOLTAN_OBJ_LIST_FN_TYPE,mpas_zqf_get_verts)
+ ierr = Zoltan_Set_Fn(zz_obj, ZOLTAN_NUM_GEOM_FN_TYPE,zqfGeomDim)
+ ierr = Zoltan_Set_Fn(zz_obj, ZOLTAN_GEOM_FN_TYPE, mpas_zqf_get_vert_geom)
+
+ numGidEntries=1
+
+ allocate(global_ids(numVerts))
+ allocate(permIndices(numVerts))
+ allocate(permGIDs(numVerts))
+ allocate(permXs(numVerts))
+ allocate(permYs(numVerts))
+ allocate(permZs(numVerts))
+
+ !! MMW: There might be a way to use vertIDs directly
+ do i=1,numVerts
+ global_ids(i) = vertIDs(i)
+ end do
+
+ ierr = Zoltan_Order(zz_obj, numGidEntries, numVerts, global_ids, permIndices);
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! This is necessary for now until we fix a small bug in Zoltan_Order
+ !!!!!!!!!!!!!!!!!!!!!!!!!!
+ do i=1,numVerts
+ permGIDs(i) = global_ids(permIndices(i)+1)
+ permXs(i) = vertCoordX(permIndices(i)+1)
+ permYs(i) = vertCoordY(permIndices(i)+1)
+ permZs(i) = vertCoordZ(permIndices(i)+1)
+ end do
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! Actually change the ordering of the verts
+ !!!!!!!!!!!!!!!!!!!!!!!!!!
+ do i=1,numVerts
+ vertIDs(i) = permGIDs(i)
+ vertCoordX(i) = permXs(i)
+ vertCoordY(i) = permYs(i)
+ vertCoordZ(i) = permZs(i)
+ end do
+ !!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ deallocate(global_ids)
+ deallocate(permIndices)
+ deallocate(permGIDs)
+ deallocate(permXs)
+ deallocate(permYs)
+ deallocate(permZs)
+
+ call Zoltan_Destroy(zz_obj)
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ end subroutine mpas_zoltan_order_loc_hsfc_verts
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! zoltan query function:
+ !! Returns number of verts
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ integer function zqfNumVerts(data, ierr)
+
+ ! Local declarations
+ integer(ZOLTAN_INT), intent(in) :: data(*)
+ integer(ZOLTAN_INT), intent(out) :: ierr
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ zqfNumVerts = numVerts
+ ierr = ZOLTAN_OK
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ end function zqfNumVerts
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! zoltan query function:
+ !! Returns lists of Vert IDs
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ subroutine mpas_zqf_get_verts (data, num_gid_entries, num_lid_entries, global_ids, &
+ local_ids, wgt_dim, obj_wgts, ierr)
+
+ integer(ZOLTAN_INT), intent(in) :: data(*)
+ integer(ZOLTAN_INT), intent(in) :: num_gid_entries, num_lid_entries
+ integer(ZOLTAN_INT), intent(out) :: global_ids(*), local_ids(*)
+ integer(ZOLTAN_INT), intent(in) :: wgt_dim
+ real(ZOLTAN_FLOAT), intent(out) :: obj_wgts(*)
+ integer(ZOLTAN_INT), intent(out) :: ierr
+
+ ! local declarations
+ integer :: i
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ do i= 1, numVerts
+ global_ids(i) = vertIDs(i)
+ local_ids(i) = i
+ end do
+
+ ierr = ZOLTAN_OK
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ end subroutine mpas_zqf_get_verts
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! Zoltan Query Function:
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ subroutine mpas_zqf_get_vert_geom(data, num_gid_entries, num_lid_entries, global_id, &
+ local_id, geom_vec, ierr)
+ !use zoltan
+ implicit none
+
+ integer(ZOLTAN_INT), intent(in) :: data(*)
+ integer(ZOLTAN_INT), intent(in) :: num_gid_entries, num_lid_entries
+ integer(ZOLTAN_INT), intent(in) :: global_id, local_id
+ real(ZOLTAN_DOUBLE), intent(out) :: geom_vec(*)
+ integer(ZOLTAN_INT), intent(out) :: ierr
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! Assuming geom_dim is 3
+ geom_vec(1) = vertCoordX(local_id)
+ geom_vec(2) = vertCoordY(local_id)
+ geom_vec(3) = vertCoordZ(local_id)
+
+ ierr = ZOLTAN_OK
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ end subroutine mpas_zqf_get_vert_geom
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+
+
+
+end module mpas_zoltan_interface
Modified: branches/source_renaming/src/operators/mpas_rbf_interpolation.F
===================================================================
--- branches/source_renaming/src/operators/mpas_rbf_interpolation.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/operators/mpas_rbf_interpolation.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,6 +1,6 @@
module rbf_interpolation
- use dmpar
- use grid_types
+ use mpas_dmpar
+ use mpas_grid_types
implicit none
private
Modified: branches/source_renaming/src/operators/mpas_vector_reconstruction.F
===================================================================
--- branches/source_renaming/src/operators/mpas_vector_reconstruction.F        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/operators/mpas_vector_reconstruction.F        2011-10-17 21:43:19 UTC (rev 1105)
@@ -1,8 +1,8 @@
module vector_reconstruction
- use grid_types
- use configure
- use constants
+ use mpas_grid_types
+ use mpas_configure
+ use mpas_constants
use rbf_interpolation
implicit none
Modified: branches/source_renaming/src/registry/gen_inc.c
===================================================================
--- branches/source_renaming/src/registry/gen_inc.c        2011-10-17 20:41:26 UTC (rev 1104)
+++ branches/source_renaming/src/registry/gen_inc.c        2011-10-17 21:43:19 UTC (rev 1105)
@@ -139,10 +139,10 @@
fd = fopen("config_bcast_namelist.inc", "w");
nls_ptr = nls;
while (nls_ptr) {
- if (nls_ptr->vtype == INTEGER) fortprintf(fd, " call dmpar_bcast_int(dminfo, %s)</font>
<font color="red">", nls_ptr->name);
- if (nls_ptr->vtype == REAL) fortprintf(fd, " call dmpar_bcast_real(dminfo, %s)</font>
<font color="red">", nls_ptr->name);
- if (nls_ptr->vtype == LOGICAL) fortprintf(fd, " call dmpar_bcast_logical(dminfo, %s)</font>
<font color="red">", nls_ptr->name);
- if (nls_ptr->vtype == CHARACTER) fortprintf(fd, " call dmpar_bcast_char(dminfo, %s)</font>
<font color="blue">", nls_ptr->name);
+ if (nls_ptr->vtype == INTEGER) fortprintf(fd, " call mpas_dmpar_bcast_int(dminfo, %s)</font>
<font color="blue">", nls_ptr->name);
+ if (nls_ptr->vtype == REAL) fortprintf(fd, " call mpas_dmpar_bcast_real(dminfo, %s)</font>
<font color="blue">", nls_ptr->name);
+ if (nls_ptr->vtype == LOGICAL) fortprintf(fd, " call mpas_dmpar_bcast_logical(dminfo, %s)</font>
<font color="blue">", nls_ptr->name);
+ if (nls_ptr->vtype == CHARACTER) fortprintf(fd, " call mpas_dmpar_bcast_char(dminfo, %s)</font>
<font color="black">", nls_ptr->name);
nls_ptr = nls_ptr->next;
}
fortprintf(fd, "</font>
<font color="gray">");
@@ -253,8 +253,8 @@
fd = fopen("read_dims.inc", "w");
dim_ptr = dims;
while (dim_ptr) {
- if (dim_ptr->constant_value < 0 && !dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) fortprintf(fd, " call io_input_get_dimension(input_obj, \'%s\', %s)</font>
<font color="red">", dim_ptr->name_in_file, dim_ptr->name_in_code);
- else if (dim_ptr->constant_value < 0 && dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) fortprintf(fd, " call io_input_get_dimension(input_obj, \'%s\', %s)</font>
<font color="blue">", dim_ptr->name_in_file, dim_ptr->name_in_file);
+ if (dim_ptr->constant_value < 0 && !dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) fortprintf(fd, " call mpas_io_input_get_dimension(input_obj, \'%s\', %s)</font>
<font color="blue">", dim_ptr->name_in_file, dim_ptr->name_in_code);
+ else if (dim_ptr->constant_value < 0 && dim_ptr->namelist_defined && !is_derived_dim(dim_ptr->name_in_code)) fortprintf(fd, " call mpas_io_input_get_dimension(input_obj, \'%s\', %s)</font>
<font color="gray">", dim_ptr->name_in_file, dim_ptr->name_in_file);
dim_ptr = dim_ptr->next;
}
@@ -474,13 +474,13 @@
fortprintf(fd, " allocate(b %% %s %% time_levs(%i))</font>
<font color="black">", group_ptr->name, group_ptr->vlist->var->ntime_levs);
fortprintf(fd, " do i=1,b %% %s %% nTimeLevels</font>
<font color="black">", group_ptr->name);
fortprintf(fd, " allocate(b %% %s %% time_levs(i) %% %s)</font>
<font color="red">", group_ptr->name, group_ptr->name);
- fortprintf(fd, " call allocate_%s(b %% %s %% time_levs(i) %% %s, &</font>
<font color="blue">", group_ptr->name, group_ptr->name, group_ptr->name);
+ fortprintf(fd, " call mpas_allocate_%s(b %% %s %% time_levs(i) %% %s, &</font>
<font color="black">", group_ptr->name, group_ptr->name, group_ptr->name);
fortprintf(fd, "#include \"dim_dummy_args.inc\"</font>
<font color="black">");
fortprintf(fd, " )</font>
<font color="black">");
fortprintf(fd, " end do</font>
<font color="black"></font>
<font color="red">");
}
else {
- fortprintf(fd, " call allocate_%s(b %% %s, &</font>
<font color="blue">", group_ptr->name, group_ptr->name);
+ fortprintf(fd, " call mpas_allocate_%s(b %% %s, &</font>
<font color="black">", group_ptr->name, group_ptr->name);
fortprintf(fd, "#include \"dim_dummy_args.inc\"</font>
<font color="black">");
fortprintf(fd, " )</font>
<font color="black"></font>
<font color="gray">");
}
@@ -495,13 +495,13 @@
while (group_ptr) {
if (group_ptr->vlist->var->ntime_levs > 1) {
fortprintf(fd, " do i=1,b %% %s %% nTimeLevels</font>
<font color="red">", group_ptr->name);
- fortprintf(fd, " call deallocate_%s(b %% %s %% time_levs(i) %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name, group_ptr->name);
+ fortprintf(fd, " call mpas_deallocate_%s(b %% %s %% time_levs(i) %% %s)</font>
<font color="black">", group_ptr->name, group_ptr->name, group_ptr->name);
fortprintf(fd, " deallocate(b %% %s %% time_levs(i) %% %s)</font>
<font color="black">", group_ptr->name, group_ptr->name);
fortprintf(fd, " end do</font>
<font color="black">");
fortprintf(fd, " deallocate(b %% %s %% time_levs)</font>
<font color="red">", group_ptr->name);
}
else {
- fortprintf(fd, " call deallocate_%s(b %% %s)</font>
<font color="blue">", group_ptr->name, group_ptr->name);
+ fortprintf(fd, " call mpas_deallocate_%s(b %% %s)</font>
<font color="black">", group_ptr->name, group_ptr->name);
}
fortprintf(fd, " deallocate(b %% %s)</font>
<font color="black"></font>
<font color="gray">", group_ptr->name);
group_ptr = group_ptr->next;
@@ -512,7 +512,7 @@
fd = fopen("group_alloc_routines.inc", "w");
group_ptr = groups;
while (group_ptr) {
- fortprintf(fd, " subroutine allocate_%s(%s, &</font>
<font color="blue">", group_ptr->name, group_ptr->name);
+ fortprintf(fd, " subroutine mpas_allocate_%s(%s, &</font>
<font color="black">", group_ptr->name, group_ptr->name);
fortprintf(fd, "#include \"dim_dummy_args.inc\"</font>
<font color="black">");
fortprintf(fd, " )</font>
<font color="black">");
fortprintf(fd, "</font>
<font color="gray">");
@@ -659,7 +659,7 @@
}
}
- fortprintf(fd, " end subroutine allocate_%s</font>
<font color="black"></font>
<font color="black"></font>
<font color="blue">", group_ptr->name);
+ fortprintf(fd, " end subroutine mpas_allocate_%s</font>
<font color="black"></font>
<font color="black"></font>
<font color="gray">", group_ptr->name);
group_ptr = group_ptr->next;
}
fclose(fd);
@@ -668,7 +668,7 @@
fd = fopen("group_dealloc_routines.inc", "w");
group_ptr = groups;
while (group_ptr) {
- fortprintf(fd, " subroutine deallocate_%s(%s)</font>
<font color="blue">", group_ptr->name, group_ptr->name);
+ fortprintf(fd, " subroutine mpas_deallocate_%s(%s)</font>
<font color="black">", group_ptr->name, group_ptr->name);
fortprintf(fd, "</font>
<font color="black">");
fortprintf(fd, " implicit none</font>
<font color="black">");
fortprintf(fd, "</font>
<font color="gray">");
@@ -705,7 +705,7 @@
}
}
- fortprintf(fd, " end subroutine deallocate_%s</font>
<font color="black"></font>
<font color="black"></font>
<font color="blue">", group_ptr->name);
+ fortprintf(fd, " end subroutine mpas_deallocate_%s</font>
<font color="black"></font>
<font color="black"></font>
<font color="gray">", group_ptr->name);
group_ptr = group_ptr->next;
}
fclose(fd);
@@ -714,7 +714,7 @@
fd = fopen("group_copy_routines.inc", "w");
group_ptr = groups;
while (group_ptr) {
- fortprintf(fd, " subroutine copy_%s(dest, src)</font>
<font color="blue">", group_ptr->name);
+ fortprintf(fd, " subroutine mpas_copy_%s(dest, src)</font>
<font color="black">", group_ptr->name);
fortprintf(fd, "</font>
<font color="black">");
fortprintf(fd, " implicit none</font>
<font color="black">");
fortprintf(fd, "</font>
<font color="gray">");
@@ -748,7 +748,7 @@
}
}
fortprintf(fd, "</font>
<font color="red">");
- fortprintf(fd, " end subroutine copy_%s</font>
<font color="black"></font>
<font color="black"></font>
<font color="blue">", group_ptr->name);
+ fortprintf(fd, " end subroutine mpas_copy_%s</font>
<font color="black"></font>
<font color="black"></font>
<font color="gray">", group_ptr->name);
group_ptr = group_ptr->next;
}
fclose(fd);
@@ -758,7 +758,7 @@
group_ptr = groups;
while (group_ptr) {
if (group_ptr->vlist->var->ntime_levs > 1) {
- fortprintf(fd, " subroutine shift_time_levels_%s(%s)</font>
<font color="blue">", group_ptr->name, group_ptr->name);
+ fortprintf(fd, " subroutine mpas_shift_time_levels_%s(%s)</font>
<font color="black">", group_ptr->name, group_ptr->name);
fortprintf(fd, "</font>
<font color="black">");
fortprintf(fd, " implicit none</font>
<font color="black">");
fortprintf(fd, "</font>
<font color="gray">");
@@ -773,7 +773,7 @@
fortprintf(fd, " end do</font>
<font color="black">");
fortprintf(fd, " %s %% time_levs(%s %% nTimeLevels) %% %s => sptr</font>
<font color="black">", group_ptr->name, group_ptr->name, group_ptr->name);
fortprintf(fd, "</font>
<font color="red">");
- fortprintf(fd, " end subroutine shift_time_levels_%s</font>
<font color="black"></font>
<font color="black"></font>
<font color="blue">", group_ptr->name);
+ fortprintf(fd, " end subroutine mpas_shift_time_levels_%s</font>
<font color="black"></font>
<font color="black"></font>
<font color="gray">", group_ptr->name);
}
group_ptr = group_ptr->next;
}
@@ -1362,12 +1362,12 @@
fortprintf(fd, " %s%id %% ioinfo %% fieldName = \'%s\'</font>
<font color="red">", vtype, var_ptr->ndims, var_ptr->name_in_file);
if (var_ptr->timedim)
- fortprintf(fd, " call io_input_field_time(input_obj, %s%id)</font>
<font color="blue">", vtype, var_ptr->ndims);
+ fortprintf(fd, " call mpas_io_input_field_time(input_obj, %s%id)</font>
<font color="red">", vtype, var_ptr->ndims);
else
- fortprintf(fd, " call io_input_field(input_obj, %s%id)</font>
<font color="blue">", vtype, var_ptr->ndims);
+ fortprintf(fd, " call mpas_io_input_field(input_obj, %s%id)</font>
<font color="red">", vtype, var_ptr->ndims);
if (var_ptr->ndims > 0) {
- fortprintf(fd, " call dmpar_alltoall_field(dminfo, &</font>
<font color="blue">");
+ fortprintf(fd, " call mpas_dmpar_alltoall_field(dminfo, &</font>
<font color="black">");
if (strncmp(var_ptr->super_array, "-", 1024) != 0)
fortprintf(fd, " %s%id %% array, super_%s%id, &</font>
<font color="gray">", vtype, var_ptr->ndims, vtype, var_ptr->ndims);
else
@@ -1982,7 +1982,7 @@
}
fortprintf(fd, " %s%id %% ioinfo %% fieldName = \'%s\'</font>
<font color="red">", vtype, var_ptr->ndims, var_ptr->name_in_file);
- fortprintf(fd, " call dmpar_alltoall_field(domain %% dminfo, &</font>
<font color="blue">");
+ fortprintf(fd, " call mpas_dmpar_alltoall_field(domain %% dminfo, &</font>
<font color="black">");
if (strncmp(var_ptr->super_array, "-", 1024) != 0)
fortprintf(fd, " super_%s%id, %s%id %% array, &</font>
<font color="gray">", vtype, var_ptr->ndims, vtype, var_ptr->ndims);
else
@@ -2067,9 +2067,9 @@
}
if (var_ptr->timedim)
- fortprintf(fd, " if (domain %% dminfo %% my_proc_id == IO_NODE) call io_output_field_time(output_obj, %s%id)</font>
<font color="blue">", vtype, var_ptr->ndims);
+ fortprintf(fd, " if (domain %% dminfo %% my_proc_id == IO_NODE) call mpas_io_output_field_time(output_obj, %s%id)</font>
<font color="red">", vtype, var_ptr->ndims);
else
- fortprintf(fd, " if (domain %% dminfo %% my_proc_id == IO_NODE) call io_output_field(output_obj, %s%id)</font>
<font color="blue">", vtype, var_ptr->ndims);
+ fortprintf(fd, " if (domain %% dminfo %% my_proc_id == IO_NODE) call mpas_io_output_field(output_obj, %s%id)</font>
<font color="black">", vtype, var_ptr->ndims);
if (var_ptr->ndims > 0) {
fortprintf(fd, " deallocate(%s%id %% array)</font>
<font color="black">", vtype, var_ptr->ndims);
if (strncmp(var_ptr->super_array, "-", 1024) != 0)
</font>
</pre>