[Dart-dev] DART/branches Revision: 12439
dart at ucar.edu
Fri Mar 9 16:49:04 MST 2018

Author: hendric at ucar.edu
Date: 2018-03-09 16:49:04 -0700 (Fri, 09 Mar 2018)

Log Message:
adding tests for a group communicator for distributing the
ensemble mean into groups rather than on each processor.
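
For context, a minimal sketch (not code from this revision) of the pattern the
log message describes: once a group communicator exists, the ensemble mean can
be broadcast once within each group instead of being replicated on every task
in mpi_comm_world. Here mean and model_size are illustrative names; datasize
and mpi_group_comm are module variables from the diff below, and r8 is DART's
usual real kind from types_mod.

   ! sketch only: broadcast the mean from each group's task 0
   ! to the other members of that group communicator
   real(r8) :: mean(model_size)
   integer  :: ierr

   call mpi_bcast(mean, model_size, datasize, 0, mpi_group_comm, ierr)
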
Modified: DART/branches/rma_distrib/assimilation_code/modules/utilities/mpi_utilities_mod.f90
===================================================================
--- DART/branches/rma_distrib/assimilation_code/modules/utilities/mpi_utilities_mod.f90 2018-03-08 20:32:55 UTC (rev 12438)
+++ DART/branches/rma_distrib/assimilation_code/modules/utilities/mpi_utilities_mod.f90 2018-03-09 23:49:04 UTC (rev 12439)
@@ -106,7 +106,7 @@
broadcast_send, broadcast_recv, shell_execute, sleep_seconds, &
sum_across_tasks, get_dart_mpi_comm, datasize, send_minmax_to, &
get_from_fwd, get_from_mean, broadcast_minmax, broadcast_flag, &
- start_mpi_timer, read_mpi_timer, &
+ start_mpi_timer, read_mpi_timer, create_groups, &
all_reduce_min_max ! deprecated, replace by broadcast_minmax
! version controlled file description for error handling, do not edit
@@ -124,6 +124,13 @@
logical :: given_communicator = .false. ! if communicator passed in, use it
+! group variables
+integer, allocatable :: group_members(:)
+integer :: group_all
+integer :: mpi_group_comm
+integer :: subgroup !< subgroup for the grid
+integer :: group_rank !< rank within group
+
character(len = 256) :: errstring, errstring1
! for broadcasts, pack small messages into larger ones. remember that the
@@ -169,15 +176,18 @@
logical :: make_copy_before_sendrecv = .false. ! should not be needed; .true. is very slow
logical :: make_copy_before_broadcast = .false. ! should not be needed; .true. is very slow
+integer :: group_size = 1
+
! NAMELIST: change the following from .false. to .true. to enable
! the reading of this namelist. This is the only place you need
! to make this change.
-logical :: use_namelist = .false.
+logical :: use_namelist = .true.
namelist /mpi_utilities_nml/ reverse_task_layout, all_tasks_print, &
verbose, async2_verbose, async4_verbose, &
shell_name, separate_node_sync, create_local_comm, &
- make_copy_before_sendrecv, make_copy_before_broadcast
+ make_copy_before_sendrecv, make_copy_before_broadcast, &
+ group_size
contains
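
With use_namelist now defaulting to .true., group_size can be set at run time
through mpi_utilities_nml. A minimal entry might look like the following
sketch, assuming the other entries keep their defaults (DART namelists are
typically read from input.nml):

   &mpi_utilities_nml
      group_size = 4
      /
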
@@ -316,6 +326,10 @@
call error_handler(E_ERR,'initialize_mpi_utilities', errstring, source, revision, revdate)
endif
+if (myrank == 0) then
+ print*, 'GROUP SIZE ', group_size
+endif
+
! tell the utilities module what task number we are.
call set_tasknum(myrank)
@@ -446,6 +460,8 @@
dofinalize = .false.
endif
+deallocate(group_members) ! this is module global
+
! Normally we shut down MPI here. If the user tells us not to shut down MPI
! they must call this routine from their own code before exiting.
if (dofinalize) then
@@ -459,6 +475,7 @@
if (verbose) write(*,*) "PE", myrank, ": finalize_mpi_utilities called without shutting down MPI"
endif
+
! NO I/O after calling MPI_Finalize. on some MPI implementations
! this can hang the job.
@@ -1975,6 +1992,63 @@
!-----------------------------------------------------------------------------
+subroutine create_groups
+
+integer :: i, ierr ! all MPI errors are fatal anyway
+
+allocate(group_members(group_size)) ! this is module global
+
+call mpi_comm_group(mpi_comm_world, group_all, ierr) ! get the world group from mpi_comm_world
+call build_my_group(group_size, group_members) ! create a list of processors in the grid group
+call mpi_group_incl(group_all, group_size, group_members, subgroup, ierr)
+call mpi_comm_create(mpi_comm_world, subgroup, mpi_group_comm, ierr)
+call mpi_comm_rank(mpi_group_comm, group_rank, ierr) ! rank within group
+
+! cross-check the group size and rank through the group interface as well
+call mpi_group_size(subgroup, group_size, ierr)
+call mpi_group_rank(subgroup, group_rank, ierr)
+
+call MPI_Barrier(MPI_COMM_WORLD, ierr)
+do i = 0, (task_count()-1)
+ call MPI_Barrier(MPI_COMM_WORLD, ierr)
+ if(my_task_id() == i) then
+ write(*,'(''WORLD RANK/SIZE:'',I2,''/'',I2,'' GROUP RANK/SIZE:'',I2,''/'',I2)') my_task_id(), task_count(), group_rank, group_size
+ endif
+enddo
+
+end subroutine create_groups
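
The build_my_group() routine called above is not included in the posted
portion of the diff. A hypothetical sketch of what it might do, assuming each
group is a contiguous block of group_size consecutive world ranks and that it
lives inside mpi_utilities_mod where my_task_id() is available:

   subroutine build_my_group(group_size, group_members)

   ! HYPOTHETICAL sketch; the actual build_my_group is not shown here.
   ! Assumes contiguous blocks: with group_size = 2 and 4 tasks,
   ! tasks 0,1 form one group and tasks 2,3 another.

   integer, intent(in)  :: group_size
   integer, intent(out) :: group_members(group_size)

   integer :: i, first_member

   ! first world rank in this task's block (integer division truncates)
   first_member = (my_task_id() / group_size) * group_size

   do i = 1, group_size
      group_members(i) = first_member + (i - 1)
   enddo

   end subroutine build_my_group

Under that assumption, task 2 of 4 with group_size = 2 would print
WORLD RANK/SIZE: 2/ 4 GROUP RANK/SIZE: 0/ 2 from the diagnostic loop above.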