[Dart-dev] DART/branches Revision: 11708

dart at ucar.edu
Mon Jun 12 13:46:53 MDT 2017


hendric at ucar.edu
2017-06-12 13:46:51 -0600 (Mon, 12 Jun 2017)

Now that the extra copies and ensemble members are read in the same
loop, the variables' starting index has to be assigned slightly
differently than when the variables were read in separate loops.
filter now runs bitwise when starting with inflation from file, for
both the single-file case and MPI.
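As a sketch of the new index bookkeeping (istart, iend, dart_index,
start_var, and my_copy appear in the diff below; num_vars and var_size
are illustrative stand-ins, not names from the commit):

   ! sketch only: with members and extra copies read in one loop, the
   ! offset into state_ens_handle%copies carries across the whole read
   ! instead of being reset between separate loops.
   istart = dart_index              ! position in state_ens_handle%copies
   do ivar = start_var, num_vars
      iend = istart + var_size(ivar) - 1
      ! ... read this variable into var_block and scatter it into
      ! state_ens_handle%copies(my_copy, istart:iend) ...
      istart = iend + 1
   enddo
   dart_index = istart              ! the next read continues from here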




Modified: DART/branches/rma_par_single_file/assimilation_code/modules/io/dart_time_io_mod.f90
===================================================================
--- DART/branches/rma_par_single_file/assimilation_code/modules/io/dart_time_io_mod.f90	2017-06-09 20:25:39 UTC (rev 11707)
+++ DART/branches/rma_par_single_file/assimilation_code/modules/io/dart_time_io_mod.f90	2017-06-12 19:46:51 UTC (rev 11708)
@@ -57,7 +57,6 @@
 real(digits12) :: model_time, time_array(1)
 
 integer, dimension(NF90_MAX_VAR_DIMS) :: dimIDs
-character(len=NF90_MAX_NAME)          :: varname,dimname
 character(len=256) :: file_calendar, dart_calendar
 character(len=256) :: unitstring
 
@@ -208,8 +207,7 @@
 real(digits12) :: model_time
 
 integer, dimension(NF90_MAX_VAR_DIMS) :: dimIDs
-character(len=NF90_MAX_NAME)          :: varname,dimname
-character(len=NF90_MAX_NAME)          :: dart_calendar, file_calendar
+character     (len=NF90_MAX_NAME)     :: dart_calendar, file_calendar
 
 ! this is used in many error messages below.  set it here, and
 ! don't reuse string3 here, please.

Modified: DART/branches/rma_par_single_file/assimilation_code/modules/io/direct_netcdf_mod.f90
===================================================================
--- DART/branches/rma_par_single_file/assimilation_code/modules/io/direct_netcdf_mod.f90	2017-06-09 20:25:39 UTC (rev 11707)
+++ DART/branches/rma_par_single_file/assimilation_code/modules/io/direct_netcdf_mod.f90	2017-06-12 19:46:51 UTC (rev 11708)
@@ -99,8 +99,8 @@
           num_elements_on_pe, &
           get_start_rank, &
           find_start_point, &
-          recv_variables_from_read, &
-          send_variables_from_read, &
+          wait_to_receive, &
+          send_to_waiting_task, &
           get_pe_loops
 
 ! version controlled file description for error handling, do not edit
@@ -401,7 +401,7 @@
    if (copies_read >= ens_size) exit
 
    ! what to do if a variable is larger than the memory limit?
-   start_var = 1 ! read first variable first !start_pos
+   start_var = 1 ! read first variable first from the var_block
    istart    = dart_index ! position in state_ens_handle%copies
 
    my_copy = copies_read + my_pe + 1
@@ -465,7 +465,7 @@
                      state_ens_handle%copies(ensemble_member, istart:iend ) = &
                      var_block(start_point:elm_count*task_count():task_count())
                   else ! post receive
-                     call recv_variables_from_read(state_ens_handle, sending_pe, ensemble_member, istart, iend)
+                     call wait_to_receive(state_ens_handle, sending_pe, ensemble_member, istart, iend)
                   endif
 
                endif
@@ -482,7 +482,8 @@
 
             if (query_read_copy(name_handle, my_copy)) then
 
-               call send_variables_from_read(state_ens_handle, recv_pe, start_point, elm_count, block_size, var_block)
+               call send_to_waiting_task(state_ens_handle, recv_pe, start_point, &
+                                         elm_count, block_size, var_block)
 
             endif
 
@@ -1358,7 +1359,7 @@
 !------------------------------------------------------
 !--------------------------------------------------------
 !> Send elements of variables to a processor. This routine must be called
-!> with a corresponding 'recv_variables_from_read'.
+!> with a corresponding 'wait_to_receive'.
 !> The data on the sender are non-contiguous with a stride of task_count. The
 !> start is different depending on which pe is the recv and which variables
 !> are being sent (these are calculated in the calling routine).
@@ -1365,7 +1366,7 @@
 !> The data on the receiver are contiguous (it is the %copies array)
 
 
-subroutine send_variables_from_read(state_ens_handle, recv_pe, start, elm_count, block_size, variable_block)
+subroutine send_to_waiting_task(state_ens_handle, recv_pe, start, elm_count, block_size, variable_block)
 
 type(ensemble_type), intent(in) :: state_ens_handle
 integer,             intent(in) :: recv_pe ! receiving pe
@@ -1384,16 +1385,16 @@
    ! need a contiguous array to send variables with MPI
    allocate(buffer(elm_count))
    buffer = variable_block(start:elm_count*task_count():task_count())
-   print*, 'MAP :: size -', map_pe_to_task(state_ens_handle, recv_pe), ':: size(buffer)', size(buffer)
    call send_to(map_pe_to_task(state_ens_handle, recv_pe), buffer)
    deallocate(buffer)
+
 else
 
-   call error_handler(E_ERR, 'send_variables_from_read', 'distributions other than 1 not supported')
+   call error_handler(E_ERR, 'send_to_waiting_task', 'distributions other than 1 not supported')
 
 endif
 
-end subroutine send_variables_from_read
+end subroutine send_to_waiting_task
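For reference, a sketch of how the renamed pair is meant to be used;
the sender-side lines are taken from the diff above, send_to and
map_pe_to_task are existing DART MPI utilities, and the surrounding
control flow is illustrative:

   ! sender side (owns var_block): MPI needs a contiguous buffer, so
   ! the strided elements (stride task_count) are packed before sending.
   allocate(buffer(elm_count))
   buffer = variable_block(start:elm_count*task_count():task_count())
   call send_to(map_pe_to_task(state_ens_handle, recv_pe), buffer)
   deallocate(buffer)

   ! receiver side: the destination (the %copies array) is already
   ! contiguous, so the matching call unpacks straight into it.
   call wait_to_receive(state_ens_handle, sending_pe, ensemble_member, &
                        istart, iend)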

