4 !#define mpi_x_comm_size(i,j,k) Mpi_Comm_Size ( i,j,k ) ; write(0,*) __LINE__
5 #define mpi_x_comm_size(i,j,k) Mpi_Comm_Size ( i,j,k )
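! mpi_x_comm_size simply forwards to MPI_Comm_size; the commented-out
! variant above additionally prints the current source line for debugging.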
7 MODULE module_wrf_quilt
10 ! This module contains WRF-specific I/O quilt routines called by both
11 ! client (compute) and server (I/O quilt) tasks. I/O quilt servers are
12 ! a run-time optimization that allows I/O operations, executed on the I/O
13 ! quilt server tasks, to be overlapped with useful computation, executed on
14 ! the compute tasks. Since I/O operations are often quite slow compared to
15 ! computation, this performance optimization can increase parallel
16 ! efficiency.
18 ! Currently, one group of I/O servers can be specified at run-time. Namelist
19 ! variable "nio_tasks_per_group" is used to specify the number of I/O server
20 ! tasks in this group. In most cases, parallel efficiency is optimized when
21 ! the minimum number of I/O server tasks is used. If memory needed to cache
22 ! I/O operations fits on a single processor, then set nio_tasks_per_group=1.
23 ! If not, increase the number of I/O server tasks until I/O operations fit in
24 ! memory. In the future, multiple groups of I/O server tasks will be
25 ! supported. The number of groups will be specified by namelist variable
26 ! "nio_groups". For now, nio_groups must be set to 1. Currently, I/O servers
27 ! only support overlap of output operations with computation. Also, only I/O
28 ! packages that do not support native parallel I/O may be used with I/O server
29 ! tasks. This excludes PHDF5 and MCEL.
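!
! For illustration, a minimal namelist sketch (record and variable names as
! used in WRF's namelist.input; values here are only an example):
!
!   &namelist_quilt
!    nio_tasks_per_group = 2,
!    nio_groups          = 1,
!   /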
31 ! In this module, the I/O quilt server tasks call package-dependent
32 ! WRF-specific I/O interfaces to perform I/O operations requested by the
33 ! client (compute) tasks. All of these calls occur inside subroutine
36 ! The client (compute) tasks call package-independent WRF-specific "quilt I/O"
37 ! interfaces that send requests to the I/O quilt servers. All of these calls
38 ! are made from module_io.F.
40 ! These routines have the same names and (roughly) the same arguments as those
41 ! specified in the WRF I/O API except that:
42 ! - "Quilt I/O" routines defined in this file and called by routines in
43 ! module_io.F have the "wrf_quilt_" prefix.
44 ! - Package-dependent routines called from routines in this file are defined
45 ! in the external I/O packages and have the "ext_" prefix.
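!
! For example, a client-side call such as wrf_quilt_put_dom_ti_real (in
! module_io.F) is serviced on the I/O servers by the matching package
! routine, e.g. ext_ncd_put_dom_ti_real for netCDF.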
47 ! Both client (compute) and server tasks call routine init_module_wrf_quilt()
48 ! which then calls setup_quilt_servers() to determine which tasks are compute
49 ! tasks and which are server tasks. Before the end of init_module_wrf_quilt()
50 ! server tasks call routine quilt() and remain there for the rest of the model
51 ! run. Compute tasks return from init_module_wrf_quilt() to perform model
52 ! computations.
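!
! Schematically (a sketch, not literal code):
!
!   CALL init_module_wrf_quilt          ! called by every task
!     -> CALL setup_quilt_servers(...)  ! decide compute vs. server role
!     -> server tasks: CALL quilt()     ! stay here until shutdown
!     -> compute tasks: return          ! go run the model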
54 ! See http://www.mmm.ucar.edu/wrf/WG2/software_2.0/IOAPI.doc for the latest
55 ! version of the WRF I/O API. This document includes detailed descriptions
56 ! of subroutines and their arguments that are not duplicated here.
59 USE module_internal_header_util
62 INTEGER, PARAMETER :: int_num_handles = 99
63 LOGICAL, DIMENSION(int_num_handles) :: okay_to_write, int_handle_in_use, okay_to_commit
64 INTEGER, DIMENSION(int_num_handles) :: int_num_bytes_to_write, io_form
65 REAL, POINTER,SAVE :: int_local_output_buffer(:)
66 INTEGER, SAVE :: int_local_output_cursor
67 LOGICAL :: quilting_enabled
68 LOGICAL :: disable_quilt = .FALSE.
69 INTEGER :: prev_server_for_handle = -1
70 INTEGER :: server_for_handle(int_num_handles)
71 INTEGER :: reduced(2), reduced_dummy(2)
72 LOGICAL, EXTERNAL :: wrf_dm_on_monitor
75 INTEGER :: mpi_comm_local
76 LOGICAL :: compute_node
77 LOGICAL :: compute_group_master(100)
78 INTEGER :: mpi_comm_io_groups(100)
79 INTEGER :: nio_tasks_in_group
80 INTEGER :: nio_tasks_per_group
81 INTEGER :: ncompute_tasks
85 INTEGER, PARAMETER :: onebyte = 1
86 INTEGER comm_io_servers, iserver, hdrbufsize, obufsize
87 INTEGER, DIMENSION(4096) :: hdrbuf
88 INTEGER, DIMENSION(int_num_handles) :: handle
93 #if defined(DM_PARALLEL) && !defined( STUBMPI )
94 INTEGER FUNCTION get_server_id ( dhandle )
96 ! Logic on the client side to know which I/O server
97 ! group to send to. If the unit corresponds to a file that's
98 ! already been opened, then we have no choice but to send the
99 ! data to that group again, regardless of whether there are
100 ! other server-groups. If it's a new file, we can choose a new
101 ! server group. I.e. opening a file locks it onto a server
102 ! group. Closing the file unlocks it.
105 INTEGER, INTENT(IN) :: dhandle
106 IF ( dhandle .GE. 1 .AND. dhandle .LE. int_num_handles ) THEN
107 IF ( server_for_handle ( dhandle ) .GE. 1 ) THEN
108 get_server_id = server_for_handle ( dhandle )
110 prev_server_for_handle = mod ( prev_server_for_handle + 1 , nio_groups )
111 server_for_handle( dhandle ) = prev_server_for_handle+1
112 get_server_id = prev_server_for_handle+1
115 CALL wrf_message('module_io_quilt: get_server_id bad dhandle' )
117 END FUNCTION get_server_id
120 SUBROUTINE set_server_id ( dhandle, value )
122 INTEGER, INTENT(IN) :: dhandle, value
123 IF ( dhandle .GE. 1 .AND. dhandle .LE. int_num_handles ) THEN
124 server_for_handle(dhandle) = value
126 CALL wrf_message('module_io_quilt: set_server_id bad dhandle' )
128 END SUBROUTINE set_server_id
130 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
131 SUBROUTINE int_get_fresh_handle( retval )
133 ! Find an unused "client file handle" and return it in retval.
134 ! The "client file handle" is used to remember how a file was opened
135 ! so clients do not need to ask the I/O quilt servers for this information.
136 ! It is also used as a file identifier in communications with the I/O
137 ! quilt servers.
139 ! Note that client tasks know nothing about package-specific handles.
140 ! Only the I/O quilt servers know about them.
144 DO i = 1, int_num_handles
145 IF ( .NOT. int_handle_in_use(i) ) THEN
151 IF ( retval < 0 ) THEN
152 CALL wrf_error_fatal("frame/module_io_quilt.F: int_get_fresh_handle() can not")
154 int_handle_in_use(i) = .TRUE.
155 NULLIFY ( int_local_output_buffer )
156 END SUBROUTINE int_get_fresh_handle
158 SUBROUTINE setup_quilt_servers ( nio_tasks_per_group, &
167 ! Both client (compute) and server tasks call this routine to
168 ! determine which tasks are compute tasks and which are I/O server tasks.
170 ! Module variables MPI_COMM_LOCAL and MPI_COMM_IO_GROUPS(:) are set up to
171 ! contain MPI communicators as follows:
173 ! MPI_COMM_LOCAL is the Communicator for the local groups of tasks. For the
174 ! compute tasks it is the group of compute tasks; for a server group it is the
175 ! communicator of tasks in the server group.
177 ! Elements of MPI_COMM_IO_GROUPS are communicators that each contain one or
178 ! more compute tasks and a single I/O server assigned to those compute tasks.
179 ! The I/O server task is always the last task in these communicators.
180 ! On a compute task, which has a single associated server task in each of the server
181 ! groups, MPI_COMM_IO_GROUPS is treated as an array; each element corresponds
182 ! to a different server group.
183 ! On a server task only the first element of MPI_COMM_IO_GROUPS is used
184 ! because each server task is part of only one io_group.
186 ! I/O server tasks in each I/O server group are divided among compute tasks as
187 ! evenly as possible.
189 ! When multiple I/O server groups are used, each must have the same number of
190 ! tasks. When the total number of extra I/O tasks does not divide evenly by
191 ! the number of I/O server groups requested, the remainder tasks are not used.
194 ! For example, communicator membership for 18 tasks with nio_groups=2 and
195 ! nio_tasks_per_group=3 is shown below:
198 ! Membership for MPI_COMM_LOCAL communicators:
199 ! COMPUTE TASKS: 0 1 2 3 4 5 6 7 8 9 10 11
200 ! 1ST I/O SERVER GROUP: 12 13 14
201 ! 2ND I/O SERVER GROUP: 15 16 17
203 ! Membership for MPI_COMM_IO_GROUPS(1):
204 ! COMPUTE TASKS 0, 3, 6, 9: 0 3 6 9 12
205 ! COMPUTE TASKS 1, 4, 7,10: 1 4 7 10 13
206 ! COMPUTE TASKS 2, 5, 8,11: 2 5 8 11 14
207 ! I/O SERVER TASK 12: 0 3 6 9 12
208 ! I/O SERVER TASK 13: 1 4 7 10 13
209 ! I/O SERVER TASK 14: 2 5 8 11 14
210 ! I/O SERVER TASK 15: 0 3 6 9 15
211 ! I/O SERVER TASK 16: 1 4 7 10 16
212 ! I/O SERVER TASK 17: 2 5 8 11 17
214 ! Membership for MPI_COMM_IO_GROUPS(2):
215 ! COMPUTE TASKS 0, 3, 6, 9: 0 3 6 9 15
216 ! COMPUTE TASKS 1, 4, 7,10: 1 4 7 10 16
217 ! COMPUTE TASKS 2, 5, 8,11: 2 5 8 11 17
218 ! I/O SERVER TASK 12: ** not used **
219 ! I/O SERVER TASK 13: ** not used **
220 ! I/O SERVER TASK 14: ** not used **
221 ! I/O SERVER TASK 15: ** not used **
222 ! I/O SERVER TASK 16: ** not used **
223 ! I/O SERVER TASK 17: ** not used **
228 USE module_dm, ONLY : compute_mesh
232 INTEGER, INTENT(IN) :: nio_tasks_per_group, mytask, ntasks, &
233 n_groups_arg, mpi_comm_wrld
234 INTEGER, INTENT(OUT) :: mpi_comm_local, nio
235 INTEGER, DIMENSION(100), INTENT(OUT) :: mpi_comm_io_groups
237 INTEGER :: i, j, ii, comdup, ierr, niotasks, n_groups, iisize
238 INTEGER, DIMENSION(ntasks) :: icolor
241 INTEGER :: io_form_setting
243 INTEGER :: k, m, nprocx, nprocy
244 LOGICAL :: reorder_mesh
246 !check the namelist and make sure there are no output forms specified
247 !that cannot be quilted
248 CALL nl_get_io_form_history(1, io_form_setting) ; call sokay( 'history', io_form_setting )
249 CALL nl_get_io_form_restart(1, io_form_setting) ; call sokay( 'restart', io_form_setting )
250 CALL nl_get_io_form_auxhist1(1, io_form_setting) ; call sokay( 'auxhist1', io_form_setting )
251 CALL nl_get_io_form_auxhist2(1, io_form_setting) ; call sokay( 'auxhist2', io_form_setting )
252 CALL nl_get_io_form_auxhist3(1, io_form_setting) ; call sokay( 'auxhist3', io_form_setting )
253 CALL nl_get_io_form_auxhist4(1, io_form_setting) ; call sokay( 'auxhist4', io_form_setting )
254 CALL nl_get_io_form_auxhist5(1, io_form_setting) ; call sokay( 'auxhist5', io_form_setting )
255 CALL nl_get_io_form_auxhist6(1, io_form_setting) ; call sokay( 'auxhist6', io_form_setting )
256 CALL nl_get_io_form_auxhist7(1, io_form_setting) ; call sokay( 'auxhist7', io_form_setting )
257 CALL nl_get_io_form_auxhist8(1, io_form_setting) ; call sokay( 'auxhist8', io_form_setting )
258 CALL nl_get_io_form_auxhist9(1, io_form_setting) ; call sokay( 'auxhist9', io_form_setting )
259 CALL nl_get_io_form_auxhist10(1, io_form_setting) ; call sokay( 'auxhist10', io_form_setting )
260 CALL nl_get_io_form_auxhist11(1, io_form_setting) ; call sokay( 'auxhist11', io_form_setting )
262 n_groups = n_groups_arg
263 IF ( n_groups .LT. 1 ) n_groups = 1
265 compute_node = .TRUE.
268 ! nio is the number of I/O tasks per group. If there aren't enough tasks to satisfy
269 ! the requirement that there be at least as many compute tasks as I/O tasks in
270 ! each group, then just print a warning and disable quilting.
273 nio = nio_tasks_per_group
274 ncompute_tasks = ntasks - (nio * n_groups)
275 IF ( ncompute_tasks .LT. nio ) THEN
276 WRITE(mess,'("Not enough tasks to have ",I3," groups of ",I3," I/O tasks. No quilting.")')n_groups,nio
278 ncompute_tasks = ntasks
280 WRITE(mess,'("Quilting with ",I3," groups of ",I3," I/O tasks.")')n_groups,nio
282 CALL wrf_message(mess)
284 IF ( nio .LT. 0 ) THEN
287 IF ( nio .EQ. 0 ) THEN
288 quilting_enabled = .FALSE.
289 mpi_comm_local = mpi_comm_wrld
290 mpi_comm_io_groups = mpi_comm_wrld
293 quilting_enabled = .TRUE.
295 ! First construct the local communicators
296 ! prepare to split the communicator by designating compute-only tasks
297 DO i = 1, ncompute_tasks
301 ! and designating the groups of i/o tasks
302 DO i = ncompute_tasks+1, ntasks, nio
308 CALL MPI_Comm_dup(mpi_comm_wrld,comdup,ierr)
309 CALL MPI_Comm_split(comdup,icolor(mytask+1),mytask,mpi_comm_local,ierr)
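!
! Illustration for the 18-task example in the header comment: the (elided)
! loops above assign icolor = 0 to compute tasks 0-11, icolor = 1 to server
! tasks 12-14 and icolor = 2 to server tasks 15-17, so the MPI_Comm_split
! call yields the three MPI_COMM_LOCAL groups shown there.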
311 ! Now construct the communicators for the io_groups
312 CALL nl_get_reorder_mesh(1,reorder_mesh)
313 IF ( reorder_mesh ) THEN
314 reorder_mesh = .FALSE.
315 CALL nl_set_reorder_mesh(1,reorder_mesh)
316 CALL wrf_message('Warning: reorder_mesh does not work with quilting. Disabled reorder_mesh.')
318 ! assign the compute tasks to the i/o tasks in full rows
319 CALL compute_mesh( ncompute_tasks, nprocx, nprocy )
321 nio = min(nio,nprocy)
322 m = mod(nprocy,nio) ! divide up remainder, 1 row per, until gone
325 DO k = 1,nprocy/nio+min(m,1)
334 ! ... and add the io servers as the last task in each group
336 ! TBH: each I/O group will contain only one I/O server
337 DO i = ncompute_tasks+1,ntasks
338 icolor(i) = MPI_UNDEFINED
341 DO i = ncompute_tasks+(j-1)*nio+1,ncompute_tasks+j*nio
345 CALL MPI_Comm_dup(mpi_comm_wrld,comdup,ierr)
346 CALL MPI_Comm_split(comdup,icolor(mytask+1),mytask, &
347 mpi_comm_io_groups(j),ierr)
350 compute_group_master = .FALSE.
351 compute_node = .FALSE.
355 IF ( mytask .LT. ncompute_tasks .OR. & ! I am a compute task
356 (ncompute_tasks+(j-1)*nio .LE. mytask .AND. mytask .LT. ncompute_tasks+j*nio) & ! I am the I/O server for this group
359 CALL MPI_Comm_Size( mpi_comm_io_groups(j) , iisize, ierr )
360 ! Get the rank of this compute task in the compute+io
361 ! communicator to which it belongs
362 CALL MPI_Comm_Rank( mpi_comm_io_groups(j) , me , ierr )
364 ! If I am an I/O server for this group then make that group's
365 ! communicator the first element in the mpi_comm_io_groups array
366 ! (I will ignore all of the other elements).
367 IF ( me+1 .EQ. iisize ) THEN
368 mpi_comm_io_groups(1) = mpi_comm_io_groups(j)
370 compute_node = .TRUE.
371 ! If I am a compute task, check whether I am the member of my
372 ! group that will communicate things that should be sent just
373 ! once (e.g. commands) to the IO server of my group.
374 compute_group_master(j) = (me .EQ. 0)
376 ! IF( compute_group_master(j) ) WRITE(*,*) mytask,': ARPDBG : I will talk to IO server in group ',j
381 END SUBROUTINE setup_quilt_servers
383 SUBROUTINE sokay ( stream, io_form )
384 USE module_state_description
389 SELECT CASE (io_form)
413 WRITE(mess,*)' An output format has been specified that is incompatible with quilting: io_form: ',io_form,' ',TRIM(stream)
414 CALL wrf_error_fatal(mess)
420 ! I/O server tasks call this routine and remain in it for the rest of the
421 ! model run. I/O servers receive I/O requests from compute tasks and
422 ! perform requested I/O operations by calling package-dependent WRF-specific
423 ! I/O interfaces. Requests are sent in the form of "data headers". Each
424 ! request has a unique "header" message associated with it. For requests that
425 ! contain large amounts of data, the data is appended to the header. See
426 ! file module_internal_header_util.F for detailed descriptions of all
427 ! data headers.
429 ! We wish to be able to link to different packages depending on whether
430 ! the I/O is restart, initial, history, or boundary.
432 USE module_state_description
433 USE module_quilt_outbuf_ops
436 #include "intio_tags.h"
437 #include "wrf_io_flags.h"
438 INTEGER itag, ninbuf, ntasks_io_group, ntasks_local_group, mytask_local, ierr
440 INTEGER mytask_io_group
441 INTEGER :: nout_set = 0
442 INTEGER :: obufsize, bigbufsize, chunksize, sz
443 REAL, DIMENSION(1) :: dummy
444 INTEGER, ALLOCATABLE, DIMENSION(:) :: obuf, bigbuf
445 REAL, ALLOCATABLE, DIMENSION(:) :: RDATA
446 INTEGER, ALLOCATABLE, DIMENSION(:) :: IDATA
447 CHARACTER (LEN=512) :: CDATA
448 CHARACTER (LEN=80) :: fname
449 INTEGER icurs, hdrbufsize, itypesize, ftypesize, rtypesize, Status, fstat, io_form_arg
450 INTEGER :: DataHandle, FieldType, Comm, IOComm, DomainDesc, code, Count
451 INTEGER, DIMENSION(3) :: DomainStart , DomainEnd , MemoryStart , MemoryEnd , PatchStart , PatchEnd
452 INTEGER :: dummybuf(1)
453 INTEGER :: num_noops, num_commit_messages, num_field_training_msgs, hdr_tag
454 CHARACTER (len=256) :: DateStr , Element, VarName, MemoryOrder , Stagger , DimNames(3), FileName, SysDepInfo, mess
455 INTEGER, EXTERNAL :: use_package
456 LOGICAL :: stored_write_record, retval
457 INTEGER iii, jjj, vid, CC, DD
460 character*120 sysline
462 ! If we've been built with PNETCDF_QUILT defined then we use parallel I/O
463 ! within the group of I/O servers rather than gathering the data onto the
464 ! root I/O server. Unfortunately, this approach means that we can no longer
465 ! select different I/O layers for use with quilting at run time. ARPDBG.
466 ! This code is sufficiently different that it is kept in the separate
467 ! quilt_pnc() routine.
473 ! Call ext_pkg_ioinit() routines to initialize I/O packages.
476 CALL ext_ncd_ioinit( SysDepInfo, ierr)
479 CALL ext_int_ioinit( SysDepInfo, ierr )
482 CALL ext_xxx_ioinit( SysDepInfo, ierr)
485 CALL ext_yyy_ioinit( SysDepInfo, ierr)
488 CALL ext_zzz_ioinit( SysDepInfo, ierr)
491 CALL ext_gr1_ioinit( SysDepInfo, ierr)
494 CALL ext_gr2_ioinit( SysDepInfo, ierr)
497 okay_to_commit = .false.
498 stored_write_record = .false.
500 ! get info. about the I/O server group that this I/O server task
501 ! belongs to.
502 ! Last task in this I/O server group is the I/O server "root"
503 ! The I/O server "root" actually writes data to disk
504 ! TBH: WARNING: This is also implicit in the call to collect_on_comm().
505 CALL mpi_x_comm_size( mpi_comm_io_groups(1), ntasks_io_group, ierr )
506 CALL MPI_COMM_RANK( mpi_comm_io_groups(1), mytask_io_group, ierr )
507 CALL mpi_x_comm_size( mpi_comm_local, ntasks_local_group, ierr )
508 CALL MPI_COMM_RANK( mpi_comm_local, mytask_local, ierr )
510 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
511 IF ( itypesize <= 0 ) THEN
512 CALL wrf_error_fatal("external/RSL/module_dm.F: quilt: type size <= 0 invalid")
515 ! Work out whether this I/O server processor has one fewer associated compute proc than
516 ! the most any processor has. This can happen when the number of I/O tasks does not evenly divide
517 ! the number of compute tasks. This is needed to keep the I/O tasks synchronized on the
518 ! same message when they start communicating to stitch together an output.
520 ! Compute processes associated with this task:
521 CC = ntasks_io_group - 1
522 ! Number of compute tasks per I/O task (less remainder)
523 DD = ncompute_tasks / ntasks_local_group
525 ! CC-DD is 1 on servers with the maximum number of compute clients and
526 ! 0 on servers with one less than the maximum.
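!
! Worked example (illustrative): with ncompute_tasks = 14 and 4 servers in
! this group, DD = 14/4 = 3; a server with 4 compute clients has CC = 4 and
! CC-DD = 1, while a server with 3 clients has CC = 3 and CC-DD = 0.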
529 ! infinite loop until shutdown message received
530 ! This is the main request-handling loop. I/O quilt servers stay in this loop
531 ! until the model run ends.
533 DO WHILE (.TRUE.) ! {
536 ! Each I/O server receives requests from its compute tasks. Each request
537 ! is contained in a data header (see module_internal_header_util.F for
538 ! detailed descriptions of data headers).
539 ! Each request is sent in two phases. First, sizes of all messages that
540 ! will be sent from the compute tasks to this I/O server are summed on the
541 ! I/O server via MPI_reduce(). The I/O server then allocates buffer "obuf"
542 ! and receives concatenated messages from the compute tasks in it via the
543 ! call to collect_on_comm(). Note that "sizes" are generally expressed in
544 ! *bytes* in this code so conversion to "count" (number of Fortran words) is
545 ! required for Fortran indexing and MPI calls.
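!
! Sketch of the two-phase exchange as coded below (server side):
!
!   CALL MPI_Reduce( ... )                        ! sum of client message sizes -> obufsize (bytes)
!   ALLOCATE( obuf( (obufsize+1)/itypesize ) )    ! convert bytes to integer words
!   CALL collect_on_comm( ... )                   ! concatenated client messages land in obuf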
547 ! wait for info from compute tasks in the I/O group that we're ready to rock
548 ! obufsize will contain number of *bytes*
550 ! first element of reduced is obufsize, second is DataHandle
551 ! if needed (currently needed only for ioclose).
553 CALL MPI_Reduce( reduced_dummy, reduced, 2, MPI_INTEGER, &
554 MPI_SUM, mytask_io_group, &
555 mpi_comm_io_groups(1), ierr )
556 obufsize = reduced(1)
557 !CALL end_timing("MPI_Reduce at top of forever loop")
558 !JMDEBUGwrite(0,*)'obufsize = ',obufsize
559 ! Negative obufsize will trigger I/O server exit.
560 IF ( obufsize .LT. 0 ) THEN
561 IF ( obufsize .EQ. -100 ) THEN ! magic number
563 CALL ext_ncd_ioexit( Status )
566 CALL ext_int_ioexit( Status )
569 CALL ext_xxx_ioexit( Status )
572 CALL ext_yyy_ioexit( Status )
575 CALL ext_zzz_ioexit( Status )
578 CALL ext_gr1_ioexit( Status )
581 CALL ext_gr2_ioexit( Status )
583 CALL wrf_message ( 'I/O QUILT SERVERS DONE' )
584 CALL mpi_finalize(ierr)
587 WRITE(mess,*)'Possible 32-bit overflow on output server. Try larger nio_tasks_per_group in namelist.'
588 CALL wrf_error_fatal(mess)
592 ! CALL start_timing()
593 ! Obufsize of zero signals a close
595 ! Allocate buffer obuf to be big enough for the data the compute tasks
596 ! will send. Note: obufsize is in *bytes*, so we need to pare this
597 ! down, since the buffer is INTEGER.
598 IF ( obufsize .GT. 0 ) THEN
599 ALLOCATE( obuf( (obufsize+1)/itypesize ) )
601 ! let's roll; get the data from the compute procs and put in obuf
602 CALL collect_on_comm_debug(__FILE__,__LINE__, mpi_comm_io_groups(1), &
606 ! CALL end_timing( "quilt on server: collecting data from compute procs" )
608 ! Necessarily, the compute processes send the ioclose signal,
609 ! if there is one, after the iosync, which means they
610 ! would stall on the ioclose message waiting for the quilt
611 ! processes if we handled it the way other messages are collected,
612 ! using collect_on_comm. This special path avoids that, but we need
613 ! a special signal (obufsize zero) and the DataHandle of the file
614 ! to be closed. That handle is sent as the second
615 ! word of the io_close message received by the MPI_Reduce above.
616 ! Then a header representing the ioclose message is constructed
617 ! here and handled below as if it were received from the
618 ! compute processes. The clients (compute processes) must be
619 ! careful to send this correctly (one compute process sends the actual
620 ! handle and everyone else sends a zero, so the result sums to
621 ! the value of the handle).
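!
! Illustration (hypothetical numbers): for DataHandle 7 with 4 clients,
! client 0 contributes 7 and clients 1-3 contribute 0, so the MPI_SUM
! reduce above delivers 7 in reduced(2).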
623 ALLOCATE( obuf( 4096 ) )
624 ! DataHandle is provided as second element of reduced
625 CALL int_gen_handle_header( obuf, obufsize, itypesize, &
626 reduced(2) , int_ioclose )
629 !write(0,*)'calling init_store_piece_of_field'
630 ! Now all messages received from the compute clients are stored in
631 ! obuf. Scan through obuf and extract headers and field data and store in
632 ! internal buffers. The scan is done twice, first to determine sizes of
633 ! internal buffers required for storage of headers and fields and second to
634 ! actually store the headers and fields. This bit of code does not do the
635 ! "quilting" (assembly of patches into full domains). For each field, it
636 ! simply concatenates all received patches for the field into a separate
637 ! internal buffer (i.e. one buffer per field). Quilting is done later by
638 ! routine store_patch_in_outbuf().
639 CALL init_store_piece_of_field
640 CALL mpi_type_size ( MPI_INTEGER , itypesize , ierr )
641 !write(0,*)'mpi_type_size returns ', itypesize
642 ! Scan obuf the first time to calculate the size of the buffer required for
643 ! each field. Calls to add_to_bufsize_for_field() accumulate sizes.
647 num_commit_messages = 0
648 num_field_training_msgs = 0
649 DO WHILE ( icurs .lt. obufsize ) ! {
650 hdr_tag = get_hdr_tag( obuf ( icurs / itypesize ) )
651 SELECT CASE ( hdr_tag )
653 CALL int_get_write_field_header ( obuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize, &
654 DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
655 DomainDesc , MemoryOrder , Stagger , DimNames , &
656 DomainStart , DomainEnd , &
657 MemoryStart , MemoryEnd , &
658 PatchStart , PatchEnd )
659 chunksize = (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
660 (PatchEnd(3)-PatchStart(3)+1)*ftypesize
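! e.g. (illustrative) a 10 x 10 x 5 patch of default REALs gives
! chunksize = 500 * ftypesize = 2000 bytes when ftypesize is 4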
662 IF ( DomainDesc .EQ. 333933 ) THEN ! Training write, only one per group of tasks
663 IF ( num_field_training_msgs .EQ. 0 ) THEN
664 call add_to_bufsize_for_field( VarName, hdrbufsize )
665 !write(0,*) 'X-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
667 num_field_training_msgs = num_field_training_msgs + 1
669 call add_to_bufsize_for_field( VarName, hdrbufsize )
670 !write(0,*) 'X-2a', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
672 icurs = icurs + hdrbufsize
674 !write(0,*) 'X-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
676 ! If this is a real write (i.e. not a training write), accumulate
677 ! buffersize for this field.
678 IF ( DomainDesc .NE. 333933 ) THEN ! magic number
679 !write(0,*) 'X-1a', chunksize, TRIM(VarName)
680 call add_to_bufsize_for_field( VarName, chunksize )
681 icurs = icurs + chunksize
683 CASE ( int_open_for_write_commit ) ! only one per group of tasks
684 hdrbufsize = obuf(icurs/itypesize)
685 IF (num_commit_messages.EQ.0) THEN
686 call add_to_bufsize_for_field( 'COMMIT', hdrbufsize )
688 num_commit_messages = num_commit_messages + 1
689 icurs = icurs + hdrbufsize
691 hdrbufsize = obuf(icurs/itypesize)
693 ! This logic and the logic in the loop below are used to determine whether
694 ! to let a noop record sent by the compute processes go
695 ! through. The purpose is to make sure that the communications between this
696 ! server and the other servers in this quilt group stay synchronized in
697 ! the collection loop below, even when the servers are serving different
698 ! numbers of clients. Here are some conditions:
700 ! 1. The number of compute clients served will not differ by more than 1
701 ! 2. The servers with +1 number of compute clients begin with task 0
702 ! of mpi_comm_local, the communicator shared by this group of servers
704 ! 3. For each collective field or metadata output from the compute tasks,
705 ! there will be one record sent to the associated i/o server task. The
706 ! i/o server task collects these records and stores them contiguously
707 ! in a buffer (obuf) using collect_on_comm above. Thus, obuf on this
708 ! server task will contain one record from each associated compute
709 ! task.
711 ! 4. In the case of replicated output from the compute tasks
712 ! (e.g. put_dom_ti records and control records like
713 ! open_for_write_commit type records), compute task 0 is the only
714 ! one that sends the record. The other compute tasks send noop
715 ! records. Thus, obuf on server task zero will contain the output
716 ! record from task 0 followed by noop records from the rest of the
717 ! compute tasks associated with task 0. Obuf on the other server
718 ! tasks will contain nothing but noop records.
720 ! 5. The logic below will not allow any noop records from server task 0.
721 ! It allows only one noop record from each of the other server tasks
722 ! in the i/o group. This way, for replicated output, when the records
723 ! are collected on one server task below, using collect_on_comm on
724 ! mpi_comm_local, each task will provide exactly one record for each
725 ! call to collect_on_comm: 1 bona fide output record from server task
726 ! 0 and noops from the rest.
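!
! Illustration (hypothetical): two servers with three clients each and one
! replicated record. Server 0 collects [record, noop, noop] and lets only
! the record through; server 1 collects [noop, noop, noop] and lets exactly
! one noop through, so each collect_on_comm over mpi_comm_local receives
! one record per server task.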
728 IF ((hdr_tag.EQ.int_noop.AND.mytask_local.NE.0.AND.num_noops.LE.0) &
729 .OR.hdr_tag.NE.int_noop) THEN
730 write(VarName,'(I5.5)')vid
731 !write(0,*) 'X-2', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
732 call add_to_bufsize_for_field( VarName, hdrbufsize )
735 IF ( hdr_tag .EQ. int_noop ) num_noops = num_noops + 1
736 icurs = icurs + hdrbufsize
739 ! Store the headers and field data in internal buffers. The first call to
740 ! store_piece_of_field() allocates internal buffers using sizes computed by
741 ! calls to add_to_bufsize_for_field().
745 num_commit_messages = 0
746 num_field_training_msgs = 0
747 DO WHILE ( icurs .lt. obufsize ) !{
748 !write(0,*) 'A icurs ', icurs, ' obufsize ', obufsize
749 hdr_tag = get_hdr_tag( obuf ( icurs / itypesize ) )
750 SELECT CASE ( hdr_tag )
752 CALL int_get_write_field_header ( obuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize, &
753 DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
754 DomainDesc , MemoryOrder , Stagger , DimNames , &
755 DomainStart , DomainEnd , &
756 MemoryStart , MemoryEnd , &
757 PatchStart , PatchEnd )
758 chunksize = (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
759 (PatchEnd(3)-PatchStart(3)+1)*ftypesize
761 IF ( DomainDesc .EQ. 333933 ) THEN ! Training write, only one per group of tasks
762 IF ( num_field_training_msgs .EQ. 0 ) THEN
763 call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
764 !write(0,*) 'A-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
766 num_field_training_msgs = num_field_training_msgs + 1
768 call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
769 !write(0,*) 'A-2a', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
771 icurs = icurs + hdrbufsize
772 ! If this is a real write (i.e. not a training write), store
773 ! this piece of this field.
774 IF ( DomainDesc .NE. 333933 ) THEN ! magic number
775 !write(0,*) 'A-1a', chunksize, TRIM(VarName),PatchStart(1:3),PatchEnd(1:3)
776 call store_piece_of_field( obuf(icurs/itypesize), VarName, chunksize )
777 icurs = icurs + chunksize
779 CASE ( int_open_for_write_commit ) ! only one per group of tasks
780 hdrbufsize = obuf(icurs/itypesize)
781 IF (num_commit_messages.EQ.0) THEN
782 call store_piece_of_field( obuf(icurs/itypesize), 'COMMIT', hdrbufsize )
784 num_commit_messages = num_commit_messages + 1
785 icurs = icurs + hdrbufsize
787 hdrbufsize = obuf(icurs/itypesize)
788 IF ((hdr_tag.EQ.int_noop.AND.mytask_local.NE.0.AND.num_noops.LE.0) &
789 .OR.hdr_tag.NE.int_noop) THEN
790 write(VarName,'(I5.5)')vid
791 !write(0,*) 'A-2b', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
792 call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
795 IF ( hdr_tag .EQ. int_noop ) num_noops = num_noops + 1
796 icurs = icurs + hdrbufsize
800 ! Now, for each field, retrieve headers and patches (data) from the internal
801 ! buffers and collect them all on the I/O quilt server "root" task.
802 CALL init_retrieve_pieces_of_field
803 ! Retrieve header and all patches for the first field from the internal
804 ! buffers.
805 CALL retrieve_pieces_of_field ( obuf , VarName, obufsize, sz, retval )
806 ! Sum sizes of all headers and patches (data) for this field from all I/O
807 ! servers in this I/O server group onto the I/O server "root".
808 CALL MPI_Reduce( sz, bigbufsize, 1, MPI_INTEGER, &
809 MPI_SUM, ntasks_local_group-1, &
810 mpi_comm_local, ierr )
811 !write(0,*)'seed: sz ',sz,' bigbufsize ',bigbufsize,' VarName ', TRIM(VarName),' retval ',retval
813 ! Loop until there are no more fields to retrieve from the internal buffers.
814 DO WHILE ( retval ) !{
818 ! I/O server "root" allocates space to collect headers and fields from all
819 ! other servers in this I/O server group.
820 IF ( mytask_local .EQ. ntasks_local_group-1 ) THEN
821 ALLOCATE( bigbuf( (bigbufsize+1)/itypesize ) )
824 ! Collect buffers and fields from all I/O servers in this I/O server group
825 ! onto the I/O server "root"
826 CALL collect_on_comm_debug2(__FILE__,__LINE__,Trim(VarName), &
827 get_hdr_tag(obuf),sz,get_hdr_rec_size(obuf), &
832 ! The I/O server "root" now handles collected requests from all compute
833 ! tasks served by this I/O server group (i.e. all compute tasks).
834 IF ( mytask_local .EQ. ntasks_local_group-1 ) THEN
836 !do iii = 1, ntasks_local_group
837 ! write(0,*)'i,j,tag,size ', iii, jjj, get_hdr_tag(bigbuf(jjj/4)),get_hdr_rec_size(bigbuf(jjj/4))
838 ! jjj = jjj + get_hdr_rec_size(bigbuf(jjj/4))
841 icurs = itypesize ! icurs is a byte counter, but buffer is integer
843 stored_write_record = .false.
845 ! The I/O server "root" loops over the collected requests.
846 DO WHILE ( icurs .lt. bigbufsize ) !{
847 CALL mpi_type_size ( MPI_INTEGER , itypesize , ierr )
849 !write(0,*)'B tag,size ',icurs,get_hdr_tag( bigbuf(icurs/itypesize) ),get_hdr_rec_size( bigbuf(icurs/itypesize) )
850 ! The I/O server "root" gets the request out of the next header and
851 ! handles it by, in most cases, calling the appropriate external I/O package
852 ! interface.
853 SELECT CASE ( get_hdr_tag( bigbuf(icurs/itypesize) ) )
854 ! The I/O server "root" handles the "noop" (do nothing) request. This is
855 ! actually quite easy. "Noop" requests exist to help avoid race conditions.
856 ! In some cases, only one compute task will know everything about a request, so
857 ! other compute tasks send "noop" requests.
859 CALL int_get_noop_header( bigbuf(icurs/itypesize), hdrbufsize, itypesize )
860 icurs = icurs + hdrbufsize
862 ! The I/O server "root" handles the "put_dom_td_real" request.
863 CASE ( int_dom_td_real )
864 CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
865 ALLOCATE( RData( bigbuf(icurs/itypesize + 4 ) ) ) ! word 5 (offset 4) of the header holds the data count for this record ; defined in collect_on_comm.c
866 CALL int_get_td_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
867 DataHandle, DateStr, Element, RData, Count, code )
868 icurs = icurs + hdrbufsize
870 SELECT CASE (use_package(io_form(DataHandle)))
873 CALL ext_ncd_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
877 CALL ext_int_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
881 CALL ext_yyy_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
885 CALL ext_gr1_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
889 CALL ext_gr2_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
896 ! The I/O server "root" handles the "put_dom_ti_real" request.
897 CASE ( int_dom_ti_real )
898 !write(0,*)' int_dom_ti_real '
899 CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
900 ALLOCATE( RData( bigbuf(icurs/itypesize + 4 ) ) ) ! word 5 (offset 4) of the header holds the data count for this record ; defined in collect_on_comm.c
901 CALL int_get_ti_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
902 DataHandle, Element, RData, Count, code )
903 icurs = icurs + hdrbufsize
905 SELECT CASE (use_package(io_form(DataHandle)))
908 CALL ext_ncd_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
909 !write(0,*)'ext_ncd_put_dom_ti_real ',handle(DataHandle),TRIM(Element),RData,Status
913 CALL ext_int_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
917 CALL ext_yyy_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
921 CALL ext_gr1_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
925 CALL ext_gr2_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
933 ! The I/O server "root" handles the "put_dom_td_integer" request.
934 CASE ( int_dom_td_integer )
935 !write(0,*)' int_dom_td_integer '
936 CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
937 ALLOCATE( IData( bigbuf(icurs/itypesize + 4 ) ) ) ! word 5 (offset 4) of the header holds the data count for this record ; defined in collect_on_comm.c
938 CALL int_get_td_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
939 DataHandle, DateStr, Element, IData, Count, code )
940 icurs = icurs + hdrbufsize
942 SELECT CASE (use_package(io_form(DataHandle)))
945 CALL ext_ncd_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
949 CALL ext_int_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
953 CALL ext_yyy_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
957 CALL ext_gr1_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
961 CALL ext_gr2_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
969 ! The I/O server "root" handles the "put_dom_ti_integer" request.
970 CASE ( int_dom_ti_integer )
971 !write(0,*)' int_dom_ti_integer '
973 CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
974 ALLOCATE( IData( bigbuf(icurs/itypesize + 4 ) ) ) ! word 5 (offset 4) of the header holds the data count for this record ; defined in collect_on_comm.c
975 CALL int_get_ti_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
976 DataHandle, Element, IData, Count, code )
977 icurs = icurs + hdrbufsize
978 SELECT CASE (use_package(io_form(DataHandle)))
981 CALL ext_ncd_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
982 !write(0,*)'ext_ncd_put_dom_ti_integer ',handle(DataHandle),TRIM(Element),IData,Status
986 CALL ext_int_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
990 CALL ext_yyy_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
994 CALL ext_gr1_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
998 CALL ext_gr2_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
1007 ! The I/O server "root" handles the "set_time" request.
1008 CASE ( int_set_time )
1009 !write(0,*)' int_set_time '
1010 CALL int_get_ti_header_char( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
1011 DataHandle, Element, VarName, CData, code )
1012 SELECT CASE (use_package(io_form(DataHandle)))
1015 CALL ext_int_set_time ( handle(DataHandle), TRIM(CData), Status)
1021 icurs = icurs + hdrbufsize
1023 ! The I/O server "root" handles the "put_dom_ti_char" request.
1024 CASE ( int_dom_ti_char )
1025 !write(0,*)' before int_get_ti_header_char '
1026 CALL int_get_ti_header_char( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
1027 DataHandle, Element, VarName, CData, code )
1028 !write(0,*)' after int_get_ti_header_char ',VarName
1030 SELECT CASE (use_package(io_form(DataHandle)))
1033 CALL ext_ncd_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
1037 CALL ext_int_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
1041 CALL ext_yyy_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
1045 CALL ext_gr1_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
1049 CALL ext_gr2_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
1055 icurs = icurs + hdrbufsize
1057 ! The I/O server "root" handles the "put_var_ti_char" request.
1058 CASE ( int_var_ti_char )
1059 !write(0,*)' int_var_ti_char '
1060 CALL int_get_ti_header_char( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
1061 DataHandle, Element, VarName, CData, code )
1063 SELECT CASE (use_package(io_form(DataHandle)))
1066 CALL ext_ncd_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
1070 CALL ext_int_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
1074 CALL ext_yyy_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
1078 CALL ext_gr1_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
1082 CALL ext_gr2_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
1088 icurs = icurs + hdrbufsize
1091 ! ioexit is now handled by sending a negative message length to the server
1092 CALL wrf_error_fatal( &
1093 "quilt: should have handled int_ioexit already")
1094 ! The I/O server "root" handles the "ioclose" request.
1095 CASE ( int_ioclose )
1096 CALL int_get_handle_header( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
1098 icurs = icurs + hdrbufsize
1100 IF ( DataHandle .GE. 1 ) THEN
1101 !JMDEBUGwrite(0,*)'closing DataHandle ',DataHandle,' io_form ',io_form(DataHandle)
1103 SELECT CASE (use_package(io_form(DataHandle)))
1106 CALL ext_ncd_inquire_filename( handle(DataHandle), fname, fstat, Status )
1107 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1108 CALL ext_ncd_ioclose(handle(DataHandle),Status)
1113 CALL ext_pnc_inquire_filename( handle(DataHandle), fname, fstat, Status )
1114 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1115 CALL ext_pnc_ioclose(handle(DataHandle),Status)
1120 CALL ext_int_inquire_filename( handle(DataHandle), fname, fstat, Status )
1121 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1122 CALL ext_int_ioclose(handle(DataHandle),Status)
1127 CALL ext_yyy_inquire_filename( handle(DataHandle), fname, fstat, Status )
1128 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1129 CALL ext_yyy_ioclose(handle(DataHandle),Status)
1134 CALL ext_gr1_inquire_filename( handle(DataHandle), fname, fstat, Status )
1135 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1136 CALL ext_gr1_ioclose(handle(DataHandle),Status)
1141 CALL ext_gr2_inquire_filename( handle(DataHandle), fname, fstat, Status )
1142 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1143 CALL ext_gr2_ioclose(handle(DataHandle),Status)
1151 ! The I/O server "root" handles the "open_for_write_begin" request.
1152 CASE ( int_open_for_write_begin )
1154 CALL int_get_ofwb_header( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
1155 FileName,SysDepInfo,io_form_arg,DataHandle )
1157 !write(0,*)' int_open_for_write_begin itypesize ',itypesize,' itypesize ',itypesize
1158 !write(0,*)' int_open_for_write_begin icurs ', icurs, hdrbufsize
1159 !JMDEBUGwrite(0,*)' int_open_for_write_begin FileName ',TRIM(FileName) , ' DataHandle ', DataHandle
1160 !write(0,*)' int_open_for_write_begin SysDepInfo ',TRIM(SysDepInfo)
1161 icurs = icurs + hdrbufsize
1162 !write(0,*)' int_open_for_write_begin new icurs,tag,size ', icurs, get_hdr_tag( bigbuf(icurs/itypesize) ),get_hdr_rec_size( bigbuf(icurs/itypesize) )
1164 io_form(DataHandle) = io_form_arg
1166 SELECT CASE (use_package(io_form(DataHandle)))
1169 CALL ext_ncd_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
1170 !write(0,*)'ext_ncd_open_for_write_begin ',Trim(FileName),DataHandle,handle(DataHandle),Status
1174 CALL ext_int_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
1178 CALL ext_yyy_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
1182 CALL ext_gr1_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
1186 CALL ext_gr2_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
1192 okay_to_write(DataHandle) = .false.
1194 ! The I/O server "root" handles the "open_for_write_commit" request.
1195 ! In this case, the "okay_to_commit" is simply set to .true. so "write_field"
1196 ! requests will initiate writes to disk. Actual commit will be done after
1197 ! all requests in this batch have been handled.
1198 CASE ( int_open_for_write_commit )
1200 CALL int_get_handle_header( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
1202 icurs = icurs + hdrbufsize
1203 okay_to_commit(DataHandle) = .true.
1205 ! The I/O server "root" handles the "write_field" (int_field) request.
1206 ! If okay_to_write(DataHandle) is .true. then the patch in the
1207 ! header (bigbuf) is written to a globally-sized internal output buffer via
1208 ! the call to store_patch_in_outbuf(). Note that this is where the actual
1209 ! "quilting" (reassembly of patches onto a full-size domain) is done. If
1210 ! okay_to_write(DataHandle) is .false. then external I/O package interfaces
1211 ! are called to write metadata for I/O formats that support native metadata.
1213 ! NOTE that the I/O server "root" will only see write_field (int_field)
1214 ! requests AFTER an "iosync" request.
1216 CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
1217 CALL int_get_write_field_header ( bigbuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize, &
1218 DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
1219 DomainDesc , MemoryOrder , Stagger , DimNames , &
1220 DomainStart , DomainEnd , &
1221 MemoryStart , MemoryEnd , &
1222 PatchStart , PatchEnd )
1223 !write(0,*)' int_field ',TRIM(VarName),DataHandle,okay_to_write(DataHandle)
1224 icurs = icurs + hdrbufsize
1226 IF ( okay_to_write(DataHandle) ) THEN
1228 ! WRITE(0,*)'>>> ',TRIM(DateStr), ' ', TRIM(VarName), ' ', TRIM(MemoryOrder), ' ', &
1229 ! (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)*(PatchEnd(3)-PatchStart(3)+1)
1231 IF ( FieldType .EQ. WRF_FLOAT .OR. FieldType .EQ. WRF_DOUBLE) THEN
1232 ! Note that the WRF_DOUBLE branch of this IF statement must come first since
1233 ! WRF_FLOAT is set equal to WRF_DOUBLE during autopromotion builds.
1234 IF ( FieldType .EQ. WRF_DOUBLE) THEN
1235 ! this branch has not been tested TBH: 20050406
1236 CALL mpi_type_size( MPI_DOUBLE_PRECISION, ftypesize, ierr )
1238 CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
1240 stored_write_record = .true.
1241 CALL store_patch_in_outbuf ( bigbuf(icurs/itypesize), dummybuf, TRIM(DateStr), TRIM(VarName) , &
1242 FieldType, TRIM(MemoryOrder), TRIM(Stagger), DimNames, &
1243 DomainStart , DomainEnd , &
1244 MemoryStart , MemoryEnd , &
1245 PatchStart , PatchEnd )
1247 ELSE IF ( FieldType .EQ. WRF_INTEGER ) THEN
1248 CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
1249 stored_write_record = .true.
1250 CALL store_patch_in_outbuf ( dummybuf, bigbuf(icurs/itypesize), TRIM(DateStr), TRIM(VarName) , &
1251 FieldType, TRIM(MemoryOrder), TRIM(Stagger), DimNames, &
1252 DomainStart , DomainEnd , &
1253 MemoryStart , MemoryEnd , &
1254 PatchStart , PatchEnd )
1255 ELSE IF ( FieldType .EQ. WRF_LOGICAL ) THEN
1256 ftypesize = LWORDSIZE
1258 icurs = icurs + (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
1259 (PatchEnd(3)-PatchStart(3)+1)*ftypesize
1261 SELECT CASE (use_package(io_form(DataHandle)))
1264 CALL ext_ncd_write_field ( handle(DataHandle) , TRIM(DateStr) , &
1265 TRIM(VarName) , dummy , FieldType , Comm , IOComm, &
1266 DomainDesc , TRIM(MemoryOrder) , TRIM(Stagger) , DimNames , &
1267 DomainStart , DomainEnd , &
1268 DomainStart , DomainEnd , &
1269 DomainStart , DomainEnd , &
1273 ! since this is training and the grib output doesn't need training, disable this branch.
1276 CALL ext_YYY_write_field ( handle(DataHandle) , TRIM(DateStr) , &
1277 TRIM(VarName) , dummy , FieldType , Comm , IOComm, &
1278 DomainDesc , TRIM(MemoryOrder) , TRIM(Stagger) , DimNames , &
1279 DomainStart , DomainEnd , &
1280 DomainStart , DomainEnd , &
1281 DomainStart , DomainEnd , &
1290 CALL int_get_handle_header( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
1292 icurs = icurs + hdrbufsize
1294 WRITE(mess,*)'quilt: bad tag: ',get_hdr_tag( bigbuf(icurs/itypesize) ),' icurs ',icurs/itypesize
1295 CALL wrf_error_fatal( mess )
1299 ! Now, the I/O server "root" has finished handling all commands from the latest
1300 ! call to retrieve_pieces_of_field().
1302 IF (stored_write_record) THEN
1303 ! If any fields have been stored in a globally-sized internal output buffer
1304 ! (via a call to store_patch_in_outbuf()) then call write_outbuf() to write
1305 ! them to the file.
1306 ! NOTE that the I/O server "root" will only have called
1307 ! store_patch_in_outbuf() when handling write_field (int_field)
1308 ! commands which only arrive AFTER an "iosync" command.
1310 CALL write_outbuf ( handle(DataHandle), use_package(io_form(DataHandle)))
1311 ! CALL end_timing( "quilt: call to write_outbuf" )
1314 ! If one or more "open_for_write_commit" commands were encountered from the
1315 ! latest call to retrieve_pieces_of_field() then call the package-specific
1316 ! routine to do the commit.
1317 IF (okay_to_commit(DataHandle)) THEN
1319 SELECT CASE (use_package(io_form(DataHandle)))
1322 CALL ext_ncd_inquire_filename( handle(DataHandle), fname, fstat, Status )
1323 IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1324 CALL ext_ncd_open_for_write_commit(handle(DataHandle),Status)
1325 okay_to_write(DataHandle) = .true.
1330 CALL ext_int_inquire_filename( handle(DataHandle), fname, fstat, Status )
1331 IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1332 CALL ext_int_open_for_write_commit(handle(DataHandle),Status)
1333 okay_to_write(DataHandle) = .true.
1338 CALL ext_yyy_inquire_filename( handle(DataHandle), fname, fstat, Status )
1339 IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1340 CALL ext_yyy_open_for_write_commit(handle(DataHandle),Status)
1341 okay_to_write(DataHandle) = .true.
1346 CALL ext_gr1_inquire_filename( handle(DataHandle), fname, fstat, Status )
1347 IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1348 CALL ext_gr1_open_for_write_commit(handle(DataHandle),Status)
1349 okay_to_write(DataHandle) = .true.
1354 CALL ext_gr2_inquire_filename( handle(DataHandle), fname, fstat, Status )
1355 IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1356 CALL ext_gr2_open_for_write_commit(handle(DataHandle),Status)
1357 okay_to_write(DataHandle) = .true.
1365 okay_to_commit(DataHandle) = .false.
1367 DEALLOCATE( bigbuf )
1371 ! Retrieve header and all patches for the next field from the internal
1372 ! buffers.
1373 CALL retrieve_pieces_of_field ( obuf , VarName, obufsize, sz, retval )
1374 ! Sum sizes of all headers and patches (data) for this field from all I/O
1375 ! servers in this I/O server group onto the I/O server "root".
1376 CALL MPI_Reduce( sz, bigbufsize, 1, MPI_INTEGER, &
1377 MPI_SUM, ntasks_local_group-1, &
1378 mpi_comm_local, ierr )
1379 ! Then, return to the top of the loop to collect headers and data from all
1380 ! I/O servers in this I/O server group onto the I/O server "root" and handle
1381 ! the next batch of commands.
1386 ! flush output files if needed
1387 IF (stored_write_record) THEN
1388 ! CALL start_timing()
1389 SELECT CASE ( use_package(io_form(DataHandle)) )
1392 CALL ext_ncd_iosync( handle(DataHandle), Status )
1396 CALL ext_xxx_iosync( handle(DataHandle), Status )
1400 CALL ext_yyy_iosync( handle(DataHandle), Status )
1404 CALL ext_zzz_iosync( handle(DataHandle), Status )
1408 CALL ext_gr1_iosync( handle(DataHandle), Status )
1412 CALL ext_gr2_iosync( handle(DataHandle), Status )
1416 CALL ext_int_iosync( handle(DataHandle), Status )
1421 !CALL end_timing( "quilt: flush" )
1426 END SUBROUTINE quilt
1428 SUBROUTINE quilt_pnc
1430 ! Same as quilt() routine except that _all_ of the IO servers that call it
1431 ! actually write data to disk using pNetCDF. This version is only used when
1432 ! the code is compiled with PNETCDF_QUILT defined.
1434 USE module_state_description
1435 USE module_quilt_outbuf_ops
1438 #include "intio_tags.h"
1439 #include "wrf_io_flags.h"
1440 INTEGER itag, ninbuf, ntasks_io_group, ntasks_local_group, mytask_local, ierr
1442 INTEGER mytask_io_group
1443 INTEGER :: nout_set = 0
1444 INTEGER :: obufsize, bigbufsize, chunksize, sz
1445 REAL, DIMENSION(1) :: dummy
1446 INTEGER, ALLOCATABLE, DIMENSION(:) :: obuf, bigbuf
1447 REAL, ALLOCATABLE, DIMENSION(:) :: RDATA
1448 INTEGER, ALLOCATABLE, DIMENSION(:) :: IDATA
1449 CHARACTER (LEN=512) :: CDATA
1450 CHARACTER (LEN=80) :: fname
1451 INTEGER icurs, hdrbufsize, itypesize, ftypesize, rtypesize, Status, fstat, io_form_arg
1452 INTEGER :: DataHandle, FieldType, Comm, IOComm, DomainDesc, code, Count
1453 INTEGER, DIMENSION(3) :: DomainStart , DomainEnd , MemoryStart , MemoryEnd , PatchStart , PatchEnd
1454 INTEGER :: dummybuf(1)
1455 INTEGER :: num_noops, num_commit_messages, num_field_training_msgs, hdr_tag
1456 CHARACTER (len=256) :: DateStr , Element, VarName, MemoryOrder , Stagger , DimNames(3), FileName, SysDepInfo, mess
1457 INTEGER, EXTERNAL :: use_package
1458 LOGICAL :: stored_write_record, retval, written_record
1459 INTEGER iii, jjj, vid, CC, DD
1462 ! character*120 sysline
1464 ! Call ext_pkg_ioinit() routines to initialize I/O packages.
1467 CALL ext_ncd_ioinit( SysDepInfo, ierr)
1469 #ifdef PNETCDF_QUILT
1470 CALL ext_pnc_ioinit( SysDepInfo, ierr)
1473 CALL ext_int_ioinit( SysDepInfo, ierr )
1476 CALL ext_xxx_ioinit( SysDepInfo, ierr)
1479 CALL ext_yyy_ioinit( SysDepInfo, ierr)
1482 CALL ext_zzz_ioinit( SysDepInfo, ierr)
1485 CALL ext_gr1_ioinit( SysDepInfo, ierr)
1488 CALL ext_gr2_ioinit( SysDepInfo, ierr)
1491 okay_to_commit = .false.
1492 stored_write_record = .false.
1494 ! get info. about the I/O server group that this I/O server task
1495 ! belongs to.
1496 CALL mpi_x_comm_size( mpi_comm_io_groups(1), ntasks_io_group, ierr )
1497 CALL MPI_COMM_RANK( mpi_comm_io_groups(1), mytask_io_group, ierr )
1498 CALL mpi_x_comm_size( mpi_comm_local, ntasks_local_group, ierr )
1499 CALL MPI_COMM_RANK( mpi_comm_local, mytask_local, ierr )
1501 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
1502 IF ( itypesize <= 0 ) THEN
1503 CALL wrf_error_fatal("external/RSL/module_dm.F: quilt: type size <= 0 invalid")
1506 ! Work out whether this I/O server processor has one fewer associated compute proc than
1507 ! the most any processor has. This can happen when the number of I/O tasks does not evenly divide
1508 ! the number of compute tasks. This is needed to keep the I/O tasks synchronized on the
1509 ! same message when they start communicating to stitch together an output.
1511 ! Compute processes associated with this task:
1512 CC = ntasks_io_group - 1
1513 ! Number of compute tasks per I/O task (less remainder)
1514 DD = ncompute_tasks / ntasks_local_group
1516 ! CC-DD is 1 on servers with the maximum number of compute clients and
1517 ! 0 on servers with one less than the maximum.
1520 ! infinite loop until shutdown message received
1521 ! This is the main request-handling loop. I/O quilt servers stay in this loop
1522 ! until the model run ends.
1523 !okay_to_w = .false.
1524 DO WHILE (.TRUE.) ! {
1527 ! Each I/O server receives requests from its compute tasks. Each request
1528 ! is contained in a data header (see module_internal_header_util.F for
1529 ! detailed descriptions of data headers).
1530 ! Each request is sent in two phases. First, sizes of all messages that
1531 ! will be sent from the compute tasks to this I/O server are summed on the
1532 ! I/O server via MPI_reduce(). The I/O server then allocates buffer "obuf"
1533 ! and receives concatenated messages from the compute tasks in it via the
1534 ! call to collect_on_comm(). Note that "sizes" are generally expressed in
1535 ! *bytes* in this code so conversion to "count" (number of Fortran words) is
1536 ! required for Fortran indexing and MPI calls.
1538 ! wait for info from compute tasks in the I/O group that we're ready to rock
1539 ! obufsize will contain number of *bytes*
1541 ! first element of reduced is obufsize, second is DataHandle
1542 ! if needed (currently needed only for ioclose).
1544 CALL MPI_Reduce( reduced_dummy, reduced, 2, MPI_INTEGER, &
1545 MPI_SUM, mytask_io_group, &
1546 mpi_comm_io_groups(1), ierr )
1547 obufsize = reduced(1)
1548 !CALL end_timing("MPI_Reduce at top of forever loop")
1549 !JMDEBUGwrite(0,*)'obufsize = ',obufsize
1550 ! Negative obufsize will trigger I/O server exit.
1551 IF ( obufsize .LT. 0 ) THEN
1552 IF ( obufsize .EQ. -100 ) THEN ! magic number
1554 CALL ext_ncd_ioexit( Status )
1556 #ifdef PNETCDF_QUILT
1557 CALL ext_pnc_ioexit( Status )
1560 CALL ext_int_ioexit( Status )
1563 CALL ext_xxx_ioexit( Status )
1566 CALL ext_yyy_ioexit( Status )
1569 CALL ext_zzz_ioexit( Status )
1572 CALL ext_gr1_ioexit( Status )
1575 CALL ext_gr2_ioexit( Status )
1577 CALL wrf_message ( 'I/O QUILT SERVERS DONE' )
1578 CALL mpi_finalize(ierr)
1581 WRITE(mess,*)'Possible 32-bit overflow on output server. Try larger nio_tasks_per_group in namelist.'
1582 CALL wrf_error_fatal(mess)
1587 ! Obufsize of zero signals a close
1589 ! Allocate buffer obuf to be big enough for the data the compute tasks
1590 ! will send. Note: obufsize is in *bytes*, so we need to pare this
1591 ! down, since the buffer is INTEGER.
1592 IF ( obufsize .GT. 0 ) THEN
1593 ALLOCATE( obuf( (obufsize+1)/itypesize ) )
1595 ! let's roll; get the data from the compute procs and put in obuf
1596 CALL collect_on_comm_debug(__FILE__,__LINE__, mpi_comm_io_groups(1), &
1600 ! CALL end_timing( "quilt on server: collecting data from compute procs" )
1602 ! Necessarily, the compute processes send the ioclose signal,
1603 ! if there is one, after the iosync, which means they
1604 ! would stall on the ioclose message waiting for the quilt
1605 ! processes if we handled it the way other messages are collected,
1606 ! using collect_on_comm. This special path avoids that, but we need
1607 ! a special signal (obufsize zero) and the DataHandle of the file
1608 ! to be closed. That handle is sent as the second
1609 ! word of the io_close message received by the MPI_Reduce above.
1610 ! Then a header representing the ioclose message is constructed
1611 ! here and handled below as if it were received from the
1612 ! compute processes. The clients (compute processes) must be
1613 ! careful to send this correctly (one compute process sends the actual
1614 ! handle and everyone else sends a zero, so the result sums to
1615 ! the value of the handle).
1617 ALLOCATE( obuf( 4096 ) )
1618 ! DataHandle is provided as second element of reduced
1619 CALL int_gen_handle_header( obuf, obufsize, itypesize, &
1620 reduced(2) , int_ioclose )
1623 !write(0,*)'calling init_store_piece_of_field'
1624 ! Now all messages received from the compute clients are stored in
1625 ! obuf. Scan through obuf and extract headers and field data and store in
1626 ! internal buffers. The scan is done twice, first to determine sizes of
1627 ! internal buffers required for storage of headers and fields and second to
1628 ! actually store the headers and fields. This bit of code does not do any
1629 ! "quilting" (assembly of patches into full domains). For each field, it
1630 ! simply writes all received patches for the field to disk.
1631 ! ARPDBG we can vastly reduce the number of writes to disk by stitching
1632 ! any contiguous patches together first. Has implications for synchronisation
1633 ! of pNetCDF calls though.
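!
! The two-pass pattern used below, in sketch form (the routine names are
! the real ones from this file; the loop conditions and bodies are
! abbreviated):
!
!   CALL init_store_piece_of_field             ! reset internal tables
!   DO WHILE ( more headers in obuf )          ! pass 1: only measure
!     CALL add_to_bufsize_for_field( VarName, nbytes )
!   ENDDO
!   DO WHILE ( more headers in obuf )          ! pass 2: buffers sized, copy
!     CALL store_piece_of_field( src, VarName, nbytes )
!   ENDDO
!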
1634 CALL init_store_piece_of_field
1635 CALL mpi_type_size ( MPI_INTEGER , itypesize , ierr )
1636 !write(0,*)'mpi_type_size returns ', itypesize
1637 ! Scan obuf the first time to calculate the size of the buffer required for
1638 ! each field. Calls to add_to_bufsize_for_field() accumulate sizes.
1642 num_commit_messages = 0
1643 num_field_training_msgs = 0
1644 DO WHILE ( icurs .lt. obufsize ) ! {
1645 hdr_tag = get_hdr_tag( obuf ( icurs / itypesize ) )
1646 SELECT CASE ( hdr_tag )
1648 CALL int_get_write_field_header ( obuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize, &
1649 DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
1650 DomainDesc , MemoryOrder , Stagger , DimNames , &
1651 DomainStart , DomainEnd , &
1652 MemoryStart , MemoryEnd , &
1653 PatchStart , PatchEnd )
1654 chunksize = (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
1655 (PatchEnd(3)-PatchStart(3)+1)*ftypesize
1657 IF ( DomainDesc .EQ. 333933 ) THEN ! Training write, only one per group of tasks
1658 IF ( num_field_training_msgs .EQ. 0 ) THEN
1659 call add_to_bufsize_for_field( VarName, hdrbufsize )
1660 !write(0,*) 'X-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
1662 num_field_training_msgs = num_field_training_msgs + 1
1664 call add_to_bufsize_for_field( VarName, hdrbufsize )
1665 !write(0,*) 'X-2a', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
1667 icurs = icurs + hdrbufsize
1669 !write(0,*) 'X-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
1671 ! If this is a real write (i.e. not a training write), accumulate
1672 ! buffersize for this field.
1673 IF ( DomainDesc .NE. 333933 ) THEN ! magic number
1674 !write(0,*) 'X-1a', chunksize, TRIM(VarName)
1675 call add_to_bufsize_for_field( VarName, chunksize )
1676 icurs = icurs + chunksize
1678 CASE ( int_open_for_write_commit ) ! only one per group of tasks
1679 hdrbufsize = obuf(icurs/itypesize)
1680 IF (num_commit_messages.EQ.0) THEN
1681 call add_to_bufsize_for_field( 'COMMIT', hdrbufsize )
1683 num_commit_messages = num_commit_messages + 1
1684 icurs = icurs + hdrbufsize
1686 hdrbufsize = obuf(icurs/itypesize)
1688 ! This logic and the logic in the loop below determine whether
1689 ! noop records sent by the compute processes are allowed to go
1690 ! through. The purpose is to make sure that the communications between this
1691 ! server and the other servers in this quilt group stay synchronized in
1692 ! the collection loop below, even when the servers are serving different
1693 ! numbers of clients. Here are some conditions:
1695 ! 1. The number of compute clients served will not differ by more than 1
1696 ! 2. The servers serving one extra compute client begin with task 0
1697 ! of mpi_comm_local, the communicator shared by this group of servers
1699 ! 3. For each collective field or metadata output from the compute tasks,
1700 ! there will be one record sent to the associated i/o server task. The
1701 ! i/o server task collects these records and stores them contiguously
1702 ! in a buffer (obuf) using collect_on_comm above. Thus, obuf on this
1703 ! server task will contain one record from each associated compute task.
1706 ! 4. In the case of replicated output from the compute tasks
1707 ! (e.g. put_dom_ti records and control records like
1708 ! open_for_write_commit type records), only compute tasks for which
1709 ! (compute_group_master == .TRUE.) send the record. The other compute
1710 ! tasks send noop records. This is done so that each server task
1711 ! receives exactly one record plus noops from the other compute tasks.
1713 ! 5. Logic below does not allow any noop records through, since each IO
1714 ! server task now receives a valid record (from the 'compute-group master')
1715 ! when doing replicated output. A minimal sketch of this pattern follows.
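!
! Sketch of the replicated-output convention described in (4) and (5)
! above; this mirrors the client-side code later in this file (the
! predicate name is a hypothetical stand-in for the PNETCDF_QUILT and
! monitor variants):
!
!   IF ( i_am_compute_group_master ) THEN
!     CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
!                                 DataHandle, int_open_for_write_commit )
!   ELSE
!     CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
!   ENDIF
!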
1716 IF (hdr_tag.NE.int_noop) THEN
1717 write(VarName,'(I5.5)')vid
1718 !write(0,*) 'X-2', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
1719 call add_to_bufsize_for_field( VarName, hdrbufsize )
1722 IF ( hdr_tag .EQ. int_noop ) num_noops = num_noops + 1
1723 icurs = icurs + hdrbufsize
1727 ! Store the headers and field data in internal buffers. The first call to
1728 ! store_piece_of_field() allocates internal buffers using sizes computed by
1729 ! calls to add_to_bufsize_for_field().
1733 num_commit_messages = 0
1734 num_field_training_msgs = 0
1735 DO WHILE ( icurs .lt. obufsize ) !{
1736 !write(0,*) 'A icurs ', icurs, ' obufsize ', obufsize
1737 hdr_tag = get_hdr_tag( obuf ( icurs / itypesize ) )
1738 SELECT CASE ( hdr_tag )
1740 CALL int_get_write_field_header ( obuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize, &
1741 DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
1742 DomainDesc , MemoryOrder , Stagger , DimNames , &
1743 DomainStart , DomainEnd , &
1744 MemoryStart , MemoryEnd , &
1745 PatchStart , PatchEnd )
1746 chunksize = (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
1747 (PatchEnd(3)-PatchStart(3)+1)*ftypesize
1749 IF ( DomainDesc .EQ. 333933 ) THEN ! Training write, only one per group of tasks
1750 IF ( num_field_training_msgs .EQ. 0 ) THEN
1751 call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
1752 !write(0,*) 'A-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
1754 num_field_training_msgs = num_field_training_msgs + 1
1756 call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
1757 !write(0,*) 'A-2a', icurs, hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
1759 icurs = icurs + hdrbufsize
1760 ! If this is a real write (i.e. not a training write), store
1761 ! this piece of this field.
1762 IF ( DomainDesc .NE. 333933 ) THEN ! magic number
1763 call store_piece_of_field( obuf(icurs/itypesize), VarName, chunksize )
1764 icurs = icurs + chunksize
1765 !write(0,*) 'A-1a',TRIM(VarName),' icurs ',icurs,PatchStart(1:3),PatchEnd(1:3)
1767 CASE ( int_open_for_write_commit ) ! only one per group of tasks
1768 hdrbufsize = obuf(icurs/itypesize)
1769 IF (num_commit_messages.EQ.0) THEN
1770 call store_piece_of_field( obuf(icurs/itypesize), 'COMMIT', hdrbufsize )
1772 num_commit_messages = num_commit_messages + 1
1773 icurs = icurs + hdrbufsize
1775 hdrbufsize = obuf(icurs/itypesize)
1776 IF (hdr_tag.NE.int_noop) THEN
1778 write(VarName,'(I5.5)')vid
1779 !write(0,*) 'A-2b', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
1780 call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
1783 IF ( hdr_tag .EQ. int_noop ) num_noops = num_noops + 1
1784 icurs = icurs + hdrbufsize
1786 ENDDO !} while(icurs < obufsize)
1788 ! Now, for each field, retrieve headers and patches (data) from the internal buffers.
1790 CALL init_retrieve_pieces_of_field
1791 ! Retrieve header and all patches for the first field from the internal buffers.
1793 CALL retrieve_pieces_of_field ( obuf , VarName, obufsize, sz, retval )
1794 written_record = .false.
1796 ! Loop until there are no more fields to retrieve from the internal buffers.
1797 DO WHILE ( retval ) !{
1799 ! This I/O server now handles the collected requests from the compute tasks.
1802 icurs = itypesize ! icurs is a byte counter, but buffer is integer
1804 stored_write_record = .false.
1806 ! ALL I/O servers in this group loop over the collected requests they have received.
1808 DO WHILE ( icurs .lt. sz)! bigbufsize ) !{
1810 ! The I/O server gets the request out of the next header and
1811 ! handles it by, in most cases, calling the appropriate external I/O package
1813 !write(0,*)__FILE__,__LINE__,'get_hdr_tag ',icurs,sz,get_hdr_tag( obuf(icurs/itypesize) )
1814 SELECT CASE ( get_hdr_tag( obuf(icurs/itypesize) ) )
1815 ! The I/O server handles the "noop" (do nothing) request. This is
1816 ! actually quite easy. "Noop" requests exist to help avoid race conditions.
1818 CALL int_get_noop_header( obuf(icurs/itypesize), &
1819 hdrbufsize, itypesize )
1820 icurs = icurs + hdrbufsize
1822 ! The I/O server "root" handles the "put_dom_td_real" request.
1823 CASE ( int_dom_td_real )
1824 CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
1825 ALLOCATE( RData( obuf(icurs/itypesize + 4 ) ) ) ! the 5th word of this record (offset 4) holds the count of data items ; defined in collect_on_comm.c
1826 CALL int_get_td_header( obuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
1827 DataHandle, DateStr, Element, RData, Count, code )
1828 icurs = icurs + hdrbufsize
1830 SELECT CASE (use_package(io_form(DataHandle)))
1831 #ifdef PNETCDF_QUILT
1833 CALL ext_pnc_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
1837 CALL ext_ncd_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
1841 CALL ext_int_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
1845 CALL ext_yyy_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
1849 CALL ext_gr1_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
1853 CALL ext_gr2_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
1860 ! Every I/O server handles the "put_dom_ti_real" request.
1861 CASE ( int_dom_ti_real )
1863 CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
1864 ALLOCATE( RData( obuf(icurs/itypesize + 4 ) ) ) ! the 5th word of this record (offset 4) holds the count of data items ; defined in collect_on_comm.c
1865 CALL int_get_ti_header( obuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
1866 DataHandle, Element, RData, Count, code )
1867 icurs = icurs + hdrbufsize
1869 SELECT CASE (use_package(io_form(DataHandle)))
1870 #ifdef PNETCDF_QUILT
1872 CALL ext_pnc_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
1876 CALL ext_ncd_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
1880 CALL ext_int_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
1884 CALL ext_yyy_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
1888 CALL ext_gr1_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
1892 CALL ext_gr2_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
1900 ! Every I/O server handles the "put_dom_td_integer" request.
1901 CASE ( int_dom_td_integer )
1903 CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
1904 ALLOCATE( IData( obuf(icurs/itypesize + 4 ) ) ) ! the 5th word of this record (offset 4) holds the count of data items ; defined in collect_on_comm.c
1905 CALL int_get_td_header( obuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
1906 DataHandle, DateStr, Element, IData, Count, code )
1907 icurs = icurs + hdrbufsize
1909 SELECT CASE (use_package(io_form(DataHandle)))
1910 #ifdef PNETCDF_QUILT
1912 CALL ext_pnc_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
1916 CALL ext_ncd_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
1920 CALL ext_int_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
1924 CALL ext_yyy_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
1928 CALL ext_gr1_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
1932 CALL ext_gr2_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
1940 ! Every I/O server handles the "put_dom_ti_integer" request.
1941 CASE ( int_dom_ti_integer )
1943 CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
1944 ALLOCATE( IData( obuf(icurs/itypesize + 4 ) ) ) ! the 5th word of this record (offset 4) holds the count of data items ; defined in collect_on_comm.c
1945 CALL int_get_ti_header( obuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
1946 DataHandle, Element, IData, Count, code )
1947 icurs = icurs + hdrbufsize
1948 SELECT CASE (use_package(io_form(DataHandle)))
1949 #ifdef PNETCDF_QUILT
1951 CALL ext_pnc_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
1955 CALL ext_ncd_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
1959 CALL ext_int_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
1963 CALL ext_yyy_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
1967 CALL ext_gr1_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
1971 CALL ext_gr2_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
1980 ! Every I/O server handles the "set_time" request.
1981 CASE ( int_set_time )
1983 CALL int_get_ti_header_char( obuf(icurs/itypesize), hdrbufsize, itypesize, &
1984 DataHandle, Element, VarName, CData, code )
1985 SELECT CASE (use_package(io_form(DataHandle)))
1988 CALL ext_int_set_time ( handle(DataHandle), TRIM(CData), Status)
1994 icurs = icurs + hdrbufsize
1996 ! Every I/O server handles the "put_dom_ti_char" request.
1997 CASE ( int_dom_ti_char )
1999 CALL int_get_ti_header_char( obuf(icurs/itypesize), hdrbufsize, itypesize, &
2000 DataHandle, Element, VarName, CData, code )
2002 SELECT CASE (use_package(io_form(DataHandle)))
2003 #ifdef PNETCDF_QUILT
2005 CALL ext_pnc_put_dom_ti_char ( handle(DataHandle), TRIM(Element), Trim(CData), Status)
2009 CALL ext_ncd_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
2013 CALL ext_int_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
2017 CALL ext_yyy_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
2021 CALL ext_gr1_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
2025 CALL ext_gr2_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
2031 icurs = icurs + hdrbufsize
2033 ! Every I/O server handles the "put_var_ti_char" request.
2034 CASE ( int_var_ti_char )
2036 CALL int_get_ti_header_char( obuf(icurs/itypesize), hdrbufsize, itypesize, &
2037 DataHandle, Element, VarName, CData, code )
2039 SELECT CASE (use_package(io_form(DataHandle)))
2040 #ifdef PNETCDF_QUILT
2042 CALL ext_pnc_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status )
2046 CALL ext_ncd_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
2050 CALL ext_int_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
2054 CALL ext_yyy_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
2058 CALL ext_gr1_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
2062 CALL ext_gr2_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
2068 icurs = icurs + hdrbufsize
2071 ! ioexit is now handled by sending a negative message length to the server
2072 CALL wrf_error_fatal( &
2073 "quilt: should have handled int_ioexit already")
2074 ! Every I/O server handles the "ioclose" request.
2075 CASE ( int_ioclose )
2076 CALL int_get_handle_header( obuf(icurs/itypesize), hdrbufsize, itypesize, &
2078 icurs = icurs + hdrbufsize
2080 IF ( DataHandle .GE. 1 ) THEN
2082 SELECT CASE (use_package(io_form(DataHandle)))
2083 #ifdef PNETCDF_QUILT
2085 CALL ext_pnc_inquire_filename( handle(DataHandle), fname, fstat, Status )
2086 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2087 CALL ext_pnc_ioclose(handle(DataHandle),Status)
2092 CALL ext_ncd_inquire_filename( handle(DataHandle), fname, fstat, Status )
2093 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2094 CALL ext_ncd_ioclose(handle(DataHandle),Status)
2099 CALL ext_int_inquire_filename( handle(DataHandle), fname, fstat, Status )
2100 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2101 CALL ext_int_ioclose(handle(DataHandle),Status)
2106 CALL ext_yyy_inquire_filename( handle(DataHandle), fname, fstat, Status )
2107 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2108 CALL ext_yyy_ioclose(handle(DataHandle),Status)
2113 CALL ext_gr1_inquire_filename( handle(DataHandle), fname, fstat, Status )
2114 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2115 CALL ext_gr1_ioclose(handle(DataHandle),Status)
2120 CALL ext_gr2_inquire_filename( handle(DataHandle), fname, fstat, Status )
2121 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2122 CALL ext_gr2_ioclose(handle(DataHandle),Status)
2130 ! Every I/O server handles the "open_for_write_begin" request.
2131 CASE ( int_open_for_write_begin )
2133 CALL int_get_ofwb_header( obuf(icurs/itypesize), hdrbufsize, itypesize, &
2134 FileName,SysDepInfo,io_form_arg,DataHandle )
2136 !write(0,*)' int_open_for_write_begin itypesize ',itypesize,' itypesize ',itypesize
2137 !write(0,*)' int_open_for_write_begin icurs ', icurs, hdrbufsize
2138 !JMDEBUGwrite(0,*)' int_open_for_write_begin FileName ',TRIM(FileName) , ' DataHandle ', DataHandle
2139 !write(0,*)' int_open_for_write_begin SysDepInfo ',TRIM(SysDepInfo)
2140 icurs = icurs + hdrbufsize
2141 !write(0,*)' int_open_for_write_begin new icurs,tag,size ', icurs, get_hdr_tag( bigbuf(icurs/itypesize) ),get_hdr_rec_size( bigbuf(icurs/itypesize) )
2143 io_form(DataHandle) = io_form_arg
2145 SELECT CASE (use_package(io_form(DataHandle)))
2146 #ifdef PNETCDF_QUILT
2148 CALL ext_pnc_open_for_write_begin(FileName,mpi_comm_local,mpi_comm_local,SysDepInfo,handle(DataHandle),Status )
2152 CALL ext_ncd_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
2153 !write(0,*)'ext_ncd_open_for_write_begin ',Trim(FileName),DataHandle,handle(DataHandle),Status
2157 CALL ext_int_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
2161 CALL ext_yyy_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
2165 CALL ext_gr1_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
2169 CALL ext_gr2_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
2175 okay_to_write(DataHandle) = .false.
2177 ! Every I/O server handles the "open_for_write_commit" request.
2178 ! In this case, "okay_to_commit" is simply set to .true. so "write_field"
2179 ! (int_field) requests will initiate writes to disk. Actual commit will be done after
2180 ! all requests in this batch have been handled.
2181 CASE ( int_open_for_write_commit )
2183 CALL int_get_handle_header( obuf(icurs/itypesize), hdrbufsize, itypesize, &
2185 icurs = icurs + hdrbufsize
2186 okay_to_commit(DataHandle) = .true.
2188 ! Every I/O server handles the "write_field" (int_field) request.
2189 ! If okay_to_write(DataHandle) is .true. then the patch in the
2190 ! header (obuf) is written to disk using pNetCDF. Note that this is where the actual
2191 ! "quilting" (reassembly of patches onto a full-size domain) is done. If
2192 ! okay_to_write(DataHandle) is .false. then external I/O package interfaces
2193 ! are called to write metadata for I/O formats that support native metadata.
2195 ! NOTE that the I/O servers will only see write_field (int_field)
2196 ! requests AFTER an "iosync" request.
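!
! A minimal sketch of the patch-to-domain reassembly ("quilting")
! arithmetic for a hypothetical 2-D field; the real bookkeeping is done
! inside store_patch_in_outbuf_pnc() below:
!
!   DO j = PatchStart(2), PatchEnd(2)
!     DO i = PatchStart(1), PatchEnd(1)
!       domain_buf(i,j) = patch_buf( i-PatchStart(1)+1, j-PatchStart(2)+1 )
!     ENDDO
!   ENDDO
!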
2198 CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
2199 CALL int_get_write_field_header ( obuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize, &
2200 DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
2201 DomainDesc , MemoryOrder , Stagger , DimNames , &
2202 DomainStart , DomainEnd , &
2203 MemoryStart , MemoryEnd , &
2204 PatchStart , PatchEnd )
2205 !write(0,*)' int_field ',TRIM(VarName),DataHandle,okay_to_write(DataHandle)
2206 icurs = icurs + hdrbufsize
2208 IF ( okay_to_write(DataHandle) ) THEN
2210 !!$ WRITE(0,FMT="('>>> ',(A),1x,(A),1x,A2,I6,1x,3('[',I3,',',I3,'] '))") &
2211 !!$ TRIM(DateStr), TRIM(VarName), TRIM(MemoryOrder), &
2212 !!$ (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)*(PatchEnd(3)-PatchStart(3)+1), &
2213 !!$PatchStart(1),PatchEnd(1),PatchStart(2),PatchEnd(2),PatchStart(3),PatchEnd(3)
2214 !!$ WRITE(0,FMT="('>>> ',(A),1x,(A),1x,I6,1x,3('[',I3,',',I3,'] '))") &
2215 !!$ TRIM(DateStr), TRIM(VarName), DomainDesc, &
2216 !!$ DomainStart(1),DomainEnd(1),DomainStart(2),DomainEnd(2),DomainStart(3),DomainEnd(3)
2218 IF ( FieldType .EQ. WRF_FLOAT .OR. FieldType .EQ. WRF_DOUBLE) THEN
2219 ! Note that the WRF_DOUBLE branch of this IF statement must come first since
2220 ! WRF_FLOAT is set equal to WRF_DOUBLE during autopromotion builds.
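! (Under autopromotion WRF_FLOAT == WRF_DOUBLE, so both tests of the outer
! IF are true for every real field; testing WRF_DOUBLE first ensures the
! 8-byte MPI_DOUBLE_PRECISION size is selected rather than MPI_REAL.)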
2221 IF ( FieldType .EQ. WRF_DOUBLE) THEN
2222 ! this branch has not been tested TBH: 20050406
2223 CALL mpi_type_size( MPI_DOUBLE_PRECISION, ftypesize, ierr )
2225 CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
2228 #ifdef PNETCDF_QUILT
2229 ! WRITE(mess,FMT="('>>> ',(A),1x,(A),1x,I6,1x,3('[',I3,',',I3,'] '))") &
2230 ! TRIM(DateStr), TRIM(VarName), DomainDesc, &
2231 ! DomainStart(1),DomainEnd(1), &
2232 ! DomainStart(2),DomainEnd(2),DomainStart(3),DomainEnd(3)
2233 ! CALL wrf_message(mess)
2235 CALL store_patch_in_outbuf_pnc(obuf(icurs/itypesize), &
2236 dummybuf, TRIM(DateStr), &
2239 TRIM(MemoryOrder), &
2242 DomainStart , DomainEnd ,&
2243 MemoryStart , MemoryEnd ,&
2244 PatchStart , PatchEnd, &
2246 stored_write_record = .true.
2248 !!$ IF(VarName .eq. "PSFC")THEN
2249 !!$ CALL dump_real_array_c(obuf(icurs/itypesize), DomainStart,&
2250 !!$ DomainEnd, PatchStart, PatchEnd, &
2251 !!$ mytask_local, DomainDesc)
2255 ELSE IF ( FieldType .EQ. WRF_INTEGER ) THEN
2256 CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
2257 #ifdef PNETCDF_QUILT
2258 CALL store_patch_in_outbuf_pnc ( dummybuf, &
2259 obuf(icurs/itypesize) , &
2263 TRIM(MemoryOrder) , &
2264 TRIM(Stagger), DimNames, &
2265 DomainStart , DomainEnd , &
2266 MemoryStart , MemoryEnd , &
2267 PatchStart , PatchEnd , &
2269 stored_write_record = .true.
2271 ELSE IF ( FieldType .EQ. WRF_LOGICAL ) THEN
2272 ftypesize = LWORDSIZE
2275 icurs = icurs + (PatchEnd(1)-PatchStart(1)+1)* &
2276 (PatchEnd(2)-PatchStart(2)+1)* &
2277 (PatchEnd(3)-PatchStart(3)+1)*ftypesize
2279 ELSE ! Write metadata only (or do 'training'?)
2281 SELECT CASE (use_package(io_form(DataHandle)))
2283 #ifdef PNETCDF_QUILT
2285 CALL ext_pnc_write_field ( handle(DataHandle) , TRIM(DateStr), &
2286 TRIM(VarName) , dummy , FieldType , mpi_comm_local , mpi_comm_local, &
2287 DomainDesc , TRIM(MemoryOrder) , TRIM(Stagger), DimNames , &
2288 DomainStart , DomainEnd , &
2289 MemoryStart , MemoryEnd , &
2290 PatchStart , PatchEnd, &
2295 CALL ext_ncd_write_field ( handle(DataHandle) , TRIM(DateStr) , &
2296 TRIM(VarName) , dummy , FieldType , Comm , IOComm, &
2297 DomainDesc , TRIM(MemoryOrder) , TRIM(Stagger) , DimNames , &
2298 DomainStart , DomainEnd , &
2299 DomainStart , DomainEnd , &
2300 DomainStart , DomainEnd , &
2304 ! since this is training and the grib output doesn't need training, disable this branch.
2307 CALL ext_YYY_write_field ( handle(DataHandle) , TRIM(DateStr) , &
2308 TRIM(VarName) , dummy , FieldType , Comm , IOComm, &
2309 DomainDesc , TRIM(MemoryOrder) , TRIM(Stagger) , DimNames , &
2310 DomainStart , DomainEnd , &
2311 DomainStart , DomainEnd , &
2312 DomainStart , DomainEnd , &
2321 CALL int_get_handle_header( obuf(icurs/itypesize), hdrbufsize, itypesize, &
2323 icurs = icurs + hdrbufsize
2325 WRITE(mess,*)'quilt: bad tag: ', &
2326 get_hdr_tag( obuf(icurs/itypesize) ),' icurs ',&
2328 CALL wrf_error_fatal( mess )
2332 ! Now we have finished handling all commands from the latest
2333 ! call to retrieve_pieces_of_field().
2335 IF (stored_write_record) THEN
2336 ! If any field patches have been stored in internal output buffers
2337 ! (via a call to store_patch_in_outbuf_pnc()) then call write_outbuf_pnc()
2338 ! to write them to disk now.
2339 ! NOTE that the I/O server will only have called
2340 ! store_patch_in_outbuf_pnc() when handling write_field (int_field)
2341 ! commands which only arrive AFTER an "iosync" command.
2343 #ifdef PNETCDF_QUILT
2344 CALL write_outbuf_pnc( handle(DataHandle), &
2345 use_package(io_form(DataHandle)), &
2346 mpi_comm_local, mytask_local, &
2349 ! CALL end_timing( "quilt_pnc: call to write_outbuf_pnc" )
2350 stored_write_record = .false.
2351 written_record = .true.
2354 ! If one or more "open_for_write_commit" commands were encountered from the
2355 ! latest call to retrieve_pieces_of_field() then call the package-specific
2356 ! routine to do the commit.
2357 IF (okay_to_commit(DataHandle)) THEN
2359 SELECT CASE (use_package(io_form(DataHandle)))
2360 #ifdef PNETCDF_QUILT
2362 CALL ext_pnc_inquire_filename( handle(DataHandle), fname, fstat, Status )
2363 IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2364 CALL ext_pnc_open_for_write_commit(handle(DataHandle),Status)
2365 okay_to_write(DataHandle) = .true.
2370 CALL ext_ncd_inquire_filename( handle(DataHandle), fname, fstat, Status )
2371 IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2372 CALL ext_ncd_open_for_write_commit(handle(DataHandle),Status)
2373 okay_to_write(DataHandle) = .true.
2378 CALL ext_int_inquire_filename( handle(DataHandle), fname, fstat, Status )
2379 IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2380 CALL ext_int_open_for_write_commit(handle(DataHandle),Status)
2381 okay_to_write(DataHandle) = .true.
2386 CALL ext_yyy_inquire_filename( handle(DataHandle), fname, fstat, Status )
2387 IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2388 CALL ext_yyy_open_for_write_commit(handle(DataHandle),Status)
2389 okay_to_write(DataHandle) = .true.
2394 CALL ext_gr1_inquire_filename( handle(DataHandle), fname, fstat, Status )
2395 IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2396 CALL ext_gr1_open_for_write_commit(handle(DataHandle),Status)
2397 okay_to_write(DataHandle) = .true.
2402 CALL ext_gr2_inquire_filename( handle(DataHandle), fname, fstat, Status )
2403 IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2404 CALL ext_gr2_open_for_write_commit(handle(DataHandle),Status)
2405 okay_to_write(DataHandle) = .true.
2413 okay_to_commit(DataHandle) = .false.
2417 ! Retrieve header and all patches for the next field from the internal buffers.
2419 CALL retrieve_pieces_of_field ( obuf , VarName, obufsize, sz, retval )
2424 ! flush output files if needed
2425 IF (written_record) THEN
2427 SELECT CASE ( use_package(io_form(DataHandle)) )
2428 #ifdef PNETCDF_QUILT
2430 CALL ext_pnc_iosync( handle(DataHandle), Status )
2435 written_record = .false.
2436 !CALL end_timing( "quilt_pnc: flush" )
2441 END SUBROUTINE quilt_pnc
2443 ! end of #endif of DM_PARALLEL
2446 SUBROUTINE init_module_wrf_quilt
2448 ! Both client (compute) and server tasks call this routine to initialize the
2449 ! module. Routine setup_quilt_servers() is called from this routine to
2450 ! determine which tasks are compute tasks and which are server tasks. Server
2451 ! tasks then call routine quilt() and remain there for the rest of the model
2452 ! run. Compute tasks return from init_module_wrf_quilt() to perform model
2455 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2459 NAMELIST /namelist_quilt/ nio_tasks_per_group, nio_groups
2460 INTEGER ntasks, mytask, ierr, io_status
2461 # if defined(_OPENMP) && defined(MPI2_THREAD_SUPPORT)
2462 INTEGER thread_support_provided, thread_support_requested
2464 INTEGER mpi_comm_here
2466 LOGICAL esmf_coupling
2468 !TODO: Change this to run-time switch
2470 esmf_coupling = .TRUE.
2472 esmf_coupling = .FALSE.
2475 quilting_enabled = .FALSE.
2476 IF ( disable_quilt ) RETURN
2478 DO i = 1,int_num_handles
2479 okay_to_write(i) = .FALSE.
2480 int_handle_in_use(i) = .FALSE.
2481 server_for_handle(i) = 0
2482 int_num_bytes_to_write(i) = 0
2485 CALL MPI_INITIALIZED( mpi_inited, ierr )
2486 IF ( .NOT. mpi_inited ) THEN
2487 # if defined(_OPENMP) && defined(MPI2_THREAD_SUPPORT)
2488 thread_support_requested = MPI_THREAD_FUNNELED
2489 CALL mpi_init_thread ( thread_support_requested, thread_support_provided, ierr )
2490 IF ( thread_support_provided .lt. thread_support_requested ) THEN
2491 CALL WRF_ERROR_FATAL( "failed to initialize MPI thread support")
2494 CALL mpi_init ( ierr )
2496 CALL wrf_set_dm_communicator( MPI_COMM_WORLD )
2499 CALL wrf_get_dm_communicator( mpi_comm_here )
2501 CALL MPI_Comm_rank ( mpi_comm_here, mytask, ierr ) ;
2502 CALL mpi_x_comm_size ( mpi_comm_here, ntasks, ierr ) ;
2504 IF ( mytask .EQ. 0 ) THEN
2505 OPEN ( unit=27, file="namelist.input", form="formatted", status="old" )
2507 nio_tasks_per_group = 0
2508 READ ( 27 , NML = namelist_quilt, IOSTAT=io_status )
2509 IF (io_status .NE. 0) THEN
2510 CALL wrf_error_fatal( "ERROR reading namelist namelist_quilt" )
2513 IF ( esmf_coupling ) THEN
2514 IF ( nio_tasks_per_group > 0 ) THEN
2515 CALL wrf_error_fatal("frame/module_io_quilt.F: cannot use "// &
2516 "ESMF coupling with quilt tasks") ;
2520 CALL mpi_bcast( nio_tasks_per_group , 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
2521 CALL mpi_bcast( nio_groups , 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
2523 CALL setup_quilt_servers( nio_tasks_per_group, &
2527 nio_tasks_in_group, &
2532 ! provide the communicator for the integration tasks to RSL
2533 IF ( compute_node ) THEN
2534 CALL wrf_set_dm_communicator( mpi_comm_local )
2536 CALL quilt ! will not return on io server tasks
2540 END SUBROUTINE init_module_wrf_quilt
2541 END MODULE module_wrf_quilt
2544 ! Remaining routines in this file are defined outside of the module
2545 ! either to defeat arg/param type checking or to avoid an explicit use
2549 SUBROUTINE disable_quilting
2551 ! Call this in programs that should never use quilting (e.g. real).
2552 ! It must be called before init_module_wrf_quilt().
2554 USE module_wrf_quilt
2555 disable_quilt = .TRUE.
2557 END SUBROUTINE disable_quilting
2559 LOGICAL FUNCTION use_output_servers()
2561 ! Returns .TRUE. if I/O quilt servers are in use for write operations.
2562 ! This routine is called only by client (compute) tasks.
2564 USE module_wrf_quilt
2565 use_output_servers = quilting_enabled
2567 END FUNCTION use_output_servers
2569 LOGICAL FUNCTION use_input_servers()
2571 ! Returns .TRUE. if I/O quilt servers are in use for read operations.
2572 ! This routine is called only by client (compute) tasks.
2574 USE module_wrf_quilt
2575 use_input_servers = .FALSE.
2577 END FUNCTION use_input_servers
2579 SUBROUTINE wrf_quilt_open_for_write_begin( FileName , Comm_compute, Comm_io, SysDepInfo, &
2580 DataHandle , io_form_arg, Status )
2582 ! Instruct the I/O quilt servers to begin data definition ("training") phase
2583 ! for writing to WRF dataset FileName. io_form_arg indicates file format.
2584 ! This routine is called only by client (compute) tasks.
2586 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2587 USE module_wrf_quilt
2588 USE module_state_description, ONLY: IO_PNETCDF
2591 #include "intio_tags.h"
2592 CHARACTER *(*), INTENT(IN) :: FileName
2593 INTEGER , INTENT(IN) :: Comm_compute , Comm_io
2594 CHARACTER *(*), INTENT(IN) :: SysDepInfo
2595 INTEGER , INTENT(OUT) :: DataHandle
2596 INTEGER , INTENT(IN) :: io_form_arg
2597 INTEGER , INTENT(OUT) :: Status
2599 CHARACTER*132 :: locFileName, locSysDepInfo
2600 INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group
2602 INTEGER, EXTERNAL :: use_package
2604 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_open_for_write_begin' )
2605 CALL int_get_fresh_handle(i)
2606 okay_to_write(i) = .false.
2609 locFileName = FileName
2610 locSysDepInfo = SysDepInfo
2612 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
2614 SELECT CASE(use_package(io_form_arg))
2616 #ifdef PNETCDF_QUILT
2618 IF(compute_group_master(1)) THEN
2619 CALL int_gen_ofwb_header( hdrbuf, hdrbufsize, itypesize, &
2620 locFileName,locSysDepInfo,io_form_arg,&
2623 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
2628 IF ( wrf_dm_on_monitor() ) THEN
2629 CALL int_gen_ofwb_header( hdrbuf, hdrbufsize, itypesize, &
2630 locFileName,locSysDepInfo,io_form_arg,DataHandle )
2632 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
2637 iserver = get_server_id ( DataHandle )
2638 !JMDEBUGwrite(0,*)'wrf_quilt_open_for_write_begin iserver = ', iserver
2639 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
2640 !JMDEBUGwrite(0,*)'wrf_quilt_open_for_write_begin comm_io_group = ', comm_io_group
2642 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
2643 !JMDEBUGwrite(0,*)'mpi_x_comm_size tasks_in_group ',tasks_in_group, ierr
2645 !!JMTIMING CALL start_timing
2646 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
2648 reduced(1) = hdrbufsize
2649 #ifdef PNETCDF_QUILT
2650 IF ( compute_group_master(1) ) reduced(2) = i
2652 IF ( wrf_dm_on_monitor() ) reduced(2) = i
2654 CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER, &
2655 MPI_SUM, tasks_in_group-1, & ! root = nio_tasks_in_group-1 is me
2656 comm_io_group, ierr )
2657 !!JMTIMING CALL end_timing("MPI_Reduce in wrf_quilt_open_for_write_begin")
2659 ! send data to the i/o processor
2660 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
2662 hdrbuf, hdrbufsize , &
2670 END SUBROUTINE wrf_quilt_open_for_write_begin
2672 SUBROUTINE wrf_quilt_open_for_write_commit( DataHandle , Status )
2674 ! Instruct the I/O quilt servers to switch an internal flag to enable output
2675 ! for the dataset referenced by DataHandle. The call to
2676 ! wrf_quilt_open_for_write_commit() must be paired with a call to
2677 ! wrf_quilt_open_for_write_begin().
2678 ! This routine is called only by client (compute) tasks.
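!
! Typical client-side usage, in sketch form (a hedged illustration using
! the routine names defined in this file; handle and argument names are
! abbreviated, and error checks are omitted):
!
!   CALL wrf_quilt_open_for_write_begin( FName, comm, comm, SysDep, dh, io_form, Status )
!   ! ... "training" writes that define dimensions, variables, metadata ...
!   CALL wrf_quilt_open_for_write_commit( dh, Status )
!   ! ... real writes of field data ...
!   CALL wrf_quilt_ioclose( dh, Status )
!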
2680 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2681 USE module_wrf_quilt
2684 #include "intio_tags.h"
2685 INTEGER , INTENT(IN ) :: DataHandle
2686 INTEGER , INTENT(OUT) :: Status
2687 INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group
2690 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_open_for_write_commit' )
2691 IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
2692 IF ( int_handle_in_use( DataHandle ) ) THEN
2693 okay_to_write( DataHandle ) = .true.
2697 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
2699 #ifdef PNETCDF_QUILT
2700 !ARP Only want one command to be received by each IO server when using
2702 IF(compute_group_master(1)) THEN
2703 CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
2704 DataHandle, int_open_for_write_commit )
2706 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
2710 IF ( wrf_dm_on_monitor() ) THEN
2711 CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
2712 DataHandle, int_open_for_write_commit )
2714 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
2718 iserver = get_server_id ( DataHandle )
2719 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
2721 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
2723 !!JMTIMING CALL start_timing
2724 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
2726 reduced(1) = hdrbufsize
2727 #ifdef PNETCDF_QUILT
2728 IF ( compute_group_master(1) ) reduced(2) = DataHandle
2730 IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
2732 CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER, &
2733 MPI_SUM, tasks_in_group-1, & ! root = nio_tasks_in_group-1 is me
2734 comm_io_group, ierr )
2735 !!JMTIMING CALL end_timing("MPI_Reduce in wrf_quilt_open_for_write_commit")
2737 ! send data to the i/o processor
2738 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
2740 hdrbuf, hdrbufsize , &
2747 END SUBROUTINE wrf_quilt_open_for_write_commit
2749 SUBROUTINE wrf_quilt_open_for_read ( FileName , Comm_compute, Comm_io, SysDepInfo, &
2750 DataHandle , Status )
2752 ! Instruct the I/O quilt servers to open WRF dataset FileName for reading.
2753 ! This routine is called only by client (compute) tasks.
2754 ! This is not yet supported.
2756 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2758 CHARACTER *(*), INTENT(IN) :: FileName
2759 INTEGER , INTENT(IN) :: Comm_compute , Comm_io
2760 CHARACTER *(*), INTENT(IN) :: SysDepInfo
2761 INTEGER , INTENT(OUT) :: DataHandle
2762 INTEGER , INTENT(OUT) :: Status
2764 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_open_for_read' )
2767 CALL wrf_error_fatal ( "frame/module_io_quilt.F: wrf_quilt_open_for_read not yet supported" )
2770 END SUBROUTINE wrf_quilt_open_for_read
2772 SUBROUTINE wrf_quilt_inquire_opened ( DataHandle, FileName , FileStatus, Status )
2774 ! Inquire if the dataset referenced by DataHandle is open.
2775 ! Does not require communication with I/O servers.
2776 ! This routine is called only by client (compute) tasks.
2778 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2779 USE module_wrf_quilt
2781 #include "wrf_io_flags.h"
2782 INTEGER , INTENT(IN) :: DataHandle
2783 CHARACTER *(*), INTENT(IN) :: FileName
2784 INTEGER , INTENT(OUT) :: FileStatus
2785 INTEGER , INTENT(OUT) :: Status
2789 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_inquire_opened' )
2790 IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
2791 IF ( int_handle_in_use( DataHandle ) ) THEN
2792 IF ( okay_to_write( DataHandle ) ) THEN
2793 FileStatus = WRF_FILE_OPENED_FOR_WRITE
2801 END SUBROUTINE wrf_quilt_inquire_opened
2803 SUBROUTINE wrf_quilt_inquire_filename ( DataHandle, FileName , FileStatus, Status )
2805 ! Return the Filename and FileStatus associated with DataHandle.
2806 ! Does not require communication with I/O servers.
2808 ! Note that the current implementation does not actually return FileName.
2809 ! Currently, WRF does not use this returned value. Fixing this would simply
2810 ! require saving the file names on the client tasks in an array similar to
2812 ! This routine is called only by client (compute) tasks.
2814 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2815 USE module_wrf_quilt
2817 #include "wrf_io_flags.h"
2818 INTEGER , INTENT(IN) :: DataHandle
2819 CHARACTER *(*), INTENT(OUT) :: FileName
2820 INTEGER , INTENT(OUT) :: FileStatus
2821 INTEGER , INTENT(OUT) :: Status
2822 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_inquire_filename' )
2824 IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
2825 IF ( int_handle_in_use( DataHandle ) ) THEN
2826 IF ( okay_to_write( DataHandle ) ) THEN
2827 FileStatus = WRF_FILE_OPENED_FOR_WRITE
2829 FileStatus = WRF_FILE_OPENED_NOT_COMMITTED
2832 FileStatus = WRF_FILE_NOT_OPENED
2835 FileName = "bogusfornow"
2841 END SUBROUTINE wrf_quilt_inquire_filename
2843 SUBROUTINE wrf_quilt_iosync ( DataHandle, Status )
2845 ! Instruct the I/O quilt servers to synchronize the disk copy of a dataset
2846 ! with memory buffers.
2848 ! After the "iosync" header (request) is sent to the I/O quilt server,
2849 ! the compute tasks will then send the entire contents (headers and data) of
2850 ! int_local_output_buffer to their I/O quilt server. This communication is
2851 ! done in subroutine send_to_io_quilt_servers(). After the I/O quilt servers
2852 ! receive this data, they will write all accumulated fields to disk.
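!
! Client-side sequence in sketch form (mirrors the code below; the
! collect_on_comm_debug argument list is abbreviated to "..."):
!
!   reduced(1) = int_num_bytes_to_write(DataHandle)   ! buffer size in bytes
!   CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER, MPI_SUM, &
!                    tasks_in_group-1, comm_io_group, ierr )  ! root is the server
!   CALL collect_on_comm_debug( __FILE__, __LINE__, comm_io_group, ... )
!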
2854 ! Significant time may be required for the I/O quilt servers to organize
2855 ! fields and write them to disk. Therefore, the "iosync" request should be
2856 ! sent only when the compute tasks are ready to run for a while without
2857 ! needing to communicate with the servers. Otherwise, the compute tasks
2858 ! will end up waiting for the servers to finish writing to disk, thus wasting
2859 ! any performance benefits of having servers at all.
2861 ! This routine is called only by client (compute) tasks.
2863 #if defined( DM_PARALLEL ) && ! defined (STUBMPI)
2864 USE module_wrf_quilt
2867 INTEGER , INTENT(IN) :: DataHandle
2868 INTEGER , INTENT(OUT) :: Status
2870 INTEGER locsize , itypesize
2871 INTEGER ierr, tasks_in_group, comm_io_group, dummy, i
2873 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_iosync' )
2876 IF ( associated ( int_local_output_buffer ) ) THEN
2878 iserver = get_server_id ( DataHandle )
2879 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
2881 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
2883 locsize = int_num_bytes_to_write(DataHandle)
2886 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
2888 reduced(1) = locsize
2889 #ifdef PNETCDF_QUILT
2890 ! ARP Only want one command per IOServer if doing parallel IO
2891 IF ( compute_group_master(1) ) reduced(2) = DataHandle
2893 IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
2895 CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER, &
2896 MPI_SUM, tasks_in_group-1, & ! root = nio_tasks_in_group-1 is me
2897 comm_io_group, ierr )
2898 ! CALL end_timing("MPI_Reduce in wrf_quilt_iosync")
2900 ! send data to the i/o processor
2902 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
2904 int_local_output_buffer(1), locsize , &
2907 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
2909 int_local_output_buffer, locsize , &
2914 int_local_output_cursor = 1
2915 ! int_num_bytes_to_write(DataHandle) = 0
2916 DEALLOCATE ( int_local_output_buffer )
2917 NULLIFY ( int_local_output_buffer )
2919 CALL wrf_message ("frame/module_io_quilt.F: wrf_quilt_iosync: no buffer allocated")
2921 ! CALL end_timing("wrf_quilt_iosync")
2925 END SUBROUTINE wrf_quilt_iosync
2927 SUBROUTINE wrf_quilt_ioclose ( DataHandle, Status )
2929 ! Instruct the I/O quilt servers to close the dataset referenced by
2931 ! This routine also clears the client file handle and, if needed, deallocates
2932 ! int_local_output_buffer.
2933 ! This routine is called only by client (compute) tasks.
2935 #if defined( DM_PARALLEL ) && ! defined( STUBMPI)
2936 USE module_wrf_quilt
2940 #include "intio_tags.h"
2941 INTEGER , INTENT(IN) :: DataHandle
2942 INTEGER , INTENT(OUT) :: Status
2943 INTEGER i, itypesize, tasks_in_group, comm_io_group, ierr
2946 !!JMTIMING CALL start_timing
2947 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_ioclose' )
2948 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
2950 ! If we're using pnetcdf then each IO server will need to receive the
2951 ! handle just once, as there is no longer a reduce over the IO servers
2952 ! to get it.
2953 #ifdef PNETCDF_QUILT
2954 IF ( compute_group_master(1) )THEN
2955 CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
2956 DataHandle, int_ioclose )
2958 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
2961 IF ( wrf_dm_on_monitor() ) THEN
2962 CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
2963 DataHandle , int_ioclose )
2965 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
2969 iserver = get_server_id ( DataHandle )
2970 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
2972 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
2974 !!JMTIMING CALL start_timing
2975 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
2977 #ifdef PNETCDF_QUILT
2978 ! If we're using pnetcdf then each IO server will need the handle as there is
2979 ! no longer a reduce over the IO servers to get it.
2980 IF ( compute_group_master(1) ) reduced(2) = DataHandle
2982 IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
2984 CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER, &
2985 MPI_SUM, tasks_in_group-1, & ! root = nio_tasks_in_group-1 is me
2986 comm_io_group, ierr )
2987 !!JMTIMING CALL end_timing("MPI_Reduce in ioclose")
2990 ! send data to the i/o processor
2991 !!JMTIMING CALL start_timing
2992 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
2994 hdrbuf, hdrbufsize , &
2996 !!JMTIMING CALL end_timing("collect_on_comm in io_close")
2999 int_handle_in_use(DataHandle) = .false.
3000 CALL set_server_id( DataHandle, 0 )
3001 okay_to_write(DataHandle) = .false.
3002 okay_to_commit(DataHandle) = .false.
3003 int_local_output_cursor = 1
3004 int_num_bytes_to_write(DataHandle) = 0
3005 IF ( associated ( int_local_output_buffer ) ) THEN
3006 DEALLOCATE ( int_local_output_buffer )
3007 NULLIFY ( int_local_output_buffer )
3011 !!JMTIMING CALL end_timing( "wrf_quilt_ioclose" )
3015 END SUBROUTINE wrf_quilt_ioclose
3017 SUBROUTINE wrf_quilt_ioexit( Status )
3019 ! Instruct the I/O quilt servers to shut down the WRF I/O system.
3020 ! Do not call any wrf_quilt_*() routines after this routine has been called.
3021 ! This routine is called only by client (compute) tasks.
3023 #if defined( DM_PARALLEL ) && ! defined (STUBMPI )
3024 USE module_wrf_quilt
3027 #include "intio_tags.h"
3028 INTEGER , INTENT(OUT) :: Status
3029 INTEGER :: DataHandle
3030 INTEGER i, itypesize, tasks_in_group, comm_io_group, me, ierr
3033 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_ioexit' )
3034 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
3036 !ARPDBG - potential bug. Have no access to what type of IO is being used for
3037 ! this data so if PNETCDF_QUILT is defined then we assume that's what's being used.
3038 #ifdef PNETCDF_QUILT
3039 !ARP Send the ioexit message just once to each IOServer when using parallel IO
3040 IF( compute_group_master(1) ) THEN
3041 CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
3042 DataHandle, int_ioexit )
3044 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3048 IF ( wrf_dm_on_monitor() ) THEN
3049 CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
3050 DataHandle , int_ioexit ) ! Handle is dummy
3052 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3056 DO iserver = 1, nio_groups
3057 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
3059 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
3060 CALL mpi_comm_rank( comm_io_group , me , ierr )
3062 ! BY SENDING A NEGATIVE SIZE WE GET THE SERVERS TO SHUT DOWN
3065 IF ( me .eq. 0 ) reduced(1) = hdrbufsize
3066 CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER, &
3067 MPI_SUM, tasks_in_group-1, & ! root = nio_tasks_in_group-1 is me
3068 comm_io_group, ierr )
3075 END SUBROUTINE wrf_quilt_ioexit
3077 SUBROUTINE wrf_quilt_get_next_time ( DataHandle, DateStr, Status )
3079 ! Instruct the I/O quilt servers to return the next time stamp.
3080 ! This is not yet supported.
3081 ! This routine is called only by client (compute) tasks.
3083 #if defined( DM_PARALLEL ) && ! defined (STUBMPI)
3085 INTEGER , INTENT(IN) :: DataHandle
3086 CHARACTER*(*) :: DateStr
3090 END SUBROUTINE wrf_quilt_get_next_time
3092 SUBROUTINE wrf_quilt_get_previous_time ( DataHandle, DateStr, Status )
3094 ! Instruct the I/O quilt servers to return the previous time stamp.
3095 ! This is not yet supported.
3096 ! This routine is called only by client (compute) tasks.
3098 #if defined( DM_PARALLEL ) && ! defined (STUBMPI)
3100 INTEGER , INTENT(IN) :: DataHandle
3101 CHARACTER*(*) :: DateStr
3105 END SUBROUTINE wrf_quilt_get_previous_time
3107 SUBROUTINE wrf_quilt_set_time ( DataHandle, Data, Status )
3109 ! Instruct the I/O quilt servers to set the time stamp in the dataset
3110 ! referenced by DataHandle.
3111 ! This routine is called only by client (compute) tasks.
3113 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3114 USE module_wrf_quilt
3115 USE module_state_description, ONLY: IO_PNETCDF
3118 #include "intio_tags.h"
3119 INTEGER , INTENT(IN) :: DataHandle
3120 CHARACTER*(*) , INTENT(IN) :: Data
3122 INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group
3125 INTEGER, EXTERNAL :: use_package
3127 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_set_time' )
3129 IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
3130 IF ( int_handle_in_use( DataHandle ) ) THEN
3131 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
3132 Count = 0 ! there is no count for character strings
3134 !ARPDBG - potential bug. Have no access to what type of IO is being used for
3135 ! this data so if PNETCDF_QUILT is defined then we assume that's what's being used.
3136 #ifdef PNETCDF_QUILT
3137 IF(compute_group_master(1) )THEN
3138 ! Only want to send one time header to each IO server; the IO servers
3139 ! themselves cannot tell which records are the redundant copies, so only
3140 ! the compute_group_master process sends the real header.
3141 CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
3142 DataHandle, "TIMESTAMP", "", Data, int_set_time )
3144 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3147 IF ( wrf_dm_on_monitor() ) THEN
3148 CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
3149 DataHandle, "TIMESTAMP", "", Data, int_set_time )
3151 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3155 iserver = get_server_id ( DataHandle )
3156 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
3157 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
3159 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
3161 reduced(1) = hdrbufsize
3162 #ifdef PNETCDF_QUILT
3163 IF ( compute_group_master(1) ) reduced(2) = DataHandle
3165 IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
3167 CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER, &
3168 MPI_SUM, tasks_in_group-1, & ! root = nio_tasks_in_group-1 is me
3169 comm_io_group, ierr )
3170 ! send data to the i/o processor
3171 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
3173 hdrbuf, hdrbufsize , &
3180 END SUBROUTINE wrf_quilt_set_time
3182 SUBROUTINE wrf_quilt_get_next_var ( DataHandle, VarName, Status )
3184 ! When reading, instruct the I/O quilt servers to return the name of the next
3185 ! variable in the current time frame.
3186 ! This is not yet supported.
3187 ! This routine is called only by client (compute) tasks.
3189 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3191 INTEGER , INTENT(IN) :: DataHandle
3192 CHARACTER*(*) :: VarName
3196 END SUBROUTINE wrf_quilt_get_next_var
3198 SUBROUTINE wrf_quilt_get_dom_ti_real ( DataHandle,Element, Data, Count, Outcount, Status )
3200 ! Instruct the I/O quilt servers to attempt to read Count words of time
3201 ! independent domain metadata named "Element"
3202 ! from the open dataset described by DataHandle.
3203 ! Metadata of type real are
3204 ! stored in array Data.
3205 ! Actual number of words read is returned in OutCount.
3206 ! This routine is called only by client (compute) tasks.
3208 ! This is not yet supported.
3210 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3212 INTEGER , INTENT(IN) :: DataHandle
3213 CHARACTER*(*) , INTENT(IN) :: Element
3214 REAL, INTENT(IN) :: Data(*)
3215 INTEGER , INTENT(IN) :: Count
3218 CALL wrf_message('wrf_quilt_get_dom_ti_real not supported yet')
3221 END SUBROUTINE wrf_quilt_get_dom_ti_real
3223 SUBROUTINE wrf_quilt_put_dom_ti_real ( DataHandle,Element, Data, Count, Status )
3225 ! Instruct the I/O quilt servers to write Count words of time independent
3226 ! domain metadata named "Element"
3227 ! to the open dataset described by DataHandle.
3228 ! Metadata of type real are
3229 ! copied from array Data.
3230 ! This routine is called only by client (compute) tasks.
3232 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3233 USE module_wrf_quilt
3236 #include "intio_tags.h"
3237 INTEGER , INTENT(IN) :: DataHandle
3238 CHARACTER*(*) , INTENT(IN) :: Element
3239 REAL , INTENT(IN) :: Data(*)
3240 INTEGER , INTENT(IN) :: Count
3243 CHARACTER*132 :: locElement
3244 INTEGER i, typesize, itypesize, tasks_in_group, ierr, comm_io_group
3247 !!JMTIMING CALL start_timing
3248 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_put_dom_ti_real' )
3249 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
3250 locElement = Element
3252 IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
3253 IF ( int_handle_in_use( DataHandle ) ) THEN
3254 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
3255 CALL MPI_TYPE_SIZE( MPI_REAL, typesize, ierr )
3257 #ifdef PNETCDF_QUILT
3258 IF ( compute_group_master(1) ) THEN
3259 CALL int_gen_ti_header( hdrbuf, hdrbufsize, itypesize, typesize, &
3260 DataHandle, locElement, Data, Count, int_dom_ti_real )
3262 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3265 IF ( wrf_dm_on_monitor() ) THEN
3266 CALL int_gen_ti_header( hdrbuf, hdrbufsize, itypesize, typesize, &
3267 DataHandle, locElement, Data, Count, int_dom_ti_real )
3269 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3273 iserver = get_server_id ( DataHandle )
3274 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
3275 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
3277 !!JMTIMING CALL start_timing
3278 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
3280 reduced(1) = hdrbufsize
3281 #ifdef PNETCDF_QUILT
3282 IF( compute_group_master(1) ) reduced(2) = DataHandle
3284 IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
3286 CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER, &
3287 MPI_SUM, tasks_in_group-1, & ! root = nio_tasks_in_group-1 is me
3288 comm_io_group, ierr )
3289 !!JMTIMING CALL end_timing("MPI_Reduce in wrf_quilt_put_dom_ti_real")
3290 ! send data to the i/o processor
3291 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
3293 hdrbuf, hdrbufsize , &
3299 !!JMTIMING CALL end_timing("wrf_quilt_put_dom_ti_real")
3302 END SUBROUTINE wrf_quilt_put_dom_ti_real
3304 SUBROUTINE wrf_quilt_get_dom_ti_double ( DataHandle,Element, Data, Count, Outcount, Status )
3306 ! Instruct the I/O quilt servers to attempt to read Count words of time
3307 ! independent domain metadata named "Element"
3308 ! from the open dataset described by DataHandle.
3309 ! Metadata of type double are
3310 ! stored in array Data.
3311 ! Actual number of words read is returned in OutCount.
3312 ! This routine is called only by client (compute) tasks.
3314 ! This is not yet supported.
3316 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3318 INTEGER , INTENT(IN) :: DataHandle
3319 CHARACTER*(*) , INTENT(IN) :: Element
3321 INTEGER , INTENT(IN) :: Count
3324 CALL wrf_error_fatal('wrf_quilt_get_dom_ti_double not supported yet')
3327 END SUBROUTINE wrf_quilt_get_dom_ti_double
3329 SUBROUTINE wrf_quilt_put_dom_ti_double ( DataHandle,Element, Data, Count, Status )
3331 ! Instruct the I/O quilt servers to write Count words of time independent
3332 ! domain metadata named "Element"
3333 ! to the open dataset described by DataHandle.
3334 ! Metadata of type double are
3335 ! copied from array Data.
3336 ! This routine is called only by client (compute) tasks.
3338 ! This is not yet supported.
3340 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3342 INTEGER , INTENT(IN) :: DataHandle
3343 CHARACTER*(*) , INTENT(IN) :: Element
3344 REAL*8 , INTENT(IN) :: Data(*)
3345 INTEGER , INTENT(IN) :: Count
3347 CALL wrf_error_fatal('wrf_quilt_put_dom_ti_double not supported yet')
3350 END SUBROUTINE wrf_quilt_put_dom_ti_double
3352 SUBROUTINE wrf_quilt_get_dom_ti_integer ( DataHandle,Element, Data, Count, Outcount, Status )
3354 ! Instruct the I/O quilt servers to attempt to read Count words of time
3355 ! independent domain metadata named "Element"
3356 ! from the open dataset described by DataHandle.
3357 ! Metadata of type integer are
3358 ! stored in array Data.
3359 ! Actual number of words read is returned in OutCount.
3360 ! This routine is called only by client (compute) tasks.
3362 ! This is not yet supported.
3364 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3366 INTEGER , INTENT(IN) :: DataHandle
3367 CHARACTER*(*) , INTENT(IN) :: Element
3369 INTEGER , INTENT(IN) :: Count
3372 CALL wrf_message('wrf_quilt_get_dom_ti_integer not supported yet')
3375 END SUBROUTINE wrf_quilt_get_dom_ti_integer
3377 SUBROUTINE wrf_quilt_put_dom_ti_integer ( DataHandle,Element, Data, Count, Status )
3379 ! Instruct the I/O quilt servers to write Count words of time independent
3380 ! domain metadata named "Element"
3381 ! to the open dataset described by DataHandle.
3382 ! Metadata of type integer are
3383 ! copied from array Data.
3384 ! This routine is called only by client (compute) tasks.
3386 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3387 USE module_wrf_quilt
3388 USE module_state_description, ONLY: IO_PNETCDF
3391 #include "intio_tags.h"
3392 INTEGER , INTENT(IN) :: DataHandle
3393 CHARACTER*(*) , INTENT(IN) :: Element
3394 INTEGER , INTENT(IN) :: Data(*)
3395 INTEGER , INTENT(IN) :: Count
3398 CHARACTER*132 :: locElement
3399 INTEGER i, typesize, itypesize, tasks_in_group, ierr, comm_io_group, dummy
3401 INTEGER, EXTERNAL :: use_package
3404 !!JMTIMING CALL start_timing
3405 locElement = Element
3407 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_put_dom_ti_integer' )
3409 IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
3410 IF ( int_handle_in_use( DataHandle ) ) THEN
3411 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
3412 CALL MPI_TYPE_SIZE( MPI_INTEGER, typesize, ierr )
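! (Data is INTEGER for this routine, so typesize is deliberately taken from
! MPI_INTEGER as well; the two sizes coincide here.)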
3414 !ARPDBG - potential bug. We have no access to the type of I/O being used for
3415 ! this data, so if PNETCDF_QUILT is defined we assume that is what is being used.
3416 #ifdef PNETCDF_QUILT
3417 IF ( compute_group_master(1) )THEN
3418 CALL int_gen_ti_header( hdrbuf, hdrbufsize, itypesize, typesize, &
3419 DataHandle, locElement, Data, Count, &
3420 int_dom_ti_integer )
3421 ELSE
3422 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3423 ENDIF
3424 #else
3425 IF ( wrf_dm_on_monitor() ) THEN
3426 CALL int_gen_ti_header( hdrbuf, hdrbufsize, itypesize, typesize, &
3427 DataHandle, locElement, Data, Count, &
3428 int_dom_ti_integer )
3429 ELSE
3430 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3431 ENDIF
3432 #endif
3434 iserver = get_server_id ( DataHandle )
3435 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
3436 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
3438 !!JMTIMING CALL start_timing
3439 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on the client side)
3441 reduced(1) = hdrbufsize
3442 #ifdef PNETCDF_QUILT
3443 IF ( compute_group_master(1) ) reduced(2) = DataHandle
3444 #else
3445 IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
3446 #endif
3447 CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER, &
3448 MPI_SUM, tasks_in_group-1, & ! root = tasks_in_group-1 is the I/O server task in this group
3449 comm_io_group, ierr )
3451 !!JMTIMING CALL end_timing("MPI_Reduce in wrf_quilt_put_dom_ti_integer")
3452 ! send data to the i/o processor
3453 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
3454 itypesize, &
3455 hdrbuf, hdrbufsize , &
3456 dummy, 0 )
3457 ENDIF
3458 ENDIF
3459 CALL wrf_debug ( DEBUG_LVL, 'returning from wrf_quilt_put_dom_ti_integer' )
3460 !!JMTIMING CALL end_timing("wrf_quilt_put_dom_ti_integer" )
3464 END SUBROUTINE wrf_quilt_put_dom_ti_integer
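!
! Usage sketch (hypothetical attribute and names): writing a global integer
! attribute such as a physics option through the quilt layer:
!
!   INTEGER :: mp_opt(1), dh, stat
!   mp_opt(1) = 8
!   CALL wrf_quilt_put_dom_ti_integer ( dh, 'MP_PHYSICS', mp_opt, 1, stat )
!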
3466 SUBROUTINE wrf_quilt_get_dom_ti_logical ( DataHandle,Element, Data, Count, Outcount, Status )
3468 ! Instruct the I/O quilt servers to attempt to read Count words of time
3469 ! independent domain metadata named "Element"
3470 ! from the open dataset described by DataHandle.
3471 ! Metadata of type logical are
3472 ! stored in array Data.
3473 ! Actual number of words read is returned in OutCount.
3474 ! This routine is called only by client (compute) tasks.
3476 ! This is not yet supported.
3478 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3480 INTEGER , INTENT(IN) :: DataHandle
3481 CHARACTER*(*) , INTENT(IN) :: Element
3483 INTEGER , INTENT(IN) :: Count
3486 ! CALL wrf_message('wrf_quilt_get_dom_ti_logical not supported yet')
3489 END SUBROUTINE wrf_quilt_get_dom_ti_logical
3491 SUBROUTINE wrf_quilt_put_dom_ti_logical ( DataHandle,Element, Data, Count, Status )
3493 ! Instruct the I/O quilt servers to write Count words of time independent
3494 ! domain metadata named "Element"
3495 ! to the open dataset described by DataHandle.
3496 ! Metadata of type logical are
3497 ! copied from array Data.
3498 ! This routine is called only by client (compute) tasks.
3500 ! This is not yet supported.
3502 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3504 INTEGER , INTENT(IN) :: DataHandle
3505 CHARACTER*(*) , INTENT(IN) :: Element
3506 logical , INTENT(IN) :: Data(*)
3507 INTEGER , INTENT(IN) :: Count
3511 INTEGER one_or_zero(Count)
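! The packing loop is elided here; a minimal sketch of the presumed
! conversion (an assumption, based on the one_or_zero buffer and the
! delegation to the integer writer below) is:
!   DO i = 1, Count
!     IF ( Data(i) ) THEN
!       one_or_zero(i) = 1
!     ELSE
!       one_or_zero(i) = 0
!     ENDIF
!   ENDDO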
3521 CALL wrf_quilt_put_dom_ti_integer ( DataHandle,Element, one_or_zero, Count, Status )
3524 END SUBROUTINE wrf_quilt_put_dom_ti_logical
3526 SUBROUTINE wrf_quilt_get_dom_ti_char ( DataHandle,Element, Data, Status )
3528 ! Instruct the I/O quilt servers to attempt to read time independent
3529 ! domain metadata named "Element"
3530 ! from the open dataset described by DataHandle.
3531 ! Metadata of type char are
3532 ! stored in string Data.
3533 ! This routine is called only by client (compute) tasks.
3535 ! This is not yet supported.
3537 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3539 INTEGER , INTENT(IN) :: DataHandle
3540 CHARACTER*(*) , INTENT(IN) :: Element
3541 CHARACTER*(*) :: Data
3543 CALL wrf_message('wrf_quilt_get_dom_ti_char not supported yet')
3546 END SUBROUTINE wrf_quilt_get_dom_ti_char
3548 SUBROUTINE wrf_quilt_put_dom_ti_char ( DataHandle, Element, Data, Status )
3550 ! Instruct the I/O quilt servers to write time independent
3551 ! domain metadata named "Element"
3552 ! to the open dataset described by DataHandle.
3553 ! Metadata of type char are
3554 ! copied from string Data.
3555 ! This routine is called only by client (compute) tasks.
3557 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3558 USE module_wrf_quilt
3561 #include "intio_tags.h"
3562 INTEGER , INTENT(IN) :: DataHandle
3563 CHARACTER*(*) , INTENT(IN) :: Element
3564 CHARACTER*(*) , INTENT(IN) :: Data
3566 INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group, me, dummy
3569 !!JMTIMING CALL start_timing
3570 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_put_dom_ti_char' )
3572 IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
3573 IF ( int_handle_in_use( DataHandle ) ) THEN
3574 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
3576 !ARPDBG - potential bug. We have no access to the type of I/O being used for
3577 ! this data, so if PNETCDF_QUILT is defined we assume that is what is being used.
3578 #ifdef PNETCDF_QUILT
3579 IF(compute_group_master(1))THEN
3580 CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
3581 DataHandle, Element, "", Data, &
3582 int_dom_ti_char )
3583 ELSE
3584 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3585 ENDIF
3586 #else
3587 IF ( wrf_dm_on_monitor() ) THEN
3588 CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
3589 DataHandle, Element, "", Data, int_dom_ti_char )
3590 ELSE
3591 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3592 ENDIF
3593 #endif
3595 iserver = get_server_id ( DataHandle )
3596 ! write(0,*)'wrf_quilt_put_dom_ti_char ',iserver
3597 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
3598 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
3599 ! send the size of my local buffer to the i/o task (obufsize doesn't mean anything here)
3600 !!JMTIMING! CALL start_timing
3601 !write(0,*)'calling MPI_Barrier'
3602 ! CALL MPI_Barrier( mpi_comm_local, ierr )
3603 !write(0,*)'back from MPI_Barrier'
3604 !!JMTIMING! CALL end_timing("MPI_Barrier in wrf_quilt_put_dom_ti_char")
3606 !!JMTIMING CALL start_timing
3607 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on the client side)
3610 reduced(1) = hdrbufsize
3611 #ifdef PNETCDF_QUILT
3612 IF(compute_group_master(1)) reduced(2) = DataHandle
3613 #else
3614 IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
3615 #endif
3616 !call mpi_comm_rank( comm_io_group , me, ierr )
3618 CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER, &
3619 MPI_SUM, tasks_in_group-1, & ! root = tasks_in_group-1 is the I/O server task in this group
3620 comm_io_group, ierr )
3622 !!JMTIMING CALL end_timing("MPI_Reduce in wrf_quilt_put_dom_ti_char")
3623 ! send data to the i/o processor
3624 !!JMTIMING CALL start_timing
3626 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
3627 itypesize, &
3628 hdrbuf, hdrbufsize , &
3629 dummy, 0 )
3630 !!JMTIMING CALL end_timing("collect_on_comm in wrf_quilt_put_dom_ti_char")
3631 ENDIF
3632 ENDIF
3633 !!JMTIMING CALL end_timing("wrf_quilt_put_dom_ti_char")
3637 END SUBROUTINE wrf_quilt_put_dom_ti_char
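!
! Usage sketch (hypothetical attribute): character metadata travel in the
! header itself (via int_gen_ti_header_char), so a single call per attribute
! suffices:
!
!   INTEGER :: dh, stat
!   CALL wrf_quilt_put_dom_ti_char ( dh, 'TITLE', 'OUTPUT FROM WRF', stat )
!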
3639 SUBROUTINE wrf_quilt_get_dom_td_real ( DataHandle,Element, DateStr, Data, Count, Outcount, Status )
3641 ! Instruct the I/O quilt servers to attempt to read Count words of time
3642 ! dependent domain metadata named "Element" valid at time DateStr
3643 ! from the open dataset described by DataHandle.
3644 ! Metadata of type real are
3645 ! stored in array Data.
3646 ! Actual number of words read is returned in OutCount.
3647 ! This routine is called only by client (compute) tasks.
3649 ! This is not yet supported.
3651 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3653 INTEGER , INTENT(IN) :: DataHandle
3654 CHARACTER*(*) , INTENT(IN) :: Element
3655 CHARACTER*(*) , INTENT(IN) :: DateStr
3657 INTEGER , INTENT(IN) :: Count
3662 END SUBROUTINE wrf_quilt_get_dom_td_real
3664 SUBROUTINE wrf_quilt_put_dom_td_real ( DataHandle,Element, DateStr, Data, Count, Status )
3666 ! Instruct the I/O quilt servers to write Count words of time dependent
3667 ! domain metadata named "Element" valid at time DateStr
3668 ! to the open dataset described by DataHandle.
3669 ! Metadata of type real are
3670 ! copied from array Data.
3671 ! This routine is called only by client (compute) tasks.
3673 ! This is not yet supported.
3675 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3677 INTEGER , INTENT(IN) :: DataHandle
3678 CHARACTER*(*) , INTENT(IN) :: Element
3679 CHARACTER*(*) , INTENT(IN) :: DateStr
3680 real , INTENT(IN) :: Data(*)
3681 INTEGER , INTENT(IN) :: Count
3685 END SUBROUTINE wrf_quilt_put_dom_td_real
3687 SUBROUTINE wrf_quilt_get_dom_td_double ( DataHandle,Element, DateStr, Data, Count, Outcount, Status )
3689 ! Instruct the I/O quilt servers to attempt to read Count words of time
3690 ! dependent domain metadata named "Element" valid at time DateStr
3691 ! from the open dataset described by DataHandle.
3692 ! Metadata of type double are
3693 ! stored in array Data.
3694 ! Actual number of words read is returned in OutCount.
3695 ! This routine is called only by client (compute) tasks.
3697 ! This is not yet supported.
3699 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3701 INTEGER , INTENT(IN) :: DataHandle
3702 CHARACTER*(*) , INTENT(IN) :: Element
3703 CHARACTER*(*) , INTENT(IN) :: DateStr
3705 INTEGER , INTENT(IN) :: Count
3709 CALL wrf_error_fatal('wrf_quilt_get_dom_td_double not supported yet')
3711 END SUBROUTINE wrf_quilt_get_dom_td_double
3713 SUBROUTINE wrf_quilt_put_dom_td_double ( DataHandle,Element, DateStr, Data, Count, Status )
3715 ! Instruct the I/O quilt servers to write Count words of time dependent
3716 ! domain metadata named "Element" valid at time DateStr
3717 ! to the open dataset described by DataHandle.
3718 ! Metadata of type double are
3719 ! copied from array Data.
3720 ! This routine is called only by client (compute) tasks.
3722 ! This is not yet supported.
3724 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3726 INTEGER , INTENT(IN) :: DataHandle
3727 CHARACTER*(*) , INTENT(IN) :: Element
3728 CHARACTER*(*) , INTENT(IN) :: DateStr
3729 real*8 , INTENT(IN) :: Data(*)
3730 INTEGER , INTENT(IN) :: Count
3733 CALL wrf_error_fatal('wrf_quilt_put_dom_td_double not supported yet')
3735 END SUBROUTINE wrf_quilt_put_dom_td_double
3737 SUBROUTINE wrf_quilt_get_dom_td_integer ( DataHandle,Element, DateStr, Data, Count, Outcount, Status )
3739 ! Instruct the I/O quilt servers to attempt to read Count words of time
3740 ! dependent domain metadata named "Element" valid at time DateStr
3741 ! from the open dataset described by DataHandle.
3742 ! Metadata of type integer are
3743 ! stored in array Data.
3744 ! Actual number of words read is returned in OutCount.
3745 ! This routine is called only by client (compute) tasks.
3747 ! This is not yet supported.
3749 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3751 INTEGER , INTENT(IN) :: DataHandle
3752 CHARACTER*(*) , INTENT(IN) :: Element
3753 CHARACTER*(*) , INTENT(IN) :: DateStr
3755 INTEGER , INTENT(IN) :: Count
3760 END SUBROUTINE wrf_quilt_get_dom_td_integer
3762 SUBROUTINE wrf_quilt_put_dom_td_integer ( DataHandle,Element, DateStr, Data, Count, Status )
3764 ! Instruct the I/O quilt servers to write Count words of time dependent
3765 ! domain metadata named "Element" valid at time DateStr
3766 ! to the open dataset described by DataHandle.
3767 ! Metadata of type integer are
3768 ! copied from array Data.
3769 ! This routine is called only by client (compute) tasks.
3771 ! This is not yet supported.
3773 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3775 INTEGER , INTENT(IN) :: DataHandle
3776 CHARACTER*(*) , INTENT(IN) :: Element
3777 CHARACTER*(*) , INTENT(IN) :: DateStr
3778 integer , INTENT(IN) :: Data(*)
3779 INTEGER , INTENT(IN) :: Count
3783 END SUBROUTINE wrf_quilt_put_dom_td_integer
3785 SUBROUTINE wrf_quilt_get_dom_td_logical ( DataHandle,Element, DateStr, Data, Count, Outcount, Status )
3787 ! Instruct the I/O quilt servers to attempt to read Count words of time
3788 ! dependent domain metadata named "Element" valid at time DateStr
3789 ! from the open dataset described by DataHandle.
3790 ! Metadata of type logical are
3791 ! stored in array Data.
3792 ! Actual number of words read is returned in OutCount.
3793 ! This routine is called only by client (compute) tasks.
3795 ! This is not yet supported.
3797 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3799 INTEGER , INTENT(IN) :: DataHandle
3800 CHARACTER*(*) , INTENT(IN) :: Element
3801 CHARACTER*(*) , INTENT(IN) :: DateStr
3803 INTEGER , INTENT(IN) :: Count
3808 END SUBROUTINE wrf_quilt_get_dom_td_logical
3810 SUBROUTINE wrf_quilt_put_dom_td_logical ( DataHandle,Element, DateStr, Data, Count, Status )
3812 ! Instruct the I/O quilt servers to write Count words of time dependent
3813 ! domain metadata named "Element" valid at time DateStr
3814 ! to the open dataset described by DataHandle.
3815 ! Metadata of type logical are
3816 ! copied from array Data.
3817 ! This routine is called only by client (compute) tasks.
3819 ! This is not yet supported.
3821 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3823 INTEGER , INTENT(IN) :: DataHandle
3824 CHARACTER*(*) , INTENT(IN) :: Element
3825 CHARACTER*(*) , INTENT(IN) :: DateStr
3826 logical , INTENT(IN) :: Data(*)
3827 INTEGER , INTENT(IN) :: Count
3831 END SUBROUTINE wrf_quilt_put_dom_td_logical
3833 SUBROUTINE wrf_quilt_get_dom_td_char ( DataHandle,Element, DateStr, Data, Status )
3835 ! Instruct the I/O quilt servers to attempt to read time dependent
3836 ! domain metadata named "Element" valid at time DateStr
3837 ! from the open dataset described by DataHandle.
3838 ! Metadata of type char are
3839 ! stored in string Data.
3840 ! This routine is called only by client (compute) tasks.
3842 ! This is not yet supported.
3844 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3846 INTEGER , INTENT(IN) :: DataHandle
3847 CHARACTER*(*) , INTENT(IN) :: Element
3848 CHARACTER*(*) , INTENT(IN) :: DateStr
3849 CHARACTER*(*) :: Data
3853 END SUBROUTINE wrf_quilt_get_dom_td_char
3855 SUBROUTINE wrf_quilt_put_dom_td_char ( DataHandle,Element, DateStr, Data, Status )
3857 ! Instruct the I/O quilt servers to write time dependent
3858 ! domain metadata named "Element" valid at time DateStr
3859 ! to the open dataset described by DataHandle.
3860 ! Metadata of type char are
3861 ! copied from string Data.
3862 ! This routine is called only by client (compute) tasks.
3864 ! This is not yet supported.
3866 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3868 INTEGER , INTENT(IN) :: DataHandle
3869 CHARACTER*(*) , INTENT(IN) :: Element
3870 CHARACTER*(*) , INTENT(IN) :: DateStr
3871 CHARACTER*(*) , INTENT(IN) :: Data
3875 END SUBROUTINE wrf_quilt_put_dom_td_char
3877 SUBROUTINE wrf_quilt_get_var_ti_real ( DataHandle,Element, Varname, Data, Count, Outcount, Status )
3879 ! Instruct the I/O quilt servers to attempt to read Count words of time
3880 ! independent attribute "Element" of variable "Varname"
3881 ! from the open dataset described by DataHandle.
3882 ! Attribute of type real is
3883 ! stored in array Data.
3884 ! Actual number of words read is returned in OutCount.
3885 ! This routine is called only by client (compute) tasks.
3887 ! This is not yet supported.
3889 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3891 INTEGER , INTENT(IN) :: DataHandle
3892 CHARACTER*(*) , INTENT(IN) :: Element
3893 CHARACTER*(*) , INTENT(IN) :: VarName
3895 INTEGER , INTENT(IN) :: Count
3900 END SUBROUTINE wrf_quilt_get_var_ti_real
3902 SUBROUTINE wrf_quilt_put_var_ti_real ( DataHandle,Element, Varname, Data, Count, Status )
3904 ! Instruct the I/O quilt servers to write Count words of time independent
3905 ! attribute "Element" of variable "Varname"
3906 ! to the open dataset described by DataHandle.
3907 ! Attribute of type real is
3908 ! copied from array Data.
3909 ! This routine is called only by client (compute) tasks.
3911 ! This is not yet supported.
3913 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3915 INTEGER , INTENT(IN) :: DataHandle
3916 CHARACTER*(*) , INTENT(IN) :: Element
3917 CHARACTER*(*) , INTENT(IN) :: VarName
3918 real , INTENT(IN) :: Data(*)
3919 INTEGER , INTENT(IN) :: Count
3923 END SUBROUTINE wrf_quilt_put_var_ti_real
3925 SUBROUTINE wrf_quilt_get_var_ti_double ( DataHandle,Element, Varname, Data, Count, Outcount, Status )
3927 ! Instruct the I/O quilt servers to attempt to read Count words of time
3928 ! independent attribute "Element" of variable "Varname"
3929 ! from the open dataset described by DataHandle.
3930 ! Attribute of type double is
3931 ! stored in array Data.
3932 ! Actual number of words read is returned in OutCount.
3933 ! This routine is called only by client (compute) tasks.
3935 ! This is not yet supported.
3937 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3939 INTEGER , INTENT(IN) :: DataHandle
3940 CHARACTER*(*) , INTENT(IN) :: Element
3941 CHARACTER*(*) , INTENT(IN) :: VarName
3943 INTEGER , INTENT(IN) :: Count
3947 CALL wrf_error_fatal('wrf_quilt_get_var_ti_double not supported yet')
3949 END SUBROUTINE wrf_quilt_get_var_ti_double
3951 SUBROUTINE wrf_quilt_put_var_ti_double ( DataHandle,Element, Varname, Data, Count, Status )
3953 ! Instruct the I/O quilt servers to write Count words of time independent
3954 ! attribute "Element" of variable "Varname"
3955 ! to the open dataset described by DataHandle.
3956 ! Attribute of type double is
3957 ! copied from array Data.
3958 ! This routine is called only by client (compute) tasks.
3960 ! This is not yet supported.
3962 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3964 INTEGER , INTENT(IN) :: DataHandle
3965 CHARACTER*(*) , INTENT(IN) :: Element
3966 CHARACTER*(*) , INTENT(IN) :: VarName
3967 real*8 , INTENT(IN) :: Data(*)
3968 INTEGER , INTENT(IN) :: Count
3971 CALL wrf_error_fatal('wrf_quilt_put_var_ti_double not supported yet')
3973 END SUBROUTINE wrf_quilt_put_var_ti_double
3975 SUBROUTINE wrf_quilt_get_var_ti_integer ( DataHandle,Element, Varname, Data, Count, Outcount, Status )
3977 ! Instruct the I/O quilt servers to attempt to read Count words of time
3978 ! independent attribute "Element" of variable "Varname"
3979 ! from the open dataset described by DataHandle.
3980 ! Attribute of type integer is
3981 ! stored in array Data.
3982 ! Actual number of words read is returned in OutCount.
3983 ! This routine is called only by client (compute) tasks.
3985 ! This is not yet supported.
3987 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3989 INTEGER , INTENT(IN) :: DataHandle
3990 CHARACTER*(*) , INTENT(IN) :: Element
3991 CHARACTER*(*) , INTENT(IN) :: VarName
3993 INTEGER , INTENT(IN) :: Count
3998 END SUBROUTINE wrf_quilt_get_var_ti_integer
4000 SUBROUTINE wrf_quilt_put_var_ti_integer ( DataHandle,Element, Varname, Data, Count, Status )
4002 ! Instruct the I/O quilt servers to write Count words of time independent
4003 ! attribute "Element" of variable "Varname"
4004 ! to the open dataset described by DataHandle.
4005 ! Attribute of type integer is
4006 ! copied from array Data.
4007 ! This routine is called only by client (compute) tasks.
4009 ! This is not yet supported.
4011 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4013 INTEGER , INTENT(IN) :: DataHandle
4014 CHARACTER*(*) , INTENT(IN) :: Element
4015 CHARACTER*(*) , INTENT(IN) :: VarName
4016 integer , INTENT(IN) :: Data(*)
4017 INTEGER , INTENT(IN) :: Count
4021 END SUBROUTINE wrf_quilt_put_var_ti_integer
4023 SUBROUTINE wrf_quilt_get_var_ti_logical ( DataHandle,Element, Varname, Data, Count, Outcount, Status )
4025 ! Instruct the I/O quilt servers to attempt to read Count words of time
4026 ! independent attribute "Element" of variable "Varname"
4027 ! from the open dataset described by DataHandle.
4028 ! Attribute of type logical is
4029 ! stored in array Data.
4030 ! Actual number of words read is returned in OutCount.
4031 ! This routine is called only by client (compute) tasks.
4033 ! This is not yet supported.
4035 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4037 INTEGER , INTENT(IN) :: DataHandle
4038 CHARACTER*(*) , INTENT(IN) :: Element
4039 CHARACTER*(*) , INTENT(IN) :: VarName
4041 INTEGER , INTENT(IN) :: Count
4046 END SUBROUTINE wrf_quilt_get_var_ti_logical
4048 SUBROUTINE wrf_quilt_put_var_ti_logical ( DataHandle,Element, Varname, Data, Count, Status )
4050 ! Instruct the I/O quilt servers to write Count words of time independent
4051 ! attribute "Element" of variable "Varname"
4052 ! to the open dataset described by DataHandle.
4053 ! Attribute of type logical is
4054 ! copied from array Data.
4055 ! This routine is called only by client (compute) tasks.
4057 ! This is not yet supported.
4059 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4061 INTEGER , INTENT(IN) :: DataHandle
4062 CHARACTER*(*) , INTENT(IN) :: Element
4063 CHARACTER*(*) , INTENT(IN) :: VarName
4064 logical , INTENT(IN) :: Data(*)
4065 INTEGER , INTENT(IN) :: Count
4069 END SUBROUTINE wrf_quilt_put_var_ti_logical
4071 SUBROUTINE wrf_quilt_get_var_ti_char ( DataHandle,Element, Varname, Data, Status )
4073 ! Instruct the I/O quilt servers to attempt to read time independent
4074 ! attribute "Element" of variable "Varname"
4075 ! from the open dataset described by DataHandle.
4076 ! Attribute of type char is
4077 ! stored in string Data.
4078 ! This routine is called only by client (compute) tasks.
4080 ! This is not yet supported.
4082 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4084 INTEGER , INTENT(IN) :: DataHandle
4085 CHARACTER*(*) , INTENT(IN) :: Element
4086 CHARACTER*(*) , INTENT(IN) :: VarName
4087 CHARACTER*(*) :: Data
4091 END SUBROUTINE wrf_quilt_get_var_ti_char
4093 SUBROUTINE wrf_quilt_put_var_ti_char ( DataHandle,Element, Varname, Data, Status )
4095 ! Instruct the I/O quilt servers to write time independent
4096 ! attribute "Element" of variable "Varname"
4097 ! to the open dataset described by DataHandle.
4098 ! Attribute of type char is
4099 ! copied from string Data.
4100 ! This routine is called only by client (compute) tasks.
4103 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4104 USE module_wrf_quilt
4107 #include "intio_tags.h"
4108 INTEGER , INTENT(IN) :: DataHandle
4109 CHARACTER*(*) , INTENT(IN) :: Element
4110 CHARACTER*(*) , INTENT(IN) :: VarName
4111 CHARACTER*(*) , INTENT(IN) :: Data
4113 INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group, dummy
4117 !!JMTIMING CALL start_timing
4118 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_put_var_ti_char' )
4120 IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
4121 IF ( int_handle_in_use( DataHandle ) ) THEN
4122 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
4124 #ifdef PNETCDF_QUILT
4125 IF ( compute_group_master(1) ) THEN
4126 CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
4127 DataHandle, TRIM(Element), &
4128 TRIM(VarName), TRIM(Data), int_var_ti_char )
4129 ELSE
4130 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
4131 ENDIF
4132 #else
4133 IF ( wrf_dm_on_monitor() ) THEN
4134 CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
4135 DataHandle, TRIM(Element), &
4136 TRIM(VarName), TRIM(Data), int_var_ti_char )
4137 ELSE
4138 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
4139 ENDIF
4140 #endif
4142 iserver = get_server_id ( DataHandle )
4143 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
4144 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
4146 !!JMTIMING CALL start_timing
4147 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on the client side)
4149 reduced(1) = hdrbufsize
4150 #ifdef PNETCDF_QUILT
4151 IF ( compute_group_master(1) ) reduced(2) = DataHandle
4152 #else
4153 IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
4154 #endif
4155 CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER, &
4156 MPI_SUM, tasks_in_group-1, & ! root = tasks_in_group-1 is the I/O server task in this group
4157 comm_io_group, ierr )
4158 !!JMTIMING CALL end_timing("MPI_Reduce in wrf_quilt_put_var_ti_char")
4159 ! send data to the i/o processor
4160 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
4161 itypesize, &
4162 hdrbuf, hdrbufsize , &
4163 dummy, 0 )
4164 ENDIF
4165 ENDIF
4166 !!JMTIMING CALL end_timing("wrf_quilt_put_var_ti_char" )
4170 END SUBROUTINE wrf_quilt_put_var_ti_char
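!
! Usage sketch (hypothetical attribute): attaching a units string to a
! variable; the Element, VarName, and Data strings are TRIMmed into the
! header above:
!
!   INTEGER :: dh, stat
!   CALL wrf_quilt_put_var_ti_char ( dh, 'units', 'T2', 'K', stat )
!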
4172 SUBROUTINE wrf_quilt_get_var_td_real ( DataHandle,Element, DateStr,Varname, Data, Count, Outcount, Status )
4174 ! Instruct the I/O quilt servers to attempt to read Count words of time
4175 ! dependent attribute "Element" of variable "Varname" valid at time DateStr
4176 ! from the open dataset described by DataHandle.
4177 ! Attribute of type real is
4178 ! stored in array Data.
4179 ! Actual number of words read is returned in OutCount.
4180 ! This routine is called only by client (compute) tasks.
4182 ! This is not yet supported.
4184 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4186 INTEGER , INTENT(IN) :: DataHandle
4187 CHARACTER*(*) , INTENT(IN) :: Element
4188 CHARACTER*(*) , INTENT(IN) :: DateStr
4189 CHARACTER*(*) , INTENT(IN) :: VarName
4191 INTEGER , INTENT(IN) :: Count
4196 END SUBROUTINE wrf_quilt_get_var_td_real
4198 SUBROUTINE wrf_quilt_put_var_td_real ( DataHandle,Element, DateStr,Varname, Data, Count, Status )
4200 ! Instruct the I/O quilt servers to write Count words of time dependent
4201 ! attribute "Element" of variable "Varname" valid at time DateStr
4202 ! to the open dataset described by DataHandle.
4203 ! Attribute of type real is
4204 ! copied from array Data.
4205 ! This routine is called only by client (compute) tasks.
4207 ! This is not yet supported.
4209 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4211 INTEGER , INTENT(IN) :: DataHandle
4212 CHARACTER*(*) , INTENT(IN) :: Element
4213 CHARACTER*(*) , INTENT(IN) :: DateStr
4214 CHARACTER*(*) , INTENT(IN) :: VarName
4215 real , INTENT(IN) :: Data(*)
4216 INTEGER , INTENT(IN) :: Count
4220 END SUBROUTINE wrf_quilt_put_var_td_real
4222 SUBROUTINE wrf_quilt_get_var_td_double ( DataHandle,Element, DateStr,Varname, Data, Count, Outcount, Status )
4224 ! Instruct the I/O quilt servers to attempt to read Count words of time
4225 ! dependent attribute "Element" of variable "Varname" valid at time DateStr
4226 ! from the open dataset described by DataHandle.
4227 ! Attribute of type double is
4228 ! stored in array Data.
4229 ! Actual number of words read is returned in OutCount.
4230 ! This routine is called only by client (compute) tasks.
4232 ! This is not yet supported.
4234 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4236 INTEGER , INTENT(IN) :: DataHandle
4237 CHARACTER*(*) , INTENT(IN) :: Element
4238 CHARACTER*(*) , INTENT(IN) :: DateStr
4239 CHARACTER*(*) , INTENT(IN) :: VarName
4241 INTEGER , INTENT(IN) :: Count
4245 CALL wrf_error_fatal('wrf_quilt_get_var_td_double not supported yet')
4247 END SUBROUTINE wrf_quilt_get_var_td_double
4249 SUBROUTINE wrf_quilt_put_var_td_double ( DataHandle,Element, DateStr,Varname, Data, Count, Status )
4251 ! Instruct the I/O quilt servers to write Count words of time dependent
4252 ! attribute "Element" of variable "Varname" valid at time DateStr
4253 ! to the open dataset described by DataHandle.
4254 ! Attribute of type double is
4255 ! copied from array Data.
4256 ! This routine is called only by client (compute) tasks.
4258 ! This is not yet supported.
4260 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4262 INTEGER , INTENT(IN) :: DataHandle
4263 CHARACTER*(*) , INTENT(IN) :: Element
4264 CHARACTER*(*) , INTENT(IN) :: DateStr
4265 CHARACTER*(*) , INTENT(IN) :: VarName
4266 real*8 , INTENT(IN) :: Data(*)
4267 INTEGER , INTENT(IN) :: Count
4270 CALL wrf_error_fatal('wrf_quilt_put_var_td_double not supported yet')
4272 END SUBROUTINE wrf_quilt_put_var_td_double
4274 SUBROUTINE wrf_quilt_get_var_td_integer ( DataHandle,Element, DateStr,Varname, Data, Count, Outcount,Status)
4276 ! Instruct the I/O quilt servers to attempt to read Count words of time
4277 ! dependent attribute "Element" of variable "Varname" valid at time DateStr
4278 ! from the open dataset described by DataHandle.
4279 ! Attribute of type integer is
4280 ! stored in array Data.
4281 ! Actual number of words read is returned in OutCount.
4282 ! This routine is called only by client (compute) tasks.
4284 ! This is not yet supported.
4286 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4288 INTEGER , INTENT(IN) :: DataHandle
4289 CHARACTER*(*) , INTENT(IN) :: Element
4290 CHARACTER*(*) , INTENT(IN) :: DateStr
4291 CHARACTER*(*) , INTENT(IN) :: VarName
4293 INTEGER , INTENT(IN) :: Count
4298 END SUBROUTINE wrf_quilt_get_var_td_integer
4300 SUBROUTINE wrf_quilt_put_var_td_integer ( DataHandle,Element, DateStr,Varname, Data, Count, Status )
4302 ! Instruct the I/O quilt servers to write Count words of time dependent
4303 ! attribute "Element" of variable "Varname" valid at time DateStr
4304 ! to the open dataset described by DataHandle.
4305 ! Attribute of type integer is
4306 ! copied from array Data.
4307 ! This routine is called only by client (compute) tasks.
4309 ! This is not yet supported.
4311 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4313 INTEGER , INTENT(IN) :: DataHandle
4314 CHARACTER*(*) , INTENT(IN) :: Element
4315 CHARACTER*(*) , INTENT(IN) :: DateStr
4316 CHARACTER*(*) , INTENT(IN) :: VarName
4317 integer , INTENT(IN) :: Data(*)
4318 INTEGER , INTENT(IN) :: Count
4322 END SUBROUTINE wrf_quilt_put_var_td_integer
4324 SUBROUTINE wrf_quilt_get_var_td_logical ( DataHandle,Element, DateStr,Varname, Data, Count, Outcount, Status )
4326 ! Instruct the I/O quilt servers to attempt to read Count words of time
4327 ! dependent attribute "Element" of variable "Varname" valid at time DateStr
4328 ! from the open dataset described by DataHandle.
4329 ! Attribute of type logical is
4330 ! stored in array Data.
4331 ! Actual number of words read is returned in OutCount.
4332 ! This routine is called only by client (compute) tasks.
4334 ! This is not yet supported.
4336 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4338 INTEGER , INTENT(IN) :: DataHandle
4339 CHARACTER*(*) , INTENT(IN) :: Element
4340 CHARACTER*(*) , INTENT(IN) :: DateStr
4341 CHARACTER*(*) , INTENT(IN) :: VarName
4343 INTEGER , INTENT(IN) :: Count
4348 END SUBROUTINE wrf_quilt_get_var_td_logical
4350 SUBROUTINE wrf_quilt_put_var_td_logical ( DataHandle,Element, DateStr,Varname, Data, Count, Status )
4352 ! Instruct the I/O quilt servers to write Count words of time dependent
4353 ! attribute "Element" of variable "Varname" valid at time DateStr
4354 ! to the open dataset described by DataHandle.
4355 ! Attribute of type logical is
4356 ! copied from array Data.
4357 ! This routine is called only by client (compute) tasks.
4359 ! This is not yet supported.
4361 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4363 INTEGER , INTENT(IN) :: DataHandle
4364 CHARACTER*(*) , INTENT(IN) :: Element
4365 CHARACTER*(*) , INTENT(IN) :: DateStr
4366 CHARACTER*(*) , INTENT(IN) :: VarName
4367 logical , INTENT(IN) :: Data(*)
4368 INTEGER , INTENT(IN) :: Count
4372 END SUBROUTINE wrf_quilt_put_var_td_logical
4374 SUBROUTINE wrf_quilt_get_var_td_char ( DataHandle,Element, DateStr,Varname, Data, Status )
4376 ! Instruct the I/O quilt servers to attempt to read time dependent
4377 ! attribute "Element" of variable "Varname" valid at time DateStr
4378 ! from the open dataset described by DataHandle.
4379 ! Attribute of type char is
4380 ! stored in string Data.
4381 ! This routine is called only by client (compute) tasks.
4383 ! This is not yet supported.
4385 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4387 INTEGER , INTENT(IN) :: DataHandle
4388 CHARACTER*(*) , INTENT(IN) :: Element
4389 CHARACTER*(*) , INTENT(IN) :: DateStr
4390 CHARACTER*(*) , INTENT(IN) :: VarName
4391 CHARACTER*(*) :: Data
4395 END SUBROUTINE wrf_quilt_get_var_td_char
4397 SUBROUTINE wrf_quilt_put_var_td_char ( DataHandle,Element, DateStr,Varname, Data, Status )
4399 ! Instruct the I/O quilt servers to write time dependent
4400 ! attribute "Element" of variable "Varname" valid at time DateStr
4401 ! to the open dataset described by DataHandle.
4402 ! Attribute of type char is
4403 ! copied from string Data.
4404 ! This routine is called only by client (compute) tasks.
4406 ! This is not yet supported.
4408 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4410 INTEGER , INTENT(IN) :: DataHandle
4411 CHARACTER*(*) , INTENT(IN) :: Element
4412 CHARACTER*(*) , INTENT(IN) :: DateStr
4413 CHARACTER*(*) , INTENT(IN) :: VarName
4414 CHARACTER*(*) , INTENT(IN) :: Data
4418 END SUBROUTINE wrf_quilt_put_var_td_char
4420 SUBROUTINE wrf_quilt_read_field ( DataHandle , DateStr , VarName , Field , FieldType , Comm , IOComm, &
4421 DomainDesc , MemoryOrder , Stagger , DimNames , &
4422 DomainStart , DomainEnd , &
4423 MemoryStart , MemoryEnd , &
4424 PatchStart , PatchEnd , &
4427 ! Instruct the I/O quilt servers to read the variable named VarName from the
4428 ! dataset pointed to by DataHandle.
4429 ! This routine is called only by client (compute) tasks.
4431 ! This is not yet supported.
4433 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4435 INTEGER , INTENT(IN) :: DataHandle
4436 CHARACTER*(*) , INTENT(INOUT) :: DateStr
4437 CHARACTER*(*) , INTENT(INOUT) :: VarName
4438 INTEGER , INTENT(INOUT) :: Field(*)
4439 integer ,intent(in) :: FieldType
4440 integer ,intent(inout) :: Comm
4441 integer ,intent(inout) :: IOComm
4442 integer ,intent(in) :: DomainDesc
4443 character*(*) ,intent(in) :: MemoryOrder
4444 character*(*) ,intent(in) :: Stagger
4445 character*(*) , dimension (*) ,intent(in) :: DimNames
4446 integer ,dimension(*) ,intent(in) :: DomainStart, DomainEnd
4447 integer ,dimension(*) ,intent(in) :: MemoryStart, MemoryEnd
4448 integer ,dimension(*) ,intent(in) :: PatchStart, PatchEnd
4449 integer ,intent(out) :: Status
4453 END SUBROUTINE wrf_quilt_read_field
4455 SUBROUTINE wrf_quilt_write_field ( DataHandle , DateStr , VarName , Field , FieldType , Comm , IOComm, &
4456 DomainDesc , MemoryOrder , Stagger , DimNames , &
4457 DomainStart , DomainEnd , &
4458 MemoryStart , MemoryEnd , &
4459 PatchStart , PatchEnd , &
4462 ! Prepare instructions for the I/O quilt servers to write the variable named
4463 ! VarName to the dataset pointed to by DataHandle.
4465 ! During a "training" write this routine accumulates the number and sizes of
4466 ! messages that will be sent to the I/O server associated with this compute
4467 ! task.
4469 ! During a "real" write, this routine begins by allocating
4470 ! int_local_output_buffer if it has not already been allocated. Sizes
4471 ! accumulated during "training" are used to determine how big
4472 ! int_local_output_buffer must be. This routine then stores "int_field"
4473 ! headers and associated field data in int_local_output_buffer. The contents
4474 ! of int_local_output_buffer are actually sent to the I/O quilt server in
4475 ! routine wrf_quilt_iosync(). This scheme allows output of multiple variables
4476 ! to be aggregated into a single "iosync" operation.
4477 ! This routine is called only by client (compute) tasks.
4479 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4480 USE module_state_description
4481 USE module_wrf_quilt
4484 #include "wrf_io_flags.h"
4485 INTEGER , INTENT(IN) :: DataHandle
4486 CHARACTER*(*) , INTENT(IN) :: DateStr
4487 CHARACTER*(*) , INTENT(IN) :: VarName
4488 ! INTEGER , INTENT(IN) :: Field(*)
4489 integer ,intent(in) :: FieldType
4490 integer ,intent(inout) :: Comm
4491 integer ,intent(inout) :: IOComm
4492 integer ,intent(in) :: DomainDesc
4493 character*(*) ,intent(in) :: MemoryOrder
4494 character*(*) ,intent(in) :: Stagger
4495 character*(*) , dimension (*) ,intent(in) :: DimNames
4496 integer ,dimension(*) ,intent(in) :: DomainStart, DomainEnd
4497 integer ,dimension(*) ,intent(in) :: MemoryStart, MemoryEnd
4498 integer ,dimension(*) ,intent(in) :: PatchStart, PatchEnd
4499 integer ,intent(out) :: Status
4501 integer ii,jj,kk,myrank
4503 REAL, DIMENSION( MemoryStart(1):MemoryEnd(1), &
4504 MemoryStart(2):MemoryEnd(2), &
4505 MemoryStart(3):MemoryEnd(3) ) :: Field
4506 INTEGER locsize , typesize, itypesize
4507 INTEGER ierr, tasks_in_group, comm_io_group, dummy, i
4508 INTEGER, EXTERNAL :: use_package
4510 !!ARPTIMING CALL start_timing
4511 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_write_field' )
4513 IF ( .NOT. (DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles) ) THEN
4514 CALL wrf_error_fatal("frame/module_io_quilt.F: wrf_quilt_write_field: invalid data handle" )
4515 ENDIF
4516 IF ( .NOT. int_handle_in_use( DataHandle ) ) THEN
4517 CALL wrf_error_fatal("frame/module_io_quilt.F: wrf_quilt_write_field: DataHandle not opened" )
4518 ENDIF
4520 locsize = (PatchEnd(1)-PatchStart(1)+1)* &
4521 (PatchEnd(2)-PatchStart(2)+1)* &
4522 (PatchEnd(3)-PatchStart(3)+1)
4524 CALL mpi_type_size( MPI_INTEGER, itypesize, ierr )
4525 ! Note that the WRF_DOUBLE branch of this IF statement must come first since
4526 ! WRF_FLOAT is set equal to WRF_DOUBLE during autopromotion builds.
4527 IF ( FieldType .EQ. WRF_DOUBLE ) THEN
4528 CALL mpi_type_size( MPI_DOUBLE_PRECISION, typesize, ierr )
4529 ELSE IF ( FieldType .EQ. WRF_FLOAT ) THEN
4530 CALL mpi_type_size( MPI_REAL, typesize, ierr )
4531 ELSE IF ( FieldType .EQ. WRF_INTEGER ) THEN
4532 CALL mpi_type_size( MPI_INTEGER, typesize, ierr )
4533 ELSE IF ( FieldType .EQ. WRF_LOGICAL ) THEN
4534 CALL mpi_type_size( MPI_LOGICAL, typesize, ierr )
4535 ENDIF
4537 IF ( .NOT. okay_to_write( DataHandle ) ) THEN
4539 ! This is a "training" write.
4540 ! It is not okay to actually write; what we do here is just "bookkeep": count up
4541 ! the number and size of messages that we will output to the I/O server
4542 ! associated with this compute task.
4544 CALL int_gen_write_field_header ( hdrbuf, hdrbufsize, itypesize, typesize, &
4545 DataHandle , DateStr , VarName , Field , FieldType , Comm , IOComm, &
4546 333933 , MemoryOrder , Stagger , DimNames , & ! 333933 means training; magic number
4547 DomainStart , DomainEnd , &
4548 MemoryStart , MemoryEnd , &
4549 PatchStart , PatchEnd )
4551 int_num_bytes_to_write(DataHandle) = int_num_bytes_to_write(DataHandle) + locsize * typesize + hdrbufsize
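! Worked example (illustrative numbers only): a 100 x 80 x 30 patch of
! 4-byte reals with a 512-byte header contributes 100*80*30*4 + 512
! = 960512 bytes to the running total for this handle.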
4553 ! Send the hdr for the write in case the interface is calling the I/O API in "learn" mode
4555 iserver = get_server_id ( DataHandle )
4556 !JMDEBUGwrite(0,*)'wrf_quilt_write_field (dryrun) ',iserver
4557 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
4558 ! send the size of my local buffer to the i/o task (obufsize doesn't mean anything here)
4560 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
4563 IF ( .NOT. wrf_dm_on_monitor() ) THEN ! only one task in compute grid sends this message; send noops on others
4564 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
4565 ENDIF
4569 !!ARPTIMING CALL start_timing
4570 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on the client side)
4572 reduced(1) = hdrbufsize
4573 #ifdef PNETCDF_QUILT
4574 IF ( compute_group_master(1) ) reduced(2) = DataHandle
4575 #else
4576 IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
4577 #endif
4578 CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER, &
4579 MPI_SUM, tasks_in_group-1, & ! root = tasks_in_group-1 is the I/O server task in this group
4580 comm_io_group, ierr )
4581 !!ARPTIMING CALL end_timing("MPI_Reduce in wrf_quilt_write_field dryrun")
4582 ! send data to the i/o processor
4584 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
4585 itypesize, &
4586 hdrbuf, hdrbufsize , &
4587 dummy, 0 )
4589 ELSE
4591 IF ( .NOT. associated( int_local_output_buffer ) ) THEN
4592 ALLOCATE ( int_local_output_buffer( (int_num_bytes_to_write( DataHandle )+1)/itypesize ), Stat=ierr )
4593 IF ( ierr .NE. 0 ) THEN
4594 CALL wrf_error_fatal("frame/module_io_quilt.F: wrf_quilt_write_field: allocate of int_local_output_buffer failed" )
4595 ENDIF
4596 int_local_output_cursor = 1
4597 ENDIF
4598 iserver = get_server_id ( DataHandle )
4599 !JMDEBUGwrite(0,*)'wrf_quilt_write_field (writing) ',iserver
4601 ! This is NOT a "training" write. It is OK to write now.
4602 CALL int_gen_write_field_header ( hdrbuf, hdrbufsize, itypesize, typesize, &
4603 DataHandle , DateStr , VarName , Field , FieldType , Comm , IOComm, &
4604 0 , MemoryOrder , Stagger , DimNames , & ! non-333933 means okay to write; magic number
4605 DomainStart , DomainEnd , &
4606 MemoryStart , MemoryEnd , &
4607 PatchStart , PatchEnd )
4609 ! Pack header into int_local_output_buffer. It will be sent to the
4610 ! I/O servers during the next "iosync" operation.
4611 #ifdef DEREF_KLUDGE
4612 CALL int_pack_data ( hdrbuf , hdrbufsize , int_local_output_buffer(1), int_local_output_cursor )
4613 #else
4614 CALL int_pack_data ( hdrbuf , hdrbufsize , int_local_output_buffer, int_local_output_cursor )
4615 #endif
4617 ! Pack field data into int_local_output_buffer. It will be sent to the
4618 ! I/O servers during the next "iosync" operation.
4619 #ifdef DEREF_KLUDGE
4620 CALL int_pack_data ( Field(PatchStart(1):PatchEnd(1),PatchStart(2):PatchEnd(2),PatchStart(3):PatchEnd(3) ), &
4621 locsize * typesize , int_local_output_buffer(1), int_local_output_cursor )
4622 #else
4623 CALL int_pack_data ( Field(PatchStart(1):PatchEnd(1),PatchStart(2):PatchEnd(2),PatchStart(3):PatchEnd(3) ), &
4624 locsize * typesize , int_local_output_buffer, int_local_output_cursor )
4625 #endif
4626 ENDIF
4629 !!ARPTIMING CALL end_timing("wrf_quilt_write_field")
4633 END SUBROUTINE wrf_quilt_write_field
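!
! Flow sketch (hypothetical handle and field names; the full argument list is
! elided). Each output frame involves a training pass and then a real pass;
! the packed buffer is shipped to the server by the next iosync operation:
!
!   CALL wrf_quilt_write_field ( dh, datestr, 'T2', t2, WRF_FLOAT, ... )  ! training: tally sizes only
!   ! ... the dataset is committed, so okay_to_write(dh) becomes .TRUE. ...
!   CALL wrf_quilt_write_field ( dh, datestr, 'T2', t2, WRF_FLOAT, ... )  ! real: pack header + patch data
!   ! ... wrf_quilt_iosync sends int_local_output_buffer to the I/O server ...
!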
4635 SUBROUTINE wrf_quilt_get_var_info ( DataHandle , VarName , NDim , MemoryOrder , Stagger , &
4636 DomainStart , DomainEnd , Status )
4638 ! This routine applies only to a dataset that is open for read. It instructs
4639 ! the I/O quilt servers to return information about variable VarName.
4640 ! This routine is called only by client (compute) tasks.
4642 ! This is not yet supported.
4644 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4646 integer ,intent(in) :: DataHandle
4647 character*(*) ,intent(in) :: VarName
4649 character*(*) :: MemoryOrder
4650 character*(*) :: Stagger
4651 integer ,dimension(*) :: DomainStart, DomainEnd
4655 END SUBROUTINE wrf_quilt_get_var_info
4657 SUBROUTINE get_mpi_comm_io_groups( retval, isrvr )
4659 ! This routine returns the compute+io communicator to which this
4660 ! compute task belongs for I/O server group "isrvr".
4661 ! This routine is called only by client (compute) tasks.
4663 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4664 USE module_wrf_quilt
4666 INTEGER, INTENT(IN ) :: isrvr
4667 INTEGER, INTENT(OUT) :: retval
4668 retval = mpi_comm_io_groups(isrvr)
4671 END SUBROUTINE get_mpi_comm_io_groups
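!
! Usage sketch (mirroring the client-side pattern used throughout this file):
! look up the group communicator for the server that owns a handle, then
! size it:
!
!   iserver = get_server_id ( DataHandle )
!   CALL get_mpi_comm_io_groups( comm_io_group , iserver )
!   CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
!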
4673 SUBROUTINE get_nio_tasks_in_group( retval )
4675 ! This routine returns the number of I/O server tasks in each
4676 ! I/O server group. It can be called by both clients and servers.
4679 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4680 USE module_wrf_quilt
4682 INTEGER, INTENT(OUT) :: retval
4683 retval = nio_tasks_in_group
4686 END SUBROUTINE get_nio_tasks_in_group
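!
! Usage sketch (hypothetical): combined with the group communicator above,
! this identifies the I/O server, which is the highest rank in the group:
!
!   INTEGER :: nio
!   CALL get_nio_tasks_in_group( nio )
!   ! the MPI_Reduce root used by the clients is tasks_in_group-1
!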
4688 SUBROUTINE collect_on_comm_debug(file,line, comm_io_group, &
4689 sze, &
4690 hdrbuf, hdrbufsize , &
4691 outbuf, outbufsize )
4693 CHARACTER*(*) file
4694 INTEGER line, sze
4695 INTEGER comm_io_group
4697 INTEGER hdrbuf(*), outbuf(*)
4698 INTEGER hdrbufsize, outbufsize
4700 !write(0,*)'collect_on_comm_debug ',trim(file),line,sze,hdrbufsize,outbufsize
4701 CALL collect_on_comm( comm_io_group, &
4702 sze, &
4703 hdrbuf, hdrbufsize , &
4704 outbuf, outbufsize )
4705 !write(0,*)trim(file),line,'returning'
4706 RETURN
4707 END SUBROUTINE collect_on_comm_debug
4710 SUBROUTINE collect_on_comm_debug2(file,line,var,tag,sz,hdr_rec_size, &
4711 comm_io_group, &
4712 sze, &
4713 hdrbuf, hdrbufsize , &
4714 outbuf, outbufsize )
4716 CHARACTER*(*) file,var
4717 INTEGER line,tag,sz,hdr_rec_size
4718 INTEGER comm_io_group
4719 INTEGER sze
4720 INTEGER hdrbuf(*), outbuf(*)
4721 INTEGER hdrbufsize, outbufsize
4723 ! write(0,*)'collect_on_comm_debug2 ',trim(file),line,trim(var),tag,sz,hdr_rec_size,sze,hdrbufsize,outbufsize
4724 CALL collect_on_comm( comm_io_group, &
4725 sze, &
4726 hdrbuf, hdrbufsize , &
4727 outbuf, outbufsize )
4728 ! write(0,*)'collect_on_comm_debug2 ',trim(file),line,'returning for ',trim(var)