/wrfv2_fire/frame/module_io_quilt.F
FORTRAN Legacy | 5118 lines | 2434 code | 470 blank | 2214 comment | 109 complexity | f10468d5f1c7c19ff9c349ed06e1796e MD5
Possible License(s): AGPL-1.0
- !WRF:DRIVER_LAYER:IO
- !
- #define DEBUG_LVL 50
- !#define mpi_x_comm_size(i,j,k) Mpi_Comm_Size ( i,j,k ) ; write(0,*) __LINE__
- #define mpi_x_comm_size(i,j,k) Mpi_Comm_Size ( i,j,k )
- ! Workaround for bug in the IBM MPI implementation. Look near the
- ! bottom of this file for an explanation.
- #ifdef IBM_REDUCE_BUG_WORKAROUND
- #define mpi_x_reduce(sb,rb,c,dt,op,r,com,ierr) reduce_add_integer(sb,rb,c,r,com)
- #else
- #define mpi_x_reduce(sb,rb,c,dt,op,r,com,ierr) MPI_Reduce(sb,rb,c,dt,op,r,com,ierr)
- #endif
- MODULE module_wrf_quilt
- !<DESCRIPTION>
- !<PRE>
- ! This module contains WRF-specific I/O quilt routines called by both
- ! client (compute) and server (I/O quilt) tasks. I/O quilt servers are
- ! a run-time optimization that allow I/O operations, executed on the I/O
- ! quilt server tasks, to be overlapped with useful computation, executed on
- ! the compute tasks. Since I/O operations are often quite slow compared to
- ! computation, this performance optimization can increase parallel
- ! efficiency.
- !
- ! Currently, one group of I/O servers can be specified at run-time. Namelist
- ! variable "nio_tasks_per_group" is used to specify the number of I/O server
- ! tasks in this group. In most cases, parallel efficiency is optimized when
- ! the minimum number of I/O server tasks are used. If memory needed to cache
- ! I/O operations fits on a single processor, then set nio_tasks_per_group=1.
- ! If not, increase the number of I/O server tasks until I/O operations fit in
- ! memory. In the future, multiple groups of I/O server tasks will be
- ! supported. The number of groups will be specified by namelist variable
- ! "nio_groups". For now, nio_groups must be set to 1. Currently, I/O servers
- ! only support overlap of output operations with computation. Also, only I/O
- ! packages that do not support native parallel I/O may be used with I/O server
- ! tasks. This excludes PHDF5 and MCEL.
- !
- ! In this module, the I/O quilt server tasks call package-dependent
- ! WRF-specific I/O interfaces to perform I/O operations requested by the
- ! client (compute) tasks. All of these calls occur inside subroutine
- ! quilt().
- !
- ! The client (compute) tasks call package-independent WRF-specific "quilt I/O"
- ! interfaces that send requests to the I/O quilt servers. All of these calls
- ! are made from module_io.F.
- !
- ! These routines have the same names and (roughly) the same arguments as those
- ! specified in the WRF I/O API except that:
- ! - "Quilt I/O" routines defined in this file and called by routines in
- ! module_io.F have the "wrf_quilt_" prefix.
- ! - Package-dependent routines called from routines in this file are defined
- ! in the external I/O packages and have the "ext_" prefix.
- !
- ! Both client (compute) and server tasks call routine init_module_wrf_quilt()
- ! which then calls setup_quilt_servers() to determine which tasks are compute
- ! tasks and which are server tasks. Before the end of init_module_wrf_quilt()
- ! server tasks call routine quilt() and remain there for the rest of the model
- ! run. Compute tasks return from init_module_wrf_quilt() to perform model
- ! computations.
- !
- ! See http://www.mmm.ucar.edu/wrf/WG2/software_2.0/IOAPI.doc for the latest
- ! version of the WRF I/O API. This document includes detailed descriptions
- ! of subroutines and their arguments that are not duplicated here.
- !</PRE>
- !</DESCRIPTION>
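- !
- ! For quick reference, quilting is configured from the &namelist_quilt
- ! section of namelist.input. A minimal sketch (the values shown are
- ! illustrative, not recommendations):
- !<PRE>
- ! &namelist_quilt
- !   nio_tasks_per_group = 2,
- !   nio_groups          = 1,
- ! /
- !</PRE>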
- USE module_internal_header_util
- USE module_timing
- INTEGER, PARAMETER :: int_num_handles = 99
- INTEGER, PARAMETER :: max_servers = int_num_handles+1 ! why +1?
- LOGICAL, DIMENSION(int_num_handles) :: okay_to_write, int_handle_in_use, okay_to_commit
- INTEGER, DIMENSION(int_num_handles) :: int_num_bytes_to_write, io_form
- REAL, POINTER,SAVE :: int_local_output_buffer(:)
- INTEGER, SAVE :: int_local_output_cursor
- LOGICAL :: quilting_enabled
- LOGICAL :: disable_quilt = .FALSE.
- INTEGER :: prev_server_for_handle = -1
- INTEGER :: server_for_handle(int_num_handles)
- INTEGER :: reduced(2), reduced_dummy(2)
- LOGICAL, EXTERNAL :: wrf_dm_on_monitor
- INTEGER :: mpi_comm_avail,availrank
- LOGICAL :: in_avail=.false., poll_servers=.false.
- INTEGER nio_groups
- #ifdef DM_PARALLEL
- INTEGER :: mpi_comm_local
- LOGICAL :: compute_node
- LOGICAL :: compute_group_master(max_servers)
- INTEGER :: mpi_comm_io_groups(max_servers)
- INTEGER :: nio_tasks_in_group
- INTEGER :: nio_tasks_per_group
- INTEGER :: ncompute_tasks
- INTEGER :: ntasks
- INTEGER :: mytask
- INTEGER, PARAMETER :: onebyte = 1
- INTEGER comm_io_servers, iserver, hdrbufsize, obufsize
- INTEGER, DIMENSION(4096) :: hdrbuf
- INTEGER, DIMENSION(int_num_handles) :: handle
- #endif
- #ifdef IBM_REDUCE_BUG_WORKAROUND
- ! Workaround for bug in the IBM MPI implementation. Look near the
- ! bottom of this file for an explanation.
- interface reduce_add_integer
- module procedure reduce_add_int_arr
- module procedure reduce_add_int_scl
- end interface
- #endif
- CONTAINS
- #if defined(DM_PARALLEL) && !defined( STUBMPI )
- INTEGER FUNCTION get_server_id ( dhandle )
- !<DESCRIPTION>
- ! Logic on the client side to know which I/O server
- ! group to send to. If the unit corresponds to a file that's
- ! already been opened, then we have no choice but to send the
- ! data to that group again, regardless of whether there are
- ! other server-groups. If it's a new file, we can choose a new
- ! server group. I.e. opening a file locks it onto a server
- ! group. Closing the file unlocks it.
- !</DESCRIPTION>
- IMPLICIT NONE
- INTEGER, INTENT(IN) :: dhandle
- IF ( dhandle .GE. 1 .AND. dhandle .LE. int_num_handles ) THEN
- IF ( server_for_handle ( dhandle ) .GE. 1 ) THEN
- get_server_id = server_for_handle ( dhandle )
- ELSE
- IF(poll_servers) THEN
- ! Poll server group masters to find an inactive I/O server group:
- call wrf_quilt_find_server(server_for_handle(dhandle))
- ELSE
- ! Server polling is disabled, so cycle through servers:
- prev_server_for_handle = mod ( prev_server_for_handle + 1 , nio_groups )
- server_for_handle( dhandle ) = prev_server_for_handle+1
- ENDIF
- get_server_id=server_for_handle(dhandle)
- ENDIF
- ELSE
- CALL wrf_message('module_io_quilt: get_server_id bad dhandle' )
- ENDIF
- END FUNCTION get_server_id
- #endif
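- ! Illustration of the round-robin branch above (hypothetical values):
- ! with nio_groups=3 and server polling disabled, successive new handles
- ! are assigned as follows.
- !<PRE>
- !   prev_server_for_handle:  -1 -> 0 -> 1 -> 2 -> 0 -> ...
- !   server_for_handle:             1    2    3    1  ...
- !</PRE>
- ! That is, the group ids handed out are 1-based while the modulo
- ! arithmetic itself is 0-based.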
- SUBROUTINE set_server_id ( dhandle, value )
- IMPLICIT NONE
- INTEGER, INTENT(IN) :: dhandle, value
- IF ( dhandle .GE. 1 .AND. dhandle .LE. int_num_handles ) THEN
- server_for_handle(dhandle) = value
- ELSE
- CALL wrf_message('module_io_quilt: set_server_id bad dhandle' )
- ENDIF
- END SUBROUTINE set_server_id
- LOGICAL FUNCTION get_poll_servers()
- implicit none
- get_poll_servers=poll_servers
- end FUNCTION get_poll_servers
- #if defined( DM_PARALLEL ) && !defined( STUBMPI )
- SUBROUTINE int_get_fresh_handle( retval )
- !<DESCRIPTION>
- ! Find an unused "client file handle" and return it in retval.
- ! The "client file handle" is used to remember how a file was opened
- ! so clients do not need to ask the I/O quilt servers for this information.
- ! It is also used as a file identifier in communications with the I/O
- ! server task.
- !
- ! Note that client tasks know nothing about package-specific handles.
- ! Only the I/O quilt servers know about them.
- !</DESCRIPTION>
- INTEGER i, retval
- retval = -1
- DO i = 1, int_num_handles
- IF ( .NOT. int_handle_in_use(i) ) THEN
- retval = i
- GOTO 33
- ENDIF
- ENDDO
- 33 CONTINUE
- IF ( retval < 0 ) THEN
- CALL wrf_error_fatal("frame/module_io_quilt.F: int_get_fresh_handle() can not find a free handle")
- ENDIF
- int_handle_in_use(i) = .TRUE.
- NULLIFY ( int_local_output_buffer )
- END SUBROUTINE int_get_fresh_handle
- SUBROUTINE setup_quilt_servers ( nio_tasks_per_group, &
- mytask, &
- ntasks, &
- n_groups_arg, &
- nio, &
- mpi_comm_wrld, &
- mpi_comm_local, &
- mpi_comm_io_groups)
- !<DESCRIPTION>
- ! Both client (compute) and server tasks call this routine to
- ! determine which tasks are compute tasks and which are I/O server tasks.
- !
- ! Module variables MPI_COMM_LOCAL and MPI_COMM_IO_GROUPS(:) are set up to
- ! contain MPI communicators as follows:
- !
- ! MPI_COMM_LOCAL is the communicator for the local group of tasks. For the
- ! compute tasks it is the group of compute tasks; for a server group it is
- ! the communicator of tasks in that server group.
- !
- ! Elements of MPI_COMM_IO_GROUPS are communicators that each contain one or
- ! more compute tasks and a single I/O server assigned to those compute tasks.
- ! The I/O server task is always the last task in these communicators.
- ! On a compute task, which has a single associate in each of the server
- ! groups, MPI_COMM_IO_GROUPS is treated as an array; each element corresponds
- ! to a different server group.
- ! On a server task only the first element of MPI_COMM_IO_GROUPS is used
- ! because each server task is part of only one io_group.
- !
- ! I/O server tasks in each I/O server group are divided among compute tasks as
- ! evenly as possible.
- !
- ! When multiple I/O server groups are used, each must have the same number of
- ! tasks. When the total number of extra I/O tasks does not divide evenly by
- ! the number of io server groups requested, the remainder tasks are not used
- ! (wasted).
- !
- ! For example, communicator membership for 18 tasks with nio_groups=2 and
- ! nio_tasks_per_group=3 is shown below:
- !
- !<PRE>
- ! Membership for MPI_COMM_LOCAL communicators:
- ! COMPUTE TASKS: 0 1 2 3 4 5 6 7 8 9 10 11
- ! 1ST I/O SERVER GROUP: 12 13 14
- ! 2ND I/O SERVER GROUP: 15 16 17
- !
- ! Membership for MPI_COMM_IO_GROUPS(1):
- ! COMPUTE TASKS 0, 3, 6, 9: 0 3 6 9 12
- ! COMPUTE TASKS 1, 4, 7,10: 1 4 7 10 13
- ! COMPUTE TASKS 2, 5, 8,11: 2 5 8 11 14
- ! I/O SERVER TASK 12: 0 3 6 9 12
- ! I/O SERVER TASK 13: 1 4 7 10 13
- ! I/O SERVER TASK 14: 2 5 8 11 14
- ! I/O SERVER TASK 15: 0 3 6 9 15
- ! I/O SERVER TASK 16: 1 4 7 10 16
- ! I/O SERVER TASK 17: 2 5 8 11 17
- !
- ! Membership for MPI_COMM_IO_GROUPS(2):
- ! COMPUTE TASKS 0, 3, 6, 9: 0 3 6 9 15
- ! COMPUTE TASKS 1, 4, 7,10: 1 4 7 10 16
- ! COMPUTE TASKS 2, 5, 8,11: 2 5 8 11 17
- ! I/O SERVER TASK 12: ** not used **
- ! I/O SERVER TASK 13: ** not used **
- ! I/O SERVER TASK 14: ** not used **
- ! I/O SERVER TASK 15: ** not used **
- ! I/O SERVER TASK 16: ** not used **
- ! I/O SERVER TASK 17: ** not used **
- !</PRE>
- !</DESCRIPTION>
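- !
- ! The layout above is built with MPI's color/key split idiom: tasks that
- ! pass the same color to MPI_Comm_split end up in the same communicator,
- ! and MPI_UNDEFINED excludes a task from all of them. Schematically (this
- ! is the pattern used repeatedly below, with "newcomm" standing in for
- ! whichever communicator is being built):
- !<PRE>
- !   CALL MPI_Comm_dup( mpi_comm_wrld, comdup, ierr )
- !   CALL MPI_Comm_split( comdup, icolor(mytask+1), mytask, newcomm, ierr )
- !</PRE>
- ! Using mytask as the key preserves the original rank ordering inside
- ! each resulting communicator.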
- USE module_configure
- #ifdef DM_PARALLEL
- USE module_dm, ONLY : compute_mesh
- #endif
- IMPLICIT NONE
- INCLUDE 'mpif.h'
- INTEGER, INTENT(IN) :: nio_tasks_per_group, mytask, ntasks, &
- n_groups_arg, mpi_comm_wrld
- INTEGER, INTENT(OUT) :: mpi_comm_local, nio
- INTEGER, DIMENSION(100), INTENT(OUT) :: mpi_comm_io_groups
- ! Local
- INTEGER :: i, j, ii, comdup, ierr, niotasks, n_groups, iisize
- INTEGER, DIMENSION(ntasks) :: icolor
- CHARACTER*128 mess
- INTEGER :: io_form_setting
- INTEGER :: me
- INTEGER :: k, m, nprocx, nprocy
- LOGICAL :: reorder_mesh
- !check the namelist and make sure there are no output forms specified
- !that cannot be quilted
- CALL nl_get_io_form_history(1, io_form_setting) ; call sokay( 'history', io_form_setting )
- CALL nl_get_io_form_restart(1, io_form_setting) ; call sokay( 'restart', io_form_setting )
- CALL nl_get_io_form_auxhist1(1, io_form_setting) ; call sokay( 'auxhist1', io_form_setting )
- CALL nl_get_io_form_auxhist2(1, io_form_setting) ; call sokay( 'auxhist2', io_form_setting )
- CALL nl_get_io_form_auxhist3(1, io_form_setting) ; call sokay( 'auxhist3', io_form_setting )
- CALL nl_get_io_form_auxhist4(1, io_form_setting) ; call sokay( 'auxhist4', io_form_setting )
- CALL nl_get_io_form_auxhist5(1, io_form_setting) ; call sokay( 'auxhist5', io_form_setting )
- CALL nl_get_io_form_auxhist6(1, io_form_setting) ; call sokay( 'auxhist6', io_form_setting )
- CALL nl_get_io_form_auxhist7(1, io_form_setting) ; call sokay( 'auxhist7', io_form_setting )
- CALL nl_get_io_form_auxhist8(1, io_form_setting) ; call sokay( 'auxhist8', io_form_setting )
- CALL nl_get_io_form_auxhist9(1, io_form_setting) ; call sokay( 'auxhist9', io_form_setting )
- CALL nl_get_io_form_auxhist10(1, io_form_setting) ; call sokay( 'auxhist10', io_form_setting )
- CALL nl_get_io_form_auxhist11(1, io_form_setting) ; call sokay( 'auxhist11', io_form_setting )
- n_groups = n_groups_arg
- IF ( n_groups .LT. 1 ) n_groups = 1
- compute_node = .TRUE.
- !<DESCRIPTION>
- ! nio is the number of I/O tasks per group. If there aren't enough tasks
- ! to satisfy the requirement that there be at least as many compute tasks
- ! as I/O tasks in each group, then just print a warning and drop out of
- ! quilting.
- !</DESCRIPTION>
- nio = nio_tasks_per_group
- ncompute_tasks = ntasks - (nio * n_groups)
- IF ( ncompute_tasks .LT. nio ) THEN
- WRITE(mess,'("Not enough tasks to have ",I3," groups of ",I3," I/O tasks. No quilting.")')n_groups,nio
- nio = 0
- ncompute_tasks = ntasks
- ELSE
- WRITE(mess,'("Quilting with ",I3," groups of ",I3," I/O tasks.")')n_groups,nio
- ENDIF
- CALL wrf_message(mess)
- IF ( nio .LT. 0 ) THEN
- nio = 0
- ENDIF
- IF ( nio .EQ. 0 ) THEN
- quilting_enabled = .FALSE.
- mpi_comm_local = mpi_comm_wrld
- mpi_comm_io_groups = mpi_comm_wrld
- RETURN
- ENDIF
- quilting_enabled = .TRUE.
- ! First construct the local communicators
- ! prepare to split the communicator by designating compute-only tasks
- DO i = 1, ncompute_tasks
- icolor(i) = 0
- ENDDO
- ii = 1
- ! and designating the groups of i/o tasks
- DO i = ncompute_tasks+1, ntasks, nio
- DO j = i, i+nio-1
- icolor(j) = ii
- ENDDO
- ii = ii+1
- ENDDO
- CALL MPI_Comm_dup(mpi_comm_wrld,comdup,ierr)
- CALL MPI_Comm_split(comdup,icolor(mytask+1),mytask,mpi_comm_local,ierr)
- ! Now construct the communicators for the io_groups
- CALL nl_get_reorder_mesh(1,reorder_mesh)
- IF ( reorder_mesh ) THEN
- reorder_mesh = .FALSE.
- CALL nl_set_reorder_mesh(1,reorder_mesh)
- CALL wrf_message('Warning: reorder_mesh does not work with quilting. Disabled reorder_mesh.')
- ENDIF
- ! assign the compute tasks to the i/o tasks in full rows
- CALL compute_mesh( ncompute_tasks, nprocx, nprocy )
- nio = min(nio,nprocy)
- m = mod(nprocy,nio) ! divide up remainder, one extra row per server, until gone
- ii = 1
- DO j = 1, nio, 1
- DO k = 1,nprocy/nio+min(m,1)
- DO i = 1, nprocx
- icolor(ii) = j - 1
- ii = ii + 1
- ENDDO
- ENDDO
- m = max(m-1,0)
- ENDDO
- ! ... and add the io servers as the last task in each group
- DO j = 1, n_groups
- ! TBH: each I/O group will contain only one I/O server
- DO i = ncompute_tasks+1,ntasks
- icolor(i) = MPI_UNDEFINED
- ENDDO
- ii = 0
- DO i = ncompute_tasks+(j-1)*nio+1,ncompute_tasks+j*nio
- icolor(i) = ii
- ii = ii+1
- ENDDO
- CALL MPI_Comm_dup(mpi_comm_wrld,comdup,ierr)
- CALL MPI_Comm_split(comdup,icolor(mytask+1),mytask, &
- mpi_comm_io_groups(j),ierr)
- ENDDO
- #ifdef PNETCDF_QUILT
- if(poll_servers) then
- poll_servers=.false.
- call wrf_message('Warning: server polling does not work with pnetcdf_quilt. Disabled poll_servers.')
- else
- #endif
- if(nio_groups==1) then
- poll_servers=.false.
- call wrf_message('Server polling is useless with one io group. Disabled poll_servers.')
- endif
- #ifdef PNETCDF_QUILT
- endif
- #endif
- if(poll_servers) then
- ! If server polling is enabled, we need to create mpi_comm_avail,
- ! which contains the monitor process, and the I/O server master process
- ! for each I/O server group. This will be used in the routines
- ! wrf_quilt_find_server and wrf_quilt_server_ready to find inactive
- ! I/O servers for new data handles in get_server_id.
- ! The "in_avail" is set to true iff I am in the mpi_comm_avail.
- call mpi_comm_rank(mpi_comm_wrld,me,ierr)
- icolor=MPI_UNDEFINED
- in_avail=.false.
- if(wrf_dm_on_monitor()) then
- in_avail=.true. ! monitor process is in mpi_comm_avail
- endif
- icolor(1)=1
- do j=1,n_groups
- i=ncompute_tasks+j*nio-1
- if(me+1==i) then
- in_avail=.true. ! I/O server masters are in mpi_comm_avail
- endif
- icolor(i)=1
- enddo
- CALL MPI_Comm_dup(mpi_comm_wrld,comdup,ierr)
- CALL MPI_Comm_split(comdup,icolor(me+1),me, &
- mpi_comm_avail,ierr)
- availrank=MPI_UNDEFINED
- if(in_avail) then
- call mpi_comm_rank(mpi_comm_avail,availrank,ierr)
- endif
- endif
- compute_group_master = .FALSE.
- compute_node = .FALSE.
- DO j = 1, n_groups
- IF ( mytask .LT. ncompute_tasks .OR. & ! I am a compute task
- (ncompute_tasks+(j-1)*nio .LE. mytask .AND. mytask .LT. ncompute_tasks+j*nio) & ! I am the I/O server for this group
- ) THEN
- CALL MPI_Comm_Size( mpi_comm_io_groups(j) , iisize, ierr )
- ! Get the rank of this compute task in the compute+io
- ! communicator to which it belongs
- CALL MPI_Comm_Rank( mpi_comm_io_groups(j) , me , ierr )
- ! If I am an I/O server for this group then make that group's
- ! communicator the first element in the mpi_comm_io_groups array
- ! (I will ignore all of the other elements).
- IF (ncompute_tasks+(j-1)*nio .LE. mytask .AND. mytask .LT. ncompute_tasks+j*nio) THEN
- mpi_comm_io_groups(1) = mpi_comm_io_groups(j)
- ELSE
- compute_node = .TRUE.
- ! If I am a compute task, check whether I am the member of my
- ! group that will communicate things that should be sent just
- ! once (e.g. commands) to the IO server of my group.
- compute_group_master(j) = (me .EQ. 0)
- ! IF( compute_group_master(j) ) WRITE(*,*) mytask,': ARPDBG : I will talk to IO server in group ',j
- ENDIF
- ENDIF
- ENDDO
- END SUBROUTINE setup_quilt_servers
- SUBROUTINE sokay ( stream, io_form )
- USE module_state_description
- CHARACTER*(*) stream
- CHARACTER*256 mess
- INTEGER io_form
- SELECT CASE (io_form)
- #ifdef NETCDF
- CASE ( IO_NETCDF )
- RETURN
- #endif
- #ifdef INTIO
- CASE ( IO_INTIO )
- RETURN
- #endif
- #ifdef YYY
- CASE ( IO_YYY )
- RETURN
- #endif
- #ifdef GRIB1
- CASE ( IO_GRIB1 )
- RETURN
- #endif
- #ifdef GRIB2
- CASE ( IO_GRIB2 )
- RETURN
- #endif
- CASE (0)
- RETURN
- CASE DEFAULT
- WRITE(mess,*)' An output format has been specified that is incompatible with quilting: io_form: ',io_form,' ',TRIM(stream)
- CALL wrf_error_fatal(mess)
- END SELECT
- END SUBROUTINE sokay
- SUBROUTINE quilt
- !<DESCRIPTION>
- ! I/O server tasks call this routine and remain in it for the rest of the
- ! model run. I/O servers receive I/O requests from compute tasks and
- ! perform requested I/O operations by calling package-dependent WRF-specific
- ! I/O interfaces. Requests are sent in the form of "data headers". Each
- ! request has a unique "header" message associated with it. For requests that
- ! contain large amounts of data, the data is appended to the header. See
- ! file module_internal_header_util.F for detailed descriptions of all
- ! headers.
- !
- ! We wish to be able to link to different packages depending on whether
- ! the I/O is restart, initial, history, or boundary.
- !</DESCRIPTION>
- USE module_state_description
- USE module_quilt_outbuf_ops
- IMPLICIT NONE
- INCLUDE 'mpif.h'
- #include "intio_tags.h"
- #include "wrf_io_flags.h"
- INTEGER itag, ninbuf, ntasks_io_group, ntasks_local_group, mytask_local, ierr
- INTEGER istat
- INTEGER mytask_io_group
- INTEGER :: nout_set = 0
- INTEGER :: obufsize, bigbufsize, chunksize, sz
- REAL, DIMENSION(1) :: dummy
- INTEGER, ALLOCATABLE, DIMENSION(:) :: obuf, bigbuf
- REAL, ALLOCATABLE, DIMENSION(:) :: RDATA
- INTEGER, ALLOCATABLE, DIMENSION(:) :: IDATA
- CHARACTER (LEN=512) :: CDATA
- CHARACTER (LEN=80) :: fname
- INTEGER icurs, hdrbufsize, itypesize, ftypesize, rtypesize, Status, fstat, io_form_arg
- INTEGER :: DataHandle, FieldType, Comm, IOComm, DomainDesc, code, Count
- INTEGER, DIMENSION(3) :: DomainStart , DomainEnd , MemoryStart , MemoryEnd , PatchStart , PatchEnd
- INTEGER :: dummybuf(1)
- INTEGER :: num_noops, num_commit_messages, num_field_training_msgs, hdr_tag
- CHARACTER (len=256) :: DateStr , Element, VarName, MemoryOrder , Stagger , DimNames(3), FileName, SysDepInfo, mess
- INTEGER, EXTERNAL :: use_package
- LOGICAL :: stored_write_record, retval
- INTEGER iii, jjj, vid, CC, DD
- LOGICAL :: call_server_ready
- logical okay_to_w
- character*120 sysline
- ! If we've been built with PNETCDF_QUILT defined then we use parallel I/O
- ! within the group of I/O servers rather than gathering the data onto the
- ! root I/O server. Unfortunately, this approach means that we can no longer
- ! select different I/O layers for use with quilting at run time. ARPDBG.
- ! This code is sufficiently different that it is kept in the separate
- ! quilt_pnc() routine.
- #ifdef PNETCDF_QUILT
- CALL quilt_pnc()
- RETURN
- #endif
- ! Call ext_pkg_ioinit() routines to initialize I/O packages.
- SysDepInfo = " "
- #ifdef NETCDF
- CALL ext_ncd_ioinit( SysDepInfo, ierr)
- #endif
- #ifdef INTIO
- CALL ext_int_ioinit( SysDepInfo, ierr )
- #endif
- #ifdef XXX
- CALL ext_xxx_ioinit( SysDepInfo, ierr)
- #endif
- #ifdef YYY
- CALL ext_yyy_ioinit( SysDepInfo, ierr)
- #endif
- #ifdef ZZZ
- CALL ext_zzz_ioinit( SysDepInfo, ierr)
- #endif
- #ifdef GRIB1
- CALL ext_gr1_ioinit( SysDepInfo, ierr)
- #endif
- #ifdef GRIB2
- CALL ext_gr2_ioinit( SysDepInfo, ierr)
- #endif
- call_server_ready = .true. ! = true when the server is ready for a new file
- okay_to_commit = .false.
- stored_write_record = .false.
- ninbuf = 0
- ! get info. about the I/O server group that this I/O server task
- ! belongs to
- ! Last task in this I/O server group is the I/O server "root"
- ! The I/O server "root" actually writes data to disk
- ! TBH: WARNING: This is also implicit in the call to collect_on_comm().
- CALL mpi_x_comm_size( mpi_comm_io_groups(1), ntasks_io_group, ierr )
- CALL MPI_COMM_RANK( mpi_comm_io_groups(1), mytask_io_group, ierr )
- CALL mpi_x_comm_size( mpi_comm_local, ntasks_local_group, ierr )
- CALL MPI_COMM_RANK( mpi_comm_local, mytask_local, ierr )
- CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
- IF ( itypesize <= 0 ) THEN
- CALL wrf_error_fatal("frame/module_io_quilt.F: quilt: type size <= 0 invalid")
- ENDIF
- ! Work out whether this I/O server processor has one fewer associated
- ! compute proc than the most any processor has. This can happen when the
- ! number of I/O tasks does not evenly divide the number of compute tasks.
- ! It is needed to keep the I/O tasks synchronized on the same message
- ! when they start communicating to stitch together an output.
- !
- ! Compute processes associated with this task:
- CC = ntasks_io_group - 1
- ! Number of compute tasks per I/O task (less remainder)
- DD = ncompute_tasks / ntasks_local_group
- !
- ! CC-DD is 1 on servers with the maximum number of compute clients,
- ! and 0 on servers with one fewer than the maximum.
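- !
- ! Worked example (hypothetical counts): ncompute_tasks=10 served by
- ! ntasks_local_group=3 servers gives DD = 10/3 = 3; the three servers
- ! have 4, 3 and 3 compute clients, so CC = 4, 3, 3 and CC-DD = 1, 0, 0.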
- ! infinite loop until shutdown message received
- ! This is the main request-handling loop. I/O quilt servers stay in this loop
- ! until the model run ends.
- okay_to_w = .false.
- DO WHILE (.TRUE.) ! {
- !<DESCRIPTION>
- ! Each I/O server receives requests from its compute tasks. Each request
- ! is contained in a data header (see module_internal_header_util.F for
- ! detailed descriptions of data headers).
- ! Each request is sent in two phases. First, sizes of all messages that
- ! will be sent from the compute tasks to this I/O server are summed on the
- ! I/O server via MPI_reduce(). The I/O server then allocates buffer "obuf"
- ! and receives concatenated messages from the compute tasks in it via the
- ! call to collect_on_comm(). Note that "sizes" are generally expressed in
- ! *bytes* in this code so conversion to "count" (number of Fortran words) is
- ! required for Fortran indexing and MPI calls.
- !</DESCRIPTION>
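- !
- ! For reference, the matching client-side sequence (a sketch only; the
- ! real sends live in the wrf_quilt_* routines later in this file, and
- ! names such as ibuf, mysize, myhandle, sendbuf and server_rank are
- ! placeholders) looks roughly like:
- !<PRE>
- !   ibuf(1) = mysize ; ibuf(2) = myhandle   ! bytes buffered, data handle
- !   CALL MPI_Reduce( ibuf, dummy, 2, MPI_INTEGER, MPI_SUM, &
- !                    server_rank, mpi_comm_io_groups(i), ierr )
- !   ! then ship the concatenated headers+data to the I/O server
- !   CALL collect_on_comm( mpi_comm_io_groups(i), onebyte, &
- !                         sendbuf, mysize, dummy, 0 )
- !</PRE>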
- if(poll_servers .and. call_server_ready) then
- call_server_ready=.false.
- ! Send a message to the monitor telling it we're ready
- ! for a new data handle.
- call wrf_quilt_server_ready()
- endif
- ! wait for info from compute tasks in the I/O group that we're ready to rock
- ! obufsize will contain number of *bytes*
- !CALL start_timing()
- ! first element of reduced is obufsize, second is DataHandle
- ! if needed (currently needed only for ioclose).
- reduced_dummy = 0
- CALL mpi_x_reduce( reduced_dummy, reduced, 2, MPI_INTEGER, MPI_SUM, mytask_io_group, mpi_comm_io_groups(1), ierr )
- obufsize = reduced(1)
- !CALL end_timing("MPI_Reduce at top of forever loop")
- !JMDEBUGwrite(0,*)'obufsize = ',obufsize
- ! Negative obufsize will trigger I/O server exit.
- IF ( obufsize .LT. 0 ) THEN
- IF ( obufsize .EQ. -100 ) THEN ! magic number
- #ifdef NETCDF
- CALL ext_ncd_ioexit( Status )
- #endif
- #ifdef INTIO
- CALL ext_int_ioexit( Status )
- #endif
- #ifdef XXX
- CALL ext_xxx_ioexit( Status )
- #endif
- #ifdef YYY
- CALL ext_yyy_ioexit( Status )
- #endif
- #ifdef ZZZ
- CALL ext_zzz_ioexit( Status )
- #endif
- #ifdef GRIB1
- CALL ext_gr1_ioexit( Status )
- #endif
- #ifdef GRIB2
- CALL ext_gr2_ioexit( Status )
- #endif
- CALL wrf_message ( 'I/O QUILT SERVERS DONE' )
- CALL mpi_finalize(ierr)
- STOP
- ELSE
- WRITE(mess,*)'Possible 32-bit overflow on output server. Try larger nio_tasks_per_group in namelist.'
- CALL wrf_error_fatal(mess)
- ENDIF
- ENDIF
- ! CALL start_timing()
- ! Obufsize of zero signals a close
- ! Allocate buffer obuf to be big enough for the data the compute tasks
- ! will send. Note: obuf is size in *bytes* so we need to pare this
- ! down, since the buffer is INTEGER.
- IF ( obufsize .GT. 0 ) THEN
- ALLOCATE( obuf( (obufsize+1)/itypesize ) )
- ! let's roll; get the data from the compute procs and put in obuf
- CALL collect_on_comm_debug(__FILE__,__LINE__, mpi_comm_io_groups(1), &
- onebyte, &
- dummy, 0, &
- obuf, obufsize )
- ! CALL end_timing( "quilt on server: collecting data from compute procs" )
- ELSE
- ! Necessarily, the compute processes send the ioclose signal,
- ! if there is one, after the iosync, which means they would
- ! stall on the ioclose message waiting for the quilt processes
- ! if it were handled the way other messages are collected,
- ! using collect_on_comm. This special case avoids that, but it
- ! needs a special signal (obufsize zero) and the DataHandle of
- ! the file to be closed. That handle is sent as the second
- ! word of the io_close message received by the MPI_Reduce above.
- ! Then a header representing the ioclose message is constructed
- ! here and handled below as if it were received from the
- ! compute processes. The clients (compute processes) must be
- ! careful to send this correctly (one compute process sends the
- ! actual handle and everyone else sends a zero, so the result
- ! sums to the value of the handle).
- !
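- ! For example (illustrative): to close handle 7, one compute task
- ! contributes (/ 0, 7 /) to that MPI_Reduce and all other compute
- ! tasks contribute (/ 0, 0 /), so the server sees obufsize = 0 and
- ! reduced(2) = 7.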
- ALLOCATE( obuf( 4096 ) )
- ! DataHandle is provided as second element of reduced
- CALL int_gen_handle_header( obuf, obufsize, itypesize, &
- reduced(2) , int_ioclose )
- if(poll_servers) then
- ! Once we're done closing, we need to tell the master
- ! process that we're ready for more data.
- call_server_ready=.true.
- endif
- ENDIF
- !write(0,*)'calling init_store_piece_of_field'
- ! Now all messages received from the compute clients are stored in
- ! obuf. Scan through obuf and extract headers and field data and store in
- ! internal buffers. The scan is done twice, first to determine sizes of
- ! internal buffers required for storage of headers and fields and second to
- ! actually store the headers and fields. This bit of code does not do the
- ! "quilting" (assembly of patches into full domains). For each field, it
- ! simply concatenates all received patches for the field into a separate
- ! internal buffer (i.e. one buffer per field). Quilting is done later by
- ! routine store_patch_in_outbuf().
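- !
- ! Schematically, the two passes over obuf are:
- !<PRE>
- !   pass 1: walk obuf; add_to_bufsize_for_field( VarName, nbytes )
- !           (the first store_piece_of_field call then allocates one
- !            internal buffer per field)
- !   pass 2: walk obuf again; store_piece_of_field( rec, VarName, nbytes )
- !</PRE>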
- CALL init_store_piece_of_field
- CALL mpi_type_size ( MPI_INTEGER , itypesize , ierr )
- !write(0,*)'mpi_type_size returns ', itypesize
- ! Scan obuf the first time to calculate the size of the buffer required for
- ! each field. Calls to add_to_bufsize_for_field() accumulate sizes.
- vid = 0
- icurs = itypesize
- num_noops = 0
- num_commit_messages = 0
- num_field_training_msgs = 0
- DO WHILE ( icurs .lt. obufsize ) ! {
- hdr_tag = get_hdr_tag( obuf ( icurs / itypesize ) )
- SELECT CASE ( hdr_tag )
- CASE ( int_field )
- CALL int_get_write_field_header ( obuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize, &
- DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
- DomainDesc , MemoryOrder , Stagger , DimNames , &
- DomainStart , DomainEnd , &
- MemoryStart , MemoryEnd , &
- PatchStart , PatchEnd )
- chunksize = (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
- (PatchEnd(3)-PatchStart(3)+1)*ftypesize
- IF ( DomainDesc .EQ. 333933 ) THEN ! Training write, only one per group of tasks
- IF ( num_field_training_msgs .EQ. 0 ) THEN
- call add_to_bufsize_for_field( VarName, hdrbufsize )
- !write(0,*) 'X-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
- ENDIF
- num_field_training_msgs = num_field_training_msgs + 1
- ELSE
- call add_to_bufsize_for_field( VarName, hdrbufsize )
- !write(0,*) 'X-2a', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
- ENDIF
- icurs = icurs + hdrbufsize
- !write(0,*) 'X-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
- ! If this is a real write (i.e. not a training write), accumulate
- ! buffersize for this field.
- IF ( DomainDesc .NE. 333933 ) THEN ! magic number
- !write(0,*) 'X-1a', chunksize, TRIM(VarName)
- call add_to_bufsize_for_field( VarName, chunksize )
- icurs = icurs + chunksize
- ENDIF
- CASE ( int_open_for_write_commit ) ! only one per group of tasks
- hdrbufsize = obuf(icurs/itypesize)
- IF (num_commit_messages.EQ.0) THEN
- call add_to_bufsize_for_field( 'COMMIT', hdrbufsize )
- ENDIF
- num_commit_messages = num_commit_messages + 1
- icurs = icurs + hdrbufsize
- CASE DEFAULT
- hdrbufsize = obuf(icurs/itypesize)
- ! This logic and the logic in the loop below are used to determine
- ! whether noop records sent by the compute processes should be allowed to go
- ! through. The purpose is to make sure that the communications between this
- ! server and the other servers in this quilt group stay synchronized in
- ! the collection loop below, even when the servers are serving different
- ! numbers of clients. Here are some conditions:
- !
- ! 1. The number of compute clients served will not differ by more than 1
- ! 2. The servers with the extra compute client begin with task 0
- ! of mpi_comm_local, the communicator shared by this group of servers
- !
- ! 3. For each collective field or metadata output from the compute tasks,
- ! there will be one record sent to the associated i/o server task. The
- ! i/o server task collects these records and stores them contiguously
- ! in a buffer (obuf) using collect_on_comm above. Thus, obuf on this
- ! server task will contain one record from each associated compute
- ! task, in order.
- !
- ! 4. In the case of replicated output from the compute tasks
- ! (e.g. put_dom_ti records and control records like
- ! open_for_write_commit type records), compute task 0 is the only
- ! one that sends the record. The other compute tasks send noop
- ! records. Thus, obuf on server task zero will contain the output
- ! record from task 0 followed by noop records from the rest of the
- ! compute tasks associated with task 0. Obuf on the other server
- ! tasks will contain nothing but noop records.
- !
- ! 5. The logic below will not allow any noop records from server task 0.
- ! It allows only one noop record from each of the other server tasks
- ! in the i/o group. This way, for replicated output, when the records
- ! are collected on one server task below, using collect_on_comm on
- ! mpi_comm_local, each task will provide exactly one record for each
- ! call to collect_on_comm: 1 bona fide output record from server task
- ! 0 and noops from the rest.
- IF ((hdr_tag.EQ.int_noop.AND.mytask_local.NE.0.AND.num_noops.LE.0) &
- .OR.hdr_tag.NE.int_noop) THEN
- write(VarName,'(I5.5)')vid
- !write(0,*) 'X-2', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
- call add_to_bufsize_for_field( VarName, hdrbufsize )
- vid = vid+1
- ENDIF
- IF ( hdr_tag .EQ. int_noop ) num_noops = num_noops + 1
- icurs = icurs + hdrbufsize
- END SELECT
- ENDDO ! }
- ! Store the headers and field data in internal buffers. The first call to
- ! store_piece_of_field() allocates internal buffers using sizes computed by
- ! calls to add_to_bufsize_for_field().
- vid = 0
- icurs = itypesize
- num_noops = 0
- num_commit_messages = 0
- num_field_training_msgs = 0
- DO WHILE ( icurs .lt. obufsize ) !{
- !write(0,*) 'A icurs ', icurs, ' obufsize ', obufsize
- hdr_tag = get_hdr_tag( obuf ( icurs / itypesize ) )
- SELECT CASE ( hdr_tag )
- CASE ( int_field )
- CALL int_get_write_field_header ( obuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize, &
- DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
- DomainDesc , MemoryOrder , Stagger , DimNames , &
- DomainStart , DomainEnd , &
- MemoryStart , MemoryEnd , &
- PatchStart , PatchEnd )
- chunksize = (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
- (PatchEnd(3)-PatchStart(3)+1)*ftypesize
- IF ( DomainDesc .EQ. 333933 ) THEN ! Training write, only one per group of tasks
- IF ( num_field_training_msgs .EQ. 0 ) THEN
- call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
- !write(0,*) 'A-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
- ENDIF
- num_field_training_msgs = num_field_training_msgs + 1
- ELSE
- call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
- !write(0,*) 'A-2a', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
- ENDIF
- icurs = icurs + hdrbufsize
- ! If this is a real write (i.e. not a training write), store
- ! this piece of this field.
- IF ( DomainDesc .NE. 333933 ) THEN ! magic number
- !write(0,*) 'A-1a', chunksize, TRIM(VarName),PatchStart(1:3),PatchEnd(1:3)
- call store_piece_of_field( obuf(icurs/itypesize), VarName, chunksize )
- icurs = icurs + chunksize
- ENDIF
- CASE ( int_open_for_write_commit ) ! only one per group of tasks
- hdrbufsize = obuf(icurs/itypesize)
- IF (num_commit_messages.EQ.0) THEN
- call store_piece_of_field( obuf(icurs/itypesize), 'COMMIT', hdrbufsize )
- ENDIF
- num_commit_messages = num_commit_messages + 1
- icurs = icurs + hdrbufsize
- CASE DEFAULT
- hdrbufsize = obuf(icurs/itypesize)
- IF ((hdr_tag.EQ.int_noop.AND.mytask_local.NE.0.AND.num_noops.LE.0) &
- .OR.hdr_tag.NE.int_noop) THEN
- write(VarName,'(I5.5)')vid
- !write(0,*) 'A-2b', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
- call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
- vid = vid+1
- ENDIF
- IF ( hdr_tag .EQ. int_noop ) num_noops = num_noops + 1
- icurs = icurs + hdrbufsize
- END SELECT
- ENDDO !}
- ! Now, for each field, retrieve headers and patches (data) from the internal
- ! buffers and collect them all on the I/O quilt server "root" task.
- CALL init_retrieve_pieces_of_field
- ! Retrieve header and all patches for the first field from the internal
- ! buffers.
- CALL retrieve_pieces_of_field ( obuf , VarName, obufsize, sz, retval )
- ! Sum sizes of all headers and patches (data) for this field from all I/O
- ! servers in this I/O server group onto the I/O server "root".
- CALL mpi_x_reduce( sz, bigbufsize, 1, MPI_INTEGER, MPI_SUM, ntasks_local_group-1, mpi_comm_local, ierr )
- !write(0,*)'seed: sz ',sz,' bigbufsize ',bigbufsize,' VarName ', TRIM(VarName),' retval ',retval
- ! Loop until there are no more fields to retrieve from the internal buffers.
- DO WHILE ( retval ) !{
- #if 0
- #else
- ! I/O server "root" allocates space to collect headers and fields from all
- ! other servers in this I/O server group.
- IF ( mytask_local .EQ. ntasks_local_group-1 ) THEN
- ALLOCATE( bigbuf( (bigbufsize+1)/itypesize ) )
- ENDIF
- ! Collect buffers and fields from all I/O servers in this I/O server group
- ! onto the I/O server "root"
- CALL collect_on_comm_debug2(__FILE__,__LINE__,Trim(VarName), &
- get_hdr_tag(obuf),sz,get_hdr_rec_size(obuf), &
- mpi_comm_local, &
- onebyte, &
- obuf, sz, &
- bigbuf, bigbufsize )
- ! The I/O server "root" now handles collected requests from all compute
- ! tasks served by this I/O server group (i.e. all compute tasks).
- IF ( mytask_local .EQ. ntasks_local_group-1 ) THEN
- !jjj = 4
- !do iii = 1, ntasks_local_group
- ! write(0,*)'i,j,tag,size ', iii, jjj, get_hdr_tag(bigbuf(jjj/4)),get_hdr_rec_size(bigbuf(jjj/4))
- ! jjj = jjj + get_hdr_rec_size(bigbuf(jjj/4))
- !enddo
- icurs = itypesize ! icurs is a byte counter, but buffer is integer
- stored_write_record = .false.
- ! The I/O server "root" loops over the collected requests.
- DO WHILE ( icurs .lt. bigbufsize ) !{
- CALL mpi_type_size ( MPI_INTEGER , itypesize , ierr )
- !write(0,*)'B tag,size ',icurs,get_hdr_tag( bigbuf(icurs/itypesize) ),get_hdr_rec_size( bigbuf(icurs/itypesize) )
- ! The I/O server "root" gets the request out of the next header and
- ! handles it by, in most cases, calling the appropriate external I/O package
- ! interface.
- SELECT CASE ( get_hdr_tag( bigbuf(icurs/itypesize) ) )
- ! The I/O server "root" handles the "noop" (do nothing) request. This is
- ! actually quite easy. "Noop" requests exist to help avoid race conditions.
- ! In some cases, only one compute task knows everything about a request, so
- ! other compute tasks send "noop" requests.
- CASE ( int_noop )
- CALL int_get_noop_header( bigbuf(icurs/itypesize), hdrbufsize, itypesize )
- icurs = icurs + hdrbufsize
- ! The I/O server "root" handles the "put_dom_td_real" request.
- CASE ( int_dom_td_real )
- CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
- ALLOCATE( RData( bigbuf(icurs/itypesize + 4 ) ) ) ! the 5th word of this record is the count of data items ; defined in collect_on_comm.c
- CALL int_get_td_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
- DataHandle, DateStr, Element, RData, Count, code )
- icurs = icurs + hdrbufsize
- SELECT CASE (use_package(io_form(DataHandle)))
- #ifdef NETCDF
- CASE ( IO_NETCDF )
- CALL ext_ncd_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
- #endif
- #ifdef INTIO
- CASE ( IO_INTIO )
- CALL ext_int_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
- #endif
- #ifdef YYY
- CASE ( IO_YYY )
- CALL ext_yyy_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
- #endif
- #ifdef GRIB1
- CASE ( IO_GRIB1 )
- CALL ext_gr1_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
- #endif
- #ifdef GRIB2
- CASE ( IO_GRIB2 )
- CALL ext_gr2_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
- #endif
- CASE DEFAULT
- Status = 0
- END SELECT
- DEALLOCATE( RData )
- ! The I/O server "root" handles the "put_dom_ti_real" request.
- CASE ( int_dom_ti_real )
- !write(0,*)' int_dom_ti_real '
- CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
- ALLOCATE( RData( bigbuf(icurs/itypesize + 4 ) ) ) ! the 5th word of this record is the count of data items ; defined in collect_on_comm.c
- CALL int_get_ti_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
- DataHandle, Element, RData, Count, code )
- icurs = icurs + hdrbufsize
- SELECT CASE (use_package(io_form(DataHandle)))
- #ifdef NETCDF
- CASE ( IO_NETCDF )
- CALL ext_ncd_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
- !write(0,*)'ext_ncd_put_dom_ti_real ',handle(DataHandle),TRIM(Element),RData,Status
- #endif
- #ifdef INTIO
- CASE ( IO_INTIO )
- CALL ext_int_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
- #endif
- #ifdef YYY
- CASE ( IO_YYY )
- CALL ext_yyy_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
- #endif
- #ifdef GRIB1
- CASE ( IO_GRIB1 )
- CALL ext_gr1_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
- #endif
- #ifdef GRIB2
- CASE ( IO_GRIB2 )
- CALL ext_gr2_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
- #endif
- CASE DEFAULT
- Status = 0
- END SELECT
- DEALLOCATE( RData )
- ! The I/O server "root" handles the "put_dom_td_integer" request.
- CASE ( int_dom_td_integer )
- !write(0,*)' int_dom_td_integer '
- CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
- ALLOCATE( IData( bigbuf(icurs/itypesize + 4 ) ) ) ! the 5th word of this record is the count of data items ; defined in collect_on_comm.c
- CALL int_get_td_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
- DataHandle, DateStr, Element, IData, Count, code )
- icurs = icurs + hdrbufsize
- SELECT CASE (use_package(io_form(DataHandle)))
- #ifdef NETCDF
- CASE ( IO_NETCDF )
- CALL ext_ncd_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
- #endif
- #ifdef INTIO
- CASE ( IO_INTIO )
- CALL ext_int_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
- #endif
- #ifdef YYY
- CASE ( IO_YYY )
- CALL ext_yyy_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
- #endif
- #ifdef GRIB1
- CASE ( IO_GRIB1 )
- CALL ext_gr1_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
- #endif
- #ifdef GRIB2
- CASE ( IO_GRIB2 )
- CALL ext_gr2_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
- #endif
- CASE DEFAULT
- Status = 0
- END SELECT
- DEALLOCATE( IData )
- ! The I/O server "root" handles the "put_dom_ti_integer" request.
- CASE ( int_dom_ti_integer )
- !write(0,*)' int_dom_ti_integer '
- CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
- ALLOCATE( IData( bigbuf(icurs/itypesize + 4 ) ) ) ! the 5th word of this record is the count of data items ; defined in collect_on_comm.c
- CALL int_get_ti_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
- DataHandle, Element, IData, Count, code )
- icurs = icurs + hdrbufsize
- SELECT CASE (use_package(io_form(DataHandle)))
- #ifdef NETCDF
- CASE ( IO_NETCDF )
- CALL ext_ncd_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
- !write(0,*)'ext_ncd_put_dom_ti_integer ',handle(DataHandle),TRIM(Element),IData,Status
- #endif
- #ifdef INTIO
- CASE ( IO_INTIO )
- CALL ext_int_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
- #endif
- #ifdef YYY
- CASE ( IO_YYY )
- CALL ext_yyy_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
- #endif
- #ifdef GRIB1
- CASE ( IO_GRIB1 )
- CALL ext_gr1_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
- #endif
- #ifdef GRIB2
- CASE ( IO_GRIB2 )
- CALL ext_gr2_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
- #endif
- CASE DEFAULT
- Status = 0
- END SELECT
- DEALLOCATE( IData)
-
- ! The I/O server "root" handles the "set_time" request.
- CASE ( int_set_time )
- !write(0,*)' int_set_time '
- CALL int_get_ti_header_char( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
- DataHandle, Element, VarName, CData, code )
- SELECT CASE (use_package(io_form(DataHandle)))
- #ifdef INTIO
- CASE ( IO_INTIO )
- CALL ext_int_set_time ( handle(DataHandle), TRIM(CData), Status)
- #endif
- …