
/wrfv2_fire/frame/module_io_quilt.F

http://github.com/jbeezley/wrf-fire
FORTRAN Legacy | 5118 lines | 2434 code | 470 blank | 2214 comment | 109 complexity | f10468d5f1c7c19ff9c349ed06e1796e MD5
Possible License(s): AGPL-1.0

Note: large files are truncated; the listing below is a partial view of the file.

  1. !WRF:DRIVER_LAYER:IO
  2. !
  3. #define DEBUG_LVL 50
  4. !#define mpi_x_comm_size(i,j,k) Mpi_Comm_Size ( i,j,k ) ; write(0,*) __LINE__
  5. #define mpi_x_comm_size(i,j,k) Mpi_Comm_Size ( i,j,k )
  6. ! Workaround for bug in the IBM MPI implementation. Look near the
  7. ! bottom of this file for an explanation.
  8. #ifdef IBM_REDUCE_BUG_WORKAROUND
  9. #define mpi_x_reduce(sb,rb,c,dt,op,r,com,ierr) reduce_add_integer(sb,rb,c,r,com)
  10. #else
  11. #define mpi_x_reduce(sb,rb,c,dt,op,r,com,ierr) MPI_Reduce(sb,rb,c,dt,op,r,com,ierr)
  12. #endif
  13. MODULE module_wrf_quilt
  14. !<DESCRIPTION>
  15. !<PRE>
  16. ! This module contains WRF-specific I/O quilt routines called by both
  17. ! client (compute) and server (I/O quilt) tasks. I/O quilt servers are
  18. ! a run-time optimization that allows I/O operations, executed on the I/O
  19. ! quilt server tasks, to be overlapped with useful computation, executed on
  20. ! the compute tasks. Since I/O operations are often quite slow compared to
  21. ! computation, this performance optimization can increase parallel
  22. ! efficiency.
  23. !
  24. ! Currently, one group of I/O servers can be specified at run-time. Namelist
  25. ! variable "nio_tasks_per_group" is used to specify the number of I/O server
  26. ! tasks in this group. In most cases, parallel efficiency is optimized when
  27. ! the minimum number of I/O server tasks are used. If memory needed to cache
  28. ! I/O operations fits on a single processor, then set nio_tasks_per_group=1.
  29. ! If not, increase the number of I/O server tasks until I/O operations fit in
  30. ! memory. In the future, multiple groups of I/O server tasks will be
  31. ! supported. The number of groups will be specified by namelist variable
  32. ! "nio_groups". For now, nio_groups must be set to 1. Currently, I/O servers
  33. ! only support overlap of output operations with computation. Also, only I/O
  35. ! packages that do not support native parallel I/O may be used with I/O server
  35. ! tasks. This excludes PHDF5 and MCEL.
  36. !
  37. ! In this module, the I/O quilt server tasks call package-dependent
  38. ! WRF-specific I/O interfaces to perform I/O operations requested by the
  39. ! client (compute) tasks. All of these calls occur inside subroutine
  40. ! quilt().
  41. !
  42. ! The client (compute) tasks call package-independent WRF-specific "quilt I/O"
  43. ! interfaces that send requests to the I/O quilt servers. All of these calls
  44. ! are made from module_io.F.
  45. !
  46. ! These routines have the same names and (roughly) the same arguments as those
  47. ! specified in the WRF I/O API except that:
  48. ! - "Quilt I/O" routines defined in this file and called by routines in
  49. ! module_io.F have the "wrf_quilt_" prefix.
  50. ! - Package-dependent routines called from routines in this file are defined
  51. ! in the external I/O packages and have the "ext_" prefix.
  52. !
  53. ! Both client (compute) and server tasks call routine init_module_wrf_quilt()
  54. ! which then calls setup_quilt_servers() to determine which tasks are compute
  55. ! tasks and which are server tasks. Before the end of init_module_wrf_quilt()
  56. ! server tasks call routine quilt() and remain there for the rest of the model
  57. ! run. Compute tasks return from init_module_wrf_quilt() to perform model
  58. ! computations.
  59. !
  60. ! See http://www.mmm.ucar.edu/wrf/WG2/software_2.0/IOAPI.doc for the latest
  61. ! version of the WRF I/O API. This document includes detailed descriptions
  62. ! of subroutines and their arguments that are not duplicated here.
  63. !</PRE>
  64. !</DESCRIPTION>
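! Illustrative sketch (assuming the usual &namelist_quilt record in WRF's
! namelist.input) of how the settings described above are requested:
!
!   &namelist_quilt
!    nio_tasks_per_group = 2,
!    nio_groups          = 1,
!   /
!
! With these settings, each of the nio_groups groups reserves
! nio_tasks_per_group MPI tasks as I/O servers; the remaining tasks compute.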
  65. USE module_internal_header_util
  66. USE module_timing
  67. INTEGER, PARAMETER :: int_num_handles = 99
  68. INTEGER, PARAMETER :: max_servers = int_num_handles+1 ! why +1?
  69. LOGICAL, DIMENSION(int_num_handles) :: okay_to_write, int_handle_in_use, okay_to_commit
  70. INTEGER, DIMENSION(int_num_handles) :: int_num_bytes_to_write, io_form
  71. REAL, POINTER,SAVE :: int_local_output_buffer(:)
  72. INTEGER, SAVE :: int_local_output_cursor
  73. LOGICAL :: quilting_enabled
  74. LOGICAL :: disable_quilt = .FALSE.
  75. INTEGER :: prev_server_for_handle = -1
  76. INTEGER :: server_for_handle(int_num_handles)
  77. INTEGER :: reduced(2), reduced_dummy(2)
  78. LOGICAL, EXTERNAL :: wrf_dm_on_monitor
  79. INTEGER :: mpi_comm_avail,availrank
  80. LOGICAL :: in_avail=.false., poll_servers=.false.
  81. INTEGER nio_groups
  82. #ifdef DM_PARALLEL
  83. INTEGER :: mpi_comm_local
  84. LOGICAL :: compute_node
  85. LOGICAL :: compute_group_master(max_servers)
  86. INTEGER :: mpi_comm_io_groups(max_servers)
  87. INTEGER :: nio_tasks_in_group
  88. INTEGER :: nio_tasks_per_group
  89. INTEGER :: ncompute_tasks
  90. INTEGER :: ntasks
  91. INTEGER :: mytask
  92. INTEGER, PARAMETER :: onebyte = 1
  93. INTEGER comm_io_servers, iserver, hdrbufsize, obufsize
  94. INTEGER, DIMENSION(4096) :: hdrbuf
  95. INTEGER, DIMENSION(int_num_handles) :: handle
  96. #endif
  97. #ifdef IBM_REDUCE_BUG_WORKAROUND
  98. ! Workaround for bug in the IBM MPI implementation. Look near the
  99. ! bottom of this file for an explanation.
  100. interface reduce_add_integer
  101. module procedure reduce_add_int_arr
  102. module procedure reduce_add_int_scl
  103. end interface
  104. #endif
  105. CONTAINS
  106. #if defined(DM_PARALLEL) && !defined( STUBMPI )
  107. INTEGER FUNCTION get_server_id ( dhandle )
  108. !<DESCRIPTION>
  109. ! Client-side logic to determine which I/O server
  110. ! group to send to. If the unit corresponds to a file that's
  111. ! already been opened, then we have no choice but to send the
  112. ! data to that group again, regardless of whether there are
  113. ! other server groups. If it's a new file, we can choose a new
  114. ! server group. I.e. opening a file locks it onto a server
  115. ! group. Closing the file unlocks it.
  116. !</DESCRIPTION>
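! For example (illustrative), with poll_servers disabled and nio_groups = 2,
! successive new file handles are assigned to server groups 1, 2, 1, 2, ...
! via the mod() cycling of prev_server_for_handle below.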
  117. IMPLICIT NONE
  118. INTEGER, INTENT(IN) :: dhandle
  119. IF ( dhandle .GE. 1 .AND. dhandle .LE. int_num_handles ) THEN
  120. IF ( server_for_handle ( dhandle ) .GE. 1 ) THEN
  121. get_server_id = server_for_handle ( dhandle )
  122. ELSE
  123. IF(poll_servers) THEN
  124. ! Poll server group masters to find an inactive I/O server group:
  125. call wrf_quilt_find_server(server_for_handle(dhandle))
  126. ELSE
  127. ! Server polling is disabled, so cycle through servers:
  128. prev_server_for_handle = mod ( prev_server_for_handle + 1 , nio_groups )
  129. server_for_handle( dhandle ) = prev_server_for_handle+1
  130. ENDIF
  131. get_server_id=server_for_handle(dhandle)
  132. ENDIF
  133. ELSE
  134. CALL wrf_message('module_io_quilt: get_server_id bad dhandle' )
  135. ENDIF
  136. END FUNCTION get_server_id
  137. #endif
  138. SUBROUTINE set_server_id ( dhandle, value )
  139. IMPLICIT NONE
  140. INTEGER, INTENT(IN) :: dhandle, value
  141. IF ( dhandle .GE. 1 .AND. dhandle .LE. int_num_handles ) THEN
  142. server_for_handle(dhandle) = value
  143. ELSE
  144. CALL wrf_message('module_io_quilt: set_server_id bad dhandle' )
  145. ENDIF
  146. END SUBROUTINE set_server_id
  147. LOGICAL FUNCTION get_poll_servers()
  148. implicit none
  149. get_poll_servers=poll_servers
  150. end FUNCTION get_poll_servers
  151. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  152. SUBROUTINE int_get_fresh_handle( retval )
  153. !<DESCRIPTION>
  154. ! Find an unused "client file handle" and return it in retval.
  155. ! The "client file handle" is used to remember how a file was opened
  156. ! so clients do not need to ask the I/O quilt servers for this information.
  157. ! It is also used as a file identifier in communications with the I/O
  158. ! server task.
  159. !
  160. ! Note that client tasks know nothing about package-specific handles.
  161. ! Only the I/O quilt servers know about them.
  162. !</DESCRIPTION>
  163. INTEGER i, retval
  164. retval = -1
  165. DO i = 1, int_num_handles
  166. IF ( .NOT. int_handle_in_use(i) ) THEN
  167. retval = i
  168. GOTO 33
  169. ENDIF
  170. ENDDO
  171. 33 CONTINUE
  172. IF ( retval < 0 ) THEN
  173. CALL wrf_error_fatal("frame/module_io_quilt.F: int_get_fresh_handle() can not")
  174. ENDIF
  175. int_handle_in_use(i) = .TRUE.
  176. NULLIFY ( int_local_output_buffer )
  177. END SUBROUTINE int_get_fresh_handle
  178. SUBROUTINE setup_quilt_servers ( nio_tasks_per_group, &
  179. mytask, &
  180. ntasks, &
  181. n_groups_arg, &
  182. nio, &
  183. mpi_comm_wrld, &
  184. mpi_comm_local, &
  185. mpi_comm_io_groups)
  186. !<DESCRIPTION>
  187. ! Both client (compute) and server tasks call this routine to
  188. ! determine which tasks are compute tasks and which are I/O server tasks.
  189. !
  190. ! Module variables MPI_COMM_LOCAL and MPI_COMM_IO_GROUPS(:) are set up to
  191. ! contain MPI communicators as follows:
  192. !
  193. ! MPI_COMM_LOCAL is the Communicator for the local groups of tasks. For the
  194. ! compute tasks it is the group of compute tasks; for a server group it is the
  195. ! communicator of tasks in the server group.
  196. !
  197. ! Elements of MPI_COMM_IO_GROUPS are communicators that each contain one or
  198. ! more compute tasks and a single I/O server assigned to those compute tasks.
  199. ! The I/O server task is always the last task in these communicators.
  200. ! On a compute task, which has a single associate in each of the server
  201. ! groups, MPI_COMM_IO_GROUPS is treated as an array; each element corresponds
  202. ! to a different server group.
  203. ! On a server task only the first element of MPI_COMM_IO_GROUPS is used
  204. ! because each server task is part of only one io_group.
  205. !
  206. ! I/O server tasks in each I/O server group are divided among compute tasks as
  207. ! evenly as possible.
  208. !
  209. ! When multiple I/O server groups are used, each must have the same number of
  210. ! tasks. When the total number of extra I/O tasks does not divide evenly by
  211. ! the number of io server groups requested, the remainder tasks are not used
  212. ! (wasted).
  213. !
  214. ! For example, communicator membership for 18 tasks with nio_groups=2 and
  215. ! nio_tasks_per_group=3 is shown below:
  216. !
  217. !<PRE>
  218. ! Membership for MPI_COMM_LOCAL communicators:
  219. ! COMPUTE TASKS: 0 1 2 3 4 5 6 7 8 9 10 11
  220. ! 1ST I/O SERVER GROUP: 12 13 14
  221. ! 2ND I/O SERVER GROUP: 15 16 17
  222. !
  223. ! Membership for MPI_COMM_IO_GROUPS(1):
  224. ! COMPUTE TASKS 0, 3, 6, 9: 0 3 6 9 12
  225. ! COMPUTE TASKS 1, 4, 7,10: 1 4 7 10 13
  226. ! COMPUTE TASKS 2, 5, 8,11: 2 5 8 11 14
  227. ! I/O SERVER TASK 12: 0 3 6 9 12
  228. ! I/O SERVER TASK 13: 1 4 7 10 13
  229. ! I/O SERVER TASK 14: 2 5 8 11 14
  230. ! I/O SERVER TASK 15: 0 3 6 9 15
  231. ! I/O SERVER TASK 16: 1 4 7 10 16
  232. ! I/O SERVER TASK 17: 2 5 8 11 17
  233. !
  234. ! Membership for MPI_COMM_IO_GROUPS(2):
  235. ! COMPUTE TASKS 0, 3, 6, 9: 0 3 6 9 15
  236. ! COMPUTE TASKS 1, 4, 7,10: 1 4 7 10 16
  237. ! COMPUTE TASKS 2, 5, 8,11: 2 5 8 11 17
  238. ! I/O SERVER TASK 12: ** not used **
  239. ! I/O SERVER TASK 13: ** not used **
  240. ! I/O SERVER TASK 14: ** not used **
  241. ! I/O SERVER TASK 15: ** not used **
  242. ! I/O SERVER TASK 16: ** not used **
  243. ! I/O SERVER TASK 17: ** not used **
  244. !</PRE>
  245. !</DESCRIPTION>
  246. USE module_configure
  247. #ifdef DM_PARALLEL
  248. USE module_dm, ONLY : compute_mesh
  249. #endif
  250. IMPLICIT NONE
  251. INCLUDE 'mpif.h'
  252. INTEGER, INTENT(IN) :: nio_tasks_per_group, mytask, ntasks, &
  253. n_groups_arg, mpi_comm_wrld
  254. INTEGER, INTENT(OUT) :: mpi_comm_local, nio
  255. INTEGER, DIMENSION(100), INTENT(OUT) :: mpi_comm_io_groups
  256. ! Local
  257. INTEGER :: i, j, ii, comdup, ierr, niotasks, n_groups, iisize
  258. INTEGER, DIMENSION(ntasks) :: icolor
  259. CHARACTER*128 mess
  260. INTEGER :: io_form_setting
  261. INTEGER :: me
  262. INTEGER :: k, m, nprocx, nprocy
  263. LOGICAL :: reorder_mesh
  264. !check the namelist and make sure there are no output forms specified
  265. !that cannot be quilted
  266. CALL nl_get_io_form_history(1, io_form_setting) ; call sokay( 'history', io_form_setting )
  267. CALL nl_get_io_form_restart(1, io_form_setting) ; call sokay( 'restart', io_form_setting )
  268. CALL nl_get_io_form_auxhist1(1, io_form_setting) ; call sokay( 'auxhist1', io_form_setting )
  269. CALL nl_get_io_form_auxhist2(1, io_form_setting) ; call sokay( 'auxhist2', io_form_setting )
  270. CALL nl_get_io_form_auxhist3(1, io_form_setting) ; call sokay( 'auxhist3', io_form_setting )
  271. CALL nl_get_io_form_auxhist4(1, io_form_setting) ; call sokay( 'auxhist4', io_form_setting )
  272. CALL nl_get_io_form_auxhist5(1, io_form_setting) ; call sokay( 'auxhist5', io_form_setting )
  273. CALL nl_get_io_form_auxhist6(1, io_form_setting) ; call sokay( 'auxhist6', io_form_setting )
  274. CALL nl_get_io_form_auxhist7(1, io_form_setting) ; call sokay( 'auxhist7', io_form_setting )
  275. CALL nl_get_io_form_auxhist8(1, io_form_setting) ; call sokay( 'auxhist8', io_form_setting )
  276. CALL nl_get_io_form_auxhist9(1, io_form_setting) ; call sokay( 'auxhist9', io_form_setting )
  277. CALL nl_get_io_form_auxhist10(1, io_form_setting) ; call sokay( 'auxhist10', io_form_setting )
  278. CALL nl_get_io_form_auxhist11(1, io_form_setting) ; call sokay( 'auxhist11', io_form_setting )
  279. n_groups = n_groups_arg
  280. IF ( n_groups .LT. 1 ) n_groups = 1
  281. compute_node = .TRUE.
  282. !<DESCRIPTION>
  283. ! nio is the number of I/O tasks per group. If there aren't enough tasks to satisfy
  284. ! the requirement that there be at least as many compute tasks as io tasks in
  285. ! each group, then just print a warning and dump out of quilting
  286. !</DESCRIPTION>
  287. nio = nio_tasks_per_group
  288. ncompute_tasks = ntasks - (nio * n_groups)
  289. IF ( ncompute_tasks .LT. nio ) THEN
  290. WRITE(mess,'("Not enough tasks to have ",I3," groups of ",I3," I/O tasks. No quilting.")')n_groups,nio
  291. nio = 0
  292. ncompute_tasks = ntasks
  293. ELSE
  294. WRITE(mess,'("Quilting with ",I3," groups of ",I3," I/O tasks.")')n_groups,nio
  295. ENDIF
  296. CALL wrf_message(mess)
  297. IF ( nio .LT. 0 ) THEN
  298. nio = 0
  299. ENDIF
  300. IF ( nio .EQ. 0 ) THEN
  301. quilting_enabled = .FALSE.
  302. mpi_comm_local = mpi_comm_wrld
  303. mpi_comm_io_groups = mpi_comm_wrld
  304. RETURN
  305. ENDIF
  306. quilting_enabled = .TRUE.
  307. ! First construct the local communicators
  308. ! prepare to split the communicator by designating compute-only tasks
  309. DO i = 1, ncompute_tasks
  310. icolor(i) = 0
  311. ENDDO
  312. ii = 1
  313. ! and designating the groups of i/o tasks
  314. DO i = ncompute_tasks+1, ntasks, nio
  315. DO j = i, i+nio-1
  316. icolor(j) = ii
  317. ENDDO
  318. ii = ii+1
  319. ENDDO
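! For the 18-task example in the description above (12 compute tasks, nio=3,
! n_groups=2) this yields icolor(1:12)=0, icolor(13:15)=1, icolor(16:18)=2,
! so the split below reproduces the three MPI_COMM_LOCAL memberships shown there.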
  320. CALL MPI_Comm_dup(mpi_comm_wrld,comdup,ierr)
  321. CALL MPI_Comm_split(comdup,icolor(mytask+1),mytask,mpi_comm_local,ierr)
  322. ! Now construct the communicators for the io_groups
  323. CALL nl_get_reorder_mesh(1,reorder_mesh)
  324. IF ( reorder_mesh ) THEN
  325. reorder_mesh = .FALSE.
  326. CALL nl_set_reorder_mesh(1,reorder_mesh)
  327. CALL wrf_message('Warning: reorder_mesh does not work with quilting. Disabled reorder_mesh.')
  328. ENDIF
  329. ! assign the compute tasks to the i/o tasks in full rows
  330. CALL compute_mesh( ncompute_tasks, nprocx, nprocy )
  331. nio = min(nio,nprocy)
  332. m = mod(nprocy,nio) ! divide up remainder, 1 row per, until gone
  333. ii = 1
  334. DO j = 1, nio, 1
  335. DO k = 1,nprocy/nio+min(m,1)
  336. DO i = 1, nprocx
  337. icolor(ii) = j - 1
  338. ii = ii + 1
  339. ENDDO
  340. ENDDO
  341. m = max(m-1,0)
  342. ENDDO
  343. ! ... and add the io servers as the last task in each group
  344. DO j = 1, n_groups
  345. ! TBH: each I/O group will contain only one I/O server
  346. DO i = ncompute_tasks+1,ntasks
  347. icolor(i) = MPI_UNDEFINED
  348. ENDDO
  349. ii = 0
  350. DO i = ncompute_tasks+(j-1)*nio+1,ncompute_tasks+j*nio
  351. icolor(i) = ii
  352. ii = ii+1
  353. ENDDO
  354. CALL MPI_Comm_dup(mpi_comm_wrld,comdup,ierr)
  355. CALL MPI_Comm_split(comdup,icolor(mytask+1),mytask, &
  356. mpi_comm_io_groups(j),ierr)
  357. ENDDO
  358. #ifdef PNETCDF_QUILT
  359. if(poll_servers) then
  360. poll_servers=.false.
  361. call wrf_message('Warning: server polling does not work with pnetcdf_quilt. Disabled poll_servers.')
  362. else
  363. #endif
  364. if(nio_groups==1) then
  365. poll_servers=.false.
  366. call wrf_message('Server polling is useless with one io group. Disabled poll_servers.')
  367. endif
  368. #ifdef PNETCDF_QUILT
  369. endif
  370. #endif
  371. if(poll_servers) then
  372. ! If server polling is enabled, we need to create mpi_comm_avail,
  373. ! which contains the monitor process, and the I/O server master process
  374. ! for each I/O server group. This will be used in the routines
  375. ! wrf_quilt_find_server and wrf_quilt_server_ready to find inactive
  376. ! I/O servers for new data handles in get_server_id.
  377. ! The "in_avail" is set to true iff I am in the mpi_comm_avail.
  378. call mpi_comm_rank(mpi_comm_wrld,me,ierr)
  379. icolor=MPI_UNDEFINED
  380. in_avail=.false.
  381. if(wrf_dm_on_monitor()) then
  382. in_avail=.true. ! monitor process is in mpi_comm_avail
  383. endif
  384. icolor(1)=1
  385. do j=1,n_groups
  386. i=ncompute_tasks+j*nio-1
  387. if(me+1==i) then
  388. in_avail=.true. ! I/O server masters are in mpi_comm_avail
  389. endif
  390. icolor(i)=1
  391. enddo
  392. CALL MPI_Comm_dup(mpi_comm_wrld,comdup,ierr)
  393. CALL MPI_Comm_split(comdup,icolor(me+1),me, &
  394. mpi_comm_avail,ierr)
  395. availrank=MPI_UNDEFINED
  396. if(in_avail) then
  397. call mpi_comm_rank(mpi_comm_avail,availrank,ierr)
  398. endif
  399. endif
  400. compute_group_master = .FALSE.
  401. compute_node = .FALSE.
  402. DO j = 1, n_groups
  403. IF ( mytask .LT. ncompute_tasks .OR. & ! I am a compute task
  404. (ncompute_tasks+(j-1)*nio .LE. mytask .AND. mytask .LT. ncompute_tasks+j*nio) & ! I am the I/O server for this group
  405. ) THEN
  406. CALL MPI_Comm_Size( mpi_comm_io_groups(j) , iisize, ierr )
  407. ! Get the rank of this compute task in the compute+io
  408. ! communicator to which it belongs
  409. CALL MPI_Comm_Rank( mpi_comm_io_groups(j) , me , ierr )
  410. ! If I am an I/O server for this group then make that group's
  411. ! communicator the first element in the mpi_comm_io_groups array
  412. ! (I will ignore all of the other elements).
  413. IF (ncompute_tasks+(j-1)*nio .LE. mytask .AND. mytask .LT. ncompute_tasks+j*nio) THEN
  414. mpi_comm_io_groups(1) = mpi_comm_io_groups(j)
  415. ELSE
  416. compute_node = .TRUE.
  417. ! If I am a compute task, check whether I am the member of my
  418. ! group that will communicate things that should be sent just
  419. ! once (e.g. commands) to the IO server of my group.
  420. compute_group_master(j) = (me .EQ. 0)
  421. ! IF( compute_group_master(j) ) WRITE(*,*) mytask,': ARPDBG : I will talk to IO server in group ',j
  422. ENDIF
  423. ENDIF
  424. ENDDO
  425. END SUBROUTINE setup_quilt_servers
  426. SUBROUTINE sokay ( stream, io_form )
  427. USE module_state_description
  428. CHARACTER*(*) stream
  429. CHARACTER*256 mess
  430. INTEGER io_form
  431. SELECT CASE (io_form)
  432. #ifdef NETCDF
  433. CASE ( IO_NETCDF )
  434. RETURN
  435. #endif
  436. #ifdef INTIO
  437. CASE ( IO_INTIO )
  438. RETURN
  439. #endif
  440. #ifdef YYY
  441. CASE ( IO_YYY )
  442. RETURN
  443. #endif
  444. #ifdef GRIB1
  445. CASE ( IO_GRIB1 )
  446. RETURN
  447. #endif
  448. #ifdef GRIB2
  449. CASE ( IO_GRIB2 )
  450. RETURN
  451. #endif
  452. CASE (0)
  453. RETURN
  454. CASE DEFAULT
  455. WRITE(mess,*)' An output format has been specified that is incompatible with quilting: io_form: ',io_form,' ',TRIM(stream)
  456. CALL wrf_error_fatal(mess)
  457. END SELECT
  458. END SUBROUTINE sokay
  459. SUBROUTINE quilt
  460. !<DESCRIPTION>
  461. ! I/O server tasks call this routine and remain in it for the rest of the
  462. ! model run. I/O servers receive I/O requests from compute tasks and
  463. ! perform requested I/O operations by calling package-dependent WRF-specific
  464. ! I/O interfaces. Requests are sent in the form of "data headers". Each
  465. ! request has a unique "header" message associated with it. For requests that
  466. ! contain large amounts of data, the data is appended to the header. See
  467. ! file module_internal_header_util.F for detailed descriptions of all
  468. ! headers.
  469. !
  470. ! We wish to be able to link to different packages depending on whether
  471. ! the I/O is restart, initial, history, or boundary.
  472. !</DESCRIPTION>
  473. USE module_state_description
  474. USE module_quilt_outbuf_ops
  475. IMPLICIT NONE
  476. INCLUDE 'mpif.h'
  477. #include "intio_tags.h"
  478. #include "wrf_io_flags.h"
  479. INTEGER itag, ninbuf, ntasks_io_group, ntasks_local_group, mytask_local, ierr
  480. INTEGER istat
  481. INTEGER mytask_io_group
  482. INTEGER :: nout_set = 0
  483. INTEGER :: obufsize, bigbufsize, chunksize, sz
  484. REAL, DIMENSION(1) :: dummy
  485. INTEGER, ALLOCATABLE, DIMENSION(:) :: obuf, bigbuf
  486. REAL, ALLOCATABLE, DIMENSION(:) :: RDATA
  487. INTEGER, ALLOCATABLE, DIMENSION(:) :: IDATA
  488. CHARACTER (LEN=512) :: CDATA
  489. CHARACTER (LEN=80) :: fname
  490. INTEGER icurs, hdrbufsize, itypesize, ftypesize, rtypesize, Status, fstat, io_form_arg
  491. INTEGER :: DataHandle, FieldType, Comm, IOComm, DomainDesc, code, Count
  492. INTEGER, DIMENSION(3) :: DomainStart , DomainEnd , MemoryStart , MemoryEnd , PatchStart , PatchEnd
  493. INTEGER :: dummybuf(1)
  494. INTEGER :: num_noops, num_commit_messages, num_field_training_msgs, hdr_tag
  495. CHARACTER (len=256) :: DateStr , Element, VarName, MemoryOrder , Stagger , DimNames(3), FileName, SysDepInfo, mess
  496. INTEGER, EXTERNAL :: use_package
  497. LOGICAL :: stored_write_record, retval
  498. INTEGER iii, jjj, vid, CC, DD
  499. LOGICAL :: call_server_ready
  500. logical okay_to_w
  501. character*120 sysline
  502. ! If we've been built with PNETCDF_QUILT defined then we use parallel I/O
  503. ! within the group of I/O servers rather than gathering the data onto the
  504. ! root I/O server. Unfortunately, this approach means that we can no longer
  505. ! select different I/O layers for use with quilting at run time. ARPDBG.
  506. ! This code is sufficiently different that it is kept in the separate
  507. ! quilt_pnc() routine.
  508. #ifdef PNETCDF_QUILT
  509. CALL quilt_pnc()
  510. RETURN
  511. #endif
  512. ! Call ext_pkg_ioinit() routines to initialize I/O packages.
  513. SysDepInfo = " "
  514. #ifdef NETCDF
  515. CALL ext_ncd_ioinit( SysDepInfo, ierr)
  516. #endif
  517. #ifdef INTIO
  518. CALL ext_int_ioinit( SysDepInfo, ierr )
  519. #endif
  520. #ifdef XXX
  521. CALL ext_xxx_ioinit( SysDepInfo, ierr)
  522. #endif
  523. #ifdef YYY
  524. CALL ext_yyy_ioinit( SysDepInfo, ierr)
  525. #endif
  526. #ifdef ZZZ
  527. CALL ext_zzz_ioinit( SysDepInfo, ierr)
  528. #endif
  529. #ifdef GRIB1
  530. CALL ext_gr1_ioinit( SysDepInfo, ierr)
  531. #endif
  532. #ifdef GRIB2
  533. CALL ext_gr2_ioinit( SysDepInfo, ierr)
  534. #endif
  535. call_server_ready = .true. ! = true when the server is ready for a new file
  536. okay_to_commit = .false.
  537. stored_write_record = .false.
  538. ninbuf = 0
  539. ! get info. about the I/O server group that this I/O server task
  540. ! belongs to
  541. ! Last task in this I/O server group is the I/O server "root"
  542. ! The I/O server "root" actually writes data to disk
  543. ! TBH: WARNING: This is also implicit in the call to collect_on_comm().
  544. CALL mpi_x_comm_size( mpi_comm_io_groups(1), ntasks_io_group, ierr )
  545. CALL MPI_COMM_RANK( mpi_comm_io_groups(1), mytask_io_group, ierr )
  546. CALL mpi_x_comm_size( mpi_comm_local, ntasks_local_group, ierr )
  547. CALL MPI_COMM_RANK( mpi_comm_local, mytask_local, ierr )
  548. CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
  549. IF ( itypesize <= 0 ) THEN
  550. CALL wrf_error_fatal("external/RSL/module_dm.F: quilt: type size <= 0 invalid")
  551. ENDIF
  552. ! Work out whether this I/O server processor has one fewer associated compute proc than
  553. ! the maximum any processor has. This can happen when the number of I/O tasks does not
  554. ! evenly divide the number of compute tasks. It is needed to keep the I/O tasks synchronized
  555. ! on the same message when they start communicating to stitch together an output.
  556. !
  557. ! Compute processes associated with this task:
  558. CC = ntasks_io_group - 1
  559. ! Number of compute tasks per I/O task (less remainder)
  560. DD = ncompute_tasks / ntasks_local_group
  561. !
  562. ! CC-DD is 1 on servers with the maximum number of compute clients,
  563. ! and 0 on servers with one fewer than the maximum.
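! Illustrative arithmetic only: if ncompute_tasks=13 and this group has 3
! servers, DD = 13/3 = 4; a server with 5 compute clients has CC=5 (CC-DD=1),
! while one with 4 clients has CC=4 (CC-DD=0).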
  564. ! infinite loop until shutdown message received
  565. ! This is the main request-handling loop. I/O quilt servers stay in this loop
  566. ! until the model run ends.
  567. okay_to_w = .false.
  568. DO WHILE (.TRUE.) ! {
  569. !<DESCRIPTION>
  570. ! Each I/O server receives requests from its compute tasks. Each request
  571. ! is contained in a data header (see module_internal_header_util.F for
  572. ! detailed descriptions of data headers).
  573. ! Each request is sent in two phases. First, sizes of all messages that
  574. ! will be sent from the compute tasks to this I/O server are summed on the
  575. ! I/O server via MPI_reduce(). The I/O server then allocates buffer "obuf"
  576. ! and receives concatenated messages from the compute tasks in it via the
  577. ! call to collect_on_comm(). Note that "sizes" are generally expressed in
  578. ! *bytes* in this code so conversion to "count" (number of Fortran words) is
  579. ! required for Fortran indexing and MPI calls.
  580. !</DESCRIPTION>
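! Schematically (an illustrative summary of the code below):
!   compute tasks --mpi_x_reduce(per-task byte counts)--> obufsize on this server
!   compute tasks --collect_on_comm(concatenated headers+data)--> obuf on this server
! An obufsize of -100 signals server shutdown; an obufsize of zero signals an
! ioclose, which is handled specially below.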
  581. if(poll_servers .and. call_server_ready) then
  582. call_server_ready=.false.
  583. ! Send a message to the monitor telling it we're ready
  584. ! for a new data handle.
  585. call wrf_quilt_server_ready()
  586. endif
  587. ! wait for info from compute tasks in the I/O group that we're ready to rock
  588. ! obufsize will contain number of *bytes*
  589. !CALL start_timing()
  590. ! first element of reduced is obufsize, second is DataHandle
  591. ! if needed (currently needed only for ioclose).
  592. reduced_dummy = 0
  593. CALL mpi_x_reduce( reduced_dummy, reduced, 2, MPI_INTEGER, MPI_SUM, mytask_io_group, mpi_comm_io_groups(1), ierr )
  594. obufsize = reduced(1)
  595. !CALL end_timing("MPI_Reduce at top of forever loop")
  596. !JMDEBUGwrite(0,*)'obufsize = ',obufsize
  597. ! Negative obufsize will trigger I/O server exit.
  598. IF ( obufsize .LT. 0 ) THEN
  599. IF ( obufsize .EQ. -100 ) THEN ! magic number
  600. #ifdef NETCDF
  601. CALL ext_ncd_ioexit( Status )
  602. #endif
  603. #ifdef INTIO
  604. CALL ext_int_ioexit( Status )
  605. #endif
  606. #ifdef XXX
  607. CALL ext_xxx_ioexit( Status )
  608. #endif
  609. #ifdef YYY
  610. CALL ext_yyy_ioexit( Status )
  611. #endif
  612. #ifdef ZZZ
  613. CALL ext_zzz_ioexit( Status )
  614. #endif
  615. #ifdef GRIB1
  616. CALL ext_gr1_ioexit( Status )
  617. #endif
  618. #ifdef GRIB2
  619. CALL ext_gr2_ioexit( Status )
  620. #endif
  621. CALL wrf_message ( 'I/O QUILT SERVERS DONE' )
  622. CALL mpi_finalize(ierr)
  623. STOP
  624. ELSE
  625. WRITE(mess,*)'Possible 32-bit overflow on output server. Try larger nio_tasks_per_group in namelist.'
  626. CALL wrf_error_fatal(mess)
  627. ENDIF
  628. ENDIF
  629. ! CALL start_timing()
  630. ! Obufsize of zero signals a close
  631. ! Allocate buffer obuf to be big enough for the data the compute tasks
  632. ! will send. Note: obuf is size in *bytes* so we need to pare this
  633. ! down, since the buffer is INTEGER.
  634. IF ( obufsize .GT. 0 ) THEN
  635. ALLOCATE( obuf( (obufsize+1)/itypesize ) )
  636. ! let's roll; get the data from the compute procs and put in obuf
  637. CALL collect_on_comm_debug(__FILE__,__LINE__, mpi_comm_io_groups(1), &
  638. onebyte, &
  639. dummy, 0, &
  640. obuf, obufsize )
  641. ! CALL end_timing( "quilt on server: collecting data from compute procs" )
  642. ELSE
  643. ! The compute processes necessarily send the ioclose signal,
  644. ! if there is one, after the iosync, which means they
  645. ! would stall on the ioclose message waiting for the quilt
  646. ! processes if it were handled the way other messages are collected,
  647. ! i.e. using collect_on_comm. That is avoided here, but we need
  648. ! a special signal (obufsize zero) and the DataHandle
  649. ! to be closed. That handle is sent as the second
  650. ! word of the io_close message received by the MPI_Reduce above.
  651. ! Then a header representing the ioclose message is constructed
  652. ! here and handled below as if it were received from the
  653. ! compute processes. The clients (compute processes) must be
  654. ! careful to send this correctly (one compute process sends the actual
  655. ! handle and everyone else sends a zero, so the result sums to
  656. ! the value of the handle).
  657. !
  658. ALLOCATE( obuf( 4096 ) )
  659. ! DataHandle is provided as second element of reduced
  660. CALL int_gen_handle_header( obuf, obufsize, itypesize, &
  661. reduced(2) , int_ioclose )
  662. if(poll_servers) then
  663. ! Once we're done closing, we need to tell the master
  664. ! process that we're ready for more data.
  665. call_server_ready=.true.
  666. endif
  667. ENDIF
  668. !write(0,*)'calling init_store_piece_of_field'
  669. ! Now all messages received from the compute clients are stored in
  670. ! obuf. Scan through obuf and extract headers and field data and store in
  671. ! internal buffers. The scan is done twice, first to determine sizes of
  672. ! internal buffers required for storage of headers and fields and second to
  673. ! actually store the headers and fields. This bit of code does not do the
  674. ! "quilting" (assembly of patches into full domains). For each field, it
  675. ! simply concatenates all received patches for the field into a separate
  676. ! internal buffer (i.e. one buffer per field). Quilting is done later by
  677. ! routine store_patch_in_outbuf().
  678. CALL init_store_piece_of_field
  679. CALL mpi_type_size ( MPI_INTEGER , itypesize , ierr )
  680. !write(0,*)'mpi_type_size returns ', itypesize
  681. ! Scan obuf the first time to calculate the size of the buffer required for
  682. ! each field. Calls to add_to_bufsize_for_field() accumulate sizes.
  683. vid = 0
  684. icurs = itypesize
  685. num_noops = 0
  686. num_commit_messages = 0
  687. num_field_training_msgs = 0
  688. DO WHILE ( icurs .lt. obufsize ) ! {
  689. hdr_tag = get_hdr_tag( obuf ( icurs / itypesize ) )
  690. SELECT CASE ( hdr_tag )
  691. CASE ( int_field )
  692. CALL int_get_write_field_header ( obuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize, &
  693. DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
  694. DomainDesc , MemoryOrder , Stagger , DimNames , &
  695. DomainStart , DomainEnd , &
  696. MemoryStart , MemoryEnd , &
  697. PatchStart , PatchEnd )
  698. chunksize = (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
  699. (PatchEnd(3)-PatchStart(3)+1)*ftypesize
  700. IF ( DomainDesc .EQ. 333933 ) THEN ! Training write, only one per group of tasks
  701. IF ( num_field_training_msgs .EQ. 0 ) THEN
  702. call add_to_bufsize_for_field( VarName, hdrbufsize )
  703. !write(0,*) 'X-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
  704. ENDIF
  705. num_field_training_msgs = num_field_training_msgs + 1
  706. ELSE
  707. call add_to_bufsize_for_field( VarName, hdrbufsize )
  708. !write(0,*) 'X-2a', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
  709. ENDIF
  710. icurs = icurs + hdrbufsize
  711. !write(0,*) 'X-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
  712. ! If this is a real write (i.e. not a training write), accumulate
  713. ! buffersize for this field.
  714. IF ( DomainDesc .NE. 333933 ) THEN ! magic number
  715. !write(0,*) 'X-1a', chunksize, TRIM(VarName)
  716. call add_to_bufsize_for_field( VarName, chunksize )
  717. icurs = icurs + chunksize
  718. ENDIF
  719. CASE ( int_open_for_write_commit ) ! only one per group of tasks
  720. hdrbufsize = obuf(icurs/itypesize)
  721. IF (num_commit_messages.EQ.0) THEN
  722. call add_to_bufsize_for_field( 'COMMIT', hdrbufsize )
  723. ENDIF
  724. num_commit_messages = num_commit_messages + 1
  725. icurs = icurs + hdrbufsize
  726. CASE DEFAULT
  727. hdrbufsize = obuf(icurs/itypesize)
  728. ! This logic, and the logic in the loop below, is used to determine whether
  729. ! noop records sent by the compute processes should be allowed to go
  730. ! through. The purpose is to make sure that the communications between this
  731. ! server and the other servers in this quilt group stay synchronized in
  732. ! the collection loop below, even when the servers are serving different
  733. ! numbers of clients. Here are some conditions:
  734. !
  735. ! 1. The number of compute clients served will not differ by more than 1
  736. ! 2. The servers with +1 number of compute clients begin with task 0
  737. ! of mpi_comm_local, the communicator shared by this group of servers
  738. !
  739. ! 3. For each collective field or metadata output from the compute tasks,
  740. ! there will be one record sent to the associated i/o server task. The
  741. ! i/o server task collects these records and stores them contiguously
  742. ! in a buffer (obuf) using collect_on_comm above. Thus, obuf on this
  743. ! server task will contain one record from each associated compute
  744. ! task, in order.
  745. !
  746. ! 4. In the case of replicated output from the compute tasks
  747. ! (e.g. put_dom_ti records and control records like
  748. ! open_for_write_commit type records), compute task 0 is the only
  749. ! one that sends the record. The other compute tasks send noop
  750. ! records. Thus, obuf on server task zero will contain the output
  751. ! record from task 0 followed by noop records from the rest of the
  752. ! compute tasks associated with task 0. Obuf on the other server
  753. ! tasks will contain nothing but noop records.
  754. !
  755. ! 5. The logic below will not allow any noop records from server task 0.
  756. ! It allows only one noop record from each of the other server tasks
  757. ! in the i/o group. This way, for replicated output, when the records
  758. ! are collected on one server task below, using collect_on_comm on
  759. ! mpi_comm_local, each task will provide exactly one record for each
  760. ! call to collect_on_comm: 1 bona fide output record from server task
  761. ! 0 and noops from the rest.
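! e.g. with 3 servers and 4 compute clients each, a replicated record arrives
! at server task 0 as [record, noop, noop, noop] and at server tasks 1 and 2 as
! [noop, noop, noop, noop]; the test below keeps only the record on server 0 and
! exactly one noop on each of the others, so every server later contributes one
! record per collect_on_comm on mpi_comm_local.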
  762. IF ((hdr_tag.EQ.int_noop.AND.mytask_local.NE.0.AND.num_noops.LE.0) &
  763. .OR.hdr_tag.NE.int_noop) THEN
  764. write(VarName,'(I5.5)')vid
  765. !write(0,*) 'X-2', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
  766. call add_to_bufsize_for_field( VarName, hdrbufsize )
  767. vid = vid+1
  768. ENDIF
  769. IF ( hdr_tag .EQ. int_noop ) num_noops = num_noops + 1
  770. icurs = icurs + hdrbufsize
  771. END SELECT
  772. ENDDO ! }
  773. ! Store the headers and field data in internal buffers. The first call to
  774. ! store_piece_of_field() allocates internal buffers using sizes computed by
  775. ! calls to add_to_bufsize_for_field().
  776. vid = 0
  777. icurs = itypesize
  778. num_noops = 0
  779. num_commit_messages = 0
  780. num_field_training_msgs = 0
  781. DO WHILE ( icurs .lt. obufsize ) !{
  782. !write(0,*) 'A icurs ', icurs, ' obufsize ', obufsize
  783. hdr_tag = get_hdr_tag( obuf ( icurs / itypesize ) )
  784. SELECT CASE ( hdr_tag )
  785. CASE ( int_field )
  786. CALL int_get_write_field_header ( obuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize, &
  787. DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
  788. DomainDesc , MemoryOrder , Stagger , DimNames , &
  789. DomainStart , DomainEnd , &
  790. MemoryStart , MemoryEnd , &
  791. PatchStart , PatchEnd )
  792. chunksize = (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
  793. (PatchEnd(3)-PatchStart(3)+1)*ftypesize
  794. IF ( DomainDesc .EQ. 333933 ) THEN ! Training write, only one per group of tasks
  795. IF ( num_field_training_msgs .EQ. 0 ) THEN
  796. call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
  797. !write(0,*) 'A-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
  798. ENDIF
  799. num_field_training_msgs = num_field_training_msgs + 1
  800. ELSE
  801. call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
  802. !write(0,*) 'A-2a', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
  803. ENDIF
  804. icurs = icurs + hdrbufsize
  805. ! If this is a real write (i.e. not a training write), store
  806. ! this piece of this field.
  807. IF ( DomainDesc .NE. 333933 ) THEN ! magic number
  808. !write(0,*) 'A-1a', chunksize, TRIM(VarName),PatchStart(1:3),PatchEnd(1:3)
  809. call store_piece_of_field( obuf(icurs/itypesize), VarName, chunksize )
  810. icurs = icurs + chunksize
  811. ENDIF
  812. CASE ( int_open_for_write_commit ) ! only one per group of tasks
  813. hdrbufsize = obuf(icurs/itypesize)
  814. IF (num_commit_messages.EQ.0) THEN
  815. call store_piece_of_field( obuf(icurs/itypesize), 'COMMIT', hdrbufsize )
  816. ENDIF
  817. num_commit_messages = num_commit_messages + 1
  818. icurs = icurs + hdrbufsize
  819. CASE DEFAULT
  820. hdrbufsize = obuf(icurs/itypesize)
  821. IF ((hdr_tag.EQ.int_noop.AND.mytask_local.NE.0.AND.num_noops.LE.0) &
  822. .OR.hdr_tag.NE.int_noop) THEN
  823. write(VarName,'(I5.5)')vid
  824. !write(0,*) 'A-2b', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
  825. call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
  826. vid = vid+1
  827. ENDIF
  828. IF ( hdr_tag .EQ. int_noop ) num_noops = num_noops + 1
  829. icurs = icurs + hdrbufsize
  830. END SELECT
  831. ENDDO !}
  832. ! Now, for each field, retrieve headers and patches (data) from the internal
  833. ! buffers and collect them all on the I/O quilt server "root" task.
  834. CALL init_retrieve_pieces_of_field
  835. ! Retrieve header and all patches for the first field from the internal
  836. ! buffers.
  837. CALL retrieve_pieces_of_field ( obuf , VarName, obufsize, sz, retval )
  838. ! Sum sizes of all headers and patches (data) for this field from all I/O
  839. ! servers in this I/O server group onto the I/O server "root".
  840. CALL mpi_x_reduce( sz, bigbufsize, 1, MPI_INTEGER, MPI_SUM, ntasks_local_group-1, mpi_comm_local, ierr )
  841. !write(0,*)'seed: sz ',sz,' bigbufsize ',bigbufsize,' VarName ', TRIM(VarName),' retval ',retval
  842. ! Loop until there are no more fields to retrieve from the internal buffers.
  843. DO WHILE ( retval ) !{
  844. #if 0
  845. #else
  846. ! I/O server "root" allocates space to collect headers and fields from all
  847. ! other servers in this I/O server group.
  848. IF ( mytask_local .EQ. ntasks_local_group-1 ) THEN
  849. ALLOCATE( bigbuf( (bigbufsize+1)/itypesize ) )
  850. ENDIF
  851. ! Collect buffers and fields from all I/O servers in this I/O server group
  852. ! onto the I/O server "root"
  853. CALL collect_on_comm_debug2(__FILE__,__LINE__,Trim(VarName), &
  854. get_hdr_tag(obuf),sz,get_hdr_rec_size(obuf), &
  855. mpi_comm_local, &
  856. onebyte, &
  857. obuf, sz, &
  858. bigbuf, bigbufsize )
  859. ! The I/O server "root" now handles collected requests from all compute
  860. ! tasks served by this I/O server group (i.e. all compute tasks).
  861. IF ( mytask_local .EQ. ntasks_local_group-1 ) THEN
  862. !jjj = 4
  863. !do iii = 1, ntasks_local_group
  864. ! write(0,*)'i,j,tag,size ', iii, jjj, get_hdr_tag(bigbuf(jjj/4)),get_hdr_rec_size(bigbuf(jjj/4))
  865. ! jjj = jjj + get_hdr_rec_size(bigbuf(jjj/4))
  866. !enddo
  867. icurs = itypesize ! icurs is a byte counter, but buffer is integer
  868. stored_write_record = .false.
  869. ! The I/O server "root" loops over the collected requests.
  870. DO WHILE ( icurs .lt. bigbufsize ) !{
  871. CALL mpi_type_size ( MPI_INTEGER , itypesize , ierr )
  872. !write(0,*)'B tag,size ',icurs,get_hdr_tag( bigbuf(icurs/itypesize) ),get_hdr_rec_size( bigbuf(icurs/itypesize) )
  873. ! The I/O server "root" gets the request out of the next header and
  874. ! handles it by, in most cases, calling the appropriate external I/O package
  875. ! interface.
  876. SELECT CASE ( get_hdr_tag( bigbuf(icurs/itypesize) ) )
  877. ! The I/O server "root" handles the "noop" (do nothing) request. This is
  878. ! actually quite easy. "Noop" requests exist to help avoid race conditions.
  879. ! In some cases, only one compute task knows everything about a request, so
  880. ! other compute tasks send "noop" requests.
  881. CASE ( int_noop )
  882. CALL int_get_noop_header( bigbuf(icurs/itypesize), hdrbufsize, itypesize )
  883. icurs = icurs + hdrbufsize
  884. ! The I/O server "root" handles the "put_dom_td_real" request.
  885. CASE ( int_dom_td_real )
  886. CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
  887. ALLOCATE( RData( bigbuf(icurs/itypesize + 4 ) ) ) ! 5 is the count of data items for this record ; defined in collect_on_comm.c
  888. CALL int_get_td_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
  889. DataHandle, DateStr, Element, RData, Count, code )
  890. icurs = icurs + hdrbufsize
  891. SELECT CASE (use_package(io_form(DataHandle)))
  892. #ifdef NETCDF
  893. CASE ( IO_NETCDF )
  894. CALL ext_ncd_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
  895. #endif
  896. #ifdef INTIO
  897. CASE ( IO_INTIO )
  898. CALL ext_int_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
  899. #endif
  900. #ifdef YYY
  901. CASE ( IO_YYY )
  902. CALL ext_yyy_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
  903. #endif
  904. #ifdef GRIB1
  905. CASE ( IO_GRIB1 )
  906. CALL ext_gr1_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
  907. #endif
  908. #ifdef GRIB2
  909. CASE ( IO_GRIB2 )
  910. CALL ext_gr2_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
  911. #endif
  912. CASE DEFAULT
  913. Status = 0
  914. END SELECT
  915. DEALLOCATE( RData )
  916. ! The I/O server "root" handles the "put_dom_ti_real" request.
  917. CASE ( int_dom_ti_real )
  918. !write(0,*)' int_dom_ti_real '
  919. CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
  920. ALLOCATE( RData( bigbuf(icurs/itypesize + 4 ) ) ) ! 5 is the count of data items for this record ; defined in collect_on_comm.c
  921. CALL int_get_ti_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
  922. DataHandle, Element, RData, Count, code )
  923. icurs = icurs + hdrbufsize
  924. SELECT CASE (use_package(io_form(DataHandle)))
  925. #ifdef NETCDF
  926. CASE ( IO_NETCDF )
  927. CALL ext_ncd_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
  928. !write(0,*)'ext_ncd_put_dom_ti_real ',handle(DataHandle),TRIM(Element),RData,Status
  929. #endif
  930. #ifdef INTIO
  931. CASE ( IO_INTIO )
  932. CALL ext_int_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
  933. #endif
  934. #ifdef YYY
  935. CASE ( IO_YYY )
  936. CALL ext_yyy_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
  937. #endif
  938. #ifdef GRIB1
  939. CASE ( IO_GRIB1 )
  940. CALL ext_gr1_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
  941. #endif
  942. #ifdef GRIB2
  943. CASE ( IO_GRIB2 )
  944. CALL ext_gr2_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
  945. #endif
  946. CASE DEFAULT
  947. Status = 0
  948. END SELECT
  949. DEALLOCATE( RData )
  950. ! The I/O server "root" handles the "put_dom_td_integer" request.
  951. CASE ( int_dom_td_integer )
  952. !write(0,*)' int_dom_td_integer '
  953. CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
  954. ALLOCATE( IData( bigbuf(icurs/itypesize + 4 ) ) ) ! 5 is the count of data items for this record ; defined in collect_on_comm.c
  955. CALL int_get_td_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
  956. DataHandle, DateStr, Element, IData, Count, code )
  957. icurs = icurs + hdrbufsize
  958. SELECT CASE (use_package(io_form(DataHandle)))
  959. #ifdef NETCDF
  960. CASE ( IO_NETCDF )
  961. CALL ext_ncd_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
  962. #endif
  963. #ifdef INTIO
  964. CASE ( IO_INTIO )
  965. CALL ext_int_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
  966. #endif
  967. #ifdef YYY
  968. CASE ( IO_YYY )
  969. CALL ext_yyy_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
  970. #endif
  971. #ifdef GRIB1
  972. CASE ( IO_GRIB1 )
  973. CALL ext_gr1_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
  974. #endif
  975. #ifdef GRIB2
  976. CASE ( IO_GRIB2 )
  977. CALL ext_gr2_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
  978. #endif
  979. CASE DEFAULT
  980. Status = 0
  981. END SELECT
  982. DEALLOCATE( IData )
  983. ! The I/O server "root" handles the "put_dom_ti_integer" request.
  984. CASE ( int_dom_ti_integer )
  985. !write(0,*)' int_dom_ti_integer '
  986. CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
  987. ALLOCATE( IData( bigbuf(icurs/itypesize + 4 ) ) ) ! 5 is the count of data items for this record ; defined in collect_on_comm.c
  988. CALL int_get_ti_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
  989. DataHandle, Element, IData, Count, code )
  990. icurs = icurs + hdrbufsize
  991. SELECT CASE (use_package(io_form(DataHandle)))
  992. #ifdef NETCDF
  993. CASE ( IO_NETCDF )
  994. CALL ext_ncd_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
  995. !write(0,*)'ext_ncd_put_dom_ti_integer ',handle(DataHandle),TRIM(Element),IData,Status
  996. #endif
  997. #ifdef INTIO
  998. CASE ( IO_INTIO )
  999. CALL ext_int_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
  1000. #endif
  1001. #ifdef YYY
  1002. CASE ( IO_YYY )
  1003. CALL ext_yyy_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
  1004. #endif
  1005. #ifdef GRIB1
  1006. CASE ( IO_GRIB1 )
  1007. CALL ext_gr1_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
  1008. #endif
  1009. #ifdef GRIB2
  1010. CASE ( IO_GRIB2 )
  1011. CALL ext_gr2_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
  1012. #endif
  1013. CASE DEFAULT
  1014. Status = 0
  1015. END SELECT
  1016. DEALLOCATE( IData)
  1017. ! The I/O server "root" handles the "set_time" request.
  1018. CASE ( int_set_time )
  1019. !write(0,*)' int_set_time '
  1020. CALL int_get_ti_header_char( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
  1021. DataHandle, Element, VarName, CData, code )
  1022. SELECT CASE (use_package(io_form(DataHandle)))
  1023. #ifdef INTIO
  1024. CASE ( IO_INTIO )
  1025. CALL ext_int_set_time ( handle(DataHandle), TRIM(CData), Status)
  1026. #endif

(listing truncated here)