
/wrfv2_fire/frame/module_io_quilt.F

http://github.com/jbeezley/wrf-fire
FORTRAN Legacy | 5118 lines | 2434 code | 470 blank | 2214 comment | 109 complexity | f10468d5f1c7c19ff9c349ed06e1796e MD5
Possible License(s): AGPL-1.0
!WRF:DRIVER_LAYER:IO
!
#define DEBUG_LVL 50
!#define mpi_x_comm_size(i,j,k) Mpi_Comm_Size ( i,j,k ) ; write(0,*) __LINE__
#define mpi_x_comm_size(i,j,k) Mpi_Comm_Size ( i,j,k )
! Workaround for bug in the IBM MPI implementation. Look near the
! bottom of this file for an explanation.
#ifdef IBM_REDUCE_BUG_WORKAROUND
#define mpi_x_reduce(sb,rb,c,dt,op,r,com,ierr) reduce_add_integer(sb,rb,c,r,com)
#else
#define mpi_x_reduce(sb,rb,c,dt,op,r,com,ierr) MPI_Reduce(sb,rb,c,dt,op,r,com,ierr)
#endif
MODULE module_wrf_quilt
!<DESCRIPTION>
!<PRE>
! This module contains WRF-specific I/O quilt routines called by both
! client (compute) and server (I/O quilt) tasks. I/O quilt servers are
! a run-time optimization that allows I/O operations, executed on the I/O
! quilt server tasks, to be overlapped with useful computation, executed on
! the compute tasks. Since I/O operations are often quite slow compared to
! computation, this performance optimization can increase parallel
! efficiency.
!
! Currently, one group of I/O servers can be specified at run-time. Namelist
! variable "nio_tasks_per_group" is used to specify the number of I/O server
! tasks in this group. In most cases, parallel efficiency is optimized when
! the minimum number of I/O server tasks are used. If the memory needed to cache
! I/O operations fits on a single processor, then set nio_tasks_per_group=1.
! If not, increase the number of I/O server tasks until I/O operations fit in
! memory. In the future, multiple groups of I/O server tasks will be
! supported. The number of groups will be specified by namelist variable
! "nio_groups". For now, nio_groups must be set to 1. Currently, I/O servers
! only support overlap of output operations with computation. Also, only I/O
! packages that do not support native parallel I/O may be used with I/O server
! tasks. This excludes PHDF5 and MCEL.
!
! In this module, the I/O quilt server tasks call package-dependent
! WRF-specific I/O interfaces to perform I/O operations requested by the
! client (compute) tasks. All of these calls occur inside subroutine
! quilt().
!
! The client (compute) tasks call package-independent WRF-specific "quilt I/O"
! interfaces that send requests to the I/O quilt servers. All of these calls
! are made from module_io.F.
!
! These routines have the same names and (roughly) the same arguments as those
! specified in the WRF I/O API except that:
! - "Quilt I/O" routines defined in this file and called by routines in
! module_io.F have the "wrf_quilt_" prefix.
! - Package-dependent routines called from routines in this file are defined
! in the external I/O packages and have the "ext_" prefix.
!
! Both client (compute) and server tasks call routine init_module_wrf_quilt()
! which then calls setup_quilt_servers() to determine which tasks are compute
! tasks and which are server tasks. Before the end of init_module_wrf_quilt()
! server tasks call routine quilt() and remain there for the rest of the model
! run. Compute tasks return from init_module_wrf_quilt() to perform model
! computations.
!
! See http://www.mmm.ucar.edu/wrf/WG2/software_2.0/IOAPI.doc for the latest
! version of the WRF I/O API. This document includes detailed descriptions
! of subroutines and their arguments that are not duplicated here.
!</PRE>
!</DESCRIPTION>
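! As an illustration (not part of this file): quilting is configured from the
! WRF namelist. A minimal sketch, assuming the standard &namelist_quilt record
! that holds the two variables described above:
!
!   &namelist_quilt
!     nio_tasks_per_group = 2,   ! number of I/O server tasks in each group
!     nio_groups          = 1,   ! must currently be 1 (see above)
!   /
!
! With 10 MPI tasks this would leave 8 compute tasks and dedicate 2 tasks to
! the single I/O server group.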
USE module_internal_header_util
USE module_timing
INTEGER, PARAMETER :: int_num_handles = 99
INTEGER, PARAMETER :: max_servers = int_num_handles+1 ! why +1?
LOGICAL, DIMENSION(int_num_handles) :: okay_to_write, int_handle_in_use, okay_to_commit
INTEGER, DIMENSION(int_num_handles) :: int_num_bytes_to_write, io_form
REAL, POINTER,SAVE :: int_local_output_buffer(:)
INTEGER, SAVE :: int_local_output_cursor
LOGICAL :: quilting_enabled
LOGICAL :: disable_quilt = .FALSE.
INTEGER :: prev_server_for_handle = -1
INTEGER :: server_for_handle(int_num_handles)
INTEGER :: reduced(2), reduced_dummy(2)
LOGICAL, EXTERNAL :: wrf_dm_on_monitor
INTEGER :: mpi_comm_avail,availrank
LOGICAL :: in_avail=.false., poll_servers=.false.
INTEGER nio_groups
#ifdef DM_PARALLEL
INTEGER :: mpi_comm_local
LOGICAL :: compute_node
LOGICAL :: compute_group_master(max_servers)
INTEGER :: mpi_comm_io_groups(max_servers)
INTEGER :: nio_tasks_in_group
INTEGER :: nio_tasks_per_group
INTEGER :: ncompute_tasks
INTEGER :: ntasks
INTEGER :: mytask
INTEGER, PARAMETER :: onebyte = 1
INTEGER comm_io_servers, iserver, hdrbufsize, obufsize
INTEGER, DIMENSION(4096) :: hdrbuf
INTEGER, DIMENSION(int_num_handles) :: handle
#endif
#ifdef IBM_REDUCE_BUG_WORKAROUND
! Workaround for bug in the IBM MPI implementation. Look near the
! bottom of this file for an explanation.
interface reduce_add_integer
module procedure reduce_add_int_arr
module procedure reduce_add_int_scl
end interface
#endif
CONTAINS
#if defined(DM_PARALLEL) && !defined( STUBMPI )
INTEGER FUNCTION get_server_id ( dhandle )
!<DESCRIPTION>
! Logic on the client side to know which I/O server
! group to send to. If the unit corresponds to a file that's
! already been opened, then we have no choice but to send the
! data to that group again, regardless of whether there are
! other server groups. If it's a new file, we can choose a new
! server group. I.e. opening a file locks it onto a server
! group. Closing the file unlocks it.
!</DESCRIPTION>
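! Illustrative sketch of the round-robin branch below (values assumed): with
! nio_groups=3 and poll_servers=.false., successive new handles see
!   prev_server_for_handle : -1 -> 0 -> 1 -> 2 -> 0 -> ...
!   server_for_handle(dh)  :       1    2    3    1  ...
! and each handle keeps its server id until the file is closed.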
IMPLICIT NONE
INTEGER, INTENT(IN) :: dhandle
IF ( dhandle .GE. 1 .AND. dhandle .LE. int_num_handles ) THEN
IF ( server_for_handle ( dhandle ) .GE. 1 ) THEN
get_server_id = server_for_handle ( dhandle )
ELSE
IF(poll_servers) THEN
! Poll server group masters to find an inactive I/O server group:
call wrf_quilt_find_server(server_for_handle(dhandle))
ELSE
! Server polling is disabled, so cycle through servers:
prev_server_for_handle = mod ( prev_server_for_handle + 1 , nio_groups )
server_for_handle( dhandle ) = prev_server_for_handle+1
ENDIF
get_server_id=server_for_handle(dhandle)
ENDIF
ELSE
CALL wrf_message('module_io_quilt: get_server_id bad dhandle' )
ENDIF
END FUNCTION get_server_id
#endif
SUBROUTINE set_server_id ( dhandle, value )
IMPLICIT NONE
INTEGER, INTENT(IN) :: dhandle, value
IF ( dhandle .GE. 1 .AND. dhandle .LE. int_num_handles ) THEN
server_for_handle(dhandle) = value
ELSE
CALL wrf_message('module_io_quilt: set_server_id bad dhandle' )
ENDIF
END SUBROUTINE set_server_id
LOGICAL FUNCTION get_poll_servers()
implicit none
get_poll_servers=poll_servers
end FUNCTION get_poll_servers
#if defined( DM_PARALLEL ) && !defined( STUBMPI )
SUBROUTINE int_get_fresh_handle( retval )
!<DESCRIPTION>
! Find an unused "client file handle" and return it in retval.
! The "client file handle" is used to remember how a file was opened
! so clients do not need to ask the I/O quilt servers for this information.
! It is also used as a file identifier in communications with the I/O
! server task.
!
! Note that client tasks know nothing about package-specific handles.
! Only the I/O quilt servers know about them.
!</DESCRIPTION>
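! Example of the scan below (assumed state): if int_handle_in_use(1:2) are
! .TRUE. and int_handle_in_use(3) is .FALSE., the loop returns retval = 3 and
! marks handle 3 in use; if all int_num_handles handles are taken,
! wrf_error_fatal is called instead.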
INTEGER i, retval
retval = -1
DO i = 1, int_num_handles
IF ( .NOT. int_handle_in_use(i) ) THEN
retval = i
GOTO 33
ENDIF
ENDDO
33 CONTINUE
IF ( retval < 0 ) THEN
CALL wrf_error_fatal("frame/module_io_quilt.F: int_get_fresh_handle() can not")
ENDIF
int_handle_in_use(i) = .TRUE.
NULLIFY ( int_local_output_buffer )
END SUBROUTINE int_get_fresh_handle
SUBROUTINE setup_quilt_servers ( nio_tasks_per_group, &
mytask, &
ntasks, &
n_groups_arg, &
nio, &
mpi_comm_wrld, &
mpi_comm_local, &
mpi_comm_io_groups)
!<DESCRIPTION>
! Both client (compute) and server tasks call this routine to
! determine which tasks are compute tasks and which are I/O server tasks.
!
! Module variables MPI_COMM_LOCAL and MPI_COMM_IO_GROUPS(:) are set up to
! contain MPI communicators as follows:
!
! MPI_COMM_LOCAL is the communicator for the local groups of tasks. For the
! compute tasks it is the group of compute tasks; for a server group it is the
! communicator of tasks in the server group.
!
! Elements of MPI_COMM_IO_GROUPS are communicators that each contain one or
! more compute tasks and a single I/O server assigned to those compute tasks.
! The I/O server task is always the last task in these communicators.
! On a compute task, which has a single associate in each of the server
! groups, MPI_COMM_IO_GROUPS is treated as an array; each element corresponds
! to a different server group.
! On a server task only the first element of MPI_COMM_IO_GROUPS is used
! because each server task is part of only one io_group.
!
! I/O server tasks in each I/O server group are divided among compute tasks as
! evenly as possible.
!
! When multiple I/O server groups are used, each must have the same number of
! tasks. When the total number of extra I/O tasks does not divide evenly by
! the number of io server groups requested, the remainder tasks are not used
! (wasted).
!
! For example, communicator membership for 18 tasks with nio_groups=2 and
! nio_tasks_per_group=3 is shown below:
!
!<PRE>
! Membership for MPI_COMM_LOCAL communicators:
! COMPUTE TASKS: 0 1 2 3 4 5 6 7 8 9 10 11
! 1ST I/O SERVER GROUP: 12 13 14
! 2ND I/O SERVER GROUP: 15 16 17
!
! Membership for MPI_COMM_IO_GROUPS(1):
! COMPUTE TASKS 0, 3, 6, 9: 0 3 6 9 12
! COMPUTE TASKS 1, 4, 7,10: 1 4 7 10 13
! COMPUTE TASKS 2, 5, 8,11: 2 5 8 11 14
! I/O SERVER TASK 12: 0 3 6 9 12
! I/O SERVER TASK 13: 1 4 7 10 13
! I/O SERVER TASK 14: 2 5 8 11 14
! I/O SERVER TASK 15: 0 3 6 9 15
! I/O SERVER TASK 16: 1 4 7 10 16
! I/O SERVER TASK 17: 2 5 8 11 17
!
! Membership for MPI_COMM_IO_GROUPS(2):
! COMPUTE TASKS 0, 3, 6, 9: 0 3 6 9 15
! COMPUTE TASKS 1, 4, 7,10: 1 4 7 10 16
! COMPUTE TASKS 2, 5, 8,11: 2 5 8 11 17
! I/O SERVER TASK 12: ** not used **
! I/O SERVER TASK 13: ** not used **
! I/O SERVER TASK 14: ** not used **
! I/O SERVER TASK 15: ** not used **
! I/O SERVER TASK 16: ** not used **
! I/O SERVER TASK 17: ** not used **
!</PRE>
!</DESCRIPTION>
USE module_configure
#ifdef DM_PARALLEL
USE module_dm, ONLY : compute_mesh
#endif
IMPLICIT NONE
INCLUDE 'mpif.h'
INTEGER, INTENT(IN) :: nio_tasks_per_group, mytask, ntasks, &
n_groups_arg, mpi_comm_wrld
INTEGER, INTENT(OUT) :: mpi_comm_local, nio
INTEGER, DIMENSION(100), INTENT(OUT) :: mpi_comm_io_groups
! Local
INTEGER :: i, j, ii, comdup, ierr, niotasks, n_groups, iisize
INTEGER, DIMENSION(ntasks) :: icolor
CHARACTER*128 mess
INTEGER :: io_form_setting
INTEGER :: me
INTEGER :: k, m, nprocx, nprocy
LOGICAL :: reorder_mesh
! Check the namelist and make sure there are no output forms specified
! that cannot be quilted.
CALL nl_get_io_form_history(1, io_form_setting) ; call sokay( 'history', io_form_setting )
CALL nl_get_io_form_restart(1, io_form_setting) ; call sokay( 'restart', io_form_setting )
CALL nl_get_io_form_auxhist1(1, io_form_setting) ; call sokay( 'auxhist1', io_form_setting )
CALL nl_get_io_form_auxhist2(1, io_form_setting) ; call sokay( 'auxhist2', io_form_setting )
CALL nl_get_io_form_auxhist3(1, io_form_setting) ; call sokay( 'auxhist3', io_form_setting )
CALL nl_get_io_form_auxhist4(1, io_form_setting) ; call sokay( 'auxhist4', io_form_setting )
CALL nl_get_io_form_auxhist5(1, io_form_setting) ; call sokay( 'auxhist5', io_form_setting )
CALL nl_get_io_form_auxhist6(1, io_form_setting) ; call sokay( 'auxhist6', io_form_setting )
CALL nl_get_io_form_auxhist7(1, io_form_setting) ; call sokay( 'auxhist7', io_form_setting )
CALL nl_get_io_form_auxhist8(1, io_form_setting) ; call sokay( 'auxhist8', io_form_setting )
CALL nl_get_io_form_auxhist9(1, io_form_setting) ; call sokay( 'auxhist9', io_form_setting )
CALL nl_get_io_form_auxhist10(1, io_form_setting) ; call sokay( 'auxhist10', io_form_setting )
CALL nl_get_io_form_auxhist11(1, io_form_setting) ; call sokay( 'auxhist11', io_form_setting )
n_groups = n_groups_arg
IF ( n_groups .LT. 1 ) n_groups = 1
compute_node = .TRUE.
!<DESCRIPTION>
! nio is the number of io tasks per group. If there aren't enough tasks to satisfy
! the requirement that there be at least as many compute tasks as io tasks in
! each group, then just print a warning and dump out of quilting.
!</DESCRIPTION>
nio = nio_tasks_per_group
ncompute_tasks = ntasks - (nio * n_groups)
IF ( ncompute_tasks .LT. nio ) THEN
WRITE(mess,'("Not enough tasks to have ",I3," groups of ",I3," I/O tasks. No quilting.")')n_groups,nio
nio = 0
ncompute_tasks = ntasks
ELSE
WRITE(mess,'("Quilting with ",I3," groups of ",I3," I/O tasks.")')n_groups,nio
ENDIF
CALL wrf_message(mess)
IF ( nio .LT. 0 ) THEN
nio = 0
ENDIF
IF ( nio .EQ. 0 ) THEN
quilting_enabled = .FALSE.
mpi_comm_local = mpi_comm_wrld
mpi_comm_io_groups = mpi_comm_wrld
RETURN
ENDIF
quilting_enabled = .TRUE.
! First construct the local communicators:
! prepare to split the communicator by designating compute-only tasks
DO i = 1, ncompute_tasks
icolor(i) = 0
ENDDO
ii = 1
! and designating the groups of i/o tasks
DO i = ncompute_tasks+1, ntasks, nio
DO j = i, i+nio-1
icolor(j) = ii
ENDDO
ii = ii+1
ENDDO
CALL MPI_Comm_dup(mpi_comm_wrld,comdup,ierr)
CALL MPI_Comm_split(comdup,icolor(mytask+1),mytask,mpi_comm_local,ierr)
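! Worked example (the 18-task case from the header, nio_groups=2, nio=3):
! at this point icolor holds
!   tasks  0-11 (compute)       : 0 0 0 0 0 0 0 0 0 0 0 0
!   tasks 12-14 (1st I/O group) : 1 1 1
!   tasks 15-17 (2nd I/O group) : 2 2 2
! so the split yields one MPI_COMM_LOCAL per color: the compute group and one
! communicator per I/O server group.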
! Now construct the communicators for the io_groups
CALL nl_get_reorder_mesh(1,reorder_mesh)
IF ( reorder_mesh ) THEN
reorder_mesh = .FALSE.
CALL nl_set_reorder_mesh(1,reorder_mesh)
CALL wrf_message('Warning: reorder_mesh does not work with quilting. Disabled reorder_mesh.')
ENDIF
! assign the compute tasks to the i/o tasks in full rows
CALL compute_mesh( ncompute_tasks, nprocx, nprocy )
nio = min(nio,nprocy)
m = mod(nprocy,nio) ! divide up remainder, 1 row per, until gone
ii = 1
DO j = 1, nio, 1
DO k = 1,nprocy/nio+min(m,1)
DO i = 1, nprocx
icolor(ii) = j - 1
ii = ii + 1
ENDDO
ENDDO
m = max(m-1,0)
ENDDO
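! Worked example of the row assignment above (assumed mesh): with nprocx=4,
! nprocy=3 and nio=2 we get m=1, so the first I/O task is assigned two full
! rows (compute ranks 0-7, icolor=0) and the second the remaining row
! (ranks 8-11, icolor=1).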
! ... and add the io servers as the last task in each group
DO j = 1, n_groups
! TBH: each I/O group will contain only one I/O server
DO i = ncompute_tasks+1,ntasks
icolor(i) = MPI_UNDEFINED
ENDDO
ii = 0
DO i = ncompute_tasks+(j-1)*nio+1,ncompute_tasks+j*nio
icolor(i) = ii
ii = ii+1
ENDDO
CALL MPI_Comm_dup(mpi_comm_wrld,comdup,ierr)
CALL MPI_Comm_split(comdup,icolor(mytask+1),mytask, &
mpi_comm_io_groups(j),ierr)
ENDDO
#ifdef PNETCDF_QUILT
if(poll_servers) then
poll_servers=.false.
call wrf_message('Warning: server polling does not work with pnetcdf_quilt. Disabled poll_servers.')
else
#endif
if(nio_groups==1) then
poll_servers=.false.
call wrf_message('Server polling is useless with one io group. Disabled poll_servers.')
endif
#ifdef PNETCDF_QUILT
endif
#endif
if(poll_servers) then
! If server polling is enabled, we need to create mpi_comm_avail,
! which contains the monitor process and the I/O server master process
! for each I/O server group. This will be used in the routines
! wrf_quilt_find_server and wrf_quilt_server_ready to find inactive
! I/O servers for new data handles in get_server_id.
! The "in_avail" is set to true iff I am in mpi_comm_avail.
call mpi_comm_rank(mpi_comm_wrld,me,ierr)
icolor=MPI_UNDEFINED
in_avail=.false.
if(wrf_dm_on_monitor()) then
in_avail=.true. ! monitor process is in mpi_comm_avail
endif
icolor(1)=1
do j=1,n_groups
i=ncompute_tasks+j*nio-1
if(me+1==i) then
in_avail=.true. ! I/O server masters are in mpi_comm_avail
endif
icolor(i)=1
enddo
CALL MPI_Comm_dup(mpi_comm_wrld,comdup,ierr)
CALL MPI_Comm_split(comdup,icolor(me+1),me, &
mpi_comm_avail,ierr)
availrank=MPI_UNDEFINED
if(in_avail) then
call mpi_comm_rank(mpi_comm_avail,availrank,ierr)
endif
endif
compute_group_master = .FALSE.
compute_node = .FALSE.
DO j = 1, n_groups
IF ( mytask .LT. ncompute_tasks .OR. & ! I am a compute task
(ncompute_tasks+(j-1)*nio .LE. mytask .AND. mytask .LT. ncompute_tasks+j*nio) & ! I am the I/O server for this group
) THEN
CALL MPI_Comm_Size( mpi_comm_io_groups(j) , iisize, ierr )
! Get the rank of this compute task in the compute+io
! communicator to which it belongs
CALL MPI_Comm_Rank( mpi_comm_io_groups(j) , me , ierr )
! If I am an I/O server for this group then make that group's
! communicator the first element in the mpi_comm_io_groups array
! (I will ignore all of the other elements).
IF (ncompute_tasks+(j-1)*nio .LE. mytask .AND. mytask .LT. ncompute_tasks+j*nio) THEN
mpi_comm_io_groups(1) = mpi_comm_io_groups(j)
ELSE
compute_node = .TRUE.
! If I am a compute task, check whether I am the member of my
! group that will communicate things that should be sent just
! once (e.g. commands) to the IO server of my group.
compute_group_master(j) = (me .EQ. 0)
! IF( compute_group_master(j) ) WRITE(*,*) mytask,': ARPDBG : I will talk to IO server in group ',j
ENDIF
ENDIF
ENDDO
END SUBROUTINE setup_quilt_servers
SUBROUTINE sokay ( stream, io_form )
USE module_state_description
CHARACTER*(*) stream
CHARACTER*256 mess
INTEGER io_form
SELECT CASE (io_form)
#ifdef NETCDF
CASE ( IO_NETCDF )
RETURN
#endif
#ifdef INTIO
CASE ( IO_INTIO )
RETURN
#endif
#ifdef YYY
CASE ( IO_YYY )
RETURN
#endif
#ifdef GRIB1
CASE ( IO_GRIB1 )
RETURN
#endif
#ifdef GRIB2
CASE ( IO_GRIB2 )
RETURN
#endif
CASE (0)
RETURN
CASE DEFAULT
WRITE(mess,*)' An output format has been specified that is incompatible with quilting: io_form: ',io_form,' ',TRIM(stream)
CALL wrf_error_fatal(mess)
END SELECT
END SUBROUTINE sokay
SUBROUTINE quilt
!<DESCRIPTION>
! I/O server tasks call this routine and remain in it for the rest of the
! model run. I/O servers receive I/O requests from compute tasks and
! perform requested I/O operations by calling package-dependent WRF-specific
! I/O interfaces. Requests are sent in the form of "data headers". Each
! request has a unique "header" message associated with it. For requests that
! contain large amounts of data, the data is appended to the header. See
! file module_internal_header_util.F for detailed descriptions of all
! headers.
!
! We wish to be able to link to different packages depending on whether
! the I/O is restart, initial, history, or boundary.
!</DESCRIPTION>
USE module_state_description
USE module_quilt_outbuf_ops
IMPLICIT NONE
INCLUDE 'mpif.h'
#include "intio_tags.h"
#include "wrf_io_flags.h"
INTEGER itag, ninbuf, ntasks_io_group, ntasks_local_group, mytask_local, ierr
INTEGER istat
INTEGER mytask_io_group
INTEGER :: nout_set = 0
INTEGER :: obufsize, bigbufsize, chunksize, sz
REAL, DIMENSION(1) :: dummy
INTEGER, ALLOCATABLE, DIMENSION(:) :: obuf, bigbuf
REAL, ALLOCATABLE, DIMENSION(:) :: RDATA
INTEGER, ALLOCATABLE, DIMENSION(:) :: IDATA
CHARACTER (LEN=512) :: CDATA
CHARACTER (LEN=80) :: fname
INTEGER icurs, hdrbufsize, itypesize, ftypesize, rtypesize, Status, fstat, io_form_arg
INTEGER :: DataHandle, FieldType, Comm, IOComm, DomainDesc, code, Count
INTEGER, DIMENSION(3) :: DomainStart , DomainEnd , MemoryStart , MemoryEnd , PatchStart , PatchEnd
INTEGER :: dummybuf(1)
INTEGER :: num_noops, num_commit_messages, num_field_training_msgs, hdr_tag
CHARACTER (len=256) :: DateStr , Element, VarName, MemoryOrder , Stagger , DimNames(3), FileName, SysDepInfo, mess
INTEGER, EXTERNAL :: use_package
LOGICAL :: stored_write_record, retval
INTEGER iii, jjj, vid, CC, DD
LOGICAL :: call_server_ready
logical okay_to_w
character*120 sysline
! If we've been built with PNETCDF_QUILT defined then we use parallel I/O
! within the group of I/O servers rather than gathering the data onto the
! root I/O server. Unfortunately, this approach means that we can no longer
! select different I/O layers for use with quilting at run time. ARPDBG.
! This code is sufficiently different that it is kept in the separate
! quilt_pnc() routine.
#ifdef PNETCDF_QUILT
CALL quilt_pnc()
RETURN
#endif
! Call ext_pkg_ioinit() routines to initialize I/O packages.
SysDepInfo = " "
#ifdef NETCDF
CALL ext_ncd_ioinit( SysDepInfo, ierr)
#endif
#ifdef INTIO
CALL ext_int_ioinit( SysDepInfo, ierr )
#endif
#ifdef XXX
CALL ext_xxx_ioinit( SysDepInfo, ierr)
#endif
#ifdef YYY
CALL ext_yyy_ioinit( SysDepInfo, ierr)
#endif
#ifdef ZZZ
CALL ext_zzz_ioinit( SysDepInfo, ierr)
#endif
#ifdef GRIB1
CALL ext_gr1_ioinit( SysDepInfo, ierr)
#endif
#ifdef GRIB2
CALL ext_gr2_ioinit( SysDepInfo, ierr)
#endif
call_server_ready = .true. ! = true when the server is ready for a new file
okay_to_commit = .false.
stored_write_record = .false.
ninbuf = 0
! Get info about the I/O server group that this I/O server task
! belongs to.
! The last task in this I/O server group is the I/O server "root".
! The I/O server "root" actually writes data to disk.
! TBH: WARNING: This is also implicit in the call to collect_on_comm().
CALL mpi_x_comm_size( mpi_comm_io_groups(1), ntasks_io_group, ierr )
CALL MPI_COMM_RANK( mpi_comm_io_groups(1), mytask_io_group, ierr )
CALL mpi_x_comm_size( mpi_comm_local, ntasks_local_group, ierr )
CALL MPI_COMM_RANK( mpi_comm_local, mytask_local, ierr )
CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
IF ( itypesize <= 0 ) THEN
CALL wrf_error_fatal("external/RSL/module_dm.F: quilt: type size <= 0 invalid")
ENDIF
! Work out whether this I/O server processor has one fewer associated compute
! proc than the most any processor has. This can happen when the number of I/O
! tasks does not evenly divide the number of compute tasks. It is needed to
! keep the I/O tasks synchronized on the same message when they start
! communicating to stitch together an output.
!
! Compute processes associated with this task:
CC = ntasks_io_group - 1
! Number of compute tasks per I/O task (less remainder)
DD = ncompute_tasks / ntasks_local_group
!
! CC-DD is 1 on servers with the maximum number of compute clients,
! 0 on servers with one less than the maximum.
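!
! Example (assumed counts): with 13 compute tasks and 3 servers per group,
! DD = 13/3 = 4. A server whose io_group holds 5 compute clients sees CC = 5,
! so CC-DD = 1; the servers with 4 clients see CC = 4 and CC-DD = 0.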
! Infinite loop until shutdown message received.
! This is the main request-handling loop. I/O quilt servers stay in this loop
! until the model run ends.
okay_to_w = .false.
DO WHILE (.TRUE.) ! {
!<DESCRIPTION>
! Each I/O server receives requests from its compute tasks. Each request
! is contained in a data header (see module_internal_header_util.F for
! detailed descriptions of data headers).
! Each request is sent in two phases. First, sizes of all messages that
! will be sent from the compute tasks to this I/O server are summed on the
! I/O server via MPI_reduce(). The I/O server then allocates buffer "obuf"
! and receives concatenated messages from the compute tasks in it via the
! call to collect_on_comm(). Note that "sizes" are generally expressed in
! *bytes* in this code so conversion to "count" (number of Fortran words) is
! required for Fortran indexing and MPI calls.
!</DESCRIPTION>
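! Sketch of the two-phase exchange (illustrative sizes): if this server's four
! compute clients have 100-, 200-, 150- and 50-byte messages queued, the
! MPI_SUM reduce below yields obufsize = reduced(1) = 500; obuf is then
! allocated for 500 bytes and collect_on_comm() concatenates the four client
! messages into it in rank order.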
if(poll_servers .and. call_server_ready) then
call_server_ready=.false.
! Send a message to the monitor telling it we're ready
! for a new data handle.
call wrf_quilt_server_ready()
endif
! wait for info from compute tasks in the I/O group that we're ready to rock
! obufsize will contain number of *bytes*
!CALL start_timing()
! first element of reduced is obufsize, second is DataHandle
! if needed (currently needed only for ioclose).
reduced_dummy = 0
CALL mpi_x_reduce( reduced_dummy, reduced, 2, MPI_INTEGER, MPI_SUM, mytask_io_group, mpi_comm_io_groups(1), ierr )
obufsize = reduced(1)
!CALL end_timing("MPI_Reduce at top of forever loop")
!JMDEBUGwrite(0,*)'obufsize = ',obufsize
! Negative obufsize will trigger I/O server exit.
IF ( obufsize .LT. 0 ) THEN
IF ( obufsize .EQ. -100 ) THEN ! magic number
#ifdef NETCDF
CALL ext_ncd_ioexit( Status )
#endif
#ifdef INTIO
CALL ext_int_ioexit( Status )
#endif
#ifdef XXX
CALL ext_xxx_ioexit( Status )
#endif
#ifdef YYY
CALL ext_yyy_ioexit( Status )
#endif
#ifdef ZZZ
CALL ext_zzz_ioexit( Status )
#endif
#ifdef GRIB1
CALL ext_gr1_ioexit( Status )
#endif
#ifdef GRIB2
CALL ext_gr2_ioexit( Status )
#endif
CALL wrf_message ( 'I/O QUILT SERVERS DONE' )
CALL mpi_finalize(ierr)
STOP
ELSE
WRITE(mess,*)'Possible 32-bit overflow on output server. Try larger nio_tasks_per_group in namelist.'
CALL wrf_error_fatal(mess)
ENDIF
ENDIF
! CALL start_timing()
! Obufsize of zero signals a close.
! Allocate buffer obuf to be big enough for the data the compute tasks
! will send. Note: obufsize is in *bytes*, so we need to pare it
! down, since the buffer is INTEGER.
IF ( obufsize .GT. 0 ) THEN
ALLOCATE( obuf( (obufsize+1)/itypesize ) )
! let's roll; get the data from the compute procs and put in obuf
CALL collect_on_comm_debug(__FILE__,__LINE__, mpi_comm_io_groups(1), &
onebyte, &
dummy, 0, &
obuf, obufsize )
! CALL end_timing( "quilt on server: collecting data from compute procs" )
ELSE
! Necessarily, the compute processes send the ioclose signal,
! if there is one, after the iosync, which means they
! will stall on the ioclose message waiting for the quilt
! processes if we handle it the way other messages are collected,
! using collect_on_comm. This avoids that, but we need
! a special signal (obufsize zero) and the DataHandle
! of the file to be closed. That handle is sent as the second
! word of the io_close message received by the MPI_Reduce above.
! Then a header representing the ioclose message is constructed
! here and handled below as if it were received from the
! compute processes. The clients (compute processes) must be
! careful to send this correctly (one compute process sends the actual
! handle and everyone else sends a zero, so the result sums to
! the value of the handle).
!
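! Example of the handle-sum convention (handle number assumed): to close
! DataHandle 7, one compute task contributes 7 to the second element of the
! reduce and all other compute tasks contribute 0, so reduced(2) = 7 here.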
ALLOCATE( obuf( 4096 ) )
! DataHandle is provided as second element of reduced
CALL int_gen_handle_header( obuf, obufsize, itypesize, &
reduced(2) , int_ioclose )
if(poll_servers) then
! Once we're done closing, we need to tell the master
! process that we're ready for more data.
call_server_ready=.true.
endif
ENDIF
!write(0,*)'calling init_store_piece_of_field'
! Now all messages received from the compute clients are stored in
! obuf. Scan through obuf and extract headers and field data and store in
! internal buffers. The scan is done twice, first to determine sizes of
! internal buffers required for storage of headers and fields and second to
! actually store the headers and fields. This bit of code does not do the
! "quilting" (assembly of patches into full domains). For each field, it
! simply concatenates all received patches for the field into a separate
! internal buffer (i.e. one buffer per field). Quilting is done later by
! routine store_patch_in_outbuf().
CALL init_store_piece_of_field
CALL mpi_type_size ( MPI_INTEGER , itypesize , ierr )
!write(0,*)'mpi_type_size returns ', itypesize
! Scan obuf the first time to calculate the size of the buffer required for
! each field. Calls to add_to_bufsize_for_field() accumulate sizes.
vid = 0
icurs = itypesize
num_noops = 0
num_commit_messages = 0
num_field_training_msgs = 0
DO WHILE ( icurs .lt. obufsize ) ! {
hdr_tag = get_hdr_tag( obuf ( icurs / itypesize ) )
SELECT CASE ( hdr_tag )
CASE ( int_field )
CALL int_get_write_field_header ( obuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize, &
DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
DomainDesc , MemoryOrder , Stagger , DimNames , &
DomainStart , DomainEnd , &
MemoryStart , MemoryEnd , &
PatchStart , PatchEnd )
chunksize = (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
(PatchEnd(3)-PatchStart(3)+1)*ftypesize
IF ( DomainDesc .EQ. 333933 ) THEN ! Training write, only one per group of tasks
IF ( num_field_training_msgs .EQ. 0 ) THEN
call add_to_bufsize_for_field( VarName, hdrbufsize )
!write(0,*) 'X-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
ENDIF
num_field_training_msgs = num_field_training_msgs + 1
ELSE
call add_to_bufsize_for_field( VarName, hdrbufsize )
!write(0,*) 'X-2a', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
ENDIF
icurs = icurs + hdrbufsize
!write(0,*) 'X-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
! If this is a real write (i.e. not a training write), accumulate
! buffersize for this field.
IF ( DomainDesc .NE. 333933 ) THEN ! magic number
!write(0,*) 'X-1a', chunksize, TRIM(VarName)
call add_to_bufsize_for_field( VarName, chunksize )
icurs = icurs + chunksize
ENDIF
CASE ( int_open_for_write_commit ) ! only one per group of tasks
hdrbufsize = obuf(icurs/itypesize)
IF (num_commit_messages.EQ.0) THEN
call add_to_bufsize_for_field( 'COMMIT', hdrbufsize )
ENDIF
num_commit_messages = num_commit_messages + 1
icurs = icurs + hdrbufsize
CASE DEFAULT
hdrbufsize = obuf(icurs/itypesize)
! This logic and the logic in the loop below is used to determine whether
! to let noop records sent by the compute processes go
! through. The purpose is to make sure that the communications between this
! server and the other servers in this quilt group stay synchronized in
! the collection loop below, even when the servers are serving different
! numbers of clients. Here are some conditions:
!
! 1. The number of compute clients served will not differ by more than 1.
! 2. The servers with +1 number of compute clients begin with task 0
! of mpi_comm_local, the communicator shared by this group of servers.
!
! 3. For each collective field or metadata output from the compute tasks,
! there will be one record sent to the associated i/o server task. The
! i/o server task collects these records and stores them contiguously
! in a buffer (obuf) using collect_on_comm above. Thus, obuf on this
! server task will contain one record from each associated compute
! task, in order.
!
! 4. In the case of replicated output from the compute tasks
! (e.g. put_dom_ti records and control records like
! open_for_write_commit type records), compute task 0 is the only
! one that sends the record. The other compute tasks send noop
! records. Thus, obuf on server task zero will contain the output
! record from task 0 followed by noop records from the rest of the
! compute tasks associated with task 0. Obuf on the other server
! tasks will contain nothing but noop records.
!
! 5. The logic below will not allow any noop records from server task 0.
! It allows only one noop record from each of the other server tasks
! in the i/o group. This way, for replicated output, when the records
! are collected on one server task below, using collect_on_comm on
! mpi_comm_local, each task will provide exactly one record for each
! call to collect_on_comm: 1 bona fide output record from server task
! 0 and noops from the rest.
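!
! Example (3 servers, replicated output): server task 0 receives
! [record, noop, noop, ...] from its clients, while server tasks 1 and 2
! receive only noops. The test below keeps the record on task 0 and exactly
! one noop on each of tasks 1 and 2, so the later collect_on_comm over
! mpi_comm_local gets exactly one entry per server task.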
IF ((hdr_tag.EQ.int_noop.AND.mytask_local.NE.0.AND.num_noops.LE.0) &
.OR.hdr_tag.NE.int_noop) THEN
write(VarName,'(I5.5)')vid
!write(0,*) 'X-2', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
call add_to_bufsize_for_field( VarName, hdrbufsize )
vid = vid+1
ENDIF
IF ( hdr_tag .EQ. int_noop ) num_noops = num_noops + 1
icurs = icurs + hdrbufsize
END SELECT
ENDDO ! }
! Store the headers and field data in internal buffers. The first call to
! store_piece_of_field() allocates internal buffers using sizes computed by
! calls to add_to_bufsize_for_field().
vid = 0
icurs = itypesize
num_noops = 0
num_commit_messages = 0
num_field_training_msgs = 0
DO WHILE ( icurs .lt. obufsize ) !{
!write(0,*) 'A icurs ', icurs, ' obufsize ', obufsize
hdr_tag = get_hdr_tag( obuf ( icurs / itypesize ) )
SELECT CASE ( hdr_tag )
CASE ( int_field )
CALL int_get_write_field_header ( obuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize, &
DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
DomainDesc , MemoryOrder , Stagger , DimNames , &
DomainStart , DomainEnd , &
MemoryStart , MemoryEnd , &
PatchStart , PatchEnd )
chunksize = (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
(PatchEnd(3)-PatchStart(3)+1)*ftypesize
IF ( DomainDesc .EQ. 333933 ) THEN ! Training write, only one per group of tasks
IF ( num_field_training_msgs .EQ. 0 ) THEN
call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
!write(0,*) 'A-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
ENDIF
num_field_training_msgs = num_field_training_msgs + 1
ELSE
call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
!write(0,*) 'A-2a', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
ENDIF
icurs = icurs + hdrbufsize
! If this is a real write (i.e. not a training write), store
! this piece of this field.
IF ( DomainDesc .NE. 333933 ) THEN ! magic number
!write(0,*) 'A-1a', chunksize, TRIM(VarName),PatchStart(1:3),PatchEnd(1:3)
call store_piece_of_field( obuf(icurs/itypesize), VarName, chunksize )
icurs = icurs + chunksize
ENDIF
CASE ( int_open_for_write_commit ) ! only one per group of tasks
hdrbufsize = obuf(icurs/itypesize)
IF (num_commit_messages.EQ.0) THEN
call store_piece_of_field( obuf(icurs/itypesize), 'COMMIT', hdrbufsize )
ENDIF
num_commit_messages = num_commit_messages + 1
icurs = icurs + hdrbufsize
CASE DEFAULT
hdrbufsize = obuf(icurs/itypesize)
IF ((hdr_tag.EQ.int_noop.AND.mytask_local.NE.0.AND.num_noops.LE.0) &
.OR.hdr_tag.NE.int_noop) THEN
write(VarName,'(I5.5)')vid
!write(0,*) 'A-2b', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
vid = vid+1
ENDIF
IF ( hdr_tag .EQ. int_noop ) num_noops = num_noops + 1
icurs = icurs + hdrbufsize
END SELECT
ENDDO !}
! Now, for each field, retrieve headers and patches (data) from the internal
! buffers and collect them all on the I/O quilt server "root" task.
CALL init_retrieve_pieces_of_field
! Retrieve header and all patches for the first field from the internal
! buffers.
CALL retrieve_pieces_of_field ( obuf , VarName, obufsize, sz, retval )
! Sum sizes of all headers and patches (data) for this field from all I/O
! servers in this I/O server group onto the I/O server "root".
CALL mpi_x_reduce( sz, bigbufsize, 1, MPI_INTEGER, MPI_SUM, ntasks_local_group-1, mpi_comm_local, ierr )
!write(0,*)'seed: sz ',sz,' bigbufsize ',bigbufsize,' VarName ', TRIM(VarName),' retval ',retval
! Loop until there are no more fields to retrieve from the internal buffers.
DO WHILE ( retval ) !{
#if 0
#else
! I/O server "root" allocates space to collect headers and fields from all
! other servers in this I/O server group.
IF ( mytask_local .EQ. ntasks_local_group-1 ) THEN
ALLOCATE( bigbuf( (bigbufsize+1)/itypesize ) )
ENDIF
! Collect buffers and fields from all I/O servers in this I/O server group
! onto the I/O server "root".
CALL collect_on_comm_debug2(__FILE__,__LINE__,Trim(VarName), &
get_hdr_tag(obuf),sz,get_hdr_rec_size(obuf), &
mpi_comm_local, &
onebyte, &
obuf, sz, &
bigbuf, bigbufsize )
! The I/O server "root" now handles collected requests from all compute
! tasks served by this I/O server group (i.e. all compute tasks).
IF ( mytask_local .EQ. ntasks_local_group-1 ) THEN
!jjj = 4
!do iii = 1, ntasks_local_group
! write(0,*)'i,j,tag,size ', iii, jjj, get_hdr_tag(bigbuf(jjj/4)),get_hdr_rec_size(bigbuf(jjj/4))
! jjj = jjj + get_hdr_rec_size(bigbuf(jjj/4))
!enddo
icurs = itypesize ! icurs is a byte counter, but buffer is integer
stored_write_record = .false.
! The I/O server "root" loops over the collected requests.
DO WHILE ( icurs .lt. bigbufsize ) !{
CALL mpi_type_size ( MPI_INTEGER , itypesize , ierr )
!write(0,*)'B tag,size ',icurs,get_hdr_tag( bigbuf(icurs/itypesize) ),get_hdr_rec_size( bigbuf(icurs/itypesize) )
! The I/O server "root" gets the request out of the next header and
! handles it by, in most cases, calling the appropriate external I/O package
! interface.
SELECT CASE ( get_hdr_tag( bigbuf(icurs/itypesize) ) )
! The I/O server "root" handles the "noop" (do nothing) request. This is
! actually quite easy. "Noop" requests exist to help avoid race conditions.
! In some cases, only one compute task will know everything about a request, so
! other compute tasks send "noop" requests.
CASE ( int_noop )
CALL int_get_noop_header( bigbuf(icurs/itypesize), hdrbufsize, itypesize )
icurs = icurs + hdrbufsize
! The I/O server "root" handles the "put_dom_td_real" request.
CASE ( int_dom_td_real )
CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
ALLOCATE( RData( bigbuf(icurs/itypesize + 4 ) ) ) ! 5 is the count of data items for this record ; defined in collect_on_comm.c
CALL int_get_td_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
DataHandle, DateStr, Element, RData, Count, code )
icurs = icurs + hdrbufsize
SELECT CASE (use_package(io_form(DataHandle)))
#ifdef NETCDF
CASE ( IO_NETCDF )
CALL ext_ncd_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
#endif
#ifdef INTIO
CASE ( IO_INTIO )
CALL ext_int_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
#endif
#ifdef YYY
CASE ( IO_YYY )
CALL ext_yyy_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
#endif
#ifdef GRIB1
CASE ( IO_GRIB1 )
CALL ext_gr1_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
#endif
#ifdef GRIB2
CASE ( IO_GRIB2 )
CALL ext_gr2_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
#endif
CASE DEFAULT
Status = 0
END SELECT
DEALLOCATE( RData )
! The I/O server "root" handles the "put_dom_ti_real" request.
CASE ( int_dom_ti_real )
!write(0,*)' int_dom_ti_real '
CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
ALLOCATE( RData( bigbuf(icurs/itypesize + 4 ) ) ) ! 5 is the count of data items for this record ; defined in collect_on_comm.c
CALL int_get_ti_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
DataHandle, Element, RData, Count, code )
icurs = icurs + hdrbufsize
SELECT CASE (use_package(io_form(DataHandle)))
#ifdef NETCDF
CASE ( IO_NETCDF )
CALL ext_ncd_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
!write(0,*)'ext_ncd_put_dom_ti_real ',handle(DataHandle),TRIM(Element),RData,Status
#endif
#ifdef INTIO
CASE ( IO_INTIO )
CALL ext_int_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
#endif
#ifdef YYY
CASE ( IO_YYY )
CALL ext_yyy_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
#endif
#ifdef GRIB1
CASE ( IO_GRIB1 )
CALL ext_gr1_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
#endif
#ifdef GRIB2
CASE ( IO_GRIB2 )
CALL ext_gr2_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
#endif
CASE DEFAULT
Status = 0
END SELECT
DEALLOCATE( RData )
! The I/O server "root" handles the "put_dom_td_integer" request.
CASE ( int_dom_td_integer )
!write(0,*)' int_dom_td_integer '
CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
ALLOCATE( IData( bigbuf(icurs/itypesize + 4 ) ) ) ! 5 is the count of data items for this record ; defined in collect_on_comm.c
CALL int_get_td_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
DataHandle, DateStr, Element, IData, Count, code )
icurs = icurs + hdrbufsize
SELECT CASE (use_package(io_form(DataHandle)))
#ifdef NETCDF
CASE ( IO_NETCDF )
CALL ext_ncd_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
#endif
#ifdef INTIO
CASE ( IO_INTIO )
CALL ext_int_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
#endif
#ifdef YYY
CASE ( IO_YYY )
CALL ext_yyy_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
#endif
#ifdef GRIB1
CASE ( IO_GRIB1 )
CALL ext_gr1_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
#endif
#ifdef GRIB2
CASE ( IO_GRIB2 )
CALL ext_gr2_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
#endif
CASE DEFAULT
Status = 0
END SELECT
DEALLOCATE( IData )
! The I/O server "root" handles the "put_dom_ti_integer" request.
CASE ( int_dom_ti_integer )
!write(0,*)' int_dom_ti_integer '
CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
ALLOCATE( IData( bigbuf(icurs/itypesize + 4 ) ) ) ! 5 is the count of data items for this record ; defined in collect_on_comm.c
CALL int_get_ti_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
DataHandle, Element, IData, Count, code )
icurs = icurs + hdrbufsize
SELECT CASE (use_package(io_form(DataHandle)))
#ifdef NETCDF
CASE ( IO_NETCDF )
CALL ext_ncd_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
!write(0,*)'ext_ncd_put_dom_ti_integer ',handle(DataHandle),TRIM(Element),IData,Status
#endif
#ifdef INTIO
CASE ( IO_INTIO )
CALL ext_int_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
#endif
#ifdef YYY
CASE ( IO_YYY )
CALL ext_yyy_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
#endif
#ifdef GRIB1
CASE ( IO_GRIB1 )
CALL ext_gr1_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
#endif
#ifdef GRIB2
CASE ( IO_GRIB2 )
CALL ext_gr2_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
#endif
CASE DEFAULT
Status = 0
END SELECT
DEALLOCATE( IData)
! The I/O server "root" handles the "set_time" request.
CASE ( int_set_time )
!write(0,*)' int_set_time '
CALL int_get_ti_header_char( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
DataHandle, Element, VarName, CData, code )
SELECT CASE (use_package(io_form(DataHandle)))
#ifdef INTIO
CASE ( IO_INTIO )
CALL ext_int_set_time ( handle(DataHandle), TRIM(CData), Status)
#endif
CASE DEFAULT
Status = 0
END SELECT
icurs = icurs + hdrbufsize
! The I/O server "root" handles the "put_dom_ti_char" request.
CASE ( int_dom_ti_char )
!write(0,*)' before int_get_ti_header_char '
CALL int_get_ti_header_char( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
DataHandle, Element, VarName, CData, code )
!write(0,*)' after int_get_ti_header_char ',VarName
SELECT CASE (use_package(io_form(DataHandle)))
#ifdef NETCDF
CASE ( IO_NETCDF )
CALL ext_ncd_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
#endif
#ifdef INTIO
CASE ( IO_INTIO )
CALL ext_int_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
#endif
#ifdef YYY
CASE ( IO_YYY )
CALL ext_yyy_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
#endif
#ifdef GRIB1
CASE ( IO_GRIB1 )
CALL ext_gr1_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
#endif
#ifdef GRIB2
CASE ( IO_GRIB2 )
CALL ext_gr2_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
#endif
CASE DEFAULT
Status = 0
END SELECT
icurs = icurs + hdrbufsize
! The I/O server "root" handles the "put_var_ti_char" request.
CASE ( int_var_ti_char )
!write(0,*)' int_var_ti_char '
CALL int_get_ti_header_char( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
DataHandle, Element, VarName, CData, code )
SELECT CASE (use_package(io_form(DataHandle)))
#ifdef NETCDF
CASE ( IO_NETCDF )
CALL ext_ncd_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
#endif
#ifdef INTIO
CASE ( IO_INTIO )
CALL ext_int_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
#endif
#ifdef YYY
CASE ( IO_YYY )
CALL ext_yyy_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
#endif
#ifdef GRIB1
CASE ( IO_GRIB1 )
CALL ext_gr1_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
#endif
#ifdef GRIB2
CASE ( IO_GRIB2 )
CALL ext_gr2_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
#endif
CASE DEFAULT
Status = 0
END SELECT
icurs = icurs + hdrbufsize
CASE ( int_ioexit )
! ioexit is now handled by sending negative message length to server
CALL wrf_error_fatal( &
"quilt: should have handled int_ioexit already")
! The I/O server "root" handles the "ioclose" request.
CASE ( int_ioclose )
CALL int_get_handle_header( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
DataHandle , code )
icurs = icurs + hdrbufsize
IF ( DataHandle .GE. 1 ) THEN
!JMDEBUGwrite(0,*)'closing DataHandle ',DataHandle,' io_form ',io_form(DataHandle)
SELECT CASE (use_package(io_form(DataHandle)))
#ifdef NETCDF
CASE ( IO_NETCDF )
CALL ext_ncd_inquire_filename( handle(DataHandle), fname, fstat, Status )
IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
CALL ext_ncd_ioclose(handle(DataHandle),Status)
ENDIF
#endif
#ifdef PNETCDF
CASE ( IO_PNETCDF )
CALL ext_pnc_inquire_filename( handle(DataHandle), fname, fstat, Status )
IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
CALL ext_pnc_ioclose(handle(DataHandle),Status)
ENDIF
#endif
#ifdef INTIO
CASE ( IO_INTIO )
CALL ext_int_inquire_filename( handle(DataHandle), fname, fstat, Status )
IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
CALL ext_int_ioclose(handle(DataHandle),Status)
ENDIF
#endif
#ifdef YYY
CASE ( IO_YYY )
CALL ext_yyy_inquire_filename( handle(DataHandle), fname, fstat, Status )
IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
CALL ext_yyy_ioclose(handle(DataHandle),Status)
ENDIF
#endif
#ifdef GRIB1
CASE ( IO_GRIB1 )
CALL ext_gr1_inquire_filename( handle(DataHandle), fname, fstat, Status )
IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
CALL ext_gr1_ioclose(handle(DataHandle),Status)
ENDIF
#endif
#ifdef GRIB2
CASE ( IO_GRIB2 )
CALL ext_gr2_inquire_filename( handle(DataHandle), fname, fstat, Status )
IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
CALL ext_gr2_ioclose(handle(DataHandle),Status)
ENDIF
#endif
CASE DEFAULT
Status = 0
END SELECT
ENDIF
! The I/O server "root" handles the "open_for_write_begin" request.
CASE ( int_open_for_write_begin )
CALL int_get_ofwb_header( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
FileName,SysDepInfo,io_form_arg,DataHandle )
!write(0,*)' int_open_for_write_begin itypesize ',itypesize,' itypesize ',itypesize
!write(0,*)' int_open_for_write_begin icurs ', icurs, hdrbufsize
!JMDEBUGwrite(0,*)' int_open_for_write_begin FileName ',TRIM(FileName) , ' DataHandle ', DataHandle
!write(0,*)' int_open_for_write_begin SysDepInfo ',TRIM(SysDepInfo)
icurs = icurs + hdrbufsize
!write(0,*)' int_open_for_write_begin new icurs,tag,size ', icurs, get_hdr_tag( bigbuf(icurs/itypesize) ),get_hdr_rec_size( bigbuf(icurs/itypesize) )
io_form(DataHandle) = io_form_arg
SELECT CASE (use_package(io_form(DataHandle)))
#ifdef NETCDF
CASE ( IO_NETCDF )
CALL ext_ncd_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
!write(0,*)'ext_ncd_open_for_write_begin ',Trim(FileName),DataHandle,handle(DataHandle),Status
#endif
#ifdef INTIO
CASE ( IO_INTIO )
CALL ext_int_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
#endif
#ifdef YYY
CASE ( IO_YYY )
CALL ext_yyy_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
#endif
#ifdef GRIB1
CASE ( IO_GRIB1 )
CALL ext_gr1_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
#endif
#ifdef GRIB2
CASE ( IO_GRIB2 )
CALL ext_gr2_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
#endif
CASE DEFAULT
Status = 0
END SELECT
okay_to_write(DataHandle) = .false.
! The I/O server "root" handles the "open_for_write_commit" request.
! In this case, "okay_to_commit" is simply set to .true. so "write_field"
! requests will initiate writes to disk. The actual commit will be done after
! all requests in this batch have been handled.
CASE ( int_open_for_write_commit )
CALL int_get_handle_header( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
DataHandle , code )
icurs = icurs + hdrbufsize
okay_to_commit(DataHandle) = .true.
! The I/O server "root" handles the "write_field" (int_field) request.
! If okay_to_write(DataHandle) is .true. then the patch in the
! header (bigbuf) is written to a globally-sized internal output buffer via
! the call to store_patch_in_outbuf(). Note that this is where the actual
! "quilting" (reassembly of patches onto a full-size domain) is done. If
! okay_to_write(DataHandle) is .false. then external I/O package interfaces
! are called to write metadata for I/O formats that support native metadata.
!
! NOTE that the I/O server "root" will only see write_field (int_field)
! requests AFTER an "iosync" request.
  1206. CASE ( int_field )
  1207. CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
  1208. CALL int_get_write_field_header ( bigbuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize, &
  1209. DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
  1210. DomainDesc , MemoryOrder , Stagger , DimNames , &
  1211. DomainStart , DomainEnd , &
  1212. MemoryStart , MemoryEnd , &
  1213. PatchStart , PatchEnd )
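! For orientation, the three index triples unpacked above describe nested
! extents (the sample numbers below are illustrative only):
!   DomainStart/DomainEnd : the full model domain, e.g. (1,1,1)..(425,300,35)
!   MemoryStart/MemoryEnd : the sending task's allocated array, incl. halo
!   PatchStart/PatchEnd   : the interior piece actually owned by that task
! The patch volume times the element size gives the payload length that
! follows each header in bigbuf.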
  1214. !write(0,*)' int_field ',TRIM(VarName),DataHandle,okay_to_write(DataHandle)
  1215. icurs = icurs + hdrbufsize
  1216. IF ( okay_to_write(DataHandle) ) THEN
  1217. ! WRITE(0,*)'>>> ',TRIM(DateStr), ' ', TRIM(VarName), ' ', TRIM(MemoryOrder), ' ', &
  1218. ! (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)*(PatchEnd(3)-PatchStart(3)+1)
  1219. IF ( FieldType .EQ. WRF_FLOAT .OR. FieldType .EQ. WRF_DOUBLE) THEN
  1220. ! Note that the WRF_DOUBLE branch of this IF statement must come first since
  1221. ! WRF_FLOAT is set equal to WRF_DOUBLE during autopromotion builds.
  1222. IF ( FieldType .EQ. WRF_DOUBLE) THEN
  1223. ! this branch has not been tested TBH: 20050406
  1224. CALL mpi_type_size( MPI_DOUBLE_PRECISION, ftypesize, ierr )
  1225. ELSE
  1226. CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
  1227. ENDIF
  1228. stored_write_record = .true.
  1229. CALL store_patch_in_outbuf ( bigbuf(icurs/itypesize), dummybuf, TRIM(DateStr), TRIM(VarName) , &
  1230. FieldType, TRIM(MemoryOrder), TRIM(Stagger), DimNames, &
  1231. DomainStart , DomainEnd , &
  1232. MemoryStart , MemoryEnd , &
  1233. PatchStart , PatchEnd )
  1234. ELSE IF ( FieldType .EQ. WRF_INTEGER ) THEN
  1235. CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
  1236. stored_write_record = .true.
  1237. CALL store_patch_in_outbuf ( dummybuf, bigbuf(icurs/itypesize), TRIM(DateStr), TRIM(VarName) , &
  1238. FieldType, TRIM(MemoryOrder), TRIM(Stagger), DimNames, &
  1239. DomainStart , DomainEnd , &
  1240. MemoryStart , MemoryEnd , &
  1241. PatchStart , PatchEnd )
  1242. ELSE IF ( FieldType .EQ. WRF_LOGICAL ) THEN
  1243. ftypesize = LWORDSIZE
  1244. ENDIF
  1245. icurs = icurs + (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
  1246. (PatchEnd(3)-PatchStart(3)+1)*ftypesize
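! A worked example of the cursor arithmetic above (illustrative sizes):
! a 10x10x5 REAL patch with ftypesize = 4 advances icurs by
! 10*10*5*4 = 2000 bytes past the header, landing icurs on the next
! record's header tag.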
  1247. ELSE
  1248. SELECT CASE (use_package(io_form(DataHandle)))
  1249. #ifdef NETCDF
  1250. CASE ( IO_NETCDF )
  1251. CALL ext_ncd_write_field ( handle(DataHandle) , TRIM(DateStr) , &
  1252. TRIM(VarName) , dummy , FieldType , Comm , IOComm, &
  1253. DomainDesc , TRIM(MemoryOrder) , TRIM(Stagger) , DimNames , &
  1254. DomainStart , DomainEnd , &
  1255. DomainStart , DomainEnd , &
  1256. DomainStart , DomainEnd , &
  1257. Status )
  1258. #endif
  1259. #if 0
  1260. ! since this is training and the grib output doesn't need training, disable this branch.
  1261. #ifdef YYY
  1262. CASE ( IO_YYY )
  1263. CALL ext_YYY_write_field ( handle(DataHandle) , TRIM(DateStr) , &
  1264. TRIM(VarName) , dummy , FieldType , Comm , IOComm, &
  1265. DomainDesc , TRIM(MemoryOrder) , TRIM(Stagger) , DimNames , &
  1266. DomainStart , DomainEnd , &
  1267. DomainStart , DomainEnd , &
  1268. DomainStart , DomainEnd , &
  1269. Status )
  1270. #endif
  1271. #endif
  1272. CASE DEFAULT
  1273. Status = 0
  1274. END SELECT
  1275. ENDIF
  1276. CASE ( int_iosync )
  1277. CALL int_get_handle_header( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
  1278. DataHandle , code )
  1279. icurs = icurs + hdrbufsize
  1280. CASE DEFAULT
  1281. WRITE(mess,*)'quilt: bad tag: ',get_hdr_tag( bigbuf(icurs/itypesize) ),' icurs ',icurs/itypesize
  1282. CALL wrf_error_fatal( mess )
  1283. END SELECT
  1284. ENDDO !}
1285. ! Now, the I/O server "root" has finished handling all commands from the latest
  1286. ! call to retrieve_pieces_of_field().
  1287. IF (stored_write_record) THEN
  1288. ! If any fields have been stored in a globally-sized internal output buffer
  1289. ! (via a call to store_patch_in_outbuf()) then call write_outbuf() to write
  1290. ! them to disk now.
  1291. ! NOTE that the I/O server "root" will only have called
  1292. ! store_patch_in_outbuf() when handling write_field (int_field)
  1293. ! commands which only arrive AFTER an "iosync" command.
  1294. ! CALL start_timing
  1295. CALL write_outbuf ( handle(DataHandle), use_package(io_form(DataHandle)))
  1296. ! CALL end_timing( "quilt: call to write_outbuf" )
  1297. ENDIF
  1298. ! If one or more "open_for_write_commit" commands were encountered from the
  1299. ! latest call to retrieve_pieces_of_field() then call the package-specific
  1300. ! routine to do the commit.
  1301. IF (okay_to_commit(DataHandle)) THEN
  1302. SELECT CASE (use_package(io_form(DataHandle)))
  1303. #ifdef NETCDF
  1304. CASE ( IO_NETCDF )
  1305. CALL ext_ncd_inquire_filename( handle(DataHandle), fname, fstat, Status )
  1306. IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
  1307. CALL ext_ncd_open_for_write_commit(handle(DataHandle),Status)
  1308. okay_to_write(DataHandle) = .true.
  1309. ENDIF
  1310. #endif
  1311. #ifdef INTIO
  1312. CASE ( IO_INTIO )
  1313. CALL ext_int_inquire_filename( handle(DataHandle), fname, fstat, Status )
  1314. IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
  1315. CALL ext_int_open_for_write_commit(handle(DataHandle),Status)
  1316. okay_to_write(DataHandle) = .true.
  1317. ENDIF
  1318. #endif
  1319. #ifdef YYY
  1320. CASE ( IO_YYY )
  1321. CALL ext_yyy_inquire_filename( handle(DataHandle), fname, fstat, Status )
  1322. IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
  1323. CALL ext_yyy_open_for_write_commit(handle(DataHandle),Status)
  1324. okay_to_write(DataHandle) = .true.
  1325. ENDIF
  1326. #endif
  1327. #ifdef GRIB1
  1328. CASE ( IO_GRIB1 )
  1329. CALL ext_gr1_inquire_filename( handle(DataHandle), fname, fstat, Status )
  1330. IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
  1331. CALL ext_gr1_open_for_write_commit(handle(DataHandle),Status)
  1332. okay_to_write(DataHandle) = .true.
  1333. ENDIF
  1334. #endif
  1335. #ifdef GRIB2
  1336. CASE ( IO_GRIB2 )
  1337. CALL ext_gr2_inquire_filename( handle(DataHandle), fname, fstat, Status )
  1338. IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
  1339. CALL ext_gr2_open_for_write_commit(handle(DataHandle),Status)
  1340. okay_to_write(DataHandle) = .true.
  1341. ENDIF
  1342. #endif
  1343. CASE DEFAULT
  1344. Status = 0
  1345. END SELECT
  1346. okay_to_commit(DataHandle) = .false.
  1347. ENDIF
  1348. DEALLOCATE( bigbuf )
  1349. ENDIF
  1350. #endif
  1351. ! Retrieve header and all patches for the next field from the internal
  1352. ! buffers.
  1353. CALL retrieve_pieces_of_field ( obuf , VarName, obufsize, sz, retval )
  1354. ! Sum sizes of all headers and patches (data) for this field from all I/O
  1355. ! servers in this I/O server group onto the I/O server "root".
  1356. CALL mpi_x_reduce( sz, bigbufsize, 1, MPI_INTEGER,MPI_SUM, ntasks_local_group-1,mpi_comm_local, ierr )
  1357. ! Then, return to the top of the loop to collect headers and data from all
  1358. ! I/O servers in this I/O server group onto the I/O server "root" and handle
  1359. ! the next batch of commands.
  1360. END DO !}
  1361. DEALLOCATE( obuf )
  1362. ! flush output files if needed
  1363. IF (stored_write_record) THEN
  1364. ! CALL start_timing()
  1365. SELECT CASE ( use_package(io_form) )
  1366. #ifdef NETCDF
  1367. CASE ( IO_NETCDF )
  1368. CALL ext_ncd_iosync( handle(DataHandle), Status )
  1369. #endif
  1370. #ifdef XXX
  1371. CASE ( IO_XXX )
  1372. CALL ext_xxx_iosync( handle(DataHandle), Status )
  1373. #endif
  1374. #ifdef YYY
  1375. CASE ( IO_YYY )
  1376. CALL ext_yyy_iosync( handle(DataHandle), Status )
  1377. #endif
  1378. #ifdef ZZZ
  1379. CASE ( IO_ZZZ )
  1380. CALL ext_zzz_iosync( handle(DataHandle), Status )
  1381. #endif
  1382. #ifdef GRIB1
  1383. CASE ( IO_GRIB1 )
  1384. CALL ext_gr1_iosync( handle(DataHandle), Status )
  1385. #endif
  1386. #ifdef GRIB2
  1387. CASE ( IO_GRIB2 )
  1388. CALL ext_gr2_iosync( handle(DataHandle), Status )
  1389. #endif
  1390. #ifdef INTIO
  1391. CASE ( IO_INTIO )
  1392. CALL ext_int_iosync( handle(DataHandle), Status )
  1393. #endif
  1394. CASE DEFAULT
  1395. Status = 0
  1396. END SELECT
  1397. !CALL end_timing( "quilt: flush" )
  1398. ENDIF
  1399. END DO ! }
  1400. END SUBROUTINE quilt
  1401. SUBROUTINE quilt_pnc
  1402. !<DESCRIPTION>
  1403. ! Same as quilt() routine except that _all_ of the IO servers that call it
  1404. ! actually write data to disk using pNetCDF. This version is only used when
  1405. ! the code is compiled with PNETCDF_QUILT defined.
  1406. !</DESCRIPTION>
  1407. USE module_state_description
  1408. USE module_quilt_outbuf_ops
  1409. IMPLICIT NONE
  1410. INCLUDE 'mpif.h'
  1411. #include "intio_tags.h"
  1412. #include "wrf_io_flags.h"
  1413. INTEGER itag, ninbuf, ntasks_io_group, ntasks_local_group, mytask_local, ierr
  1414. INTEGER istat
  1415. INTEGER mytask_io_group
  1416. INTEGER :: nout_set = 0
  1417. INTEGER :: obufsize, bigbufsize, chunksize, sz
  1418. REAL, DIMENSION(1) :: dummy
  1419. INTEGER, ALLOCATABLE, DIMENSION(:) :: obuf, bigbuf
  1420. REAL, ALLOCATABLE, DIMENSION(:) :: RDATA
  1421. INTEGER, ALLOCATABLE, DIMENSION(:) :: IDATA
  1422. CHARACTER (LEN=512) :: CDATA
  1423. CHARACTER (LEN=80) :: fname
  1424. INTEGER icurs, hdrbufsize, itypesize, ftypesize, rtypesize, Status, fstat, io_form_arg
  1425. INTEGER :: DataHandle, FieldType, Comm, IOComm, DomainDesc, code, Count
  1426. INTEGER, DIMENSION(3) :: DomainStart , DomainEnd , MemoryStart , MemoryEnd , PatchStart , PatchEnd
  1427. INTEGER :: dummybuf(1)
  1428. INTEGER :: num_noops, num_commit_messages, num_field_training_msgs, hdr_tag
  1429. CHARACTER (len=256) :: DateStr , Element, VarName, MemoryOrder , Stagger , DimNames(3), FileName, SysDepInfo, mess
  1430. INTEGER, EXTERNAL :: use_package
  1431. LOGICAL :: stored_write_record, retval, written_record
  1432. INTEGER iii, jjj, vid, CC, DD
  1433. ! logical okay_to_w
  1434. ! character*120 sysline
  1435. ! Call ext_pkg_ioinit() routines to initialize I/O packages.
  1436. SysDepInfo = " "
  1437. #ifdef NETCDF
  1438. CALL ext_ncd_ioinit( SysDepInfo, ierr)
  1439. #endif
  1440. #ifdef PNETCDF_QUILT
  1441. CALL ext_pnc_ioinit( SysDepInfo, ierr)
  1442. #endif
  1443. #ifdef INTIO
  1444. CALL ext_int_ioinit( SysDepInfo, ierr )
  1445. #endif
  1446. #ifdef XXX
  1447. CALL ext_xxx_ioinit( SysDepInfo, ierr)
  1448. #endif
  1449. #ifdef YYY
  1450. CALL ext_yyy_ioinit( SysDepInfo, ierr)
  1451. #endif
  1452. #ifdef ZZZ
  1453. CALL ext_zzz_ioinit( SysDepInfo, ierr)
  1454. #endif
  1455. #ifdef GRIB1
  1456. CALL ext_gr1_ioinit( SysDepInfo, ierr)
  1457. #endif
  1458. #ifdef GRIB2
  1459. CALL ext_gr2_ioinit( SysDepInfo, ierr)
  1460. #endif
  1461. okay_to_commit = .false.
  1462. stored_write_record = .false.
  1463. ninbuf = 0
  1464. ! get info. about the I/O server group that this I/O server task
  1465. ! belongs to
  1466. CALL mpi_x_comm_size( mpi_comm_io_groups(1), ntasks_io_group, ierr )
  1467. CALL MPI_COMM_RANK( mpi_comm_io_groups(1), mytask_io_group, ierr )
  1468. CALL mpi_x_comm_size( mpi_comm_local, ntasks_local_group, ierr )
  1469. CALL MPI_COMM_RANK( mpi_comm_local, mytask_local, ierr )
  1470. CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
  1471. IF ( itypesize <= 0 ) THEN
1472. CALL wrf_error_fatal("frame/module_io_quilt.F: quilt_pnc: type size <= 0 invalid")
  1473. ENDIF
1474. ! Work out whether this i/o server processor has one fewer associated compute proc than
1475. ! the most any processor has. This can happen when the number of i/o tasks does not evenly
1476. ! divide the number of compute tasks. It is needed to keep the i/o tasks synchronized on the
1477. ! same message when they start communicating to stitch together an output.
1478. !
1479. ! Compute processes associated with this task:
1480. CC = ntasks_io_group - 1
1481. ! Number of compute tasks per I/O task (less remainder)
1482. DD = ncompute_tasks / ntasks_local_group
1483. !
1484. ! CC-DD is 1 on servers with the maximum number of compute clients,
1485. ! and 0 on servers with one fewer than the maximum.
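! Worked example (illustrative counts): with ncompute_tasks = 10 and
! ntasks_local_group = 3 servers, DD = 10/3 = 3; the servers get 4, 3 and
! 3 clients, so CC = 4 on the first server and 3 on the others, making
! CC-DD equal to 1 and 0 respectively.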
  1486. ! infinite loop until shutdown message received
  1487. ! This is the main request-handling loop. I/O quilt servers stay in this loop
  1488. ! until the model run ends.
  1489. !okay_to_w = .false.
  1490. DO WHILE (.TRUE.) ! {
  1491. !<DESCRIPTION>
  1492. ! Each I/O server receives requests from its compute tasks. Each request
  1493. ! is contained in a data header (see module_internal_header_util.F for
  1494. ! detailed descriptions of data headers).
  1495. ! Each request is sent in two phases. First, sizes of all messages that
  1496. ! will be sent from the compute tasks to this I/O server are summed on the
  1497. ! I/O server via MPI_reduce(). The I/O server then allocates buffer "obuf"
  1498. ! and receives concatenated messages from the compute tasks in it via the
  1499. ! call to collect_on_comm(). Note that "sizes" are generally expressed in
  1500. ! *bytes* in this code so conversion to "count" (number of Fortran words) is
  1501. ! required for Fortran indexing and MPI calls.
  1502. !</DESCRIPTION>
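! The two-phase exchange in outline (a condensed sketch of the calls made
! a few lines below, not additional executable code):
!   CALL mpi_x_reduce( reduced_dummy, reduced, 2, MPI_INTEGER, MPI_SUM, &
!                      mytask_io_group, mpi_comm_io_groups(1), ierr )  ! phase 1: sizes
!   ALLOCATE( obuf( (obufsize+1)/itypesize ) )                         ! bytes -> words
!   CALL collect_on_comm_debug( __FILE__, __LINE__, mpi_comm_io_groups(1), &
!                               onebyte, dummy, 0, obuf, obufsize )    ! phase 2: payload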
  1503. ! wait for info from compute tasks in the I/O group that we're ready to rock
  1504. ! obufsize will contain number of *bytes*
  1505. !CALL start_timing
  1506. ! first element of reduced is obufsize, second is DataHandle
  1507. ! if needed (currently needed only for ioclose).
  1508. reduced_dummy = 0
  1509. CALL mpi_x_reduce( reduced_dummy, reduced, 2, MPI_INTEGER, MPI_SUM, mytask_io_group, mpi_comm_io_groups(1), ierr )
  1510. obufsize = reduced(1)
  1511. !CALL end_timing("MPI_Reduce at top of forever loop")
  1512. !JMDEBUGwrite(0,*)'obufsize = ',obufsize
  1513. ! Negative obufsize will trigger I/O server exit.
  1514. IF ( obufsize .LT. 0 ) THEN
  1515. IF ( obufsize .EQ. -100 ) THEN ! magic number
  1516. #ifdef NETCDF
  1517. CALL ext_ncd_ioexit( Status )
  1518. #endif
  1519. #ifdef PNETCDF_QUILT
  1520. CALL ext_pnc_ioexit( Status )
  1521. #endif
  1522. #ifdef INTIO
  1523. CALL ext_int_ioexit( Status )
  1524. #endif
  1525. #ifdef XXX
  1526. CALL ext_xxx_ioexit( Status )
  1527. #endif
  1528. #ifdef YYY
  1529. CALL ext_yyy_ioexit( Status )
  1530. #endif
  1531. #ifdef ZZZ
  1532. CALL ext_zzz_ioexit( Status )
  1533. #endif
  1534. #ifdef GRIB1
  1535. CALL ext_gr1_ioexit( Status )
  1536. #endif
  1537. #ifdef GRIB2
  1538. CALL ext_gr2_ioexit( Status )
  1539. #endif
  1540. CALL wrf_message ( 'I/O QUILT SERVERS DONE' )
  1541. CALL mpi_finalize(ierr)
  1542. STOP
  1543. ELSE
  1544. WRITE(mess,*)'Possible 32-bit overflow on output server. Try larger nio_tasks_per_group in namelist.'
  1545. CALL wrf_error_fatal(mess)
  1546. ENDIF
  1547. ENDIF
  1548. ! CALL start_timing
  1549. ! Obufsize of zero signals a close
  1550. ! Allocate buffer obuf to be big enough for the data the compute tasks
  1551. ! will send. Note: obuf is size in *bytes* so we need to pare this
  1552. ! down, since the buffer is INTEGER.
  1553. IF ( obufsize .GT. 0 ) THEN
  1554. ALLOCATE( obuf( (obufsize+1)/itypesize ) )
  1555. ! let's roll; get the data from the compute procs and put in obuf
  1556. CALL collect_on_comm_debug(__FILE__,__LINE__, mpi_comm_io_groups(1), &
  1557. onebyte, &
  1558. dummy, 0, &
  1559. obuf, obufsize )
  1560. ! CALL end_timing( "quilt on server: collecting data from compute procs" )
  1561. ELSE
1562. ! Necessarily, the compute processes send the ioclose signal,
1563. ! if there is one, after the iosync, which means they
1564. ! would stall on the ioclose message waiting for the quilt
1565. ! processes if it were handled the way other messages are collected,
1566. ! using collect_on_comm. This avoids that, but we need
1567. ! a special signal (obufsize zero) and the DataHandle
1568. ! to be closed. That handle is sent as the second
1569. ! word of the io_close message received by the MPI_Reduce above.
1570. ! Then a header representing the ioclose message is constructed
1571. ! here and handled below as if it were received from the
1572. ! compute processes. The clients (compute processes) must be
1573. ! careful to send this correctly (one compute process sends the actual
1574. ! handle and everyone else sends a zero, so the result sums to
1575. ! the value of the handle).
  1576. !
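! For example (illustrative numbers): with four compute clients closing
! DataHandle 7, the compute-group master contributes 7 and the other three
! contribute 0, so the MPI_SUM reduction above delivers reduced(2) = 7.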
  1577. ALLOCATE( obuf( 4096 ) )
  1578. ! DataHandle is provided as second element of reduced
  1579. CALL int_gen_handle_header( obuf, obufsize, itypesize, &
  1580. reduced(2) , int_ioclose )
  1581. ENDIF
  1582. !write(0,*)'calling init_store_piece_of_field'
  1583. ! Now all messages received from the compute clients are stored in
  1584. ! obuf. Scan through obuf and extract headers and field data and store in
  1585. ! internal buffers. The scan is done twice, first to determine sizes of
  1586. ! internal buffers required for storage of headers and fields and second to
  1587. ! actually store the headers and fields. This bit of code does not do any
  1588. ! "quilting" (assembly of patches into full domains). For each field, it
  1589. ! simply writes all received patches for the field to disk.
  1590. ! ARPDBG we can vastly reduce the number of writes to disk by stitching
  1591. ! any contiguous patches together first. Has implications for synchronisation
  1592. ! of pNetCDF calls though.
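! The two-pass pattern in outline (these are the actual routines invoked
! below, shown here without their full argument lists):
!   CALL init_store_piece_of_field              ! reset internal buffers
!   ! pass 1: scan obuf, add_to_bufsize_for_field( VarName, nbytes )
!   ! pass 2: scan obuf, store_piece_of_field( obuf(...), VarName, nbytes )
!   CALL init_retrieve_pieces_of_field
!   ! then:   retrieve_pieces_of_field( obuf, VarName, obufsize, sz, retval )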
  1593. CALL init_store_piece_of_field
  1594. CALL mpi_type_size ( MPI_INTEGER , itypesize , ierr )
  1595. !write(0,*)'mpi_type_size returns ', itypesize
  1596. ! Scan obuf the first time to calculate the size of the buffer required for
  1597. ! each field. Calls to add_to_bufsize_for_field() accumulate sizes.
  1598. vid = 0
  1599. icurs = itypesize
  1600. num_noops = 0
  1601. num_commit_messages = 0
  1602. num_field_training_msgs = 0
  1603. DO WHILE ( icurs .lt. obufsize ) ! {
  1604. hdr_tag = get_hdr_tag( obuf ( icurs / itypesize ) )
  1605. SELECT CASE ( hdr_tag )
  1606. CASE ( int_field )
  1607. CALL int_get_write_field_header ( obuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize, &
  1608. DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
  1609. DomainDesc , MemoryOrder , Stagger , DimNames , &
  1610. DomainStart , DomainEnd , &
  1611. MemoryStart , MemoryEnd , &
  1612. PatchStart , PatchEnd )
  1613. chunksize = (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
  1614. (PatchEnd(3)-PatchStart(3)+1)*ftypesize
  1615. IF ( DomainDesc .EQ. 333933 ) THEN ! Training write, only one per group of tasks
  1616. IF ( num_field_training_msgs .EQ. 0 ) THEN
  1617. call add_to_bufsize_for_field( VarName, hdrbufsize )
  1618. !write(0,*) 'X-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
  1619. ENDIF
  1620. num_field_training_msgs = num_field_training_msgs + 1
  1621. ELSE
  1622. call add_to_bufsize_for_field( VarName, hdrbufsize )
  1623. !write(0,*) 'X-2a', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
  1624. ENDIF
  1625. icurs = icurs + hdrbufsize
  1626. !write(0,*) 'X-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
  1627. ! If this is a real write (i.e. not a training write), accumulate
  1628. ! buffersize for this field.
  1629. IF ( DomainDesc .NE. 333933 ) THEN ! magic number
  1630. !write(0,*) 'X-1a', chunksize, TRIM(VarName)
  1631. call add_to_bufsize_for_field( VarName, chunksize )
  1632. icurs = icurs + chunksize
  1633. ENDIF
  1634. CASE ( int_open_for_write_commit ) ! only one per group of tasks
  1635. hdrbufsize = obuf(icurs/itypesize)
  1636. IF (num_commit_messages.EQ.0) THEN
  1637. call add_to_bufsize_for_field( 'COMMIT', hdrbufsize )
  1638. ENDIF
  1639. num_commit_messages = num_commit_messages + 1
  1640. icurs = icurs + hdrbufsize
  1641. CASE DEFAULT
  1642. hdrbufsize = obuf(icurs/itypesize)
1643. ! This logic, and the matching logic in the loop below, is used to decide
1644. ! whether noop records sent by the compute processes are allowed to go
1645. ! through. The purpose is to make sure that the communications between this
1646. ! server and the other servers in this quilt group stay synchronized in
1647. ! the collection loop below, even when the servers are serving different
1648. ! numbers of clients. Here are some conditions:
1649. !
1650. ! 1. The number of compute clients served will not differ by more than 1.
1651. ! 2. The servers with +1 number of compute clients begin with task 0
1652. ! of mpi_comm_local, the communicator shared by this group of servers.
1653. !
1654. ! 3. For each collective field or metadata output from the compute tasks,
1655. ! there will be one record sent to the associated i/o server task. The
1656. ! i/o server task collects these records and stores them contiguously
1657. ! in a buffer (obuf) using collect_on_comm above. Thus, obuf on this
1658. ! server task will contain one record from each associated compute
1659. ! task, in order.
1660. !
1661. ! 4. In the case of replicated output from the compute tasks
1662. ! (e.g. put_dom_ti records and control records like
1663. ! open_for_write_commit type records), only compute tasks for which
1664. ! (compute_group_master == .TRUE.) send the record. The other compute
1665. ! tasks send noop records. This is done so that each server task
1666. ! receives exactly one record plus noops from the other compute tasks.
1667. !
1668. ! 5. The logic below does not allow any noop records through, since each IO
1669. ! server task now receives exactly one valid record (from the 'compute-group
1670. ! master') when doing replicated output.
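! For example (illustrative): with three compute clients emitting one
! replicated put_dom_ti record, obuf holds [record, noop, noop]; only the
! record is buffered, under a generated name such as '00000' produced by
! the I5.5 write of vid below.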
  1671. IF (hdr_tag.NE.int_noop) THEN
  1672. write(VarName,'(I5.5)')vid
  1673. !write(0,*) 'X-2', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
  1674. call add_to_bufsize_for_field( VarName, hdrbufsize )
  1675. vid = vid+1
  1676. ENDIF
  1677. IF ( hdr_tag .EQ. int_noop ) num_noops = num_noops + 1
  1678. icurs = icurs + hdrbufsize
  1679. END SELECT
  1680. ENDDO ! }
  1681. ! Store the headers and field data in internal buffers. The first call to
  1682. ! store_piece_of_field() allocates internal buffers using sizes computed by
  1683. ! calls to add_to_bufsize_for_field().
  1684. vid = 0
  1685. icurs = itypesize
  1686. num_noops = 0
  1687. num_commit_messages = 0
  1688. num_field_training_msgs = 0
  1689. DO WHILE ( icurs .lt. obufsize ) !{
  1690. !write(0,*) 'A icurs ', icurs, ' obufsize ', obufsize
  1691. hdr_tag = get_hdr_tag( obuf ( icurs / itypesize ) )
  1692. SELECT CASE ( hdr_tag )
  1693. CASE ( int_field )
  1694. CALL int_get_write_field_header ( obuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize, &
  1695. DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
  1696. DomainDesc , MemoryOrder , Stagger , DimNames , &
  1697. DomainStart , DomainEnd , &
  1698. MemoryStart , MemoryEnd , &
  1699. PatchStart , PatchEnd )
  1700. chunksize = (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
  1701. (PatchEnd(3)-PatchStart(3)+1)*ftypesize
  1702. IF ( DomainDesc .EQ. 333933 ) THEN ! Training write, only one per group of tasks
  1703. IF ( num_field_training_msgs .EQ. 0 ) THEN
  1704. call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
  1705. !write(0,*) 'A-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
  1706. ENDIF
  1707. num_field_training_msgs = num_field_training_msgs + 1
  1708. ELSE
  1709. call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
  1710. !write(0,*) 'A-2a', icurs, hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
  1711. ENDIF
  1712. icurs = icurs + hdrbufsize
  1713. ! If this is a real write (i.e. not a training write), store
  1714. ! this piece of this field.
  1715. IF ( DomainDesc .NE. 333933 ) THEN ! magic number
  1716. call store_piece_of_field( obuf(icurs/itypesize), VarName, chunksize )
  1717. icurs = icurs + chunksize
  1718. !write(0,*) 'A-1a',TRIM(VarName),' icurs ',icurs,PatchStart(1:3),PatchEnd(1:3)
  1719. ENDIF
  1720. CASE ( int_open_for_write_commit ) ! only one per group of tasks
  1721. hdrbufsize = obuf(icurs/itypesize)
  1722. IF (num_commit_messages.EQ.0) THEN
  1723. call store_piece_of_field( obuf(icurs/itypesize), 'COMMIT', hdrbufsize )
  1724. ENDIF
  1725. num_commit_messages = num_commit_messages + 1
  1726. icurs = icurs + hdrbufsize
  1727. CASE DEFAULT
  1728. hdrbufsize = obuf(icurs/itypesize)
  1729. IF (hdr_tag.NE.int_noop) THEN
  1730. write(VarName,'(I5.5)')vid
  1731. !write(0,*) 'A-2b', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
  1732. call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
  1733. vid = vid+1
  1734. ENDIF
  1735. IF ( hdr_tag .EQ. int_noop ) num_noops = num_noops + 1
  1736. icurs = icurs + hdrbufsize
  1737. END SELECT
  1738. ENDDO !} while(icurs < obufsize)
  1739. ! Now, for each field, retrieve headers and patches (data) from the internal
  1740. ! buffers
  1741. CALL init_retrieve_pieces_of_field
  1742. ! Retrieve header and all patches for the first field from the internal
  1743. ! buffers.
  1744. CALL retrieve_pieces_of_field ( obuf , VarName, obufsize, sz, retval )
  1745. written_record = .false.
  1746. ! Loop until there are no more fields to retrieve from the internal buffers.
  1747. DO WHILE ( retval ) !{
  1748. ! This I/O server now handles the collected requests from the compute
  1749. ! tasks it serves
  1750. icurs = itypesize ! icurs is a byte counter, but buffer is integer
  1751. stored_write_record = .false.
  1752. ! ALL I/O servers in this group loop over the collected requests they have
  1753. ! received.
  1754. DO WHILE ( icurs .lt. sz)! bigbufsize ) !{
  1755. ! The I/O server gets the request out of the next header and
  1756. ! handles it by, in most cases, calling the appropriate external I/O package
  1757. ! interface.
  1758. !write(0,*)__FILE__,__LINE__,'get_hdr_tag ',icurs,sz,get_hdr_tag( obuf(icurs/itypesize) )
  1759. SELECT CASE ( get_hdr_tag( obuf(icurs/itypesize) ) )
  1760. ! The I/O server handles the "noop" (do nothing) request. This is
  1761. ! actually quite easy. "Noop" requests exist to help avoid race conditions.
  1762. CASE ( int_noop )
  1763. CALL int_get_noop_header( obuf(icurs/itypesize), &
  1764. hdrbufsize, itypesize )
  1765. icurs = icurs + hdrbufsize
  1766. ! The I/O server "root" handles the "put_dom_td_real" request.
  1767. CASE ( int_dom_td_real )
  1768. CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
1769. ALLOCATE( RData( obuf(icurs/itypesize + 4 ) ) ) ! the 5th word of this record (offset +4) holds the count of data items ; defined in collect_on_comm.c
  1770. CALL int_get_td_header( obuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
  1771. DataHandle, DateStr, Element, RData, Count, code )
  1772. icurs = icurs + hdrbufsize
  1773. SELECT CASE (use_package(io_form(DataHandle)))
  1774. #ifdef PNETCDF_QUILT
  1775. CASE (IO_PNETCDF )
  1776. CALL ext_pnc_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
  1777. #endif
  1778. #ifdef NETCDF
  1779. CASE ( IO_NETCDF )
  1780. CALL ext_ncd_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
  1781. #endif
  1782. #ifdef INTIO
  1783. CASE ( IO_INTIO )
  1784. CALL ext_int_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
  1785. #endif
  1786. #ifdef YYY
  1787. CASE ( IO_YYY )
  1788. CALL ext_yyy_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
  1789. #endif
  1790. #ifdef GRIB1
  1791. CASE ( IO_GRIB1 )
  1792. CALL ext_gr1_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
  1793. #endif
  1794. #ifdef GRIB2
  1795. CASE ( IO_GRIB2 )
  1796. CALL ext_gr2_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
  1797. #endif
  1798. CASE DEFAULT
  1799. Status = 0
  1800. END SELECT
  1801. DEALLOCATE( RData )
  1802. ! Every I/O server handles the "put_dom_ti_real" request.
  1803. CASE ( int_dom_ti_real )
  1804. CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
1805. ALLOCATE( RData( obuf(icurs/itypesize + 4 ) ) ) ! the 5th word of this record (offset +4) holds the count of data items ; defined in collect_on_comm.c
  1806. CALL int_get_ti_header( obuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
  1807. DataHandle, Element, RData, Count, code )
  1808. icurs = icurs + hdrbufsize
  1809. SELECT CASE (use_package(io_form(DataHandle)))
  1810. #ifdef PNETCDF_QUILT
  1811. CASE (IO_PNETCDF )
  1812. CALL ext_pnc_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
  1813. #endif
  1814. #ifdef NETCDF
  1815. CASE ( IO_NETCDF )
  1816. CALL ext_ncd_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
  1817. #endif
  1818. #ifdef INTIO
  1819. CASE ( IO_INTIO )
  1820. CALL ext_int_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
  1821. #endif
  1822. #ifdef YYY
  1823. CASE ( IO_YYY )
  1824. CALL ext_yyy_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
  1825. #endif
  1826. #ifdef GRIB1
  1827. CASE ( IO_GRIB1 )
  1828. CALL ext_gr1_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
  1829. #endif
  1830. #ifdef GRIB2
  1831. CASE ( IO_GRIB2 )
  1832. CALL ext_gr2_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
  1833. #endif
  1834. CASE DEFAULT
  1835. Status = 0
  1836. END SELECT
  1837. DEALLOCATE( RData )
  1838. ! Every I/O server handles the "put_dom_td_integer" request.
  1839. CASE ( int_dom_td_integer )
  1840. CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
1841. ALLOCATE( IData( obuf(icurs/itypesize + 4 ) ) ) ! the 5th word of this record (offset +4) holds the count of data items ; defined in collect_on_comm.c
  1842. CALL int_get_td_header( obuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
  1843. DataHandle, DateStr, Element, IData, Count, code )
  1844. icurs = icurs + hdrbufsize
  1845. SELECT CASE (use_package(io_form(DataHandle)))
  1846. #ifdef PNETCDF_QUILT
  1847. CASE (IO_PNETCDF )
  1848. CALL ext_pnc_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
  1849. #endif
  1850. #ifdef NETCDF
  1851. CASE ( IO_NETCDF )
  1852. CALL ext_ncd_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
  1853. #endif
  1854. #ifdef INTIO
  1855. CASE ( IO_INTIO )
  1856. CALL ext_int_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
  1857. #endif
  1858. #ifdef YYY
  1859. CASE ( IO_YYY )
  1860. CALL ext_yyy_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
  1861. #endif
  1862. #ifdef GRIB1
  1863. CASE ( IO_GRIB1 )
  1864. CALL ext_gr1_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
  1865. #endif
  1866. #ifdef GRIB2
  1867. CASE ( IO_GRIB2 )
  1868. CALL ext_gr2_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
  1869. #endif
  1870. CASE DEFAULT
  1871. Status = 0
  1872. END SELECT
  1873. DEALLOCATE( IData )
  1874. ! Every I/O server handles the "put_dom_ti_integer" request.
  1875. CASE ( int_dom_ti_integer )
  1876. CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
1877. ALLOCATE( IData( obuf(icurs/itypesize + 4 ) ) ) ! the 5th word of this record (offset +4) holds the count of data items ; defined in collect_on_comm.c
  1878. CALL int_get_ti_header( obuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
  1879. DataHandle, Element, IData, Count, code )
  1880. icurs = icurs + hdrbufsize
  1881. SELECT CASE (use_package(io_form(DataHandle)))
  1882. #ifdef PNETCDF_QUILT
  1883. CASE (IO_PNETCDF )
  1884. CALL ext_pnc_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
  1885. #endif
  1886. #ifdef NETCDF
  1887. CASE ( IO_NETCDF )
  1888. CALL ext_ncd_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
  1889. #endif
  1890. #ifdef INTIO
  1891. CASE ( IO_INTIO )
  1892. CALL ext_int_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
  1893. #endif
  1894. #ifdef YYY
  1895. CASE ( IO_YYY )
  1896. CALL ext_yyy_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
  1897. #endif
  1898. #ifdef GRIB1
  1899. CASE ( IO_GRIB1 )
  1900. CALL ext_gr1_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
  1901. #endif
  1902. #ifdef GRIB2
  1903. CASE ( IO_GRIB2 )
  1904. CALL ext_gr2_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
  1905. #endif
  1906. CASE DEFAULT
  1907. Status = 0
  1908. END SELECT
  1909. DEALLOCATE( IData)
  1910. ! Every I/O server handles the "set_time" request.
  1911. CASE ( int_set_time )
  1912. CALL int_get_ti_header_char( obuf(icurs/itypesize), hdrbufsize, itypesize, &
  1913. DataHandle, Element, VarName, CData, code )
  1914. SELECT CASE (use_package(io_form(DataHandle)))
  1915. #ifdef INTIO
  1916. CASE ( IO_INTIO )
  1917. CALL ext_int_set_time ( handle(DataHandle), TRIM(CData), Status)
  1918. #endif
  1919. CASE DEFAULT
  1920. Status = 0
  1921. END SELECT
  1922. icurs = icurs + hdrbufsize
  1923. ! Every I/O server handles the "put_dom_ti_char" request.
  1924. CASE ( int_dom_ti_char )
  1925. CALL int_get_ti_header_char( obuf(icurs/itypesize), hdrbufsize, itypesize, &
  1926. DataHandle, Element, VarName, CData, code )
  1927. SELECT CASE (use_package(io_form(DataHandle)))
  1928. #ifdef PNETCDF_QUILT
  1929. CASE (IO_PNETCDF )
  1930. CALL ext_pnc_put_dom_ti_char ( handle(DataHandle), TRIM(Element), Trim(CData), Status)
  1931. #endif
  1932. #ifdef NETCDF
  1933. CASE ( IO_NETCDF )
  1934. CALL ext_ncd_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
  1935. #endif
  1936. #ifdef INTIO
  1937. CASE ( IO_INTIO )
  1938. CALL ext_int_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
  1939. #endif
  1940. #ifdef YYY
  1941. CASE ( IO_YYY )
  1942. CALL ext_yyy_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
  1943. #endif
  1944. #ifdef GRIB1
  1945. CASE ( IO_GRIB1 )
  1946. CALL ext_gr1_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
  1947. #endif
  1948. #ifdef GRIB2
  1949. CASE ( IO_GRIB2 )
  1950. CALL ext_gr2_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
  1951. #endif
  1952. CASE DEFAULT
  1953. Status = 0
  1954. END SELECT
  1955. icurs = icurs + hdrbufsize
  1956. ! Every I/O server handles the "put_var_ti_char" request.
  1957. CASE ( int_var_ti_char )
  1958. CALL int_get_ti_header_char( obuf(icurs/itypesize), hdrbufsize, itypesize, &
  1959. DataHandle, Element, VarName, CData, code )
  1960. SELECT CASE (use_package(io_form(DataHandle)))
  1961. #ifdef PNETCDF_QUILT
  1962. CASE (IO_PNETCDF )
  1963. CALL ext_pnc_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status )
  1964. #endif
  1965. #ifdef NETCDF
  1966. CASE ( IO_NETCDF )
  1967. CALL ext_ncd_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
  1968. #endif
  1969. #ifdef INTIO
  1970. CASE ( IO_INTIO )
  1971. CALL ext_int_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
  1972. #endif
  1973. #ifdef YYY
  1974. CASE ( IO_YYY )
  1975. CALL ext_yyy_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
  1976. #endif
  1977. #ifdef GRIB1
  1978. CASE ( IO_GRIB1 )
  1979. CALL ext_gr1_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
  1980. #endif
  1981. #ifdef GRIB2
  1982. CASE ( IO_GRIB2 )
  1983. CALL ext_gr2_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
  1984. #endif
  1985. CASE DEFAULT
  1986. Status = 0
  1987. END SELECT
  1988. icurs = icurs + hdrbufsize
  1989. CASE ( int_ioexit )
  1990. ! ioexit is now handled by sending negative message length to server
  1991. CALL wrf_error_fatal( &
  1992. "quilt: should have handled int_ioexit already")
  1993. ! Every I/O server handles the "ioclose" request.
  1994. CASE ( int_ioclose )
  1995. CALL int_get_handle_header( obuf(icurs/itypesize), hdrbufsize, itypesize, &
  1996. DataHandle , code )
  1997. icurs = icurs + hdrbufsize
  1998. IF ( DataHandle .GE. 1 ) THEN
  1999. SELECT CASE (use_package(io_form(DataHandle)))
  2000. #ifdef PNETCDF_QUILT
  2001. CASE ( IO_PNETCDF )
  2002. CALL ext_pnc_inquire_filename( handle(DataHandle), fname, fstat, Status )
  2003. IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
  2004. CALL ext_pnc_ioclose(handle(DataHandle),Status)
  2005. ENDIF
  2006. #endif
  2007. #ifdef NETCDF
  2008. CASE ( IO_NETCDF )
  2009. CALL ext_ncd_inquire_filename( handle(DataHandle), fname, fstat, Status )
  2010. IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
  2011. CALL ext_ncd_ioclose(handle(DataHandle),Status)
  2012. ENDIF
  2013. #endif
  2014. #ifdef INTIO
  2015. CASE ( IO_INTIO )
  2016. CALL ext_int_inquire_filename( handle(DataHandle), fname, fstat, Status )
  2017. IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
  2018. CALL ext_int_ioclose(handle(DataHandle),Status)
  2019. ENDIF
  2020. #endif
  2021. #ifdef YYY
  2022. CASE ( IO_YYY )
  2023. CALL ext_yyy_inquire_filename( handle(DataHandle), fname, fstat, Status )
  2024. IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
  2025. CALL ext_yyy_ioclose(handle(DataHandle),Status)
  2026. ENDIF
  2027. #endif
  2028. #ifdef GRIB1
  2029. CASE ( IO_GRIB1 )
  2030. CALL ext_gr1_inquire_filename( handle(DataHandle), fname, fstat, Status )
  2031. IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
  2032. CALL ext_gr1_ioclose(handle(DataHandle),Status)
  2033. ENDIF
  2034. #endif
  2035. #ifdef GRIB2
  2036. CASE ( IO_GRIB2 )
  2037. CALL ext_gr2_inquire_filename( handle(DataHandle), fname, fstat, Status )
  2038. IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
  2039. CALL ext_gr2_ioclose(handle(DataHandle),Status)
  2040. ENDIF
  2041. #endif
  2042. CASE DEFAULT
  2043. Status = 0
  2044. END SELECT
  2045. ENDIF
  2046. ! Every I/O server handles the "open_for_write_begin" request.
  2047. CASE ( int_open_for_write_begin )
  2048. CALL int_get_ofwb_header( obuf(icurs/itypesize), hdrbufsize, itypesize, &
  2049. FileName,SysDepInfo,io_form_arg,DataHandle )
  2050. !write(0,*)' int_open_for_write_begin itypesize ',itypesize,' itypesize ',itypesize
  2051. !write(0,*)' int_open_for_write_begin icurs ', icurs, hdrbufsize
  2052. !JMDEBUGwrite(0,*)' int_open_for_write_begin FileName ',TRIM(FileName) , ' DataHandle ', DataHandle
  2053. !write(0,*)' int_open_for_write_begin SysDepInfo ',TRIM(SysDepInfo)
  2054. icurs = icurs + hdrbufsize
  2055. !write(0,*)' int_open_for_write_begin new icurs,tag,size ', icurs, get_hdr_tag( bigbuf(icurs/itypesize) ),get_hdr_rec_size( bigbuf(icurs/itypesize) )
  2056. io_form(DataHandle) = io_form_arg
  2057. SELECT CASE (use_package(io_form(DataHandle)))
  2058. #ifdef PNETCDF_QUILT
  2059. CASE (IO_PNETCDF )
  2060. CALL ext_pnc_open_for_write_begin(FileName,mpi_comm_local,mpi_comm_local,SysDepInfo,handle(DataHandle),Status )
  2061. #endif
  2062. #ifdef NETCDF
  2063. CASE ( IO_NETCDF )
  2064. CALL ext_ncd_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
  2065. !write(0,*)'ext_ncd_open_for_write_begin ',Trim(FileName),DataHandle,handle(DataHandle),Status
  2066. #endif
  2067. #ifdef INTIO
  2068. CASE ( IO_INTIO )
  2069. CALL ext_int_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
  2070. #endif
  2071. #ifdef YYY
  2072. CASE ( IO_YYY )
  2073. CALL ext_yyy_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
  2074. #endif
  2075. #ifdef GRIB1
  2076. CASE ( IO_GRIB1 )
  2077. CALL ext_gr1_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
  2078. #endif
  2079. #ifdef GRIB2
  2080. CASE ( IO_GRIB2 )
  2081. CALL ext_gr2_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
  2082. #endif
  2083. CASE DEFAULT
  2084. Status = 0
  2085. END SELECT
  2086. okay_to_write(DataHandle) = .false.
  2087. ! Every I/O server handles the "open_for_write_commit" request.
  2088. ! In this case, the "okay_to_commit" is simply set to .true. so "write_field"
  2089. ! (int_field) requests will initiate writes to disk. Actual commit will be done after
  2090. ! all requests in this batch have been handled.
  2091. CASE ( int_open_for_write_commit )
  2092. CALL int_get_handle_header( obuf(icurs/itypesize), hdrbufsize, itypesize, &
  2093. DataHandle , code )
  2094. icurs = icurs + hdrbufsize
  2095. okay_to_commit(DataHandle) = .true.
  2096. ! Every I/O server handles the "write_field" (int_field) request.
  2097. ! If okay_to_write(DataHandle) is .true. then the patch in the
  2098. ! header (bigbuf) is written to disk using pNetCDF. Note that this is where the actual
  2099. ! "quilting" (reassembly of patches onto a full-size domain) is done. If
  2100. ! okay_to_write(DataHandle) is .false. then external I/O package interfaces
  2101. ! are called to write metadata for I/O formats that support native metadata.
  2102. !
  2103. ! NOTE that the I/O servers will only see write_field (int_field)
  2104. ! requests AFTER an "iosync" request.
  2105. CASE ( int_field )
  2106. CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
  2107. CALL int_get_write_field_header ( obuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize, &
  2108. DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
  2109. DomainDesc , MemoryOrder , Stagger , DimNames , &
  2110. DomainStart , DomainEnd , &
  2111. MemoryStart , MemoryEnd , &
  2112. PatchStart , PatchEnd )
  2113. !write(0,*)' int_field ',TRIM(VarName),DataHandle,okay_to_write(DataHandle)
  2114. icurs = icurs + hdrbufsize
  2115. IF ( okay_to_write(DataHandle) ) THEN
  2116. !!$ WRITE(0,FMT="('>>> ',(A),1x,(A),1x,A2,I6,1x,3('[',I3,',',I3,'] '))") &
  2117. !!$ TRIM(DateStr), TRIM(VarName), TRIM(MemoryOrder), &
  2118. !!$ (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)*(PatchEnd(3)-PatchStart(3)+1), &
  2119. !!$PatchStart(1),PatchEnd(1),PatchStart(2),PatchEnd(2),PatchStart(3),PatchEnd(3)
  2120. !!$ WRITE(0,FMT="('>>> ',(A),1x,(A),1x,I6,1x,3('[',I3,',',I3,'] '))") &
  2121. !!$ TRIM(DateStr), TRIM(VarName), DomainDesc, &
  2122. !!$ DomainStart(1),DomainEnd(1),DomainStart(2),DomainEnd(2),DomainStart(3),DomainEnd(3)
  2123. IF ( FieldType .EQ. WRF_FLOAT .OR. FieldType .EQ. WRF_DOUBLE) THEN
  2124. ! Note that the WRF_DOUBLE branch of this IF statement must come first since
  2125. ! WRF_FLOAT is set equal to WRF_DOUBLE during autopromotion builds.
  2126. IF ( FieldType .EQ. WRF_DOUBLE) THEN
  2127. ! this branch has not been tested TBH: 20050406
  2128. CALL mpi_type_size( MPI_DOUBLE_PRECISION, ftypesize, ierr )
  2129. ELSE
  2130. CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
  2131. ENDIF
  2132. #ifdef PNETCDF_QUILT
  2133. ! WRITE(mess,FMT="('>>> ',(A),1x,(A),1x,I6,1x,3('[',I3,',',I3,'] '))") &
  2134. ! TRIM(DateStr), TRIM(VarName), DomainDesc, &
  2135. ! DomainStart(1),DomainEnd(1), &
  2136. ! DomainStart(2),DomainEnd(2),DomainStart(3),DomainEnd(3)
  2137. ! CALL wrf_message(mess)
  2138. CALL store_patch_in_outbuf_pnc(obuf(icurs/itypesize), &
  2139. dummybuf, TRIM(DateStr), &
  2140. TRIM(VarName) , &
  2141. FieldType, &
  2142. TRIM(MemoryOrder), &
  2143. TRIM(Stagger), &
  2144. DimNames, &
  2145. DomainStart , DomainEnd ,&
  2146. MemoryStart , MemoryEnd ,&
  2147. PatchStart , PatchEnd, &
  2148. ntasks_io_group-1 )
  2149. stored_write_record = .true.
  2150. !!$ IF(VarName .eq. "PSFC")THEN
  2151. !!$ CALL dump_real_array_c(obuf(icurs/itypesize), DomainStart,&
  2152. !!$ DomainEnd, PatchStart, PatchEnd, &
  2153. !!$ mytask_local, DomainDesc)
  2154. !!$ ENDIF
  2155. #endif
  2156. ELSE IF ( FieldType .EQ. WRF_INTEGER ) THEN
  2157. CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
  2158. #ifdef PNETCDF_QUILT
  2159. CALL store_patch_in_outbuf_pnc ( dummybuf, &
  2160. obuf(icurs/itypesize) , &
  2161. TRIM(DateStr) , &
  2162. TRIM(VarName) , &
  2163. FieldType, &
  2164. TRIM(MemoryOrder) , &
  2165. TRIM(Stagger), DimNames, &
  2166. DomainStart , DomainEnd , &
  2167. MemoryStart , MemoryEnd , &
  2168. PatchStart , PatchEnd , &
  2169. ntasks_io_group-1 )
  2170. stored_write_record = .true.
  2171. #endif
  2172. ELSE IF ( FieldType .EQ. WRF_LOGICAL ) THEN
  2173. ftypesize = LWORDSIZE
  2174. ENDIF
  2175. icurs = icurs + (PatchEnd(1)-PatchStart(1)+1)* &
  2176. (PatchEnd(2)-PatchStart(2)+1)* &
  2177. (PatchEnd(3)-PatchStart(3)+1)*ftypesize
  2178. ELSE ! Write metadata only (or do 'training'?)
  2179. SELECT CASE (use_package(io_form(DataHandle)))
  2180. #ifdef PNETCDF_QUILT
  2181. CASE ( IO_PNETCDF )
  2182. CALL ext_pnc_write_field ( handle(DataHandle) , TRIM(DateStr), &
  2183. TRIM(VarName) , dummy , FieldType , mpi_comm_local , mpi_comm_local, &
  2184. DomainDesc , TRIM(MemoryOrder) , TRIM(Stagger), DimNames , &
  2185. DomainStart , DomainEnd , &
  2186. MemoryStart , MemoryEnd , &
  2187. PatchStart , PatchEnd, &
  2188. Status )
  2189. #endif
  2190. #ifdef NETCDF
  2191. CASE ( IO_NETCDF )
  2192. CALL ext_ncd_write_field ( handle(DataHandle) , TRIM(DateStr) , &
  2193. TRIM(VarName) , dummy , FieldType , Comm , IOComm, &
  2194. DomainDesc , TRIM(MemoryOrder) , TRIM(Stagger) , DimNames , &
  2195. DomainStart , DomainEnd , &
  2196. DomainStart , DomainEnd , &
  2197. DomainStart , DomainEnd , &
  2198. Status )
  2199. #endif
  2200. #if 0
  2201. ! since this is training and the grib output doesn't need training, disable this branch.
  2202. #ifdef YYY
  2203. CASE ( IO_YYY )
  2204. CALL ext_YYY_write_field ( handle(DataHandle) , TRIM(DateStr) , &
  2205. TRIM(VarName) , dummy , FieldType , Comm , IOComm, &
  2206. DomainDesc , TRIM(MemoryOrder) , TRIM(Stagger) , DimNames , &
  2207. DomainStart , DomainEnd , &
  2208. DomainStart , DomainEnd , &
  2209. DomainStart , DomainEnd , &
  2210. Status )
  2211. #endif
  2212. #endif
  2213. CASE DEFAULT
  2214. Status = 0
  2215. END SELECT
  2216. ENDIF
  2217. CASE ( int_iosync )
  2218. CALL int_get_handle_header( obuf(icurs/itypesize), hdrbufsize, itypesize, &
  2219. DataHandle , code )
  2220. icurs = icurs + hdrbufsize
  2221. CASE DEFAULT
  2222. WRITE(mess,*)'quilt: bad tag: ', &
  2223. get_hdr_tag( obuf(icurs/itypesize) ),' icurs ',&
  2224. icurs/itypesize
  2225. CALL wrf_error_fatal( mess )
  2226. END SELECT
  2227. ENDDO !}
2228. ! Now, we have finished handling all commands from the latest
  2229. ! call to retrieve_pieces_of_field().
  2230. IF (stored_write_record) THEN
  2231. ! If any field patches have been stored in internal output buffers
  2232. ! (via a call to store_patch_in_outbuf_pnc()) then call write_outbuf_pnc()
  2233. ! to write them to disk now.
  2234. ! NOTE that the I/O server will only have called
  2235. ! store_patch_in_outbuf() when handling write_field (int_field)
  2236. ! commands which only arrive AFTER an "iosync" command.
  2237. ! CALL start_timing
  2238. #ifdef PNETCDF_QUILT
  2239. CALL write_outbuf_pnc( handle(DataHandle), &
  2240. use_package(io_form(DataHandle)), &
  2241. mpi_comm_local, mytask_local, &
  2242. ntasks_local_group)
  2243. #endif
  2244. ! CALL end_timing( "quilt_pnc: call to write_outbuf_pnc" )
  2245. stored_write_record = .false.
  2246. written_record = .true.
  2247. ENDIF
  2248. ! If one or more "open_for_write_commit" commands were encountered from the
  2249. ! latest call to retrieve_pieces_of_field() then call the package-specific
  2250. ! routine to do the commit.
  2251. IF (okay_to_commit(DataHandle)) THEN
  2252. SELECT CASE (use_package(io_form(DataHandle)))
  2253. #ifdef PNETCDF_QUILT
  2254. CASE ( IO_PNETCDF )
  2255. CALL ext_pnc_inquire_filename( handle(DataHandle), fname, fstat, Status )
  2256. IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
  2257. CALL ext_pnc_open_for_write_commit(handle(DataHandle),Status)
  2258. okay_to_write(DataHandle) = .true.
  2259. ENDIF
  2260. #endif
  2261. #ifdef NETCDF
  2262. CASE ( IO_NETCDF )
  2263. CALL ext_ncd_inquire_filename( handle(DataHandle), fname, fstat, Status )
  2264. IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
  2265. CALL ext_ncd_open_for_write_commit(handle(DataHandle),Status)
  2266. okay_to_write(DataHandle) = .true.
  2267. ENDIF
  2268. #endif
  2269. #ifdef INTIO
  2270. CASE ( IO_INTIO )
  2271. CALL ext_int_inquire_filename( handle(DataHandle), fname, fstat, Status )
  2272. IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
  2273. CALL ext_int_open_for_write_commit(handle(DataHandle),Status)
  2274. okay_to_write(DataHandle) = .true.
  2275. ENDIF
  2276. #endif
  2277. #ifdef YYY
  2278. CASE ( IO_YYY )
  2279. CALL ext_yyy_inquire_filename( handle(DataHandle), fname, fstat, Status )
  2280. IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
  2281. CALL ext_yyy_open_for_write_commit(handle(DataHandle),Status)
  2282. okay_to_write(DataHandle) = .true.
  2283. ENDIF
  2284. #endif
  2285. #ifdef GRIB1
  2286. CASE ( IO_GRIB1 )
  2287. CALL ext_gr1_inquire_filename( handle(DataHandle), fname, fstat, Status )
  2288. IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
  2289. CALL ext_gr1_open_for_write_commit(handle(DataHandle),Status)
  2290. okay_to_write(DataHandle) = .true.
  2291. ENDIF
  2292. #endif
  2293. #ifdef GRIB2
  2294. CASE ( IO_GRIB2 )
  2295. CALL ext_gr2_inquire_filename( handle(DataHandle), fname, fstat, Status )
  2296. IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
  2297. CALL ext_gr2_open_for_write_commit(handle(DataHandle),Status)
  2298. okay_to_write(DataHandle) = .true.
  2299. ENDIF
  2300. #endif
  2301. CASE DEFAULT
  2302. Status = 0
  2303. END SELECT
  2304. okay_to_commit(DataHandle) = .false.
  2305. ENDIF
  2306. !!endif
  2307. ! Retrieve header and all patches for the next field from the internal
  2308. ! buffers.
  2309. CALL retrieve_pieces_of_field ( obuf , VarName, obufsize, sz, retval )
  2310. END DO !}
  2311. DEALLOCATE( obuf )
  2312. ! flush output files if needed
  2313. IF (written_record) THEN
  2314. !CALL start_timing
  2315. SELECT CASE ( use_package(io_form) )
  2316. #ifdef PNETCDF_QUILT
  2317. CASE ( IO_PNETCDF )
  2318. CALL ext_pnc_iosync( handle(DataHandle), Status )
  2319. #endif
  2320. CASE DEFAULT
  2321. Status = 0
  2322. END SELECT
  2323. written_record = .false.
  2324. !CALL end_timing( "quilt_pnc: flush" )
  2325. ENDIF
  2326. END DO ! }
  2327. END SUBROUTINE quilt_pnc
  2328. ! end of #endif of DM_PARALLEL
  2329. #endif
  2330. SUBROUTINE init_module_wrf_quilt
  2331. !<DESCRIPTION>
  2332. ! Both client (compute) and server tasks call this routine to initialize the
  2333. ! module. Routine setup_quilt_servers() is called from this routine to
  2334. ! determine which tasks are compute tasks and which are server tasks. Server
  2335. ! tasks then call routine quilt() and remain there for the rest of the model
  2336. ! run. Compute tasks return from init_module_wrf_quilt() to perform model
  2337. ! computations.
  2338. !</DESCRIPTION>
  2339. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  2340. IMPLICIT NONE
  2341. INCLUDE 'mpif.h'
  2342. INTEGER i
  2343. NAMELIST /namelist_quilt/ nio_tasks_per_group, nio_groups, poll_servers
  2344. INTEGER ntasks, mytask, ierr, io_status
  2345. # if defined(_OPENMP) && defined(MPI2_THREAD_SUPPORT)
  2346. INTEGER thread_support_provided, thread_support_requested
  2347. #endif
  2348. INTEGER mpi_comm_here, temp_poll
  2349. LOGICAL mpi_inited
  2350. LOGICAL esmf_coupling
  2351. !TODO: Change this to run-time switch
  2352. #ifdef ESMFIO
  2353. esmf_coupling = .TRUE.
  2354. #else
  2355. esmf_coupling = .FALSE.
  2356. #endif
  2357. quilting_enabled = .FALSE.
  2358. IF ( disable_quilt ) RETURN
  2359. DO i = 1,int_num_handles
  2360. okay_to_write(i) = .FALSE.
  2361. int_handle_in_use(i) = .FALSE.
  2362. server_for_handle(i) = 0
  2363. int_num_bytes_to_write(i) = 0
  2364. ENDDO
  2365. CALL MPI_INITIALIZED( mpi_inited, ierr )
  2366. IF ( .NOT. mpi_inited ) THEN
  2367. # if defined(_OPENMP) && defined(MPI2_THREAD_SUPPORT)
  2368. thread_support_requested = MPI_THREAD_FUNNELED
  2369. CALL mpi_init_thread ( thread_support_requested, thread_support_provided, ierr )
  2370. IF ( thread_support_provided .lt. thread_support_requested ) THEN
  2371. CALL WRF_ERROR_FATAL( "failed to initialize MPI thread support")
  2372. ENDIF
  2373. # else
  2374. CALL mpi_init ( ierr )
  2375. # endif
  2376. CALL wrf_set_dm_communicator( MPI_COMM_WORLD )
  2377. CALL wrf_termio_dup
  2378. ENDIF
  2379. CALL wrf_get_dm_communicator( mpi_comm_here )
  2380. CALL MPI_Comm_rank ( mpi_comm_here, mytask, ierr ) ;
  2381. CALL mpi_x_comm_size ( mpi_comm_here, ntasks, ierr ) ;
  2382. IF ( mytask .EQ. 0 ) THEN
  2383. OPEN ( unit=27, file="namelist.input", form="formatted", status="old" )
  2384. nio_groups = 1
  2385. nio_tasks_per_group = 0
  2386. poll_servers = .false.
  2387. READ ( 27 , NML = namelist_quilt, IOSTAT=io_status )
  2388. IF (io_status .NE. 0) THEN
  2389. CALL wrf_error_fatal( "ERROR reading namelist namelist_quilt" )
  2390. ENDIF
  2391. CLOSE ( 27 )
  2392. IF ( esmf_coupling ) THEN
  2393. IF ( nio_tasks_per_group > 0 ) THEN
  2394. CALL wrf_error_fatal("frame/module_io_quilt.F: cannot use "// &
  2395. "ESMF coupling with quilt tasks") ;
  2396. ENDIF
  2397. ENDIF
  2398. if(poll_servers) then
  2399. temp_poll=1
  2400. else
  2401. temp_poll=0
  2402. endif
  2403. ENDIF
  2404. CALL mpi_bcast( nio_tasks_per_group , 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
  2405. CALL mpi_bcast( nio_groups , 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
  2406. CALL mpi_bcast( temp_poll , 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
  2407. poll_servers = (temp_poll == 1)
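! An illustrative namelist.input stanza for the READ above (the values
! shown are examples, not defaults):
!   &namelist_quilt
!     nio_tasks_per_group = 2
!     nio_groups          = 1
!     poll_servers        = .false.
!   /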
  2408. CALL setup_quilt_servers( nio_tasks_per_group, &
  2409. mytask, &
  2410. ntasks, &
  2411. nio_groups, &
  2412. nio_tasks_in_group, &
  2413. mpi_comm_here, &
  2414. mpi_comm_local, &
  2415. mpi_comm_io_groups)
  2416. ! provide the communicator for the integration tasks to RSL
  2417. IF ( compute_node ) THEN
  2418. CALL wrf_set_dm_communicator( mpi_comm_local )
  2419. #ifdef HWRF
  2420. call ATM_SET_COMM(mpi_comm_local)
  2421. #endif
  2422. ELSE
  2423. #ifdef HWRF
  2424. call ATM_LEAVE_COUPLING()
  2425. #endif
  2426. CALL quilt ! will not return on io server tasks
  2427. ENDIF
  2428. #endif
  2429. RETURN
  2430. END SUBROUTINE init_module_wrf_quilt
  2431. #ifdef IBM_REDUCE_BUG_WORKAROUND
  2432. ! These three subroutines re-implement MPI_Reduce on MPI_INTEGER
  2433. ! with OP=MPI_ADD.
  2434. ! This is a workaround for a bug in the IBM MPI implementation.
  2435. ! Some MPI processes will get stuck in MPI_Reduce and not
  2436. ! return until the PREVIOUS I/O server group finishes writing.
  2437. ! This workaround replaces the MPI_Reduce call with many
  2438. ! MPI_Send and MPI_Recv calls that perform the sum on the
  2439. ! root of the communicator.
  2440. ! There are two reduce routines: one for a sum of scalars
  2441. ! and one for a sum of arrays. The get_reduce_tag generates
  2442. ! MPI tags for the communication.
  2443. integer function get_reduce_tag(root,comm)
  2444. implicit none
  2445. include 'mpif.h'
  2446. integer, intent(in) :: comm,root
  2447. integer :: i,j, tag, here
  2448. integer :: ierr,me,size
  2449. integer, pointer :: nexttags(:)
  2450. integer, target :: dummy(1)
  2451. character(255) :: message
  2452. integer(kind=4) :: comm4,hashed
  2453. integer, parameter :: hashsize = 113 ! should be prime, >max_servers+1
  2454. integer, parameter :: tagloop = 100000 ! number of tags reserved per communicator
  2455. integer, parameter :: origin = 1031102 ! lowest tag number we'll use
  2456. integer, save :: nexttag=origin ! next tag to use for a new communicator
  2457. integer, save :: comms(hashsize)=-1, firsttag(hashsize)=0, curtag(hashsize)=0
  2458. ! If integers are not four bytes, this implementation will still
  2459. ! work, but it may be inefficient (O(N) lookup instead of O(1)).
  2460. ! To fix that, an eight byte hash function would be needed, but
  2461. ! integers are four bytes in WRF, so that is not a problem right
  2462. ! now.
  2463. comm4=comm
  2464. call int_hash(comm4,hashed)
  2465. hashed=mod(abs(hashed),hashsize)+1
  2466. if(hashed<0) call wrf_error_fatal('hashed<0')
  2467. do i=0,hashsize-1
  2468. j=1+mod(i+hashed-1,hashsize)
  2469. if(firsttag(j)/=0 .and. comms(j)==comm) then
  2470. ! Found the communicator
  2471. if(curtag(j)-firsttag(j) >= tagloop) then
  2472. ! Hit the max tag number so we need to reset.
  2473. ! To make sure >tagloop reduces don't happen
  2474. ! before someone finishes an old reduce, we
  2475. ! have an MPI_Barrier here.
  2476. !call wrf_message('Hit tagloop limit so calling mpi_barrier in get_reduce_tag...')
  2477. call mpi_barrier(comm,ierr)
  2478. if(ierr/=0) call wrf_error_fatal('cannot call mpi_barrier')
  2479. !call wrf_message(' ...back from mpi_barrier in get_reduce_tag.')
  2480. curtag(j)=firsttag(j)
  2481. endif
  2482. tag=curtag(j)
  2483. curtag(j)=tag+1
  2484. get_reduce_tag=tag
  2485. return
  2486. endif
  2487. enddo
  2488. ! ==================== HANDLE NEW COMMUNICATORS ====================
  2489. !write(message,'("Found a new communicator ",I0," in get_reduce_tag, so making a tag range for it")') comm
  2490. ! If we get here, the communicator is new to us, so we need
  2491. ! to add it to the hash and give it a new tag.
  2492. ! First, figure out where we'll put the tag in the hashtable
  2493. here=-1
  2494. do i=0,hashsize-1
  2495. j=1+mod(i+hashed-1,hashsize)
  2496. if(firsttag(j)==0) then
  2497. here=j
  2498. exit
  2499. endif
  2500. enddo
  2501. if(here==-1) call wrf_error_fatal('no room in hashtable; increase hashsize in get_reduce_tag (should be >max_servers+1)')
  2502. ! Now, find out the new tag's number. To do this, we need to
  2503. ! get the next tag number that is not used by any ranks.
  2504. call mpi_comm_rank(comm,me,ierr)
  2505. if(ierr/=0) call wrf_error_fatal('cannot call mpi_comm_rank')
  2506. call mpi_comm_size(comm,size,ierr)
  2507. if(ierr/=0) call wrf_error_fatal('cannot call mpi_comm_size')
  2508. if(me==root) then
  2509. allocate(nexttags(size))
  2510. else
  2511. nexttags=>dummy
  2512. endif
  2513. call mpi_gather(nexttag,1,MPI_INTEGER,nexttags,1,MPI_INTEGER,root,comm,ierr)
  2514. if(ierr/=0) call wrf_error_fatal('cannot call mpi_gather')
  2515. if(me==root) then
  2516. nexttag=max(nexttag,maxval(nexttags))
  2517. deallocate(nexttags)
  2518. endif
  2519. call mpi_bcast(nexttag,1,MPI_INTEGER,root,comm,ierr)
  2520. comms(here)=comm
  2521. firsttag(here)=nexttag
  2522. curtag(here)=nexttag
  2523. get_reduce_tag=nexttag
  2524. !write(message,'("Stored comm ",I0," with tag ",I0,"=",I0," in hash element ",I0)') &
  2525. ! comms(here),firsttag(here),curtag(here),here
  2526. !call wrf_message(message)
  2527. nexttag=nexttag+tagloop
  2528. end function get_reduce_tag
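! Behavior sketch (illustrative): for a given communicator, successive
! calls return firsttag, firsttag+1, ..., firsttag+tagloop-1, then
! barrier and wrap back to firsttag; a communicator seen for the first
! time is assigned a fresh, non-overlapping tag range starting at the
! gathered maximum of nexttag across ranks.
!   tag1 = get_reduce_tag( 0, comm )   ! e.g. 1031102 (origin) on first use
!   tag2 = get_reduce_tag( 0, comm )   ! tag1 + 1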
  2529. subroutine reduce_add_int_scl(send,recv,count,root,comm)
  2530. implicit none
  2531. include 'mpif.h'
  2532. integer, intent(in) :: count,root,comm
  2533. integer, intent(inout) :: recv
  2534. integer, intent(in) :: send
  2535. integer :: me, size, ierr, you, temp, tag
  2536. character*255 :: message
  2537. if(root<0) call wrf_error_fatal('root is less than 0')
  2538. tag=get_reduce_tag(root,comm)
  2539. !write(message,'("Send/recv to tag ",I0)') tag
  2540. !call wrf_message(message)
  2541. call mpi_comm_rank(comm,me,ierr)
  2542. if(ierr/=0) call wrf_error_fatal('cannot call mpi_comm_rank')
  2543. call mpi_comm_size(comm,size,ierr)
  2544. if(ierr/=0) call wrf_error_fatal('cannot call mpi_comm_size')
  2545. if(root>=size) call wrf_error_fatal('root is beyond highest communicator rank')
  2546. if(me==root) then
  2547. recv=send
  2548. do you=0,size-2
  2549. call mpi_recv(temp,1,MPI_INTEGER,MPI_ANY_SOURCE,tag,comm,MPI_STATUS_IGNORE,ierr)
  2550. if(ierr/=0) call wrf_error_fatal('error calling mpi_recv')
  2551. recv=recv+temp
  2552. enddo
  2553. else
  2554. call mpi_send(send,1,MPI_INTEGER,root,tag,comm,ierr)
  2555. if(ierr/=0) call wrf_error_fatal('error calling mpi_send')
  2556. endif
  2557. end subroutine reduce_add_int_scl
  2558. subroutine reduce_add_int_arr(sendbuf,recvbuf,count,root,comm)
  2559. implicit none
  2560. include 'mpif.h'
  2561. integer, intent(in) :: count,root,comm
  2562. integer, intent(in) :: sendbuf(count)
  2563. integer, intent(inout) :: recvbuf(count)
  2564. integer :: me, size, ierr, you, tempbuf(count), tag
  2565. character*255 :: message
  2566. if(root<0) call wrf_error_fatal('root is less than 0')
  2567. tag=get_reduce_tag(root,comm)
  2568. !write(message,'("Send/recv to tag ",I0)') tag
  2569. !call wrf_message(message)
  2570. call mpi_comm_rank(comm,me,ierr)
  2571. if(ierr/=0) call wrf_error_fatal('cannot call mpi_comm_rank')
  2572. call mpi_comm_size(comm,size,ierr)
  2573. if(ierr/=0) call wrf_error_fatal('cannot call mpi_comm_size')
  2574. if(root>=size) call wrf_error_fatal('root is beyond highest communicator rank')
  2575. if(me==root) then
  2576. recvbuf=sendbuf
  2577. do you=0,size-2
  2578. call mpi_recv(tempbuf,count,MPI_INTEGER,MPI_ANY_SOURCE,tag,comm,MPI_STATUS_IGNORE,ierr)
  2579. if(ierr/=0) call wrf_error_fatal('error calling mpi_recv')
  2580. recvbuf=recvbuf+tempbuf
  2581. enddo
  2582. else
  2583. call mpi_send(sendbuf,count,MPI_INTEGER,root,tag,comm,ierr)
  2584. if(ierr/=0) call wrf_error_fatal('error calling mpi_send')
  2585. endif
  2586. end subroutine reduce_add_int_arr
  2587. #endif
  2588. END MODULE module_wrf_quilt
  2589. !<DESCRIPTION>
  2590. ! Remaining routines in this file are defined outside of the module
  2591. ! either to defeat arg/param type checking or to avoid an explicit use
  2592. ! dependence.
  2593. !</DESCRIPTION>
  2594. SUBROUTINE disable_quilting
  2595. !<DESCRIPTION>
2596. ! Call this in programs that should never use quilting (e.g. real).
2597. ! It must be called before the call to init_module_wrf_quilt().
  2598. !</DESCRIPTION>
  2599. USE module_wrf_quilt
  2600. disable_quilt = .TRUE.
  2601. RETURN
  2602. END SUBROUTINE disable_quilting
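! Usage sketch (the driver name is hypothetical; the call ordering is
! the point):
!   PROGRAM real_driver
!     CALL disable_quilting           ! must come before quilt init
!     CALL init_module_wrf_quilt     ! quilting stays disabled
!     ...
!   END PROGRAM real_driver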
  2603. LOGICAL FUNCTION use_output_servers()
  2604. !<DESCRIPTION>
2605. ! Returns .TRUE. if I/O quilt servers are in use for write operations.
  2606. ! This routine is called only by client (compute) tasks.
  2607. !</DESCRIPTION>
  2608. USE module_wrf_quilt
  2609. use_output_servers = quilting_enabled
  2610. RETURN
  2611. END FUNCTION use_output_servers
  2612. LOGICAL FUNCTION use_input_servers()
  2613. !<DESCRIPTION>
2614. ! Returns .TRUE. if I/O quilt servers are in use for read operations.
  2615. ! This routine is called only by client (compute) tasks.
  2616. !</DESCRIPTION>
  2617. USE module_wrf_quilt
  2618. use_input_servers = .FALSE.
  2619. RETURN
  2620. END FUNCTION use_input_servers
  2621. SUBROUTINE wrf_quilt_open_for_write_begin( FileName , Comm_compute, Comm_io, SysDepInfo, &
  2622. DataHandle , io_form_arg, Status )
  2623. !<DESCRIPTION>
  2624. ! Instruct the I/O quilt servers to begin data definition ("training") phase
  2625. ! for writing to WRF dataset FileName. io_form_arg indicates file format.
  2626. ! This routine is called only by client (compute) tasks.
  2627. !</DESCRIPTION>
  2628. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  2629. USE module_wrf_quilt
  2630. USE module_state_description, ONLY: IO_PNETCDF
  2631. IMPLICIT NONE
  2632. INCLUDE 'mpif.h'
  2633. #include "intio_tags.h"
  2634. CHARACTER *(*), INTENT(IN) :: FileName
  2635. INTEGER , INTENT(IN) :: Comm_compute , Comm_io
  2636. CHARACTER *(*), INTENT(IN) :: SysDepInfo
  2637. INTEGER , INTENT(OUT) :: DataHandle
  2638. INTEGER , INTENT(IN) :: io_form_arg
  2639. INTEGER , INTENT(OUT) :: Status
  2640. ! Local
  2641. CHARACTER*132 :: locFileName, locSysDepInfo
  2642. INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group
  2643. REAL dummy
  2644. INTEGER, EXTERNAL :: use_package
  2645. CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_open_for_write_begin' )
  2646. CALL int_get_fresh_handle(i)
  2647. okay_to_write(i) = .false.
  2648. DataHandle = i
  2649. locFileName = FileName
  2650. locSysDepInfo = SysDepInfo
  2651. CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
  2652. SELECT CASE(use_package(io_form_arg))
  2653. #ifdef PNETCDF_QUILT
  2654. CASE(IO_PNETCDF)
  2655. IF(compute_group_master(1)) THEN
  2656. CALL int_gen_ofwb_header( hdrbuf, hdrbufsize, itypesize, &
  2657. locFileName,locSysDepInfo,io_form_arg,&
  2658. DataHandle )
  2659. ELSE
  2660. CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
  2661. END IF
  2662. #endif
  2663. CASE DEFAULT
  2664. IF ( wrf_dm_on_monitor() ) THEN
  2665. CALL int_gen_ofwb_header( hdrbuf, hdrbufsize, itypesize, &
  2666. locFileName,locSysDepInfo,io_form_arg,DataHandle )
  2667. ELSE
  2668. CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
  2669. ENDIF
  2670. END SELECT
  2671. iserver = get_server_id ( DataHandle )
  2672. !JMDEBUGwrite(0,*)'wrf_quilt_open_for_write_begin iserver = ', iserver
  2673. CALL get_mpi_comm_io_groups( comm_io_group , iserver )
  2674. !JMDEBUGwrite(0,*)'wrf_quilt_open_for_write_begin comm_io_group = ', comm_io_group
  2675. CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
  2676. !JMDEBUGwrite(0,*)'mpi_x_comm_size tasks_in_group ',tasks_in_group, ierr
  2677. !!JMTIMING CALL start_timing
2678. ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
  2679. reduced = 0
  2680. reduced(1) = hdrbufsize
  2681. #ifdef PNETCDF_QUILT
  2682. IF ( compute_group_master(1) ) reduced(2) = i
  2683. #else
  2684. IF ( wrf_dm_on_monitor() ) reduced(2) = i
  2685. #endif
  2686. CALL mpi_x_reduce( reduced, reduced_dummy, 2, MPI_INTEGER, MPI_SUM, tasks_in_group-1, comm_io_group, ierr )
  2687. !!JMTIMING CALL end_timing("MPI_Reduce in wrf_quilt_open_for_write_begin")
  2688. ! send data to the i/o processor
  2689. CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
  2690. onebyte, &
  2691. hdrbuf, hdrbufsize , &
  2692. dummy, 0 )
  2693. Status = 0
  2694. #endif
  2695. RETURN
  2696. END SUBROUTINE wrf_quilt_open_for_write_begin
  2697. SUBROUTINE wrf_quilt_open_for_write_commit( DataHandle , Status )
  2698. !<DESCRIPTION>
  2699. ! Instruct the I/O quilt servers to switch an internal flag to enable output
  2700. ! for the dataset referenced by DataHandle. The call to
  2701. ! wrf_quilt_open_for_write_commit() must be paired with a call to
  2702. ! wrf_quilt_open_for_write_begin().
  2703. ! This routine is called only by client (compute) tasks.
  2704. !</DESCRIPTION>
  2705. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  2706. USE module_wrf_quilt
  2707. IMPLICIT NONE
  2708. INCLUDE 'mpif.h'
  2709. #include "intio_tags.h"
  2710. INTEGER , INTENT(IN ) :: DataHandle
  2711. INTEGER , INTENT(OUT) :: Status
  2712. INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group
  2713. REAL dummy
  2714. CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_open_for_write_commit' )
  2715. IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
  2716. IF ( int_handle_in_use( DataHandle ) ) THEN
  2717. okay_to_write( DataHandle ) = .true.
  2718. ENDIF
  2719. ENDIF
  2720. CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
  2721. #ifdef PNETCDF_QUILT
  2722. !ARP Only want one command to be received by each IO server when using
  2723. !ARP parallel IO
  2724. IF(compute_group_master(1)) THEN
  2725. CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
  2726. DataHandle, int_open_for_write_commit )
  2727. ELSE
  2728. CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
  2729. END IF
  2730. #else
  2731. IF ( wrf_dm_on_monitor() ) THEN
  2732. CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
  2733. DataHandle, int_open_for_write_commit )
  2734. ELSE
  2735. CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
  2736. ENDIF
  2737. #endif
  2738. iserver = get_server_id ( DataHandle )
  2739. CALL get_mpi_comm_io_groups( comm_io_group , iserver )
  2740. CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
  2741. !!JMTIMING CALL start_timing
2742. ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
  2743. reduced = 0
  2744. reduced(1) = hdrbufsize
  2745. #ifdef PNETCDF_QUILT
  2746. IF ( compute_group_master(1) ) reduced(2) = DataHandle
  2747. #else
  2748. IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
  2749. #endif
  2750. CALL mpi_x_reduce( reduced, reduced_dummy, 2, MPI_INTEGER, MPI_SUM, tasks_in_group-1, comm_io_group, ierr )
  2751. !!JMTIMING CALL end_timing("MPI_Reduce in wrf_quilt_open_for_write_commit")
  2752. ! send data to the i/o processor
  2753. CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
  2754. onebyte, &
  2755. hdrbuf, hdrbufsize , &
  2756. dummy, 0 )
  2757. Status = 0
  2758. #endif
  2759. RETURN
  2760. END SUBROUTINE wrf_quilt_open_for_write_commit
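! Pairing sketch (client side; "fname", "dh", "ierr" and friends are
! hypothetical names):
!   CALL wrf_quilt_open_for_write_begin( fname, comm_c, comm_io, sysdep, &
!                                        dh, io_form, ierr )
!   ... "training" writes: the dataset is defined but output is disabled ...
!   CALL wrf_quilt_open_for_write_commit( dh, ierr )
!   ... writes to handle dh now actually produce output ...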
  2761. SUBROUTINE wrf_quilt_open_for_read ( FileName , Comm_compute, Comm_io, SysDepInfo, &
  2762. DataHandle , Status )
  2763. !<DESCRIPTION>
  2764. ! Instruct the I/O quilt servers to open WRF dataset FileName for reading.
  2765. ! This routine is called only by client (compute) tasks.
  2766. ! This is not yet supported.
  2767. !</DESCRIPTION>
  2768. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  2769. IMPLICIT NONE
  2770. CHARACTER *(*), INTENT(IN) :: FileName
  2771. INTEGER , INTENT(IN) :: Comm_compute , Comm_io
  2772. CHARACTER *(*), INTENT(IN) :: SysDepInfo
  2773. INTEGER , INTENT(OUT) :: DataHandle
  2774. INTEGER , INTENT(OUT) :: Status
  2775. CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_open_for_read' )
  2776. DataHandle = -1
  2777. Status = -1
  2778. CALL wrf_error_fatal ( "frame/module_io_quilt.F: wrf_quilt_open_for_read not yet supported" )
  2779. #endif
  2780. RETURN
  2781. END SUBROUTINE wrf_quilt_open_for_read
  2782. SUBROUTINE wrf_quilt_inquire_opened ( DataHandle, FileName , FileStatus, Status )
  2783. !<DESCRIPTION>
  2784. ! Inquire if the dataset referenced by DataHandle is open.
  2785. ! Does not require communication with I/O servers.
  2786. ! This routine is called only by client (compute) tasks.
  2787. !</DESCRIPTION>
  2788. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  2789. USE module_wrf_quilt
  2790. IMPLICIT NONE
  2791. #include "wrf_io_flags.h"
  2792. INTEGER , INTENT(IN) :: DataHandle
  2793. CHARACTER *(*), INTENT(IN) :: FileName
  2794. INTEGER , INTENT(OUT) :: FileStatus
  2795. INTEGER , INTENT(OUT) :: Status
2796. Status = 0 ; FileStatus = WRF_FILE_NOT_OPENED  ! default when handle is not open for write
  2797. CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_inquire_opened' )
  2798. IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
  2799. IF ( int_handle_in_use( DataHandle ) ) THEN
  2800. IF ( okay_to_write( DataHandle ) ) THEN
  2801. FileStatus = WRF_FILE_OPENED_FOR_WRITE
  2802. ENDIF
  2803. ENDIF
  2804. ENDIF
  2805. Status = 0
  2806. #endif
  2807. RETURN
  2808. END SUBROUTINE wrf_quilt_inquire_opened
  2809. SUBROUTINE wrf_quilt_inquire_filename ( DataHandle, FileName , FileStatus, Status )
  2810. !<DESCRIPTION>
  2811. ! Return the Filename and FileStatus associated with DataHandle.
  2812. ! Does not require communication with I/O servers.
  2813. !
  2814. ! Note that the current implementation does not actually return FileName.
2815. ! Currently, WRF does not use this returned value. Fixing this would simply
  2816. ! require saving the file names on the client tasks in an array similar to
  2817. ! okay_to_write().
  2818. ! This routine is called only by client (compute) tasks.
  2819. !</DESCRIPTION>
  2820. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  2821. USE module_wrf_quilt
  2822. IMPLICIT NONE
  2823. #include "wrf_io_flags.h"
  2824. INTEGER , INTENT(IN) :: DataHandle
  2825. CHARACTER *(*), INTENT(OUT) :: FileName
  2826. INTEGER , INTENT(OUT) :: FileStatus
  2827. INTEGER , INTENT(OUT) :: Status
  2828. CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_inquire_filename' )
  2829. Status = 0
  2830. IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
  2831. IF ( int_handle_in_use( DataHandle ) ) THEN
  2832. IF ( okay_to_write( DataHandle ) ) THEN
  2833. FileStatus = WRF_FILE_OPENED_FOR_WRITE
  2834. ELSE
  2835. FileStatus = WRF_FILE_OPENED_NOT_COMMITTED
  2836. ENDIF
  2837. ELSE
  2838. FileStatus = WRF_FILE_NOT_OPENED
  2839. ENDIF
  2840. Status = 0
  2841. FileName = "bogusfornow"
  2842. ELSE
  2843. Status = -1
  2844. ENDIF
  2845. #endif
  2846. RETURN
  2847. END SUBROUTINE wrf_quilt_inquire_filename
  2848. SUBROUTINE wrf_quilt_iosync ( DataHandle, Status )
  2849. !<DESCRIPTION>
  2850. ! Instruct the I/O quilt servers to synchronize the disk copy of a dataset
  2851. ! with memory buffers.
  2852. !
  2853. ! After the "iosync" header (request) is sent to the I/O quilt server,
  2854. ! the compute tasks will then send the entire contents (headers and data) of
  2855. ! int_local_output_buffer to their I/O quilt server. This communication is
  2856. ! done in subroutine send_to_io_quilt_servers(). After the I/O quilt servers
  2857. ! receive this data, they will write all accumulated fields to disk.
  2858. !
  2859. ! Significant time may be required for the I/O quilt servers to organize
  2860. ! fields and write them to disk. Therefore, the "iosync" request should be
  2861. ! sent only when the compute tasks are ready to run for a while without
  2862. ! needing to communicate with the servers. Otherwise, the compute tasks
  2863. ! will end up waiting for the servers to finish writing to disk, thus wasting
  2864. ! any performance benefits of having servers at all.
  2865. !
  2866. ! This routine is called only by client (compute) tasks.
  2867. !</DESCRIPTION>
  2868. #if defined( DM_PARALLEL ) && ! defined (STUBMPI)
  2869. USE module_wrf_quilt
  2870. IMPLICIT NONE
  2871. include "mpif.h"
  2872. INTEGER , INTENT(IN) :: DataHandle
  2873. INTEGER , INTENT(OUT) :: Status
  2874. INTEGER locsize , itypesize
  2875. INTEGER ierr, tasks_in_group, comm_io_group, dummy, i
  2876. CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_iosync' )
  2877. ! CALL start_timing
  2878. IF ( associated ( int_local_output_buffer ) ) THEN
  2879. iserver = get_server_id ( DataHandle )
  2880. CALL get_mpi_comm_io_groups( comm_io_group , iserver )
  2881. CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
  2882. locsize = int_num_bytes_to_write(DataHandle)
  2883. ! CALL start_timing
2884. ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
  2885. reduced = 0
  2886. reduced(1) = locsize
  2887. #ifdef PNETCDF_QUILT
  2888. ! ARP Only want one command per IOServer if doing parallel IO
  2889. IF ( compute_group_master(1) ) reduced(2) = DataHandle
  2890. #else
  2891. IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
  2892. #endif
  2893. CALL mpi_x_reduce( reduced, reduced_dummy, 2, MPI_INTEGER, MPI_SUM, tasks_in_group-1, comm_io_group, ierr )
  2894. ! CALL end_timing("MPI_Reduce in wrf_quilt_iosync")
  2895. ! send data to the i/o processor
  2896. #ifdef DEREF_KLUDGE
  2897. CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
  2898. onebyte, &
  2899. int_local_output_buffer(1), locsize , &
  2900. dummy, 0 )
  2901. #else
  2902. CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
  2903. onebyte, &
  2904. int_local_output_buffer, locsize , &
  2905. dummy, 0 )
  2906. #endif
  2907. int_local_output_cursor = 1
  2908. ! int_num_bytes_to_write(DataHandle) = 0
  2909. DEALLOCATE ( int_local_output_buffer )
  2910. NULLIFY ( int_local_output_buffer )
  2911. ELSE
  2912. CALL wrf_message ("frame/module_io_quilt.F: wrf_quilt_iosync: no buffer allocated")
  2913. ENDIF
  2914. ! CALL end_timing("wrf_quilt_iosync")
  2915. Status = 0
  2916. #endif
  2917. RETURN
  2918. END SUBROUTINE wrf_quilt_iosync
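! Usage sketch ("dh" and "ierr" are hypothetical names): issue the sync
! just before a long stretch of computation, so the servers overlap
! their disk writes with useful work instead of stalling the clients:
!   CALL wrf_quilt_iosync( dh, ierr )
!   ... continue model integration while the servers flush to disk ...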
  2919. SUBROUTINE wrf_quilt_ioclose ( DataHandle, Status )
  2920. !<DESCRIPTION>
  2921. ! Instruct the I/O quilt servers to close the dataset referenced by
  2922. ! DataHandle.
  2923. ! This routine also clears the client file handle and, if needed, deallocates
  2924. ! int_local_output_buffer.
  2925. ! This routine is called only by client (compute) tasks.
  2926. !</DESCRIPTION>
  2927. #if defined( DM_PARALLEL ) && ! defined( STUBMPI)
  2928. USE module_wrf_quilt
  2929. USE module_timing
  2930. IMPLICIT NONE
  2931. INCLUDE 'mpif.h'
  2932. #include "intio_tags.h"
  2933. INTEGER , INTENT(IN) :: DataHandle
  2934. INTEGER , INTENT(OUT) :: Status
  2935. INTEGER i, itypesize, tasks_in_group, comm_io_group, ierr
  2936. REAL dummy
  2937. !!JMTIMING CALL start_timing
  2938. CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_ioclose' )
  2939. CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
2940. ! If we're using pnetcdf then each IO server will need to receive the
2941. ! handle just once, as there is no longer a reduce over the IO servers
2942. ! to get it.
  2943. #ifdef PNETCDF_QUILT
  2944. IF ( compute_group_master(1) )THEN
  2945. CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
  2946. DataHandle, int_ioclose )
  2947. ELSE
  2948. CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
  2949. ENDIF
  2950. #else
  2951. IF ( wrf_dm_on_monitor() ) THEN
  2952. CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
  2953. DataHandle , int_ioclose )
  2954. ELSE
  2955. CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
  2956. ENDIF
  2957. #endif
  2958. iserver = get_server_id ( DataHandle )
  2959. CALL get_mpi_comm_io_groups( comm_io_group , iserver )
  2960. CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
  2961. !!JMTIMING CALL start_timing
2962. ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
  2963. reduced = 0
  2964. #ifdef PNETCDF_QUILT
  2965. ! If we're using pnetcdf then each IO server will need the handle as there is
  2966. ! no longer a reduce over the IO servers to get it.
  2967. IF ( compute_group_master(1) ) reduced(2) = DataHandle
  2968. #else
  2969. IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
  2970. #endif
  2971. CALL mpi_x_reduce( reduced, reduced_dummy, 2, MPI_INTEGER, MPI_SUM, tasks_in_group-1, comm_io_group, ierr )
  2972. !!JMTIMING CALL end_timing("MPI_Reduce in ioclose")
  2973. #if 0
  2974. ! send data to the i/o processor
  2975. !!JMTIMING CALL start_timing
  2976. CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
  2977. onebyte, &
  2978. hdrbuf, hdrbufsize , &
  2979. dummy, 0 )
  2980. !!JMTIMING CALL end_timing("collect_on_comm in io_close")
  2981. #endif
  2982. int_handle_in_use(DataHandle) = .false.
  2983. CALL set_server_id( DataHandle, 0 )
  2984. okay_to_write(DataHandle) = .false.
  2985. okay_to_commit(DataHandle) = .false.
  2986. int_local_output_cursor = 1
  2987. int_num_bytes_to_write(DataHandle) = 0
  2988. IF ( associated ( int_local_output_buffer ) ) THEN
  2989. DEALLOCATE ( int_local_output_buffer )
  2990. NULLIFY ( int_local_output_buffer )
  2991. ENDIF
  2992. Status = 0
  2993. !!JMTIMING CALL end_timing( "wrf_quilt_ioclose" )
  2994. #endif
  2995. RETURN
  2996. END SUBROUTINE wrf_quilt_ioclose
  2997. SUBROUTINE wrf_quilt_ioexit( Status )
  2998. !<DESCRIPTION>
  2999. ! Instruct the I/O quilt servers to shut down the WRF I/O system.
  3000. ! Do not call any wrf_quilt_*() routines after this routine has been called.
  3001. ! This routine is called only by client (compute) tasks.
  3002. !</DESCRIPTION>
  3003. #if defined( DM_PARALLEL ) && ! defined (STUBMPI )
  3004. USE module_wrf_quilt
  3005. IMPLICIT NONE
  3006. INCLUDE 'mpif.h'
  3007. #include "intio_tags.h"
  3008. INTEGER , INTENT(OUT) :: Status
  3009. INTEGER :: DataHandle, actual_iserver
  3010. INTEGER i, itypesize, tasks_in_group, comm_io_group, me, ierr
  3011. REAL dummy
  3012. CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_ioexit' )
  3013. CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
3014. !ARPDBG - potential bug. We have no access to the type of IO being used for
3015. ! this data, so if PNETCDF_QUILT is defined we assume that is what is being used.
  3016. #ifdef PNETCDF_QUILT
  3017. !ARP Send the ioexit message just once to each IOServer when using parallel IO
  3018. IF( compute_group_master(1) ) THEN
  3019. CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
  3020. DataHandle, int_ioexit )
  3021. ELSE
  3022. CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
  3023. END IF
  3024. #else
  3025. IF ( wrf_dm_on_monitor() ) THEN
  3026. CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
  3027. DataHandle , int_ioexit ) ! Handle is dummy
  3028. ELSE
  3029. CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
  3030. ENDIF
  3031. #endif
  3032. DO iserver = 1, nio_groups
  3033. if(poll_servers) then
  3034. ! We're using server polling mode, so we must call
  3035. ! *_find_server to receive the mpi_ssend sent by the servers,
  3036. ! otherwise WRF will hang at the mpi_x_reduce below.
  3037. call wrf_quilt_find_server(actual_iserver)
  3038. ! The actual_iserver is now set to the next available I/O server.
  3039. ! That may not be the same as iserver, but that's okay as long
  3040. ! as we run through this loop exactly nio_groups times.
  3041. else
  3042. ! Not using server polling, so just access servers in numeric order.
  3043. actual_iserver=iserver
  3044. endif
  3045. CALL get_mpi_comm_io_groups( comm_io_group , actual_iserver )
  3046. CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
  3047. CALL mpi_comm_rank( comm_io_group , me , ierr )
  3048. ! BY SENDING A NEGATIVE SIZE WE GET THE SERVERS TO SHUT DOWN
  3049. hdrbufsize = -100
  3050. reduced = 0
  3051. IF ( me .eq. 0 ) reduced(1) = hdrbufsize
  3052. CALL mpi_x_reduce( reduced, reduced_dummy, 2, MPI_INTEGER, MPI_SUM, tasks_in_group-1, comm_io_group, ierr )
  3053. ENDDO
  3054. Status = 0
  3055. #endif
  3056. RETURN
  3057. END SUBROUTINE wrf_quilt_ioexit
  3058. SUBROUTINE wrf_quilt_get_next_time ( DataHandle, DateStr, Status )
  3059. !<DESCRIPTION>
  3060. ! Instruct the I/O quilt servers to return the next time stamp.
  3061. ! This is not yet supported.
  3062. ! This routine is called only by client (compute) tasks.
  3063. !</DESCRIPTION>
  3064. #if defined( DM_PARALLEL ) && ! defined (STUBMPI)
  3065. IMPLICIT NONE
  3066. INTEGER , INTENT(IN) :: DataHandle
  3067. CHARACTER*(*) :: DateStr
  3068. INTEGER :: Status
  3069. #endif
  3070. RETURN
  3071. END SUBROUTINE wrf_quilt_get_next_time
  3072. SUBROUTINE wrf_quilt_get_previous_time ( DataHandle, DateStr, Status )
  3073. !<DESCRIPTION>
  3074. ! Instruct the I/O quilt servers to return the previous time stamp.
  3075. ! This is not yet supported.
  3076. ! This routine is called only by client (compute) tasks.
  3077. !</DESCRIPTION>
  3078. #if defined( DM_PARALLEL ) && ! defined (STUBMPI)
  3079. IMPLICIT NONE
  3080. INTEGER , INTENT(IN) :: DataHandle
  3081. CHARACTER*(*) :: DateStr
  3082. INTEGER :: Status
  3083. #endif
  3084. RETURN
  3085. END SUBROUTINE wrf_quilt_get_previous_time
  3086. SUBROUTINE wrf_quilt_set_time ( DataHandle, Data, Status )
  3087. !<DESCRIPTION>
  3088. ! Instruct the I/O quilt servers to set the time stamp in the dataset
  3089. ! referenced by DataHandle.
  3090. ! This routine is called only by client (compute) tasks.
  3091. !</DESCRIPTION>
  3092. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3093. USE module_wrf_quilt
  3094. USE module_state_description, ONLY: IO_PNETCDF
  3095. IMPLICIT NONE
  3096. INCLUDE 'mpif.h'
  3097. #include "intio_tags.h"
  3098. INTEGER , INTENT(IN) :: DataHandle
  3099. CHARACTER*(*) , INTENT(IN) :: Data
  3100. INTEGER :: Status
  3101. INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group
  3102. REAL dummy
  3103. INTEGER :: Count
  3104. INTEGER, EXTERNAL :: use_package
  3105. !
  3106. CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_set_time' )
  3107. IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
  3108. IF ( int_handle_in_use( DataHandle ) ) THEN
  3109. CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
  3110. Count = 0 ! there is no count for character strings
3111. !ARPDBG - potential bug. We have no access to the type of IO being used for
3112. ! this data, so if PNETCDF_QUILT is defined we assume that is what is being used.
  3113. #ifdef PNETCDF_QUILT
  3114. IF(compute_group_master(1) )THEN
3115. ! Only want to send one time header to each IO server; the servers
3116. ! themselves cannot tell which headers are duplicates, so send from
3117. ! the compute_group_master process only.
  3118. CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
  3119. DataHandle, "TIMESTAMP", "", Data, int_set_time )
  3120. ELSE
  3121. CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
  3122. END IF
  3123. #else
  3124. IF ( wrf_dm_on_monitor() ) THEN
  3125. CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
  3126. DataHandle, "TIMESTAMP", "", Data, int_set_time )
  3127. ELSE
  3128. CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
  3129. ENDIF
  3130. #endif
  3131. iserver = get_server_id ( DataHandle )
  3132. CALL get_mpi_comm_io_groups( comm_io_group , iserver )
  3133. CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
3134. ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
  3135. reduced = 0
  3136. reduced(1) = hdrbufsize
  3137. #ifdef PNETCDF_QUILT
  3138. IF ( compute_group_master(1) ) reduced(2) = DataHandle
  3139. #else
  3140. IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
  3141. #endif
  3142. CALL mpi_x_reduce( reduced, reduced_dummy, 2, MPI_INTEGER, MPI_SUM, tasks_in_group-1, comm_io_group, ierr )
  3143. ! send data to the i/o processor
  3144. CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
  3145. onebyte, &
  3146. hdrbuf, hdrbufsize , &
  3147. dummy, 0 )
  3148. ENDIF
  3149. ENDIF
  3150. #endif
  3151. RETURN
  3152. END SUBROUTINE wrf_quilt_set_time
  3153. SUBROUTINE wrf_quilt_get_next_var ( DataHandle, VarName, Status )
  3154. !<DESCRIPTION>
  3155. ! When reading, instruct the I/O quilt servers to return the name of the next
  3156. ! variable in the current time frame.
  3157. ! This is not yet supported.
  3158. ! This routine is called only by client (compute) tasks.
  3159. !</DESCRIPTION>
  3160. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3161. IMPLICIT NONE
  3162. INTEGER , INTENT(IN) :: DataHandle
  3163. CHARACTER*(*) :: VarName
  3164. INTEGER :: Status
  3165. #endif
  3166. RETURN
  3167. END SUBROUTINE wrf_quilt_get_next_var
  3168. SUBROUTINE wrf_quilt_get_dom_ti_real ( DataHandle,Element, Data, Count, Outcount, Status )
  3169. !<DESCRIPTION>
  3170. ! Instruct the I/O quilt servers to attempt to read Count words of time
  3171. ! independent domain metadata named "Element"
  3172. ! from the open dataset described by DataHandle.
  3173. ! Metadata of type real are
  3174. ! stored in array Data.
  3175. ! Actual number of words read is returned in OutCount.
  3176. ! This routine is called only by client (compute) tasks.
  3177. ! This is not yet supported.
  3178. !</DESCRIPTION>
  3179. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3180. IMPLICIT NONE
  3181. INTEGER , INTENT(IN) :: DataHandle
  3182. CHARACTER*(*) , INTENT(IN) :: Element
  3183. REAL, INTENT(IN) :: Data(*)
  3184. INTEGER , INTENT(IN) :: Count
  3185. INTEGER :: Outcount
  3186. INTEGER :: Status
  3187. CALL wrf_message('wrf_quilt_get_dom_ti_real not supported yet')
  3188. #endif
  3189. RETURN
  3190. END SUBROUTINE wrf_quilt_get_dom_ti_real
  3191. SUBROUTINE wrf_quilt_put_dom_ti_real ( DataHandle,Element, Data, Count, Status )
  3192. !<DESCRIPTION>
  3193. ! Instruct the I/O quilt servers to write Count words of time independent
  3194. ! domain metadata named "Element"
  3195. ! to the open dataset described by DataHandle.
  3196. ! Metadata of type real are
  3197. ! copied from array Data.
  3198. ! This routine is called only by client (compute) tasks.
  3199. !</DESCRIPTION>
  3200. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3201. USE module_wrf_quilt
  3202. IMPLICIT NONE
  3203. INCLUDE 'mpif.h'
  3204. #include "intio_tags.h"
  3205. INTEGER , INTENT(IN) :: DataHandle
  3206. CHARACTER*(*) , INTENT(IN) :: Element
  3207. REAL , INTENT(IN) :: Data(*)
  3208. INTEGER , INTENT(IN) :: Count
  3209. INTEGER :: Status
  3210. !Local
  3211. CHARACTER*132 :: locElement
  3212. INTEGER i, typesize, itypesize, tasks_in_group, ierr, comm_io_group
  3213. REAL dummy
  3214. !
  3215. !!JMTIMING CALL start_timing
  3216. CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_put_dom_ti_real' )
  3217. CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
  3218. locElement = Element
  3219. IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
  3220. IF ( int_handle_in_use( DataHandle ) ) THEN
  3221. CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
  3222. CALL MPI_TYPE_SIZE( MPI_REAL, typesize, ierr )
  3223. #ifdef PNETCDF_QUILT
  3224. IF ( compute_group_master(1) ) THEN
  3225. CALL int_gen_ti_header( hdrbuf, hdrbufsize, itypesize, typesize, &
  3226. DataHandle, locElement, Data, Count, int_dom_ti_real )
  3227. ELSE
  3228. CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
  3229. ENDIF
  3230. #else
  3231. IF ( wrf_dm_on_monitor() ) THEN
  3232. CALL int_gen_ti_header( hdrbuf, hdrbufsize, itypesize, typesize, &
  3233. DataHandle, locElement, Data, Count, int_dom_ti_real )
  3234. ELSE
  3235. CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
  3236. ENDIF
  3237. #endif
  3238. iserver = get_server_id ( DataHandle )
  3239. CALL get_mpi_comm_io_groups( comm_io_group , iserver )
  3240. CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
  3241. !!JMTIMING CALL start_timing
3242. ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
  3243. reduced = 0
  3244. reduced(1) = hdrbufsize
  3245. #ifdef PNETCDF_QUILT
  3246. IF( compute_group_master(1) ) reduced(2) = DataHandle
  3247. #else
  3248. IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
  3249. #endif
  3250. CALL mpi_x_reduce( reduced, reduced_dummy, 2, MPI_INTEGER, MPI_SUM, tasks_in_group-1, comm_io_group, ierr )
  3251. !!JMTIMING CALL end_timing("MPI_Reduce in wrf_quilt_put_dom_ti_real")
  3252. ! send data to the i/o processor
  3253. CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
  3254. onebyte, &
  3255. hdrbuf, hdrbufsize , &
  3256. dummy, 0 )
  3257. ENDIF
  3258. ENDIF
  3259. Status = 0
  3260. !!JMTIMING CALL end_timing("wrf_quilt_put_dom_ti_real")
  3261. #endif
  3262. RETURN
  3263. END SUBROUTINE wrf_quilt_put_dom_ti_real
  3264. SUBROUTINE wrf_quilt_get_dom_ti_double ( DataHandle,Element, Data, Count, Outcount, Status )
  3265. !<DESCRIPTION>
  3266. ! Instruct the I/O quilt servers to attempt to read Count words of time
  3267. ! independent domain metadata named "Element"
  3268. ! from the open dataset described by DataHandle.
  3269. ! Metadata of type double are
  3270. ! stored in array Data.
  3271. ! Actual number of words read is returned in OutCount.
  3272. ! This routine is called only by client (compute) tasks.
  3273. !
  3274. ! This is not yet supported.
  3275. !</DESCRIPTION>
  3276. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3277. IMPLICIT NONE
  3278. INTEGER , INTENT(IN) :: DataHandle
  3279. CHARACTER*(*) , INTENT(IN) :: Element
  3280. real*8 :: Data(*)
  3281. INTEGER , INTENT(IN) :: Count
  3282. INTEGER :: OutCount
  3283. INTEGER :: Status
  3284. CALL wrf_error_fatal('wrf_quilt_get_dom_ti_double not supported yet')
  3285. #endif
  3286. RETURN
  3287. END SUBROUTINE wrf_quilt_get_dom_ti_double
  3288. SUBROUTINE wrf_quilt_put_dom_ti_double ( DataHandle,Element, Data, Count, Status )
  3289. !<DESCRIPTION>
  3290. ! Instruct the I/O quilt servers to write Count words of time independent
  3291. ! domain metadata named "Element"
  3292. ! to the open dataset described by DataHandle.
  3293. ! Metadata of type double are
  3294. ! copied from array Data.
  3295. ! This routine is called only by client (compute) tasks.
  3296. !
  3297. ! This is not yet supported.
  3298. !</DESCRIPTION>
  3299. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3300. IMPLICIT NONE
  3301. INTEGER , INTENT(IN) :: DataHandle
  3302. CHARACTER*(*) , INTENT(IN) :: Element
  3303. REAL*8 , INTENT(IN) :: Data(*)
  3304. INTEGER , INTENT(IN) :: Count
  3305. INTEGER :: Status
  3306. CALL wrf_error_fatal('wrf_quilt_put_dom_ti_double not supported yet')
  3307. #endif
  3308. RETURN
  3309. END SUBROUTINE wrf_quilt_put_dom_ti_double
  3310. SUBROUTINE wrf_quilt_get_dom_ti_integer ( DataHandle,Element, Data, Count, Outcount, Status )
  3311. !<DESCRIPTION>
  3312. ! Instruct the I/O quilt servers to attempt to read Count words of time
  3313. ! independent domain metadata named "Element"
  3314. ! from the open dataset described by DataHandle.
  3315. ! Metadata of type integer are
  3316. ! stored in array Data.
  3317. ! Actual number of words read is returned in OutCount.
  3318. ! This routine is called only by client (compute) tasks.
  3319. !
  3320. ! This is not yet supported.
  3321. !</DESCRIPTION>
  3322. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3323. IMPLICIT NONE
  3324. INTEGER , INTENT(IN) :: DataHandle
  3325. CHARACTER*(*) , INTENT(IN) :: Element
  3326. integer :: Data(*)
  3327. INTEGER , INTENT(IN) :: Count
  3328. INTEGER :: OutCount
  3329. INTEGER :: Status
  3330. CALL wrf_message('wrf_quilt_get_dom_ti_integer not supported yet')
  3331. #endif
  3332. RETURN
  3333. END SUBROUTINE wrf_quilt_get_dom_ti_integer
  3334. SUBROUTINE wrf_quilt_put_dom_ti_integer ( DataHandle,Element, Data, Count, Status )
  3335. !<DESCRIPTION>
  3336. ! Instruct the I/O quilt servers to write Count words of time independent
  3337. ! domain metadata named "Element"
  3338. ! to the open dataset described by DataHandle.
  3339. ! Metadata of type integer are
  3340. ! copied from array Data.
  3341. ! This routine is called only by client (compute) tasks.
  3342. !</DESCRIPTION>
  3343. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3344. USE module_wrf_quilt
  3345. USE module_state_description, ONLY: IO_PNETCDF
  3346. IMPLICIT NONE
  3347. INCLUDE 'mpif.h'
  3348. #include "intio_tags.h"
  3349. INTEGER , INTENT(IN) :: DataHandle
  3350. CHARACTER*(*) , INTENT(IN) :: Element
  3351. INTEGER , INTENT(IN) :: Data(*)
  3352. INTEGER , INTENT(IN) :: Count
  3353. INTEGER :: Status
  3354. ! Local
  3355. CHARACTER*132 :: locElement
  3356. INTEGER i, typesize, itypesize, tasks_in_group, ierr, comm_io_group
  3357. REAL dummy
  3358. INTEGER, EXTERNAL :: use_package
  3359. !
  3360. !!JMTIMING CALL start_timing
  3361. locElement = Element
  3362. CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_put_dom_ti_integer' )
  3363. IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
  3364. IF ( int_handle_in_use( DataHandle ) ) THEN
  3365. CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
  3366. CALL MPI_TYPE_SIZE( MPI_INTEGER, typesize, ierr )
3367. !ARPDBG - potential bug. We have no access to the type of IO being used for
3368. ! this data, so if PNETCDF_QUILT is defined we assume that is what is being used.
  3369. #ifdef PNETCDF_QUILT
  3370. IF ( compute_group_master(1) )THEN
  3371. CALL int_gen_ti_header( hdrbuf, hdrbufsize, itypesize, typesize, &
  3372. DataHandle, locElement, Data, Count, &
  3373. int_dom_ti_integer )
  3374. ELSE
  3375. CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
  3376. ENDIF
  3377. #else
  3378. IF ( wrf_dm_on_monitor() ) THEN
  3379. CALL int_gen_ti_header( hdrbuf, hdrbufsize, itypesize, typesize, &
  3380. DataHandle, locElement, Data, Count, &
  3381. int_dom_ti_integer )
  3382. ELSE
  3383. CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
  3384. ENDIF
  3385. #endif
  3386. iserver = get_server_id ( DataHandle )
  3387. CALL get_mpi_comm_io_groups( comm_io_group , iserver )
  3388. CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
  3389. !!JMTIMING CALL start_timing
3390. ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
  3391. reduced = 0
  3392. reduced(1) = hdrbufsize
  3393. #ifdef PNETCDF_QUILT
  3394. IF ( compute_group_master(1) ) reduced(2) = DataHandle
  3395. #else
  3396. IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
  3397. #endif
  3398. CALL mpi_x_reduce( reduced, reduced_dummy, 2, MPI_INTEGER, MPI_SUM, tasks_in_group-1, comm_io_group, ierr )
  3399. !!JMTIMING CALL end_timing("MPI_Reduce in wrf_quilt_put_dom_ti_integer")
  3400. ! send data to the i/o processor
  3401. CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
  3402. onebyte, &
  3403. hdrbuf, hdrbufsize , &
  3404. dummy, 0 )
  3405. ENDIF
  3406. ENDIF
  3407. CALL wrf_debug ( DEBUG_LVL, 'returning from wrf_quilt_put_dom_ti_integer' )
  3408. !!JMTIMING CALL end_timing("wrf_quilt_put_dom_ti_integer" )
  3409. #endif
  3410. RETURN
  3411. END SUBROUTINE wrf_quilt_put_dom_ti_integer
  3412. SUBROUTINE wrf_quilt_get_dom_ti_logical ( DataHandle,Element, Data, Count, Outcount, Status )
  3413. !<DESCRIPTION>
  3414. ! Instruct the I/O quilt servers to attempt to read Count words of time
  3415. ! independent domain metadata named "Element"
  3416. ! from the open dataset described by DataHandle.
  3417. ! Metadata of type logical are
  3418. ! stored in array Data.
  3419. ! Actual number of words read is returned in OutCount.
  3420. ! This routine is called only by client (compute) tasks.
  3421. !
  3422. ! This is not yet supported.
  3423. !</DESCRIPTION>
  3424. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3425. IMPLICIT NONE
  3426. INTEGER , INTENT(IN) :: DataHandle
  3427. CHARACTER*(*) , INTENT(IN) :: Element
  3428. logical :: Data(*)
  3429. INTEGER , INTENT(IN) :: Count
  3430. INTEGER :: OutCount
  3431. INTEGER :: Status
  3432. ! CALL wrf_message('wrf_quilt_get_dom_ti_logical not supported yet')
  3433. #endif
  3434. RETURN
  3435. END SUBROUTINE wrf_quilt_get_dom_ti_logical
  3436. SUBROUTINE wrf_quilt_put_dom_ti_logical ( DataHandle,Element, Data, Count, Status )
  3437. !<DESCRIPTION>
  3438. ! Instruct the I/O quilt servers to write Count words of time independent
  3439. ! domain metadata named "Element"
  3440. ! to the open dataset described by DataHandle.
  3441. ! Metadata of type logical are
  3442. ! copied from array Data.
  3443. ! This routine is called only by client (compute) tasks.
  3444. !
  3445. ! This is not yet supported.
  3446. !</DESCRIPTION>
  3447. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3448. IMPLICIT NONE
  3449. INTEGER , INTENT(IN) :: DataHandle
  3450. CHARACTER*(*) , INTENT(IN) :: Element
  3451. logical , INTENT(IN) :: Data(*)
  3452. INTEGER , INTENT(IN) :: Count
  3453. INTEGER :: Status
  3454. ! Local
  3455. INTEGER i
  3456. INTEGER one_or_zero(Count)
  3457. DO i = 1, Count
  3458. IF ( Data(i) ) THEN
  3459. one_or_zero(i) = 1
  3460. ELSE
  3461. one_or_zero(i) = 0
  3462. ENDIF
  3463. ENDDO
  3464. CALL wrf_quilt_put_dom_ti_integer ( DataHandle,Element, one_or_zero, Count, Status )
  3465. #endif
  3466. RETURN
  3467. END SUBROUTINE wrf_quilt_put_dom_ti_logical
  3468. SUBROUTINE wrf_quilt_get_dom_ti_char ( DataHandle,Element, Data, Status )
  3469. !<DESCRIPTION>
  3470. ! Instruct the I/O quilt servers to attempt to read time independent
  3471. ! domain metadata named "Element"
  3472. ! from the open dataset described by DataHandle.
  3473. ! Metadata of type char are
  3474. ! stored in string Data.
  3475. ! This routine is called only by client (compute) tasks.
  3476. !
  3477. ! This is not yet supported.
  3478. !</DESCRIPTION>
  3479. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3480. IMPLICIT NONE
  3481. INTEGER , INTENT(IN) :: DataHandle
  3482. CHARACTER*(*) , INTENT(IN) :: Element
  3483. CHARACTER*(*) :: Data
  3484. INTEGER :: Status
  3485. CALL wrf_message('wrf_quilt_get_dom_ti_char not supported yet')
  3486. #endif
  3487. RETURN
  3488. END SUBROUTINE wrf_quilt_get_dom_ti_char
  3489. SUBROUTINE wrf_quilt_put_dom_ti_char ( DataHandle, Element, Data, Status )
  3490. !<DESCRIPTION>
  3491. ! Instruct the I/O quilt servers to write time independent
  3492. ! domain metadata named "Element"
  3493. ! to the open dataset described by DataHandle.
  3494. ! Metadata of type char are
  3495. ! copied from string Data.
  3496. ! This routine is called only by client (compute) tasks.
  3497. !</DESCRIPTION>
  3498. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3499. USE module_wrf_quilt
  3500. IMPLICIT NONE
  3501. INCLUDE 'mpif.h'
  3502. #include "intio_tags.h"
  3503. INTEGER , INTENT(IN) :: DataHandle
  3504. CHARACTER*(*) , INTENT(IN) :: Element
  3505. CHARACTER*(*) , INTENT(IN) :: Data
  3506. INTEGER :: Status
  3507. INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group, me
  3508. REAL dummy
  3509. !
  3510. !!JMTIMING CALL start_timing
  3511. CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_put_dom_ti_char' )
  3512. IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
  3513. IF ( int_handle_in_use( DataHandle ) ) THEN
  3514. CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
3515. !ARPDBG - potential bug. We have no access to the type of IO being used for
3516. ! this data, so if PNETCDF_QUILT is defined we assume that is what is being used.
  3517. #ifdef PNETCDF_QUILT
  3518. IF(compute_group_master(1))THEN
  3519. CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
  3520. DataHandle, Element, "", Data, &
  3521. int_dom_ti_char )
  3522. ELSE
  3523. CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
  3524. END IF
  3525. #else
  3526. IF ( wrf_dm_on_monitor() ) THEN
  3527. CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
  3528. DataHandle, Element, "", Data, int_dom_ti_char )
  3529. ELSE
  3530. CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
  3531. ENDIF
  3532. #endif
  3533. iserver = get_server_id ( DataHandle )
  3534. ! write(0,*)'wrf_quilt_put_dom_ti_char ',iserver
  3535. CALL get_mpi_comm_io_groups( comm_io_group , iserver )
  3536. CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
3537. ! send the size of my local buffer to the i/o task (obufsize doesn't mean anything here)
  3538. !!JMTIMING! CALL start_timing
  3539. !write(0,*)'calling MPI_Barrier'
  3540. ! CALL MPI_Barrier( mpi_comm_local, ierr )
  3541. !write(0,*)'back from MPI_Barrier'
  3542. !!JMTIMING! CALL end_timing("MPI_Barrier in wrf_quilt_put_dom_ti_char")
  3543. !!JMTIMING CALL start_timing
3544. ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
  3545. reduced_dummy = 0
  3546. reduced = 0
  3547. reduced(1) = hdrbufsize
  3548. #ifdef PNETCDF_QUILT
  3549. IF(compute_group_master(1)) reduced(2) = DataHandle
  3550. #else
  3551. IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
  3552. #endif
  3553. !call mpi_comm_rank( comm_io_group , me, ierr )
  3554. CALL mpi_x_reduce( reduced, reduced_dummy, 2, MPI_INTEGER, MPI_SUM, tasks_in_group-1, comm_io_group, ierr )
  3555. !!JMTIMING CALL end_timing("MPI_Reduce in wrf_quilt_put_dom_ti_char")
  3556. ! send data to the i/o processor
  3557. !!JMTIMING CALL start_timing
  3558. CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
  3559. onebyte, &
  3560. hdrbuf, hdrbufsize , &
  3561. dummy, 0 )
  3562. !!JMTIMING CALL end_timing("collect_on_comm in wrf_quilt_put_dom_ti_char")
  3563. ENDIF
  3564. ENDIF
  3565. !!JMTIMING CALL end_timing("wrf_quilt_put_dom_ti_char")
  3566. #endif
  3567. RETURN
  3568. END SUBROUTINE wrf_quilt_put_dom_ti_char
  3569. SUBROUTINE wrf_quilt_get_dom_td_real ( DataHandle,Element, DateStr, Data, Count, Outcount, Status )
  3570. !<DESCRIPTION>
  3571. ! Instruct the I/O quilt servers to attempt to read Count words of time
  3572. ! dependent domain metadata named "Element" valid at time DateStr
  3573. ! from the open dataset described by DataHandle.
  3574. ! Metadata of type real are
  3575. ! stored in array Data.
  3576. ! Actual number of words read is returned in OutCount.
  3577. ! This routine is called only by client (compute) tasks.
  3578. !
  3579. ! This is not yet supported.
  3580. !</DESCRIPTION>
  3581. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3582. IMPLICIT NONE
  3583. INTEGER , INTENT(IN) :: DataHandle
  3584. CHARACTER*(*) , INTENT(IN) :: Element
  3585. CHARACTER*(*) , INTENT(IN) :: DateStr
  3586. real :: Data(*)
  3587. INTEGER , INTENT(IN) :: Count
  3588. INTEGER :: OutCount
  3589. INTEGER :: Status
  3590. #endif
  3591. RETURN
  3592. END SUBROUTINE wrf_quilt_get_dom_td_real
  3593. SUBROUTINE wrf_quilt_put_dom_td_real ( DataHandle,Element, DateStr, Data, Count, Status )
  3594. !<DESCRIPTION>
  3595. ! Instruct the I/O quilt servers to write Count words of time dependent
  3596. ! domain metadata named "Element" valid at time DateStr
  3597. ! to the open dataset described by DataHandle.
  3598. ! Metadata of type real are
  3599. ! copied from array Data.
  3600. ! This routine is called only by client (compute) tasks.
  3601. !
  3602. ! This is not yet supported.
  3603. !</DESCRIPTION>
  3604. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3605. IMPLICIT NONE
  3606. INTEGER , INTENT(IN) :: DataHandle
  3607. CHARACTER*(*) , INTENT(IN) :: Element
  3608. CHARACTER*(*) , INTENT(IN) :: DateStr
  3609. real , INTENT(IN) :: Data(*)
  3610. INTEGER , INTENT(IN) :: Count
  3611. INTEGER :: Status
  3612. #endif
  3613. RETURN
  3614. END SUBROUTINE wrf_quilt_put_dom_td_real
  3615. SUBROUTINE wrf_quilt_get_dom_td_double ( DataHandle,Element, DateStr, Data, Count, Outcount, Status )
  3616. !<DESCRIPTION>
  3617. ! Instruct the I/O quilt servers to attempt to read Count words of time
  3618. ! dependent domain metadata named "Element" valid at time DateStr
  3619. ! from the open dataset described by DataHandle.
  3620. ! Metadata of type double are
  3621. ! stored in array Data.
  3622. ! Actual number of words read is returned in OutCount.
  3623. ! This routine is called only by client (compute) tasks.
  3624. !
  3625. ! This is not yet supported.
  3626. !</DESCRIPTION>
  3627. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3628. IMPLICIT NONE
  3629. INTEGER , INTENT(IN) :: DataHandle
  3630. CHARACTER*(*) , INTENT(IN) :: Element
  3631. CHARACTER*(*) , INTENT(IN) :: DateStr
  3632. real*8 :: Data(*)
  3633. INTEGER , INTENT(IN) :: Count
  3634. INTEGER :: OutCount
  3635. INTEGER :: Status
3636. CALL wrf_error_fatal('wrf_quilt_get_dom_td_double not supported yet')
3637. #endif
  3638. RETURN
  3639. END SUBROUTINE wrf_quilt_get_dom_td_double
  3640. SUBROUTINE wrf_quilt_put_dom_td_double ( DataHandle,Element, DateStr, Data, Count, Status )
  3641. !<DESCRIPTION>
  3642. ! Instruct the I/O quilt servers to write Count words of time dependent
  3643. ! domain metadata named "Element" valid at time DateStr
  3644. ! to the open dataset described by DataHandle.
  3645. ! Metadata of type double are
  3646. ! copied from array Data.
  3647. ! This routine is called only by client (compute) tasks.
  3648. !
  3649. ! This is not yet supported.
  3650. !</DESCRIPTION>
  3651. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3652. IMPLICIT NONE
  3653. INTEGER , INTENT(IN) :: DataHandle
  3654. CHARACTER*(*) , INTENT(IN) :: Element
  3655. CHARACTER*(*) , INTENT(IN) :: DateStr
  3656. real*8 , INTENT(IN) :: Data(*)
  3657. INTEGER , INTENT(IN) :: Count
  3658. INTEGER :: Status
3659. CALL wrf_error_fatal('wrf_quilt_put_dom_td_double not supported yet')
3660. #endif
  3661. RETURN
  3662. END SUBROUTINE wrf_quilt_put_dom_td_double
  3663. SUBROUTINE wrf_quilt_get_dom_td_integer ( DataHandle,Element, DateStr, Data, Count, Outcount, Status )
  3664. !<DESCRIPTION>
  3665. ! Instruct the I/O quilt servers to attempt to read Count words of time
  3666. ! dependent domain metadata named "Element" valid at time DateStr
  3667. ! from the open dataset described by DataHandle.
  3668. ! Metadata of type integer are
  3669. ! stored in array Data.
  3670. ! Actual number of words read is returned in OutCount.
  3671. ! This routine is called only by client (compute) tasks.
  3672. !
  3673. ! This is not yet supported.
  3674. !</DESCRIPTION>
  3675. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3676. IMPLICIT NONE
  3677. INTEGER , INTENT(IN) :: DataHandle
  3678. CHARACTER*(*) , INTENT(IN) :: Element
  3679. CHARACTER*(*) , INTENT(IN) :: DateStr
  3680. integer :: Data(*)
  3681. INTEGER , INTENT(IN) :: Count
  3682. INTEGER :: OutCount
  3683. INTEGER :: Status
  3684. #endif
  3685. RETURN
  3686. END SUBROUTINE wrf_quilt_get_dom_td_integer
  3687. SUBROUTINE wrf_quilt_put_dom_td_integer ( DataHandle,Element, DateStr, Data, Count, Status )
  3688. !<DESCRIPTION>
  3689. ! Instruct the I/O quilt servers to write Count words of time dependent
  3690. ! domain metadata named "Element" valid at time DateStr
  3691. ! to the open dataset described by DataHandle.
  3692. ! Metadata of type integer are
  3693. ! copied from array Data.
  3694. ! This routine is called only by client (compute) tasks.
  3695. !
  3696. ! This is not yet supported.
  3697. !</DESCRIPTION>
  3698. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3699. IMPLICIT NONE
  3700. INTEGER , INTENT(IN) :: DataHandle
  3701. CHARACTER*(*) , INTENT(IN) :: Element
  3702. CHARACTER*(*) , INTENT(IN) :: DateStr
  3703. integer , INTENT(IN) :: Data(*)
  3704. INTEGER , INTENT(IN) :: Count
  3705. INTEGER :: Status
  3706. #endif
  3707. RETURN
  3708. END SUBROUTINE wrf_quilt_put_dom_td_integer
  3709. SUBROUTINE wrf_quilt_get_dom_td_logical ( DataHandle,Element, DateStr, Data, Count, Outcount, Status )
  3710. !<DESCRIPTION>
  3711. ! Instruct the I/O quilt servers to attempt to read Count words of time
  3712. ! dependent domain metadata named "Element" valid at time DateStr
  3713. ! from the open dataset described by DataHandle.
  3714. ! Metadata of type logical are
  3715. ! stored in array Data.
  3716. ! Actual number of words read is returned in OutCount.
  3717. ! This routine is called only by client (compute) tasks.
  3718. !
  3719. ! This is not yet supported.
  3720. !</DESCRIPTION>
  3721. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3722. IMPLICIT NONE
  3723. INTEGER , INTENT(IN) :: DataHandle
  3724. CHARACTER*(*) , INTENT(IN) :: Element
  3725. CHARACTER*(*) , INTENT(IN) :: DateStr
  3726. logical :: Data(*)
  3727. INTEGER , INTENT(IN) :: Count
  3728. INTEGER :: OutCount
  3729. INTEGER :: Status
  3730. #endif
  3731. RETURN
  3732. END SUBROUTINE wrf_quilt_get_dom_td_logical
  3733. SUBROUTINE wrf_quilt_put_dom_td_logical ( DataHandle,Element, DateStr, Data, Count, Status )
  3734. !<DESCRIPTION>
  3735. ! Instruct the I/O quilt servers to write Count words of time dependent
  3736. ! domain metadata named "Element" valid at time DateStr
  3737. ! to the open dataset described by DataHandle.
  3738. ! Metadata of type logical are
  3739. ! copied from array Data.
  3740. ! This routine is called only by client (compute) tasks.
  3741. !
  3742. ! This is not yet supported.
  3743. !</DESCRIPTION>
  3744. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3745. IMPLICIT NONE
  3746. INTEGER , INTENT(IN) :: DataHandle
  3747. CHARACTER*(*) , INTENT(IN) :: Element
  3748. CHARACTER*(*) , INTENT(IN) :: DateStr
  3749. logical , INTENT(IN) :: Data(*)
  3750. INTEGER , INTENT(IN) :: Count
  3751. INTEGER :: Status
  3752. #endif
  3753. RETURN
  3754. END SUBROUTINE wrf_quilt_put_dom_td_logical
  3755. SUBROUTINE wrf_quilt_get_dom_td_char ( DataHandle,Element, DateStr, Data, Status )
  3756. !<DESCRIPTION>
  3757. ! Instruct the I/O quilt servers to attempt to read time dependent
  3758. ! domain metadata named "Element" valid at time DateStr
  3759. ! from the open dataset described by DataHandle.
  3760. ! Metadata of type char are
  3761. ! stored in string Data.
  3762. ! This routine is called only by client (compute) tasks.
  3763. !
  3764. ! This is not yet supported.
  3765. !</DESCRIPTION>
  3766. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3767. IMPLICIT NONE
  3768. INTEGER , INTENT(IN) :: DataHandle
  3769. CHARACTER*(*) , INTENT(IN) :: Element
  3770. CHARACTER*(*) , INTENT(IN) :: DateStr
  3771. CHARACTER*(*) :: Data
  3772. INTEGER :: Status
  3773. #endif
  3774. RETURN
  3775. END SUBROUTINE wrf_quilt_get_dom_td_char
  3776. SUBROUTINE wrf_quilt_put_dom_td_char ( DataHandle,Element, DateStr, Data, Status )
  3777. !<DESCRIPTION>
3778. ! Instruct the I/O quilt servers to write time dependent
  3779. ! domain metadata named "Element" valid at time DateStr
  3780. ! to the open dataset described by DataHandle.
  3781. ! Metadata of type char are
  3782. ! copied from string Data.
  3783. ! This routine is called only by client (compute) tasks.
  3784. !
  3785. ! This is not yet supported.
  3786. !</DESCRIPTION>
  3787. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3788. IMPLICIT NONE
  3789. INTEGER , INTENT(IN) :: DataHandle
  3790. CHARACTER*(*) , INTENT(IN) :: Element
  3791. CHARACTER*(*) , INTENT(IN) :: DateStr
  3792. CHARACTER*(*) , INTENT(IN) :: Data
  3793. INTEGER :: Status
  3794. #endif
  3795. RETURN
  3796. END SUBROUTINE wrf_quilt_put_dom_td_char
  3797. SUBROUTINE wrf_quilt_get_var_ti_real ( DataHandle,Element, Varname, Data, Count, Outcount, Status )
  3798. !<DESCRIPTION>
  3799. ! Instruct the I/O quilt servers to attempt to read Count words of time
  3800. ! independent attribute "Element" of variable "Varname"
  3801. ! from the open dataset described by DataHandle.
  3802. ! Attribute of type real is
  3803. ! stored in array Data.
  3804. ! Actual number of words read is returned in OutCount.
  3805. ! This routine is called only by client (compute) tasks.
  3806. !
  3807. ! This is not yet supported.
  3808. !</DESCRIPTION>
  3809. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3810. IMPLICIT NONE
  3811. INTEGER , INTENT(IN) :: DataHandle
  3812. CHARACTER*(*) , INTENT(IN) :: Element
  3813. CHARACTER*(*) , INTENT(IN) :: VarName
  3814. real :: Data(*)
  3815. INTEGER , INTENT(IN) :: Count
  3816. INTEGER :: OutCount
  3817. INTEGER :: Status
  3818. #endif
  3819. RETURN
  3820. END SUBROUTINE wrf_quilt_get_var_ti_real
  3821. SUBROUTINE wrf_quilt_put_var_ti_real ( DataHandle,Element, Varname, Data, Count, Status )
  3822. !<DESCRIPTION>
  3823. ! Instruct the I/O quilt servers to write Count words of time independent
  3824. ! attribute "Element" of variable "Varname"
  3825. ! to the open dataset described by DataHandle.
  3826. ! Attribute of type real is
  3827. ! copied from array Data.
  3828. ! This routine is called only by client (compute) tasks.
  3829. !
  3830. ! This is not yet supported.
  3831. !</DESCRIPTION>
  3832. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3833. IMPLICIT NONE
  3834. INTEGER , INTENT(IN) :: DataHandle
  3835. CHARACTER*(*) , INTENT(IN) :: Element
  3836. CHARACTER*(*) , INTENT(IN) :: VarName
  3837. real , INTENT(IN) :: Data(*)
  3838. INTEGER , INTENT(IN) :: Count
  3839. INTEGER :: Status
  3840. #endif
  3841. RETURN
  3842. END SUBROUTINE wrf_quilt_put_var_ti_real
  3843. SUBROUTINE wrf_quilt_get_var_ti_double ( DataHandle,Element, Varname, Data, Count, Outcount, Status )
  3844. !<DESCRIPTION>
  3845. ! Instruct the I/O quilt servers to attempt to read Count words of time
  3846. ! independent attribute "Element" of variable "Varname"
  3847. ! from the open dataset described by DataHandle.
  3848. ! Attribute of type double is
  3849. ! stored in array Data.
  3850. ! Actual number of words read is returned in OutCount.
  3851. ! This routine is called only by client (compute) tasks.
  3852. !
  3853. ! This is not yet supported.
  3854. !</DESCRIPTION>
  3855. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3856. IMPLICIT NONE
  3857. INTEGER , INTENT(IN) :: DataHandle
  3858. CHARACTER*(*) , INTENT(IN) :: Element
  3859. CHARACTER*(*) , INTENT(IN) :: VarName
  3860. real*8 :: Data(*)
  3861. INTEGER , INTENT(IN) :: Count
  3862. INTEGER :: OutCount
  3863. INTEGER :: Status
  3864. #endif
  3865. CALL wrf_error_fatal('wrf_quilt_get_var_ti_double not supported yet')
  3866. RETURN
  3867. END SUBROUTINE wrf_quilt_get_var_ti_double
  3868. SUBROUTINE wrf_quilt_put_var_ti_double ( DataHandle,Element, Varname, Data, Count, Status )
  3869. !<DESCRIPTION>
  3870. ! Instruct the I/O quilt servers to write Count words of time independent
  3871. ! attribute "Element" of variable "Varname"
  3872. ! to the open dataset described by DataHandle.
  3873. ! Attribute of type double is
  3874. ! copied from array Data.
  3875. ! This routine is called only by client (compute) tasks.
  3876. !
  3877. ! This is not yet supported.
  3878. !</DESCRIPTION>
  3879. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3880. IMPLICIT NONE
  3881. INTEGER , INTENT(IN) :: DataHandle
  3882. CHARACTER*(*) , INTENT(IN) :: Element
  3883. CHARACTER*(*) , INTENT(IN) :: VarName
  3884. real*8 , INTENT(IN) :: Data(*)
  3885. INTEGER , INTENT(IN) :: Count
  3886. INTEGER :: Status
  3887. #endif
  3888. CALL wrf_error_fatal('wrf_quilt_put_var_ti_double not supported yet')
  3889. RETURN
  3890. END SUBROUTINE wrf_quilt_put_var_ti_double
  3891. SUBROUTINE wrf_quilt_get_var_ti_integer ( DataHandle,Element, Varname, Data, Count, Outcount, Status )
  3892. !<DESCRIPTION>
  3893. ! Instruct the I/O quilt servers to attempt to read Count words of time
  3894. ! independent attribute "Element" of variable "Varname"
  3895. ! from the open dataset described by DataHandle.
  3896. ! Attribute of type integer is
  3897. ! stored in array Data.
  3898. ! Actual number of words read is returned in OutCount.
  3899. ! This routine is called only by client (compute) tasks.
  3900. !
  3901. ! This is not yet supported.
  3902. !</DESCRIPTION>
  3903. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3904. IMPLICIT NONE
  3905. INTEGER , INTENT(IN) :: DataHandle
  3906. CHARACTER*(*) , INTENT(IN) :: Element
  3907. CHARACTER*(*) , INTENT(IN) :: VarName
  3908. integer :: Data(*)
  3909. INTEGER , INTENT(IN) :: Count
  3910. INTEGER :: OutCount
  3911. INTEGER :: Status
  3912. #endif
  3913. RETURN
  3914. END SUBROUTINE wrf_quilt_get_var_ti_integer
  3915. SUBROUTINE wrf_quilt_put_var_ti_integer ( DataHandle,Element, Varname, Data, Count, Status )
  3916. !<DESCRIPTION>
  3917. ! Instruct the I/O quilt servers to write Count words of time independent
  3918. ! attribute "Element" of variable "Varname"
  3919. ! to the open dataset described by DataHandle.
  3920. ! Attribute of type integer is
  3921. ! copied from array Data.
  3922. ! This routine is called only by client (compute) tasks.
  3923. !
  3924. ! This is not yet supported.
  3925. !</DESCRIPTION>
  3926. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3927. IMPLICIT NONE
  3928. INTEGER , INTENT(IN) :: DataHandle
  3929. CHARACTER*(*) , INTENT(IN) :: Element
  3930. CHARACTER*(*) , INTENT(IN) :: VarName
  3931. integer , INTENT(IN) :: Data(*)
  3932. INTEGER , INTENT(IN) :: Count
  3933. INTEGER :: Status
  3934. #endif
  3935. RETURN
  3936. END SUBROUTINE wrf_quilt_put_var_ti_integer
  3937. SUBROUTINE wrf_quilt_get_var_ti_logical ( DataHandle,Element, Varname, Data, Count, Outcount, Status )
  3938. !<DESCRIPTION>
  3939. ! Instruct the I/O quilt servers to attempt to read Count words of time
  3940. ! independent attribute "Element" of variable "Varname"
  3941. ! from the open dataset described by DataHandle.
  3942. ! Attribute of type logical is
  3943. ! stored in array Data.
  3944. ! Actual number of words read is returned in OutCount.
  3945. ! This routine is called only by client (compute) tasks.
  3946. !
  3947. ! This is not yet supported.
  3948. !</DESCRIPTION>
  3949. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3950. IMPLICIT NONE
  3951. INTEGER , INTENT(IN) :: DataHandle
  3952. CHARACTER*(*) , INTENT(IN) :: Element
  3953. CHARACTER*(*) , INTENT(IN) :: VarName
  3954. logical :: Data(*)
  3955. INTEGER , INTENT(IN) :: Count
  3956. INTEGER :: OutCount
  3957. INTEGER :: Status
  3958. #endif
  3959. RETURN
  3960. END SUBROUTINE wrf_quilt_get_var_ti_logical
  3961. SUBROUTINE wrf_quilt_put_var_ti_logical ( DataHandle,Element, Varname, Data, Count, Status )
  3962. !<DESCRIPTION>
  3963. ! Instruct the I/O quilt servers to write Count words of time independent
  3964. ! attribute "Element" of variable "Varname"
  3965. ! to the open dataset described by DataHandle.
  3966. ! Attribute of type logical is
  3967. ! copied from array Data.
  3968. ! This routine is called only by client (compute) tasks.
  3969. !
  3970. ! This is not yet supported.
  3971. !</DESCRIPTION>
  3972. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3973. IMPLICIT NONE
  3974. INTEGER , INTENT(IN) :: DataHandle
  3975. CHARACTER*(*) , INTENT(IN) :: Element
  3976. CHARACTER*(*) , INTENT(IN) :: VarName
  3977. logical , INTENT(IN) :: Data(*)
  3978. INTEGER , INTENT(IN) :: Count
  3979. INTEGER :: Status
  3980. #endif
  3981. RETURN
  3982. END SUBROUTINE wrf_quilt_put_var_ti_logical
  3983. SUBROUTINE wrf_quilt_get_var_ti_char ( DataHandle,Element, Varname, Data, Status )
  3984. !<DESCRIPTION>
  3985. ! Instruct the I/O quilt servers to attempt to read time independent
  3986. ! attribute "Element" of variable "Varname"
  3987. ! from the open dataset described by DataHandle.
  3988. ! Attribute of type char is
  3989. ! stored in string Data.
  3990. ! This routine is called only by client (compute) tasks.
  3991. !
  3992. ! This is not yet supported.
  3993. !</DESCRIPTION>
  3994. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  3995. IMPLICIT NONE
  3996. INTEGER , INTENT(IN) :: DataHandle
  3997. CHARACTER*(*) , INTENT(IN) :: Element
  3998. CHARACTER*(*) , INTENT(IN) :: VarName
  3999. CHARACTER*(*) :: Data
  4000. INTEGER :: Status
  4001. #endif
  4002. RETURN
  4003. END SUBROUTINE wrf_quilt_get_var_ti_char
  4004. SUBROUTINE wrf_quilt_put_var_ti_char ( DataHandle,Element, Varname, Data, Status )
  4005. !<DESCRIPTION>
  4006. ! Instruct the I/O quilt servers to write time independent
  4007. ! attribute "Element" of variable "Varname"
  4008. ! to the open dataset described by DataHandle.
  4009. ! Attribute of type char is
  4010. ! copied from string Data.
  4011. ! This routine is called only by client (compute) tasks.
  4012. !</DESCRIPTION>
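!
! A minimal usage sketch (hedged: 'dh' and the attribute values below are
! hypothetical; in practice this routine is reached through the
! package-independent interfaces in module_io.F rather than called directly):
!
!   CALL wrf_quilt_put_var_ti_char ( dh, 'units', 'U', 'm s-1', Status )
!
! Only one compute task (the monitor, or the compute group master under
! PNETCDF_QUILT) generates a real header; the rest send no-op headers so
! the collective collect_on_comm below still matches up.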
  4013. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  4014. USE module_wrf_quilt
  4015. IMPLICIT NONE
  4016. INCLUDE 'mpif.h'
  4017. #include "intio_tags.h"
  4018. INTEGER , INTENT(IN) :: DataHandle
  4019. CHARACTER*(*) , INTENT(IN) :: Element
  4020. CHARACTER*(*) , INTENT(IN) :: VarName
  4021. CHARACTER*(*) , INTENT(IN) :: Data
  4022. INTEGER :: Status
  4023. INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group
  4024. REAL dummy
  4025. !
  4026. !!JMTIMING CALL start_timing
  4027. CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_put_var_ti_char' )
  4028. IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
  4029. IF ( int_handle_in_use( DataHandle ) ) THEN
  4030. CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
  4031. #ifdef PNETCDF_QUILT
  4032. IF ( compute_group_master(1) ) THEN
  4033. CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
  4034. DataHandle, TRIM(Element), &
  4035. TRIM(VarName), TRIM(Data), int_var_ti_char )
  4036. ELSE
  4037. CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
  4038. ENDIF
  4039. #else
  4040. IF ( wrf_dm_on_monitor() ) THEN
  4041. CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
  4042. DataHandle, TRIM(Element), &
  4043. TRIM(VarName), TRIM(Data), int_var_ti_char )
  4044. ELSE
  4045. CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
  4046. ENDIF
  4047. #endif
  4048. iserver = get_server_id ( DataHandle )
  4049. CALL get_mpi_comm_io_groups( comm_io_group , iserver )
  4050. CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
  4051. !!JMTIMING CALL start_timing
4052. ! send the size of my local buffer to the I/O task; the server is rank tasks_in_group-1 in comm_io_group (reduced_dummy doesn't mean anything on the client side)
  4053. reduced = 0
  4054. reduced(1) = hdrbufsize
  4055. #ifdef PNETCDF_QUILT
  4056. IF ( compute_group_master(1) ) reduced(2) = DataHandle
  4057. #else
  4058. IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
  4059. #endif
  4060. CALL mpi_x_reduce( reduced, reduced_dummy, 2, MPI_INTEGER, MPI_SUM, tasks_in_group-1, comm_io_group, ierr )
  4061. !!JMTIMING CALL end_timing("MPI_Reduce in wrf_quilt_put_var_ti_char")
  4062. ! send data to the i/o processor
  4063. CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
  4064. onebyte, &
  4065. hdrbuf, hdrbufsize , &
  4066. dummy, 0 )
  4067. ENDIF
  4068. ENDIF
4069. !!JMTIMING CALL end_timing("wrf_quilt_put_var_ti_char" )
  4070. #endif
  4071. RETURN
  4072. END SUBROUTINE wrf_quilt_put_var_ti_char
  4073. SUBROUTINE wrf_quilt_get_var_td_real ( DataHandle,Element, DateStr,Varname, Data, Count, Outcount, Status )
  4074. !<DESCRIPTION>
  4075. ! Instruct the I/O quilt servers to attempt to read Count words of time
  4076. ! dependent attribute "Element" of variable "Varname" valid at time DateStr
  4077. ! from the open dataset described by DataHandle.
  4078. ! Attribute of type real is
  4079. ! stored in array Data.
  4080. ! Actual number of words read is returned in OutCount.
  4081. ! This routine is called only by client (compute) tasks.
  4082. !
  4083. ! This is not yet supported.
  4084. !</DESCRIPTION>
  4085. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  4086. IMPLICIT NONE
  4087. INTEGER , INTENT(IN) :: DataHandle
  4088. CHARACTER*(*) , INTENT(IN) :: Element
  4089. CHARACTER*(*) , INTENT(IN) :: DateStr
  4090. CHARACTER*(*) , INTENT(IN) :: VarName
  4091. real :: Data(*)
  4092. INTEGER , INTENT(IN) :: Count
  4093. INTEGER :: OutCount
  4094. INTEGER :: Status
  4095. #endif
  4096. RETURN
  4097. END SUBROUTINE wrf_quilt_get_var_td_real
  4098. SUBROUTINE wrf_quilt_put_var_td_real ( DataHandle,Element, DateStr,Varname, Data, Count, Status )
  4099. !<DESCRIPTION>
  4100. ! Instruct the I/O quilt servers to write Count words of time dependent
  4101. ! attribute "Element" of variable "Varname" valid at time DateStr
  4102. ! to the open dataset described by DataHandle.
  4103. ! Attribute of type real is
  4104. ! copied from array Data.
  4105. ! This routine is called only by client (compute) tasks.
  4106. !
  4107. ! This is not yet supported.
  4108. !</DESCRIPTION>
  4109. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  4110. IMPLICIT NONE
  4111. INTEGER , INTENT(IN) :: DataHandle
  4112. CHARACTER*(*) , INTENT(IN) :: Element
  4113. CHARACTER*(*) , INTENT(IN) :: DateStr
  4114. CHARACTER*(*) , INTENT(IN) :: VarName
  4115. real , INTENT(IN) :: Data(*)
  4116. INTEGER , INTENT(IN) :: Count
  4117. INTEGER :: Status
  4118. #endif
  4119. RETURN
  4120. END SUBROUTINE wrf_quilt_put_var_td_real
  4121. SUBROUTINE wrf_quilt_get_var_td_double ( DataHandle,Element, DateStr,Varname, Data, Count, Outcount, Status )
  4122. !<DESCRIPTION>
  4123. ! Instruct the I/O quilt servers to attempt to read Count words of time
  4124. ! dependent attribute "Element" of variable "Varname" valid at time DateStr
  4125. ! from the open dataset described by DataHandle.
  4126. ! Attribute of type double is
  4127. ! stored in array Data.
  4128. ! Actual number of words read is returned in OutCount.
  4129. ! This routine is called only by client (compute) tasks.
  4130. !
  4131. ! This is not yet supported.
  4132. !</DESCRIPTION>
  4133. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  4134. IMPLICIT NONE
  4135. INTEGER , INTENT(IN) :: DataHandle
  4136. CHARACTER*(*) , INTENT(IN) :: Element
  4137. CHARACTER*(*) , INTENT(IN) :: DateStr
  4138. CHARACTER*(*) , INTENT(IN) :: VarName
  4139. real*8 :: Data(*)
  4140. INTEGER , INTENT(IN) :: Count
  4141. INTEGER :: OutCount
  4142. INTEGER :: Status
  4143. #endif
  4144. CALL wrf_error_fatal('wrf_quilt_get_var_td_double not supported yet')
  4145. RETURN
  4146. END SUBROUTINE wrf_quilt_get_var_td_double
  4147. SUBROUTINE wrf_quilt_put_var_td_double ( DataHandle,Element, DateStr,Varname, Data, Count, Status )
  4148. !<DESCRIPTION>
  4149. ! Instruct the I/O quilt servers to write Count words of time dependent
  4150. ! attribute "Element" of variable "Varname" valid at time DateStr
  4151. ! to the open dataset described by DataHandle.
  4152. ! Attribute of type double is
  4153. ! copied from array Data.
  4154. ! This routine is called only by client (compute) tasks.
  4155. !
  4156. ! This is not yet supported.
  4157. !</DESCRIPTION>
  4158. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  4159. IMPLICIT NONE
  4160. INTEGER , INTENT(IN) :: DataHandle
  4161. CHARACTER*(*) , INTENT(IN) :: Element
  4162. CHARACTER*(*) , INTENT(IN) :: DateStr
  4163. CHARACTER*(*) , INTENT(IN) :: VarName
  4164. real*8 , INTENT(IN) :: Data(*)
  4165. INTEGER , INTENT(IN) :: Count
  4166. INTEGER :: Status
  4167. #endif
  4168. CALL wrf_error_fatal('wrf_quilt_put_var_td_double not supported yet')
  4169. RETURN
  4170. END SUBROUTINE wrf_quilt_put_var_td_double
  4171. SUBROUTINE wrf_quilt_get_var_td_integer ( DataHandle,Element, DateStr,Varname, Data, Count, Outcount,Status)
  4172. !<DESCRIPTION>
  4173. ! Instruct the I/O quilt servers to attempt to read Count words of time
  4174. ! dependent attribute "Element" of variable "Varname" valid at time DateStr
  4175. ! from the open dataset described by DataHandle.
  4176. ! Attribute of type integer is
  4177. ! stored in array Data.
  4178. ! Actual number of words read is returned in OutCount.
  4179. ! This routine is called only by client (compute) tasks.
  4180. !
  4181. ! This is not yet supported.
  4182. !</DESCRIPTION>
  4183. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  4184. IMPLICIT NONE
  4185. INTEGER , INTENT(IN) :: DataHandle
  4186. CHARACTER*(*) , INTENT(IN) :: Element
  4187. CHARACTER*(*) , INTENT(IN) :: DateStr
  4188. CHARACTER*(*) , INTENT(IN) :: VarName
  4189. integer :: Data(*)
  4190. INTEGER , INTENT(IN) :: Count
  4191. INTEGER :: OutCount
  4192. INTEGER :: Status
  4193. #endif
  4194. RETURN
  4195. END SUBROUTINE wrf_quilt_get_var_td_integer
  4196. SUBROUTINE wrf_quilt_put_var_td_integer ( DataHandle,Element, DateStr,Varname, Data, Count, Status )
  4197. !<DESCRIPTION>
  4198. ! Instruct the I/O quilt servers to write Count words of time dependent
  4199. ! attribute "Element" of variable "Varname" valid at time DateStr
  4200. ! to the open dataset described by DataHandle.
  4201. ! Attribute of type integer is
  4202. ! copied from array Data.
  4203. ! This routine is called only by client (compute) tasks.
  4204. !
  4205. ! This is not yet supported.
  4206. !</DESCRIPTION>
  4207. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  4208. IMPLICIT NONE
  4209. INTEGER , INTENT(IN) :: DataHandle
  4210. CHARACTER*(*) , INTENT(IN) :: Element
  4211. CHARACTER*(*) , INTENT(IN) :: DateStr
  4212. CHARACTER*(*) , INTENT(IN) :: VarName
  4213. integer , INTENT(IN) :: Data(*)
  4214. INTEGER , INTENT(IN) :: Count
  4215. INTEGER :: Status
  4216. #endif
  4217. RETURN
  4218. END SUBROUTINE wrf_quilt_put_var_td_integer
  4219. SUBROUTINE wrf_quilt_get_var_td_logical ( DataHandle,Element, DateStr,Varname, Data, Count, Outcount, Status )
  4220. !<DESCRIPTION>
  4221. ! Instruct the I/O quilt servers to attempt to read Count words of time
  4222. ! dependent attribute "Element" of variable "Varname" valid at time DateStr
  4223. ! from the open dataset described by DataHandle.
  4224. ! Attribute of type logical is
  4225. ! stored in array Data.
  4226. ! Actual number of words read is returned in OutCount.
  4227. ! This routine is called only by client (compute) tasks.
  4228. !
  4229. ! This is not yet supported.
  4230. !</DESCRIPTION>
  4231. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  4232. IMPLICIT NONE
  4233. INTEGER , INTENT(IN) :: DataHandle
  4234. CHARACTER*(*) , INTENT(IN) :: Element
  4235. CHARACTER*(*) , INTENT(IN) :: DateStr
  4236. CHARACTER*(*) , INTENT(IN) :: VarName
  4237. logical :: Data(*)
  4238. INTEGER , INTENT(IN) :: Count
  4239. INTEGER :: OutCount
  4240. INTEGER :: Status
  4241. #endif
  4242. RETURN
  4243. END SUBROUTINE wrf_quilt_get_var_td_logical
  4244. SUBROUTINE wrf_quilt_put_var_td_logical ( DataHandle,Element, DateStr,Varname, Data, Count, Status )
  4245. !<DESCRIPTION>
  4246. ! Instruct the I/O quilt servers to write Count words of time dependent
  4247. ! attribute "Element" of variable "Varname" valid at time DateStr
  4248. ! to the open dataset described by DataHandle.
  4249. ! Attribute of type logical is
  4250. ! copied from array Data.
  4251. ! This routine is called only by client (compute) tasks.
  4252. !
  4253. ! This is not yet supported.
  4254. !</DESCRIPTION>
  4255. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  4256. IMPLICIT NONE
  4257. INTEGER , INTENT(IN) :: DataHandle
  4258. CHARACTER*(*) , INTENT(IN) :: Element
  4259. CHARACTER*(*) , INTENT(IN) :: DateStr
  4260. CHARACTER*(*) , INTENT(IN) :: VarName
  4261. logical , INTENT(IN) :: Data(*)
  4262. INTEGER , INTENT(IN) :: Count
  4263. INTEGER :: Status
  4264. #endif
  4265. RETURN
  4266. END SUBROUTINE wrf_quilt_put_var_td_logical
  4267. SUBROUTINE wrf_quilt_get_var_td_char ( DataHandle,Element, DateStr,Varname, Data, Status )
  4268. !<DESCRIPTION>
  4269. ! Instruct the I/O quilt servers to attempt to read time dependent
  4270. ! attribute "Element" of variable "Varname" valid at time DateStr
  4271. ! from the open dataset described by DataHandle.
  4272. ! Attribute of type char is
  4273. ! stored in string Data.
  4274. ! This routine is called only by client (compute) tasks.
  4275. !
  4276. ! This is not yet supported.
  4277. !</DESCRIPTION>
  4278. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  4279. IMPLICIT NONE
  4280. INTEGER , INTENT(IN) :: DataHandle
  4281. CHARACTER*(*) , INTENT(IN) :: Element
  4282. CHARACTER*(*) , INTENT(IN) :: DateStr
  4283. CHARACTER*(*) , INTENT(IN) :: VarName
  4284. CHARACTER*(*) :: Data
  4285. INTEGER :: Status
  4286. #endif
  4287. RETURN
  4288. END SUBROUTINE wrf_quilt_get_var_td_char
  4289. SUBROUTINE wrf_quilt_put_var_td_char ( DataHandle,Element, DateStr,Varname, Data, Status )
  4290. !<DESCRIPTION>
  4291. ! Instruct the I/O quilt servers to write time dependent
  4292. ! attribute "Element" of variable "Varname" valid at time DateStr
  4293. ! to the open dataset described by DataHandle.
  4294. ! Attribute of type char is
  4295. ! copied from string Data.
  4296. ! This routine is called only by client (compute) tasks.
  4297. !
  4298. ! This is not yet supported.
  4299. !</DESCRIPTION>
  4300. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  4301. IMPLICIT NONE
  4302. INTEGER , INTENT(IN) :: DataHandle
  4303. CHARACTER*(*) , INTENT(IN) :: Element
  4304. CHARACTER*(*) , INTENT(IN) :: DateStr
  4305. CHARACTER*(*) , INTENT(IN) :: VarName
  4306. CHARACTER*(*) , INTENT(IN) :: Data
  4307. INTEGER :: Status
  4308. #endif
  4309. RETURN
  4310. END SUBROUTINE wrf_quilt_put_var_td_char
  4311. SUBROUTINE wrf_quilt_read_field ( DataHandle , DateStr , VarName , Field , FieldType , Comm , IOComm, &
  4312. DomainDesc , MemoryOrder , Stagger , DimNames , &
  4313. DomainStart , DomainEnd , &
  4314. MemoryStart , MemoryEnd , &
  4315. PatchStart , PatchEnd , &
  4316. Status )
  4317. !<DESCRIPTION>
  4318. ! Instruct the I/O quilt servers to read the variable named VarName from the
  4319. ! dataset pointed to by DataHandle.
  4320. ! This routine is called only by client (compute) tasks.
  4321. !
  4322. ! This is not yet supported.
  4323. !</DESCRIPTION>
  4324. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  4325. IMPLICIT NONE
  4326. INTEGER , INTENT(IN) :: DataHandle
  4327. CHARACTER*(*) , INTENT(INOUT) :: DateStr
  4328. CHARACTER*(*) , INTENT(INOUT) :: VarName
  4329. INTEGER , INTENT(INOUT) :: Field(*)
  4330. integer ,intent(in) :: FieldType
  4331. integer ,intent(inout) :: Comm
  4332. integer ,intent(inout) :: IOComm
  4333. integer ,intent(in) :: DomainDesc
  4334. character*(*) ,intent(in) :: MemoryOrder
  4335. character*(*) ,intent(in) :: Stagger
  4336. character*(*) , dimension (*) ,intent(in) :: DimNames
  4337. integer ,dimension(*) ,intent(in) :: DomainStart, DomainEnd
  4338. integer ,dimension(*) ,intent(in) :: MemoryStart, MemoryEnd
  4339. integer ,dimension(*) ,intent(in) :: PatchStart, PatchEnd
  4340. integer ,intent(out) :: Status
  4341. Status = 0
  4342. #endif
  4343. RETURN
  4344. END SUBROUTINE wrf_quilt_read_field
  4345. SUBROUTINE wrf_quilt_write_field ( DataHandle , DateStr , VarName , Field , FieldType , Comm , IOComm, &
  4346. DomainDesc , MemoryOrder , Stagger , DimNames , &
  4347. DomainStart , DomainEnd , &
  4348. MemoryStart , MemoryEnd , &
  4349. PatchStart , PatchEnd , &
  4350. Status )
  4351. !<DESCRIPTION>
  4352. ! Prepare instructions for the I/O quilt servers to write the variable named
  4353. ! VarName to the dataset pointed to by DataHandle.
  4354. !
  4355. ! During a "training" write this routine accumulates number and sizes of
  4356. ! messages that will be sent to the I/O server associated with this compute
  4357. ! (client) task.
  4358. !
  4359. ! During a "real" write, this routine begins by allocating
  4360. ! int_local_output_buffer if it has not already been allocated. Sizes
  4361. ! accumulated during "training" are used to determine how big
  4362. ! int_local_output_buffer must be. This routine then stores "int_field"
  4363. ! headers and associated field data in int_local_output_buffer. The contents
  4364. ! of int_local_output_buffer are actually sent to the I/O quilt server in
  4365. ! routine wrf_quilt_iosync(). This scheme allows output of multiple variables
  4366. ! to be aggregated into a single "iosync" operation.
  4367. ! This routine is called only by client (compute) tasks.
  4368. !</DESCRIPTION>
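!
! A sketch of the sequence as seen from one compute task (hedged: the exact
! call path runs through module_io.F; the names below are the quilt routines
! in this file):
!
!   ! while okay_to_write(DataHandle) is .FALSE. ("training"):
!   CALL wrf_quilt_write_field( ... )   ! bookkeeping only: accumulates
!                                       ! int_num_bytes_to_write(DataHandle)
!   ! after the open-for-write commit, okay_to_write(DataHandle) is .TRUE.:
!   CALL wrf_quilt_write_field( ... )   ! packs header + patch data into
!                                       ! int_local_output_buffer
!   CALL wrf_quilt_iosync( ... )        ! ships the buffer to the I/O servers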
  4369. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  4370. USE module_state_description
  4371. USE module_wrf_quilt
  4372. IMPLICIT NONE
  4373. INCLUDE 'mpif.h'
  4374. #include "wrf_io_flags.h"
  4375. INTEGER , INTENT(IN) :: DataHandle
  4376. CHARACTER*(*) , INTENT(IN) :: DateStr
  4377. CHARACTER*(*) , INTENT(IN) :: VarName
  4378. ! INTEGER , INTENT(IN) :: Field(*)
  4379. integer ,intent(in) :: FieldType
  4380. integer ,intent(inout) :: Comm
  4381. integer ,intent(inout) :: IOComm
  4382. integer ,intent(in) :: DomainDesc
  4383. character*(*) ,intent(in) :: MemoryOrder
  4384. character*(*) ,intent(in) :: Stagger
  4385. character*(*) , dimension (*) ,intent(in) :: DimNames
  4386. integer ,dimension(*) ,intent(in) :: DomainStart, DomainEnd
  4387. integer ,dimension(*) ,intent(in) :: MemoryStart, MemoryEnd
  4388. integer ,dimension(*) ,intent(in) :: PatchStart, PatchEnd
  4389. integer ,intent(out) :: Status
  4390. integer ii,jj,kk,myrank
  4391. REAL, DIMENSION( MemoryStart(1):MemoryEnd(1), &
  4392. MemoryStart(2):MemoryEnd(2), &
  4393. MemoryStart(3):MemoryEnd(3) ) :: Field
  4394. INTEGER locsize , typesize, itypesize
  4395. INTEGER ierr, tasks_in_group, comm_io_group, dummy, i
  4396. INTEGER, EXTERNAL :: use_package
  4397. !!ARPTIMING CALL start_timing
  4398. CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_write_field' )
  4399. IF ( .NOT. (DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles) ) THEN
  4400. CALL wrf_error_fatal("frame/module_io_quilt.F: wrf_quilt_write_field: invalid data handle" )
  4401. ENDIF
  4402. IF ( .NOT. int_handle_in_use( DataHandle ) ) THEN
  4403. CALL wrf_error_fatal("frame/module_io_quilt.F: wrf_quilt_write_field: DataHandle not opened" )
  4404. ENDIF
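! Number of elements in this task's patch (bytes = locsize * typesize):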
  4405. locsize = (PatchEnd(1)-PatchStart(1)+1)* &
  4406. (PatchEnd(2)-PatchStart(2)+1)* &
  4407. (PatchEnd(3)-PatchStart(3)+1)
  4408. CALL mpi_type_size( MPI_INTEGER, itypesize, ierr )
  4409. ! Note that the WRF_DOUBLE branch of this IF statement must come first since
  4410. ! WRF_FLOAT is set equal to WRF_DOUBLE during autopromotion builds.
  4411. IF ( FieldType .EQ. WRF_DOUBLE ) THEN
  4412. CALL mpi_type_size( MPI_DOUBLE_PRECISION, typesize, ierr )
  4413. ELSE IF ( FieldType .EQ. WRF_FLOAT ) THEN
  4414. CALL mpi_type_size( MPI_REAL, typesize, ierr )
  4415. ELSE IF ( FieldType .EQ. WRF_INTEGER ) THEN
  4416. CALL mpi_type_size( MPI_INTEGER, typesize, ierr )
  4417. ELSE IF ( FieldType .EQ. WRF_LOGICAL ) THEN
  4418. CALL mpi_type_size( MPI_LOGICAL, typesize, ierr )
  4419. ENDIF
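! (No ELSE branch here: typesize is left unset for any FieldType other than
! these four; callers are assumed to pass one of the WRF_* type codes from
! wrf_io_flags.h.)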
  4420. IF ( .NOT. okay_to_write( DataHandle ) ) THEN
  4421. ! This is a "training" write.
  4422. ! it is not okay to actually write; what we do here is just "bookkeep": count up
  4423. ! the number and size of messages that we will output to io server associated with
  4424. ! this task
  4425. CALL int_gen_write_field_header ( hdrbuf, hdrbufsize, itypesize, typesize, &
  4426. DataHandle , DateStr , VarName , Field , FieldType , Comm , IOComm, &
  4427. 333933 , MemoryOrder , Stagger , DimNames , & ! 333933 means training; magic number
  4428. DomainStart , DomainEnd , &
  4429. MemoryStart , MemoryEnd , &
  4430. PatchStart , PatchEnd )
  4431. int_num_bytes_to_write(DataHandle) = int_num_bytes_to_write(DataHandle) + locsize * typesize + hdrbufsize
  4432. ! Send the hdr for the write in case the interface is calling the I/O API in "learn" mode
  4433. iserver = get_server_id ( DataHandle )
  4434. !JMDEBUGwrite(0,*)'wrf_quilt_write_field (dryrun) ',iserver
  4435. CALL get_mpi_comm_io_groups( comm_io_group , iserver )
4436. ! send the size of my local buffer to the I/O task (obufsize doesn't mean anything here)
  4437. CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
  4438. #if 0
  4439. IF ( .NOT. wrf_dm_on_monitor() ) THEN ! only one task in compute grid sends this message; send noops on others
  4440. CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
  4441. ENDIF
  4442. #endif
  4443. !!ARPTIMING CALL start_timing
4444. ! send the size of my local buffer to the I/O task (reduced_dummy doesn't mean anything on the client side)
  4445. reduced = 0
  4446. reduced(1) = hdrbufsize
  4447. #ifdef PNETCDF_QUILT
  4448. IF ( compute_group_master(1) ) reduced(2) = DataHandle
  4449. #else
  4450. IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
  4451. #endif
  4452. CALL mpi_x_reduce( reduced, reduced_dummy, 2, MPI_INTEGER, MPI_SUM, tasks_in_group-1, comm_io_group, ierr )
  4453. !!ARPTIMING CALL end_timing("MPI_Reduce in wrf_quilt_write_field dryrun")
  4454. ! send data to the i/o processor
  4455. CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
  4456. onebyte, &
  4457. hdrbuf, hdrbufsize , &
  4458. dummy, 0 )
  4459. ELSE
  4460. IF ( .NOT. associated( int_local_output_buffer ) ) THEN
  4461. ALLOCATE ( int_local_output_buffer( (int_num_bytes_to_write( DataHandle )+1)/itypesize ), Stat=ierr )
  4462. IF(ierr /= 0)THEN
  4463. CALL wrf_error_fatal("frame/module_io_quilt.F: wrf_quilt_write_field: allocate of int_local_output_buffer failed" )
  4464. END IF
  4465. int_local_output_cursor = 1
  4466. ENDIF
  4467. iserver = get_server_id ( DataHandle )
  4468. !JMDEBUGwrite(0,*)'wrf_quilt_write_field (writing) ',iserver
  4469. ! This is NOT a "training" write. It is OK to write now.
  4470. CALL int_gen_write_field_header ( hdrbuf, hdrbufsize, itypesize, typesize, &
  4471. DataHandle , DateStr , VarName , Field , FieldType , Comm , IOComm, &
  4472. 0 , MemoryOrder , Stagger , DimNames , & ! non-333933 means okay to write; magic number
  4473. DomainStart , DomainEnd , &
  4474. MemoryStart , MemoryEnd , &
  4475. PatchStart , PatchEnd )
  4476. ! Pack header into int_local_output_buffer. It will be sent to the
  4477. ! I/O servers during the next "iosync" operation.
  4478. #ifdef DEREF_KLUDGE
  4479. CALL int_pack_data ( hdrbuf , hdrbufsize , int_local_output_buffer(1), int_local_output_cursor )
  4480. #else
  4481. CALL int_pack_data ( hdrbuf , hdrbufsize , int_local_output_buffer, int_local_output_cursor )
  4482. #endif
  4483. ! Pack field data into int_local_output_buffer. It will be sent to the
  4484. ! I/O servers during the next "iosync" operation.
  4485. #ifdef DEREF_KLUDGE
  4486. CALL int_pack_data ( Field(PatchStart(1):PatchEnd(1),PatchStart(2):PatchEnd(2),PatchStart(3):PatchEnd(3) ), &
  4487. locsize * typesize , int_local_output_buffer(1), int_local_output_cursor )
  4488. #else
  4489. CALL int_pack_data ( Field(PatchStart(1):PatchEnd(1),PatchStart(2):PatchEnd(2),PatchStart(3):PatchEnd(3) ), &
  4490. locsize * typesize , int_local_output_buffer, int_local_output_cursor )
  4491. #endif
  4492. ENDIF
  4493. Status = 0
  4494. !!ARPTIMING CALL end_timing("wrf_quilt_write_field")
  4495. #endif
  4496. RETURN
  4497. END SUBROUTINE wrf_quilt_write_field
  4498. SUBROUTINE wrf_quilt_get_var_info ( DataHandle , VarName , NDim , MemoryOrder , Stagger , &
  4499. DomainStart , DomainEnd , Status )
  4500. !<DESCRIPTION>
  4501. ! This routine applies only to a dataset that is open for read. It instructs
  4502. ! the I/O quilt servers to return information about variable VarName.
  4503. ! This routine is called only by client (compute) tasks.
  4504. !
  4505. ! This is not yet supported.
  4506. !</DESCRIPTION>
  4507. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  4508. IMPLICIT NONE
  4509. integer ,intent(in) :: DataHandle
  4510. character*(*) ,intent(in) :: VarName
  4511. integer :: NDim
  4512. character*(*) :: MemoryOrder
  4513. character*(*) :: Stagger
  4514. integer ,dimension(*) :: DomainStart, DomainEnd
  4515. integer :: Status
  4516. #endif
  4517. RETURN
  4518. END SUBROUTINE wrf_quilt_get_var_info
  4519. subroutine wrf_quilt_find_server(iserver)
  4520. ! This routine is called by the compute processes when they need an
  4521. ! I/O server to write out a new file. Upon return, this routine will
  4522. ! set iserver to the next available I/O server group.
4523. ! An mpi_recv on mpi_comm_avail (with MPI_ANY_SOURCE) is used to implement
4524. ! this, and that recv will not return until an I/O server group calls
4525. ! wrf_quilt_server_ready to signal that it is ready for a new file.
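!
! Handshake sketch (the matching send is in wrf_quilt_server_ready):
!   compute monitor:  mpi_recv ( iserver,   ..., mpi_comm_avail )  <-- blocks
!   I/O group master: mpi_ssend( availrank, ..., mpi_comm_avail )
! The monitor then broadcasts iserver to the other compute tasks over
! mpi_comm_local.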
  4526. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  4527. use module_wrf_quilt, only : in_avail, mpi_comm_avail, mpi_comm_local
  4528. implicit none
  4529. INCLUDE 'mpif.h'
  4530. integer, intent(inout) :: iserver
  4531. integer :: ierr
  4532. character(255) :: message
  4533. call wrf_message('Polling I/O servers...')
  4534. if(in_avail) then
  4535. call mpi_recv(iserver,1,MPI_INTEGER,MPI_ANY_SOURCE,0,mpi_comm_avail,MPI_STATUS_IGNORE,ierr)
  4536. if(ierr/=0) then
  4537. call wrf_error_fatal('mpi_recv failed in wrf_quilt_find_server')
  4538. endif
  4539. endif
  4540. call mpi_bcast(iserver,1,MPI_INTEGER,0,mpi_comm_local,ierr)
  4541. if(ierr/=0) then
  4542. call wrf_error_fatal('mpi_bcast failed in wrf_quilt_find_server')
  4543. endif
  4544. write(message,'("I/O server ",I0," is ready for operations.")') iserver
  4545. call wrf_message(message)
  4546. #endif
  4547. end subroutine wrf_quilt_find_server
  4548. subroutine wrf_quilt_server_ready()
  4549. ! This routine is called by the I/O server group's master process once the
  4550. ! I/O server group is done writing its current file, and is waiting for
4551. ! a new one. That readiness is signaled to the monitor process by a
4552. ! blocking send (mpi_ssend) from the I/O server group's master process.
  4553. ! All processes in an I/O group must call this routine, and this routine
  4554. ! will not return (in any process) until the monitor process signals
  4555. ! that it wants the I/O server group to write a file. That signal is
  4556. ! sent in a call to wrf_quilt_find_server on the compute processes.
  4557. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  4558. use module_wrf_quilt, only : mpi_comm_local, in_avail, availrank, mpi_comm_avail
  4559. implicit none
  4560. INCLUDE 'mpif.h'
  4561. integer :: ierr
  4562. write(0,*) 'Entering wrf_quilt_server_ready.'
  4563. call mpi_barrier(mpi_comm_local,ierr)
  4564. if(ierr/=0) then
  4565. call wrf_error_fatal('mpi_barrier failed in wrf_quilt_server_ready')
  4566. endif
  4567. if(in_avail) then
  4568. write(0,'("mpi_ssend ioserver=",I0," in wrf_quilt_server_ready")') availrank
  4569. call mpi_ssend(availrank,1,MPI_INTEGER,0,0,mpi_comm_avail,ierr)
  4570. if(ierr/=0) then
  4571. call wrf_error_fatal('mpi_ssend failed in wrf_quilt_server_ready')
  4572. endif
  4573. endif
  4574. call mpi_barrier(mpi_comm_local,ierr)
  4575. if(ierr/=0) then
  4576. call wrf_error_fatal('mpi_barrier failed in wrf_quilt_server_ready')
  4577. endif
  4578. write(0,*) 'Leaving wrf_quilt_server_ready.'
  4579. #endif
  4580. end subroutine wrf_quilt_server_ready
  4581. SUBROUTINE get_mpi_comm_io_groups( retval, isrvr )
  4582. !<DESCRIPTION>
  4583. ! This routine returns the compute+io communicator to which this
  4584. ! compute task belongs for I/O server group "isrvr".
  4585. ! This routine is called only by client (compute) tasks.
  4586. !</DESCRIPTION>
  4587. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  4588. USE module_wrf_quilt
  4589. IMPLICIT NONE
  4590. INTEGER, INTENT(IN ) :: isrvr
  4591. INTEGER, INTENT(OUT) :: retval
  4592. retval = mpi_comm_io_groups(isrvr)
  4593. #endif
  4594. RETURN
  4595. END SUBROUTINE get_mpi_comm_io_groups
  4596. SUBROUTINE get_nio_tasks_in_group( retval )
  4597. !<DESCRIPTION>
  4598. ! This routine returns the number of I/O server tasks in each
  4599. ! I/O server group. It can be called by both clients and
  4600. ! servers.
  4601. !</DESCRIPTION>
  4602. #if defined( DM_PARALLEL ) && !defined( STUBMPI )
  4603. USE module_wrf_quilt
  4604. IMPLICIT NONE
  4605. INTEGER, INTENT(OUT) :: retval
  4606. retval = nio_tasks_in_group
  4607. #endif
  4608. RETURN
  4609. END SUBROUTINE get_nio_tasks_in_group
  4610. SUBROUTINE collect_on_comm_debug(file,line, comm_io_group, &
  4611. sze, &
  4612. hdrbuf, hdrbufsize , &
  4613. outbuf, outbufsize )
  4614. IMPLICIT NONE
  4615. CHARACTER*(*) file
  4616. INTEGER line
  4617. INTEGER comm_io_group
  4618. INTEGER sze
  4619. INTEGER hdrbuf(*), outbuf(*)
  4620. INTEGER hdrbufsize, outbufsize
  4621. !write(0,*)'collect_on_comm_debug ',trim(file),line,sze,hdrbufsize,outbufsize
  4622. CALL collect_on_comm( comm_io_group, &
  4623. sze, &
  4624. hdrbuf, hdrbufsize , &
  4625. outbuf, outbufsize )
  4626. !write(0,*)trim(file),line,'returning'
  4627. RETURN
4628. END SUBROUTINE collect_on_comm_debug
  4629. SUBROUTINE collect_on_comm_debug2(file,line,var,tag,sz,hdr_rec_size, &
  4630. comm_io_group, &
  4631. sze, &
  4632. hdrbuf, hdrbufsize , &
  4633. outbuf, outbufsize )
  4634. IMPLICIT NONE
  4635. CHARACTER*(*) file,var
  4636. INTEGER line,tag,sz,hdr_rec_size
  4637. INTEGER comm_io_group
  4638. INTEGER sze
  4639. INTEGER hdrbuf(*), outbuf(*)
  4640. INTEGER hdrbufsize, outbufsize
  4641. ! write(0,*)'collect_on_comm_debug2 ',trim(file),line,trim(var),tag,sz,hdr_rec_size,sze,hdrbufsize,outbufsize
  4642. CALL collect_on_comm( comm_io_group, &
  4643. sze, &
  4644. hdrbuf, hdrbufsize , &
  4645. outbuf, outbufsize )
  4646. ! write(0,*)'collect_on_comm_debug2 ',trim(file),line,'returning for ',trim(var)
  4647. RETURN
4648. END SUBROUTINE collect_on_comm_debug2