
/wrfv2_fire/external/RSL_LITE/module_dm.F

http://github.com/jbeezley/wrf-fire
FORTRAN Legacy | 4562 lines | 3088 code | 503 blank | 971 comment | 19 complexity | f5d2014a5e852efe6c1288f8887dbdf5 MD5
Possible License(s): AGPL-1.0


  1. !WRF:PACKAGE:RSL
  2. !
  3. MODULE module_dm
  4. USE module_machine
  5. USE module_wrf_error
  6. USE module_driver_constants
  7. ! USE module_comm_dm
  8. IMPLICIT NONE
  9. #if ( NMM_CORE == 1 ) || defined( WRF_CHEM )
  10. INTEGER, PARAMETER :: max_halo_width = 6
  11. #else
  12. INTEGER, PARAMETER :: max_halo_width = 6 ! 5
  13. #endif
  14. INTEGER :: ips_save, ipe_save, jps_save, jpe_save, itrace
  15. INTEGER ntasks, ntasks_y, ntasks_x, mytask, mytask_x, mytask_y
  16. INTEGER local_communicator, local_communicator_periodic, local_iocommunicator
  17. INTEGER local_communicator_x, local_communicator_y ! subcommunicators for rows and cols of mesh
  18. LOGICAL :: dm_debug_flag = .FALSE.
  19. INTERFACE wrf_dm_maxval
  20. #if ( defined(PROMOTE_FLOAT) || ( RWORDSIZE == DWORDSIZE ) )
  21. MODULE PROCEDURE wrf_dm_maxval_real , wrf_dm_maxval_integer
  22. #else
  23. MODULE PROCEDURE wrf_dm_maxval_real , wrf_dm_maxval_integer, wrf_dm_maxval_doubleprecision
  24. #endif
  25. END INTERFACE
  26. INTERFACE wrf_dm_minval ! gopal's doing
  27. #if ( defined(PROMOTE_FLOAT) || ( RWORDSIZE == DWORDSIZE ) )
  28. MODULE PROCEDURE wrf_dm_minval_real , wrf_dm_minval_integer
  29. #else
  30. MODULE PROCEDURE wrf_dm_minval_real , wrf_dm_minval_integer, wrf_dm_minval_doubleprecision
  31. #endif
  32. END INTERFACE
  33. CONTAINS
  34. SUBROUTINE MPASPECT( P, MINM, MINN, PROCMIN_M, PROCMIN_N )
  35. IMPLICIT NONE
  36. INTEGER P, M, N, MINI, MINM, MINN, PROCMIN_M, PROCMIN_N
  37. MINI = 2*P
  38. MINM = 1
  39. MINN = P
  40. DO M = 1, P
  41. IF ( MOD( P, M ) .EQ. 0 ) THEN
  42. N = P / M
  43. IF ( ABS(M-N) .LT. MINI &
  44. .AND. M .GE. PROCMIN_M &
  45. .AND. N .GE. PROCMIN_N &
  46. ) THEN
  47. MINI = ABS(M-N)
  48. MINM = M
  49. MINN = N
  50. ENDIF
  51. ENDIF
  52. ENDDO
  53. IF ( MINM .LT. PROCMIN_M .OR. MINN .LT. PROCMIN_N ) THEN
  54. WRITE( wrf_err_message , * )'MPASPECT: UNABLE TO GENERATE PROCESSOR MESH. STOPPING.'
  55. CALL wrf_message ( TRIM ( wrf_err_message ) )
  56. WRITE(0,*)' PROCMIN_M ', PROCMIN_M
  57. WRITE( wrf_err_message , * )' PROCMIN_M ', PROCMIN_M
  58. CALL wrf_message ( TRIM ( wrf_err_message ) )
  59. WRITE( wrf_err_message , * )' PROCMIN_N ', PROCMIN_N
  60. CALL wrf_message ( TRIM ( wrf_err_message ) )
  61. WRITE( wrf_err_message , * )' P ', P
  62. CALL wrf_message ( TRIM ( wrf_err_message ) )
  63. WRITE( wrf_err_message , * )' MINM ', MINM
  64. CALL wrf_message ( TRIM ( wrf_err_message ) )
  65. WRITE( wrf_err_message , * )' MINN ', MINN
  66. CALL wrf_message ( TRIM ( wrf_err_message ) )
  67. CALL wrf_error_fatal ( 'module_dm: mpaspect' )
  68. ENDIF
  69. RETURN
  70. END SUBROUTINE MPASPECT
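To make the factor search above concrete, here is a minimal standalone sketch (a hypothetical driver, not part of module_dm.F) that repeats MPASPECT's selection of the most nearly square M x N factorization of the task count; for P = 12 with both minimums at 1 it selects a 3 x 4 mesh.

PROGRAM mpaspect_example
  ! Illustrative re-run of the MPASPECT search: choose the M x N
  ! factorization of P with the smallest |M-N|, subject to
  ! M >= procmin_m and N >= procmin_n.
  IMPLICIT NONE
  INTEGER :: p, m, n, mini, minm, minn, procmin_m, procmin_n
  p = 12 ; procmin_m = 1 ; procmin_n = 1
  mini = 2*p ; minm = 1 ; minn = p
  DO m = 1, p
    IF ( MOD( p, m ) .EQ. 0 ) THEN
      n = p / m
      IF ( ABS(m-n) .LT. mini .AND. m .GE. procmin_m .AND. n .GE. procmin_n ) THEN
        mini = ABS(m-n) ; minm = m ; minn = n
      ENDIF
    ENDIF
  ENDDO
  WRITE(*,*) 'processor mesh: ', minm, ' x ', minn   ! prints 3 x 4 for p = 12
END PROGRAM mpaspect_example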
  71. SUBROUTINE compute_mesh( ntasks , ntasks_x, ntasks_y )
  72. IMPLICIT NONE
  73. INTEGER, INTENT(IN) :: ntasks
  74. INTEGER, INTENT(OUT) :: ntasks_x, ntasks_y
  75. CALL nl_get_nproc_x ( 1, ntasks_x )
  76. CALL nl_get_nproc_y ( 1, ntasks_y )
  77. ! check if user has specified in the namelist
  78. IF ( ntasks_x .GT. 0 .OR. ntasks_y .GT. 0 ) THEN
  79. ! if only ntasks_x is specified then make it 1-d decomp in i
  80. IF ( ntasks_x .GT. 0 .AND. ntasks_y .EQ. -1 ) THEN
  81. ntasks_y = ntasks / ntasks_x
  82. ! if only ntasks_y is specified then make it 1-d decomp in j
  83. ELSE IF ( ntasks_x .EQ. -1 .AND. ntasks_y .GT. 0 ) THEN
  84. ntasks_x = ntasks / ntasks_y
  85. ENDIF
  86. ! make sure user knows what they're doing
  87. IF ( ntasks_x * ntasks_y .NE. ntasks ) THEN
  88. WRITE( wrf_err_message , * )'WRF_DM_INITIALIZE (RSL_LITE): nproc_x * nproc_y in namelist ne ',ntasks
  89. CALL wrf_error_fatal ( wrf_err_message )
  90. ENDIF
  91. ELSE
  92. ! When neither is specified, work out mesh with MPASPECT
  93. ! Pass nproc_ln and nproc_nt so that number of procs in
  94. ! i-dim (nproc_ln) is equal or lesser.
  95. CALL mpaspect ( ntasks, ntasks_x, ntasks_y, 1, 1 )
  96. ENDIF
  97. END SUBROUTINE compute_mesh
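For example (illustrative task counts): with 12 MPI tasks, setting nproc_x = 2 in the namelist and leaving nproc_y at -1 yields a 2 x 6 mesh; leaving both unset hands the choice to MPASPECT, which picks 3 x 4; and setting nproc_x = 5 aborts with the fatal error above, because 5 does not divide 12.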
  98. SUBROUTINE wrf_dm_initialize
  99. IMPLICIT NONE
  100. #ifndef STUBMPI
  101. INCLUDE 'mpif.h'
  102. INTEGER :: local_comm, local_comm2, new_local_comm, group, newgroup, p, p1, ierr
  103. INTEGER, ALLOCATABLE, DIMENSION(:) :: ranks
  104. INTEGER comdup
  105. INTEGER, DIMENSION(2) :: dims, coords
  106. LOGICAL, DIMENSION(2) :: isperiodic
  107. LOGICAL :: reorder_mesh
  108. CALL wrf_get_dm_communicator ( local_comm )
  109. CALL mpi_comm_size( local_comm, ntasks, ierr )
  110. CALL nl_get_reorder_mesh( 1, reorder_mesh )
  111. CALL compute_mesh( ntasks, ntasks_x, ntasks_y )
  112. WRITE( wrf_err_message , * )'Ntasks in X ',ntasks_x,', ntasks in Y ',ntasks_y
  113. CALL wrf_message( wrf_err_message )
  114. CALL mpi_comm_rank( local_comm, mytask, ierr )
  115. ! extra code to reorder the communicator 20051212jm
  116. IF ( reorder_mesh ) THEN
  117. ALLOCATE (ranks(ntasks))
  118. CALL mpi_comm_dup ( local_comm , local_comm2, ierr )
  119. CALL mpi_comm_group ( local_comm2, group, ierr )
  120. DO p1=1,ntasks
  121. p = p1 - 1
  122. ranks(p1) = mod( p , ntasks_x ) * ntasks_y + p / ntasks_x
  123. ENDDO
  124. CALL mpi_group_incl( group, ntasks, ranks, newgroup, ierr )
  125. DEALLOCATE (ranks)
  126. CALL mpi_comm_create( local_comm2, newgroup, new_local_comm , ierr )
  127. ELSE
  128. new_local_comm = local_comm
  129. ENDIF
  130. ! end extra code to reorder the communicator 20051212jm
  131. dims(1) = ntasks_y ! rows
  132. dims(2) = ntasks_x ! columns
  133. isperiodic(1) = .false.
  134. isperiodic(2) = .false.
  135. CALL mpi_cart_create( new_local_comm, 2, dims, isperiodic, .false., local_communicator, ierr )
  136. dims(1) = ntasks_y ! rows
  137. dims(2) = ntasks_x ! columns
  138. isperiodic(1) = .true.
  139. isperiodic(2) = .true.
  140. CALL mpi_cart_create( new_local_comm, 2, dims, isperiodic, .false., local_communicator_periodic, ierr )
  141. ! debug
  142. CALL mpi_comm_rank( local_communicator_periodic, mytask, ierr )
  143. CALL mpi_cart_coords( local_communicator_periodic, mytask, 2, coords, ierr )
  144. ! write(0,*)'periodic coords ',mytask, coords
  145. CALL mpi_comm_rank( local_communicator, mytask, ierr )
  146. CALL mpi_cart_coords( local_communicator, mytask, 2, coords, ierr )
  147. ! write(0,*)'non periodic coords ',mytask, coords
  148. mytask_x = coords(2) ! col task (x)
  149. mytask_y = coords(1) ! row task (y)
  150. CALL nl_set_nproc_x ( 1, ntasks_x )
  151. CALL nl_set_nproc_y ( 1, ntasks_y )
  152. ! 20061228 set up subcommunicators for processors in X, Y coords of mesh
  153. ! note that local_comm_x has all the processors in a row (X=0:nproc_x-1);
  154. ! in other words, local_comm_x has all the processes with the same rank in Y
  155. CALL MPI_Comm_dup( new_local_comm, comdup, ierr )
  156. IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_Comm_dup fails in 20061228 mod')
  157. CALL MPI_Comm_split(comdup,mytask_y,mytask,local_communicator_x,ierr)
  158. IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_Comm_split fails for x in 20061228 mod')
  159. CALL MPI_Comm_split(comdup,mytask_x,mytask,local_communicator_y,ierr)
  160. IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_Comm_split fails for y in 20061228 mod')
  161. ! end 20061228
  162. CALL wrf_set_dm_communicator ( local_communicator )
  163. #else
  164. ntasks = 1
  165. ntasks_x = 1
  166. ntasks_y = 1
  167. mytask = 0
  168. mytask_x = 0
  169. mytask_y = 0
  170. #endif
  171. RETURN
  172. END SUBROUTINE wrf_dm_initialize
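A small standalone sketch (hypothetical, for illustration only) of the reorder_mesh permutation built in wrf_dm_initialize above: for a 3 x 2 mesh it produces the old-rank list 0 2 4 1 3 5 that is handed to mpi_group_incl.

PROGRAM reorder_example
  ! Reproduces the rank permutation used when reorder_mesh = .TRUE.:
  !   ranks(p1) = mod(p, ntasks_x)*ntasks_y + p/ntasks_x
  IMPLICIT NONE
  INTEGER, PARAMETER :: ntasks = 6, ntasks_x = 3, ntasks_y = 2
  INTEGER :: p, p1, ranks(ntasks)
  DO p1 = 1, ntasks
    p = p1 - 1
    ranks(p1) = MOD( p, ntasks_x ) * ntasks_y + p / ntasks_x
  ENDDO
  WRITE(*,*) ranks   ! prints 0 2 4 1 3 5
END PROGRAM reorder_example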
  173. SUBROUTINE get_dm_max_halo_width( id, width )
  174. IMPLICIT NONE
  175. INTEGER, INTENT(IN) :: id
  176. INTEGER, INTENT(OUT) :: width
  177. IF ( id .EQ. 1 ) THEN ! this is coarse domain
  178. width = max_halo_width
  179. ELSE
  180. width = max_halo_width + 3
  181. ENDIF
  182. RETURN
  183. END SUBROUTINE get_dm_max_halo_width
  184. SUBROUTINE patch_domain_rsl_lite( id , parent, parent_id, &
  185. sd1 , ed1 , sp1 , ep1 , sm1 , em1 , &
  186. sd2 , ed2 , sp2 , ep2 , sm2 , em2 , &
  187. sd3 , ed3 , sp3 , ep3 , sm3 , em3 , &
  188. sp1x , ep1x , sm1x , em1x , &
  189. sp2x , ep2x , sm2x , em2x , &
  190. sp3x , ep3x , sm3x , em3x , &
  191. sp1y , ep1y , sm1y , em1y , &
  192. sp2y , ep2y , sm2y , em2y , &
  193. sp3y , ep3y , sm3y , em3y , &
  194. bdx , bdy )
  195. USE module_domain, ONLY : domain, head_grid, find_grid_by_id, alloc_space_field
  196. IMPLICIT NONE
  197. INTEGER, INTENT(IN) :: sd1 , ed1 , sd2 , ed2 , sd3 , ed3 , bdx , bdy
  198. INTEGER, INTENT(OUT) :: sp1 , ep1 , sp2 , ep2 , sp3 , ep3 , &
  199. sm1 , em1 , sm2 , em2 , sm3 , em3
  200. INTEGER, INTENT(OUT) :: sp1x , ep1x , sp2x , ep2x , sp3x , ep3x , &
  201. sm1x , em1x , sm2x , em2x , sm3x , em3x
  202. INTEGER, INTENT(OUT) :: sp1y , ep1y , sp2y , ep2y , sp3y , ep3y , &
  203. sm1y , em1y , sm2y , em2y , sm3y , em3y
  204. INTEGER, INTENT(IN) :: id, parent_id
  205. TYPE(domain),POINTER :: parent
  206. ! Local variables
  207. INTEGER :: ids, ide, jds, jde, kds, kde
  208. INTEGER :: ims, ime, jms, jme, kms, kme
  209. INTEGER :: ips, ipe, jps, jpe, kps, kpe
  210. INTEGER :: imsx, imex, jmsx, jmex, kmsx, kmex
  211. INTEGER :: ipsx, ipex, jpsx, jpex, kpsx, kpex
  212. INTEGER :: imsy, imey, jmsy, jmey, kmsy, kmey
  213. INTEGER :: ipsy, ipey, jpsy, jpey, kpsy, kpey
  214. INTEGER :: c_sd1 , c_ed1 , c_sd2 , c_ed2 , c_sd3 , c_ed3
  215. INTEGER :: c_sp1 , c_ep1 , c_sp2 , c_ep2 , c_sp3 , c_ep3 , &
  216. c_sm1 , c_em1 , c_sm2 , c_em2 , c_sm3 , c_em3
  217. INTEGER :: c_sp1x , c_ep1x , c_sp2x , c_ep2x , c_sp3x , c_ep3x , &
  218. c_sm1x , c_em1x , c_sm2x , c_em2x , c_sm3x , c_em3x
  219. INTEGER :: c_sp1y , c_ep1y , c_sp2y , c_ep2y , c_sp3y , c_ep3y , &
  220. c_sm1y , c_em1y , c_sm2y , c_em2y , c_sm3y , c_em3y
  221. INTEGER :: c_ids, c_ide, c_jds, c_jde, c_kds, c_kde
  222. INTEGER :: c_ims, c_ime, c_jms, c_jme, c_kms, c_kme
  223. INTEGER :: c_ips, c_ipe, c_jps, c_jpe, c_kps, c_kpe
  224. INTEGER :: idim , jdim , kdim , rem , a, b
  225. INTEGER :: i, j, ni, nj, Px, Py, P
  226. INTEGER :: parent_grid_ratio, i_parent_start, j_parent_start
  227. INTEGER :: shw
  228. INTEGER :: idim_cd, jdim_cd, ierr
  229. INTEGER :: max_dom
  230. TYPE(domain), POINTER :: intermediate_grid
  231. TYPE(domain), POINTER :: nest_grid
  232. CHARACTER*256 :: mess
  233. INTEGER parent_max_halo_width
  234. INTEGER thisdomain_max_halo_width
  235. SELECT CASE ( model_data_order )
  236. ! need to finish other cases
  237. CASE ( DATA_ORDER_ZXY )
  238. ids = sd2 ; ide = ed2
  239. jds = sd3 ; jde = ed3
  240. kds = sd1 ; kde = ed1
  241. CASE ( DATA_ORDER_XYZ )
  242. ids = sd1 ; ide = ed1
  243. jds = sd2 ; jde = ed2
  244. kds = sd3 ; kde = ed3
  245. CASE ( DATA_ORDER_XZY )
  246. ids = sd1 ; ide = ed1
  247. jds = sd3 ; jde = ed3
  248. kds = sd2 ; kde = ed2
  249. CASE ( DATA_ORDER_YXZ)
  250. ids = sd2 ; ide = ed2
  251. jds = sd1 ; jde = ed1
  252. kds = sd3 ; kde = ed3
  253. END SELECT
  254. CALL nl_get_max_dom( 1 , max_dom )
  255. CALL get_dm_max_halo_width( id , thisdomain_max_halo_width )
  256. IF ( id .GT. 1 ) THEN
  257. CALL get_dm_max_halo_width( parent%id , parent_max_halo_width )
  258. ENDIF
  259. CALL compute_memory_dims_rsl_lite ( id, thisdomain_max_halo_width, 0 , bdx, bdy, &
  260. ids, ide, jds, jde, kds, kde, &
  261. ims, ime, jms, jme, kms, kme, &
  262. imsx, imex, jmsx, jmex, kmsx, kmex, &
  263. imsy, imey, jmsy, jmey, kmsy, kmey, &
  264. ips, ipe, jps, jpe, kps, kpe, &
  265. ipsx, ipex, jpsx, jpex, kpsx, kpex, &
  266. ipsy, ipey, jpsy, jpey, kpsy, kpey )
  267. ! ensure that every parent domain point has a full set of nested points under it
  268. ! even at the borders. Do this by making sure the number of nest points is a multiple of
  269. ! the nesting ratio. Note that this is important mostly to the intermediate domain, which
  270. ! is the subject of the scatter gather comms with the parent
  271. IF ( id .GT. 1 ) THEN
  272. CALL nl_get_parent_grid_ratio( id, parent_grid_ratio )
  273. if ( mod(ime,parent_grid_ratio) .NE. 0 ) ime = ime + parent_grid_ratio - mod(ime,parent_grid_ratio)
  274. if ( mod(jme,parent_grid_ratio) .NE. 0 ) jme = jme + parent_grid_ratio - mod(jme,parent_grid_ratio)
  275. ENDIF
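! (Illustrative values: with parent_grid_ratio = 3, an ime of 64 would be
!  rounded up to 66 by the adjustment above, so the memory extent covers a
!  whole number of parent-grid cells.)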
  276. SELECT CASE ( model_data_order )
  277. CASE ( DATA_ORDER_ZXY )
  278. sp2 = ips ; ep2 = ipe ; sm2 = ims ; em2 = ime
  279. sp3 = jps ; ep3 = jpe ; sm3 = jms ; em3 = jme
  280. sp1 = kps ; ep1 = kpe ; sm1 = kms ; em1 = kme
  281. sp2x = ipsx ; ep2x = ipex ; sm2x = imsx ; em2x = imex
  282. sp3x = jpsx ; ep3x = jpex ; sm3x = jmsx ; em3x = jmex
  283. sp1x = kpsx ; ep1x = kpex ; sm1x = kmsx ; em1x = kmex
  284. sp2y = ipsy ; ep2y = ipey ; sm2y = imsy ; em2y = imey
  285. sp3y = jpsy ; ep3y = jpey ; sm3y = jmsy ; em3y = jmey
  286. sp1y = kpsy ; ep1y = kpey ; sm1y = kmsy ; em1y = kmey
  287. CASE ( DATA_ORDER_ZYX )
  288. sp3 = ips ; ep3 = ipe ; sm3 = ims ; em3 = ime
  289. sp2 = jps ; ep2 = jpe ; sm2 = jms ; em2 = jme
  290. sp1 = kps ; ep1 = kpe ; sm1 = kms ; em1 = kme
  291. sp3x = ipsx ; ep3x = ipex ; sm3x = imsx ; em3x = imex
  292. sp2x = jpsx ; ep2x = jpex ; sm2x = jmsx ; em2x = jmex
  293. sp1x = kpsx ; ep1x = kpex ; sm1x = kmsx ; em1x = kmex
  294. sp3y = ipsy ; ep3y = ipey ; sm3y = imsy ; em3y = imey
  295. sp2y = jpsy ; ep2y = jpey ; sm2y = jmsy ; em2y = jmey
  296. sp1y = kpsy ; ep1y = kpey ; sm1y = kmsy ; em1y = kmey
  297. CASE ( DATA_ORDER_XYZ )
  298. sp1 = ips ; ep1 = ipe ; sm1 = ims ; em1 = ime
  299. sp2 = jps ; ep2 = jpe ; sm2 = jms ; em2 = jme
  300. sp3 = kps ; ep3 = kpe ; sm3 = kms ; em3 = kme
  301. sp1x = ipsx ; ep1x = ipex ; sm1x = imsx ; em1x = imex
  302. sp2x = jpsx ; ep2x = jpex ; sm2x = jmsx ; em2x = jmex
  303. sp3x = kpsx ; ep3x = kpex ; sm3x = kmsx ; em3x = kmex
  304. sp1y = ipsy ; ep1y = ipey ; sm1y = imsy ; em1y = imey
  305. sp2y = jpsy ; ep2y = jpey ; sm2y = jmsy ; em2y = jmey
  306. sp3y = kpsy ; ep3y = kpey ; sm3y = kmsy ; em3y = kmey
  307. CASE ( DATA_ORDER_YXZ)
  308. sp2 = ips ; ep2 = ipe ; sm2 = ims ; em2 = ime
  309. sp1 = jps ; ep1 = jpe ; sm1 = jms ; em1 = jme
  310. sp3 = kps ; ep3 = kpe ; sm3 = kms ; em3 = kme
  311. sp2x = ipsx ; ep2x = ipex ; sm2x = imsx ; em2x = imex
  312. sp1x = jpsx ; ep1x = jpex ; sm1x = jmsx ; em1x = jmex
  313. sp3x = kpsx ; ep3x = kpex ; sm3x = kmsx ; em3x = kmex
  314. sp2y = ipsy ; ep2y = ipey ; sm2y = imsy ; em2y = imey
  315. sp1y = jpsy ; ep1y = jpey ; sm1y = jmsy ; em1y = jmey
  316. sp3y = kpsy ; ep3y = kpey ; sm3y = kmsy ; em3y = kmey
  317. CASE ( DATA_ORDER_XZY )
  318. sp1 = ips ; ep1 = ipe ; sm1 = ims ; em1 = ime
  319. sp3 = jps ; ep3 = jpe ; sm3 = jms ; em3 = jme
  320. sp2 = kps ; ep2 = kpe ; sm2 = kms ; em2 = kme
  321. sp1x = ipsx ; ep1x = ipex ; sm1x = imsx ; em1x = imex
  322. sp3x = jpsx ; ep3x = jpex ; sm3x = jmsx ; em3x = jmex
  323. sp2x = kpsx ; ep2x = kpex ; sm2x = kmsx ; em2x = kmex
  324. sp1y = ipsy ; ep1y = ipey ; sm1y = imsy ; em1y = imey
  325. sp3y = jpsy ; ep3y = jpey ; sm3y = jmsy ; em3y = jmey
  326. sp2y = kpsy ; ep2y = kpey ; sm2y = kmsy ; em2y = kmey
  327. CASE ( DATA_ORDER_YZX )
  328. sp3 = ips ; ep3 = ipe ; sm3 = ims ; em3 = ime
  329. sp1 = jps ; ep1 = jpe ; sm1 = jms ; em1 = jme
  330. sp2 = kps ; ep2 = kpe ; sm2 = kms ; em2 = kme
  331. sp3x = ipsx ; ep3x = ipex ; sm3x = imsx ; em3x = imex
  332. sp1x = jpsx ; ep1x = jpex ; sm1x = jmsx ; em1x = jmex
  333. sp2x = kpsx ; ep2x = kpex ; sm2x = kmsx ; em2x = kmex
  334. sp3y = ipsy ; ep3y = ipey ; sm3y = imsy ; em3y = imey
  335. sp1y = jpsy ; ep1y = jpey ; sm1y = jmsy ; em1y = jmey
  336. sp2y = kpsy ; ep2y = kpey ; sm2y = kmsy ; em2y = kmey
  337. END SELECT
  338. IF ( id.EQ.1 ) THEN
  339. WRITE(wrf_err_message,*)'*************************************'
  340. CALL wrf_message( TRIM(wrf_err_message) )
  341. WRITE(wrf_err_message,*)'Parent domain'
  342. CALL wrf_message( TRIM(wrf_err_message) )
  343. WRITE(wrf_err_message,*)'ids,ide,jds,jde ',ids,ide,jds,jde
  344. CALL wrf_message( TRIM(wrf_err_message) )
  345. WRITE(wrf_err_message,*)'ims,ime,jms,jme ',ims,ime,jms,jme
  346. CALL wrf_message( TRIM(wrf_err_message) )
  347. WRITE(wrf_err_message,*)'ips,ipe,jps,jpe ',ips,ipe,jps,jpe
  348. CALL wrf_message( TRIM(wrf_err_message) )
  349. WRITE(wrf_err_message,*)'*************************************'
  350. CALL wrf_message( TRIM(wrf_err_message) )
  351. ENDIF
  352. IF ( id .GT. 1 ) THEN
  353. CALL nl_get_shw( id, shw )
  354. CALL nl_get_i_parent_start( id , i_parent_start )
  355. CALL nl_get_j_parent_start( id , j_parent_start )
  356. CALL nl_get_parent_grid_ratio( id, parent_grid_ratio )
  357. SELECT CASE ( model_data_order )
  358. CASE ( DATA_ORDER_ZXY )
  359. idim = ed2-sd2+1
  360. jdim = ed3-sd3+1
  361. kdim = ed1-sd1+1
  362. c_kds = sd1 ; c_kde = ed1
  363. CASE ( DATA_ORDER_ZYX )
  364. idim = ed3-sd3+1
  365. jdim = ed2-sd2+1
  366. kdim = ed1-sd1+1
  367. c_kds = sd1 ; c_kde = ed1
  368. CASE ( DATA_ORDER_XYZ )
  369. idim = ed1-sd1+1
  370. jdim = ed2-sd2+1
  371. kdim = ed3-sd3+1
  372. c_kds = sd3 ; c_kde = ed3
  373. CASE ( DATA_ORDER_YXZ)
  374. idim = ed2-sd2+1
  375. jdim = ed1-sd1+1
  376. kdim = ed3-sd3+1
  377. c_kds = sd3 ; c_kde = ed3
  378. CASE ( DATA_ORDER_XZY )
  379. idim = ed1-sd1+1
  380. jdim = ed3-sd3+1
  381. kdim = ed2-sd2+1
  382. c_kds = sd2 ; c_kde = ed2
  383. CASE ( DATA_ORDER_YZX )
  384. idim = ed3-sd3+1
  385. jdim = ed1-sd1+1
  386. kdim = ed2-sd2+1
  387. c_kds = sd2 ; c_kde = ed2
  388. END SELECT
  389. idim_cd = idim / parent_grid_ratio + 1 + 2*shw + 1
  390. jdim_cd = jdim / parent_grid_ratio + 1 + 2*shw + 1
  391. c_ids = i_parent_start-shw ; c_ide = c_ids + idim_cd - 1
  392. c_jds = j_parent_start-shw ; c_jde = c_jds + jdim_cd - 1
  393. ! we want the intermediate domain to be decomposed the
  394. ! same as the underlying nest. So try this:
  395. c_ips = -1
  396. nj = ( c_jds - j_parent_start ) * parent_grid_ratio + 1 + 1 ;
  397. ierr = 0
  398. DO i = c_ids, c_ide
  399. ni = ( i - i_parent_start ) * parent_grid_ratio + 1 + 1 ;
  400. CALL task_for_point ( ni, nj, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
  401. 1, 1, ierr )
  402. IF ( Px .EQ. mytask_x ) THEN
  403. c_ipe = i
  404. IF ( c_ips .EQ. -1 ) c_ips = i
  405. ENDIF
  406. ENDDO
  407. IF ( ierr .NE. 0 ) THEN
  408. CALL tfp_message(__FILE__,__LINE__)
  409. ENDIF
  410. IF (c_ips .EQ. -1 ) THEN
  411. c_ipe = -1
  412. c_ips = 0
  413. ENDIF
  414. c_jps = -1
  415. ni = ( c_ids - i_parent_start ) * parent_grid_ratio + 1 + 1 ;
  416. ierr = 0
  417. DO j = c_jds, c_jde
  418. nj = ( j - j_parent_start ) * parent_grid_ratio + 1 + 1 ;
  419. CALL task_for_point ( ni, nj, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
  420. 1, 1, ierr )
  421. IF ( Py .EQ. mytask_y ) THEN
  422. c_jpe = j
  423. IF ( c_jps .EQ. -1 ) c_jps = j
  424. ENDIF
  425. ENDDO
  426. IF ( ierr .NE. 0 ) THEN
  427. CALL tfp_message(__FILE__,__LINE__)
  428. ENDIF
  429. IF (c_jps .EQ. -1 ) THEN
  430. c_jpe = -1
  431. c_jps = 0
  432. ENDIF
  433. IF ( c_ips <= c_ipe ) THEN
  434. ! extend the patch dimensions out shw along edges of domain
  435. IF ( mytask_x .EQ. 0 ) THEN
  436. c_ips = c_ips - shw
  437. ENDIF
  438. IF ( mytask_x .EQ. ntasks_x-1 ) THEN
  439. c_ipe = c_ipe + shw
  440. ENDIF
  441. c_ims = max( c_ips - max(shw,thisdomain_max_halo_width), c_ids - bdx ) - 1
  442. c_ime = min( c_ipe + max(shw,thisdomain_max_halo_width), c_ide + bdx ) + 1
  443. ELSE
  444. c_ims = 0
  445. c_ime = 0
  446. ENDIF
  447. ! handle j dims
  448. IF ( c_jps <= c_jpe ) THEN
  449. ! extend the patch dimensions out shw along edges of domain
  450. IF ( mytask_y .EQ. 0 ) THEN
  451. c_jps = c_jps - shw
  452. ENDIF
  453. IF ( mytask_y .EQ. ntasks_y-1 ) THEN
  454. c_jpe = c_jpe + shw
  455. ENDIF
  456. c_jms = max( c_jps - max(shw,thisdomain_max_halo_width), c_jds - bdx ) - 1
  457. c_jme = min( c_jpe + max(shw,thisdomain_max_halo_width), c_jde + bdx ) + 1
  458. ! handle k dims
  459. ELSE
  460. c_jms = 0
  461. c_jme = 0
  462. ENDIF
  463. c_kps = 1
  464. c_kpe = c_kde
  465. c_kms = 1
  466. c_kme = c_kde
  467. WRITE(wrf_err_message,*)'*************************************'
  468. CALL wrf_message( TRIM(wrf_err_message) )
  469. WRITE(wrf_err_message,*)'Nesting domain'
  470. CALL wrf_message( TRIM(wrf_err_message) )
  471. WRITE(wrf_err_message,*)'ids,ide,jds,jde ',ids,ide,jds,jde
  472. CALL wrf_message( TRIM(wrf_err_message) )
  473. WRITE(wrf_err_message,*)'ims,ime,jms,jme ',ims,ime,jms,jme
  474. CALL wrf_message( TRIM(wrf_err_message) )
  475. WRITE(wrf_err_message,*)'ips,ipe,jps,jpe ',ips,ipe,jps,jpe
  476. CALL wrf_message( TRIM(wrf_err_message) )
  477. WRITE(wrf_err_message,*)'INTERMEDIATE domain'
  478. CALL wrf_message( TRIM(wrf_err_message) )
  479. WRITE(wrf_err_message,*)'ids,ide,jds,jde ',c_ids,c_ide,c_jds,c_jde
  480. CALL wrf_message( TRIM(wrf_err_message) )
  481. WRITE(wrf_err_message,*)'ims,ime,jms,jme ',c_ims,c_ime,c_jms,c_jme
  482. CALL wrf_message( TRIM(wrf_err_message) )
  483. WRITE(wrf_err_message,*)'ips,ipe,jps,jpe ',c_ips,c_ipe,c_jps,c_jpe
  484. CALL wrf_message( TRIM(wrf_err_message) )
  485. WRITE(wrf_err_message,*)'*************************************'
  486. CALL wrf_message( TRIM(wrf_err_message) )
  487. SELECT CASE ( model_data_order )
  488. CASE ( DATA_ORDER_ZXY )
  489. c_sd2 = c_ids ; c_ed2 = c_ide ; c_sp2 = c_ips ; c_ep2 = c_ipe ; c_sm2 = c_ims ; c_em2 = c_ime
  490. c_sd3 = c_jds ; c_ed3 = c_jde ; c_sp3 = c_jps ; c_ep3 = c_jpe ; c_sm3 = c_jms ; c_em3 = c_jme
  491. c_sd1 = c_kds ; c_ed1 = c_kde ; c_sp1 = c_kps ; c_ep1 = c_kpe ; c_sm1 = c_kms ; c_em1 = c_kme
  492. CASE ( DATA_ORDER_ZYX )
  493. c_sd3 = c_ids ; c_ed3 = c_ide ; c_sp3 = c_ips ; c_ep3 = c_ipe ; c_sm3 = c_ims ; c_em3 = c_ime
  494. c_sd2 = c_jds ; c_ed2 = c_jde ; c_sp2 = c_jps ; c_ep2 = c_jpe ; c_sm2 = c_jms ; c_em2 = c_jme
  495. c_sd1 = c_kds ; c_ed1 = c_kde ; c_sp1 = c_kps ; c_ep1 = c_kpe ; c_sm1 = c_kms ; c_em1 = c_kme
  496. CASE ( DATA_ORDER_XYZ )
  497. c_sd1 = c_ids ; c_ed1 = c_ide ; c_sp1 = c_ips ; c_ep1 = c_ipe ; c_sm1 = c_ims ; c_em1 = c_ime
  498. c_sd2 = c_jds ; c_ed2 = c_jde ; c_sp2 = c_jps ; c_ep2 = c_jpe ; c_sm2 = c_jms ; c_em2 = c_jme
  499. c_sd3 = c_kds ; c_ed3 = c_kde ; c_sp3 = c_kps ; c_ep3 = c_kpe ; c_sm3 = c_kms ; c_em3 = c_kme
  500. CASE ( DATA_ORDER_YXZ)
  501. c_sd2 = c_ids ; c_ed2 = c_ide ; c_sp2 = c_ips ; c_ep2 = c_ipe ; c_sm2 = c_ims ; c_em2 = c_ime
  502. c_sd1 = c_jds ; c_ed1 = c_jde ; c_sp1 = c_jps ; c_ep1 = c_jpe ; c_sm1 = c_jms ; c_em1 = c_jme
  503. c_sd3 = c_kds ; c_ed3 = c_kde ; c_sp3 = c_kps ; c_ep3 = c_kpe ; c_sm3 = c_kms ; c_em3 = c_kme
  504. CASE ( DATA_ORDER_XZY )
  505. c_sd1 = c_ids ; c_ed1 = c_ide ; c_sp1 = c_ips ; c_ep1 = c_ipe ; c_sm1 = c_ims ; c_em1 = c_ime
  506. c_sd3 = c_jds ; c_ed3 = c_jde ; c_sp3 = c_jps ; c_ep3 = c_jpe ; c_sm3 = c_jms ; c_em3 = c_jme
  507. c_sd2 = c_kds ; c_ed2 = c_kde ; c_sp2 = c_kps ; c_ep2 = c_kpe ; c_sm2 = c_kms ; c_em2 = c_kme
  508. CASE ( DATA_ORDER_YZX )
  509. c_sd3 = c_ids ; c_ed3 = c_ide ; c_sp3 = c_ips ; c_ep3 = c_ipe ; c_sm3 = c_ims ; c_em3 = c_ime
  510. c_sd1 = c_jds ; c_ed1 = c_jde ; c_sp1 = c_jps ; c_ep1 = c_jpe ; c_sm1 = c_jms ; c_em1 = c_jme
  511. c_sd2 = c_kds ; c_ed2 = c_kde ; c_sp2 = c_kps ; c_ep2 = c_kpe ; c_sm2 = c_kms ; c_em2 = c_kme
  512. END SELECT
  513. ALLOCATE ( intermediate_grid )
  514. ALLOCATE ( intermediate_grid%parents( max_parents ) )
  515. ALLOCATE ( intermediate_grid%nests( max_nests ) )
  516. intermediate_grid%allocated=.false.
  517. NULLIFY( intermediate_grid%sibling )
  518. DO i = 1, max_nests
  519. NULLIFY( intermediate_grid%nests(i)%ptr )
  520. ENDDO
  521. NULLIFY (intermediate_grid%next)
  522. NULLIFY (intermediate_grid%same_level)
  523. NULLIFY (intermediate_grid%i_start)
  524. NULLIFY (intermediate_grid%j_start)
  525. NULLIFY (intermediate_grid%i_end)
  526. NULLIFY (intermediate_grid%j_end)
  527. intermediate_grid%id = id ! these must be the same. Other parts of code depend on it (see gen_comms.c)
  528. intermediate_grid%num_nests = 0
  529. intermediate_grid%num_siblings = 0
  530. intermediate_grid%num_parents = 1
  531. intermediate_grid%max_tiles = 0
  532. intermediate_grid%num_tiles_spec = 0
  533. CALL find_grid_by_id ( id, head_grid, nest_grid )
  534. nest_grid%intermediate_grid => intermediate_grid ! nest grid now has a pointer to this baby
  535. intermediate_grid%parents(1)%ptr => nest_grid ! the intermediate grid considers nest its parent
  536. intermediate_grid%num_parents = 1
  537. intermediate_grid%is_intermediate = .TRUE.
  538. SELECT CASE ( model_data_order )
  539. CASE ( DATA_ORDER_ZXY )
  540. intermediate_grid%nids = nest_grid%sd32 ; intermediate_grid%njds = nest_grid%sd33
  541. intermediate_grid%nide = nest_grid%ed32 ; intermediate_grid%njde = nest_grid%sd33
  542. CASE ( DATA_ORDER_ZYX )
  543. intermediate_grid%nids = nest_grid%sd33 ; intermediate_grid%njds = nest_grid%sd32
  544. intermediate_grid%nide = nest_grid%ed33 ; intermediate_grid%njde = nest_grid%sd32
  545. CASE ( DATA_ORDER_XYZ )
  546. intermediate_grid%nids = nest_grid%sd31 ; intermediate_grid%njds = nest_grid%sd32
  547. intermediate_grid%nide = nest_grid%ed31 ; intermediate_grid%njde = nest_grid%sd32
  548. CASE ( DATA_ORDER_YXZ)
  549. intermediate_grid%nids = nest_grid%sd32 ; intermediate_grid%njds = nest_grid%sd31
  550. intermediate_grid%nide = nest_grid%ed32 ; intermediate_grid%njde = nest_grid%sd31
  551. CASE ( DATA_ORDER_XZY )
  552. intermediate_grid%nids = nest_grid%sd31 ; intermediate_grid%njds = nest_grid%sd33
  553. intermediate_grid%nide = nest_grid%ed31 ; intermediate_grid%njde = nest_grid%sd33
  554. CASE ( DATA_ORDER_YZX )
  555. intermediate_grid%nids = nest_grid%sd33 ; intermediate_grid%njds = nest_grid%sd31
  556. intermediate_grid%nide = nest_grid%ed33 ; intermediate_grid%njde = nest_grid%sd31
  557. END SELECT
  558. intermediate_grid%nids = ids
  559. intermediate_grid%nide = ide
  560. intermediate_grid%njds = jds
  561. intermediate_grid%njde = jde
  562. c_sm1x = 1 ; c_em1x = 1 ; c_sm2x = 1 ; c_em2x = 1 ; c_sm3x = 1 ; c_em3x = 1
  563. c_sm1y = 1 ; c_em1y = 1 ; c_sm2y = 1 ; c_em2y = 1 ; c_sm3y = 1 ; c_em3y = 1
  564. intermediate_grid%sm31x = c_sm1x
  565. intermediate_grid%em31x = c_em1x
  566. intermediate_grid%sm32x = c_sm2x
  567. intermediate_grid%em32x = c_em2x
  568. intermediate_grid%sm33x = c_sm3x
  569. intermediate_grid%em33x = c_em3x
  570. intermediate_grid%sm31y = c_sm1y
  571. intermediate_grid%em31y = c_em1y
  572. intermediate_grid%sm32y = c_sm2y
  573. intermediate_grid%em32y = c_em2y
  574. intermediate_grid%sm33y = c_sm3y
  575. intermediate_grid%em33y = c_em3y
  576. #if ( defined(SGIALTIX) && (! defined(MOVE_NESTS) ) ) || ( defined(FUJITSU_FX10) && (! defined(MOVE_NESTS) ) )
  577. ! allocate space for the intermediate domain
  578. CALL alloc_space_field ( intermediate_grid, intermediate_grid%id , 1, 2 , .TRUE., & ! use same id as nest
  579. c_sd1, c_ed1, c_sd2, c_ed2, c_sd3, c_ed3, &
  580. c_sm1, c_em1, c_sm2, c_em2, c_sm3, c_em3, &
  581. c_sp1, c_ep1, c_sp2, c_ep2, c_sp3, c_ep3, &
  582. c_sm1x, c_em1x, c_sm2x, c_em2x, c_sm3x, c_em3x, &
  583. c_sm1y, c_em1y, c_sm2y, c_em2y, c_sm3y, c_em3y, &
  584. c_sm1x, c_em1x, c_sm2x, c_em2x, c_sm3x, c_em3x, & ! x-xpose
  585. c_sm1y, c_em1y, c_sm2y, c_em2y, c_sm3y, c_em3y ) ! y-xpose
  586. #endif
  587. intermediate_grid%sd31 = c_sd1
  588. intermediate_grid%ed31 = c_ed1
  589. intermediate_grid%sp31 = c_sp1
  590. intermediate_grid%ep31 = c_ep1
  591. intermediate_grid%sm31 = c_sm1
  592. intermediate_grid%em31 = c_em1
  593. intermediate_grid%sd32 = c_sd2
  594. intermediate_grid%ed32 = c_ed2
  595. intermediate_grid%sp32 = c_sp2
  596. intermediate_grid%ep32 = c_ep2
  597. intermediate_grid%sm32 = c_sm2
  598. intermediate_grid%em32 = c_em2
  599. intermediate_grid%sd33 = c_sd3
  600. intermediate_grid%ed33 = c_ed3
  601. intermediate_grid%sp33 = c_sp3
  602. intermediate_grid%ep33 = c_ep3
  603. intermediate_grid%sm33 = c_sm3
  604. intermediate_grid%em33 = c_em3
  605. CALL med_add_config_info_to_grid ( intermediate_grid )
  606. intermediate_grid%dx = parent%dx
  607. intermediate_grid%dy = parent%dy
  608. intermediate_grid%dt = parent%dt
  609. ENDIF
  610. RETURN
  611. END SUBROUTINE patch_domain_rsl_lite
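As a worked example of the intermediate-domain sizing above (illustrative namelist values only): a nest whose i-extent is idim = 91, with parent_grid_ratio = 3, shw = 2 and i_parent_start = 31, gives idim_cd = 91/3 + 1 + 2*2 + 1 = 36 parent-grid columns, spanning c_ids = 31 - 2 = 29 to c_ide = 29 + 36 - 1 = 64 in the parent's index space; the j direction is handled identically.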
  612. SUBROUTINE compute_memory_dims_rsl_lite ( &
  613. id , maxhalowidth , &
  614. shw , bdx, bdy , &
  615. ids, ide, jds, jde, kds, kde, &
  616. ims, ime, jms, jme, kms, kme, &
  617. imsx, imex, jmsx, jmex, kmsx, kmex, &
  618. imsy, imey, jmsy, jmey, kmsy, kmey, &
  619. ips, ipe, jps, jpe, kps, kpe, &
  620. ipsx, ipex, jpsx, jpex, kpsx, kpex, &
  621. ipsy, ipey, jpsy, jpey, kpsy, kpey )
  622. IMPLICIT NONE
  623. INTEGER, INTENT(IN) :: id , maxhalowidth
  624. INTEGER, INTENT(IN) :: shw, bdx, bdy
  625. INTEGER, INTENT(IN) :: ids, ide, jds, jde, kds, kde
  626. INTEGER, INTENT(OUT) :: ims, ime, jms, jme, kms, kme
  627. INTEGER, INTENT(OUT) :: imsx, imex, jmsx, jmex, kmsx, kmex
  628. INTEGER, INTENT(OUT) :: imsy, imey, jmsy, jmey, kmsy, kmey
  629. INTEGER, INTENT(OUT) :: ips, ipe, jps, jpe, kps, kpe
  630. INTEGER, INTENT(OUT) :: ipsx, ipex, jpsx, jpex, kpsx, kpex
  631. INTEGER, INTENT(OUT) :: ipsy, ipey, jpsy, jpey, kpsy, kpey
  632. INTEGER Px, Py, P, i, j, k, ierr
  633. #if ( ! NMM_CORE == 1 )
  634. ! xy decomposition
  635. ips = -1
  636. j = jds
  637. ierr = 0
  638. DO i = ids, ide
  639. CALL task_for_point ( i, j, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
  640. 1, 1, ierr )
  641. IF ( Px .EQ. mytask_x ) THEN
  642. ipe = i
  643. IF ( ips .EQ. -1 ) ips = i
  644. ENDIF
  645. ENDDO
  646. IF ( ierr .NE. 0 ) THEN
  647. CALL tfp_message(__FILE__,__LINE__)
  648. ENDIF
  649. ! handle setting the memory dimensions where there are no X elements assigned to this proc
  650. IF (ips .EQ. -1 ) THEN
  651. ipe = -1
  652. ips = 0
  653. ENDIF
  654. jps = -1
  655. i = ids
  656. ierr = 0
  657. DO j = jds, jde
  658. CALL task_for_point ( i, j, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
  659. 1, 1, ierr )
  660. IF ( Py .EQ. mytask_y ) THEN
  661. jpe = j
  662. IF ( jps .EQ. -1 ) jps = j
  663. ENDIF
  664. ENDDO
  665. IF ( ierr .NE. 0 ) THEN
  666. CALL tfp_message(__FILE__,__LINE__)
  667. ENDIF
  668. ! handle setting the memory dimensions where there are no Y elements assigned to this proc
  669. IF (jps .EQ. -1 ) THEN
  670. jpe = -1
  671. jps = 0
  672. ENDIF
  673. !begin: wig; 12-Mar-2008
  674. ! This appears redundant with the conditionals above, but we get cases with only
  675. ! one of the directions being set to "missing" when turning off extra processors.
  676. ! This may break the handling of setting only one of nproc_x or nproc_y via the namelist.
  677. IF (ipe .EQ. -1 .or. jpe .EQ. -1) THEN
  678. ipe = -1
  679. ips = 0
  680. jpe = -1
  681. jps = 0
  682. ENDIF
  683. !end: wig; 12-Mar-2008
  684. !
  685. ! description of transpose decomposition strategy for RSL LITE. 20061231jm
  686. !
  687. ! Here is the transpose scheme that is implemented for RSL_LITE. Upper-case
  688. ! XY corresponds to the dimension of the processor mesh, lower-case xyz
  689. ! corresponds to grid dimension.
  690. !
  691. !   xy          zy          zx
  692. !
  693. !  XxYy  <-->  XzYy  <-->  XzYx   <- note x decomposed over Y procs
  694. !    ^                       ^
  695. !    |                       |
  696. !    +-----------------------+    <- this edge is costly; see below
  697. !
  698. ! The aim is to avoid all-to-all communication over whole
  699. ! communicator. Instead, when possible, use a transpose scheme that requires
  700. ! all-to-all within dimensional communicators; that is, communicators
  701. ! defined for the processes in a rank or column of the processor mesh. Note,
  702. ! however, it is not possible to create a ring of transposes between
  703. ! xy-yz-xz decompositions without at least one of the edges in the ring
  704. ! being fully all-to-all (in other words, one of the transpose edges must
  705. ! rotate and not just transpose a plane of the model grid within the
  706. ! processor mesh). The issue is then, where should we put this costly edge
  707. ! in the transpose scheme we chose? To avoid being completely arbitrary,
  708. ! we chose a scheme most natural for models that use parallel spectral
  709. ! transforms, where the costly edge is the one that goes from the xz to
  710. ! the xy decomposition. (May be implemented as just a two step transpose
  711. ! back through yz).
  712. !
  713. ! Additional notational convention, below. The 'x' or 'y' appended to the
  714. ! dimension start or end variable refers to which grid dimension is all
  715. ! on-processor in the given decomposition. That is ipsx and ipex are the
  716. ! start and end for the i-dimension in the zy decomposition where x is
  717. ! on-processor. ('z' is assumed for xy decomposition and not appended to
  718. ! the ips, ipe, etc. variable names).
  719. !
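! Concrete (illustrative) example of the three decompositions, assuming a
! 4 x 2 processor mesh (ntasks_x = 4, ntasks_y = 2) and a 100 x 80 x 35
! (i,j,k) grid:
!   xy (XxYy): each task holds ~25 i-points and ~40 j-points, all 35 levels
!   zy (XzYy): each task holds ~9 levels and ~40 j-points, all 100 i-points
!   zx (XzYx): each task holds ~9 levels and ~50 i-points, all 80 j-points
!              (the i dimension is split over the 2 tasks in Y, as noted above)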
  720. ! XzYy decomposition
  721. kpsx = -1
  722. j = jds ;
  723. ierr = 0
  724. DO k = kds, kde
  725. CALL task_for_point ( k, j, kds, kde, jds, jde, ntasks_x, ntasks_y, Px, Py, &
  726. 1, 1, ierr )
  727. IF ( Px .EQ. mytask_x ) THEN
  728. kpex = k
  729. IF ( kpsx .EQ. -1 ) kpsx = k
  730. ENDIF
  731. ENDDO
  732. IF ( ierr .NE. 0 ) THEN
  733. CALL tfp_message(__FILE__,__LINE__)
  734. ENDIF
  735. ! handle case where no levels are assigned to this process
  736. ! no iterations. Do same for I and J. Need to handle memory alloc below.
  737. IF (kpsx .EQ. -1 ) THEN
  738. kpex = -1
  739. kpsx = 0
  740. ENDIF
  741. jpsx = -1
  742. k = kds ;
  743. ierr = 0
  744. DO j = jds, jde
  745. CALL task_for_point ( k, j, kds, kde, jds, jde, ntasks_x, ntasks_y, Px, Py, &
  746. 1, 1, ierr )
  747. IF ( Py .EQ. mytask_y ) THEN
  748. jpex = j
  749. IF ( jpsx .EQ. -1 ) jpsx = j
  750. ENDIF
  751. ENDDO
  752. IF ( ierr .NE. 0 ) THEN
  753. CALL tfp_message(__FILE__,__LINE__)
  754. ENDIF
  755. IF (jpsx .EQ. -1 ) THEN
  756. jpex = -1
  757. jpsx = 0
  758. ENDIF
  759. !begin: wig; 12-Mar-2008
  760. ! This appears redundant with the conditionals above, but we get cases with only
  761. ! one of the directions being set to "missing" when turning off extra processors.
  762. ! This may break the handling of setting only one of nproc_x or nproc_y via the namelist.
  763. IF (ipex .EQ. -1 .or. jpex .EQ. -1) THEN
  764. ipex = -1
  765. ipsx = 0
  766. jpex = -1
  767. jpsx = 0
  768. ENDIF
  769. !end: wig; 12-Mar-2008
  770. ! XzYx decomposition (note, x grid dim is decomposed over Y processor dim)
  771. kpsy = kpsx ! same as above
  772. kpey = kpex ! same as above
  773. ipsy = -1
  774. k = kds ;
  775. ierr = 0
  776. DO i = ids, ide
  777. CALL task_for_point ( i, k, ids, ide, kds, kde, ntasks_y, ntasks_x, Py, Px, &
  778. 1, 1, ierr ) ! x and y for proc mesh reversed
  779. IF ( Py .EQ. mytask_y ) THEN
  780. ipey = i
  781. IF ( ipsy .EQ. -1 ) ipsy = i
  782. ENDIF
  783. ENDDO
  784. IF ( ierr .NE. 0 ) THEN
  785. CALL tfp_message(__FILE__,__LINE__)
  786. ENDIF
  787. IF (ipsy .EQ. -1 ) THEN
  788. ipey = -1
  789. ipsy = 0
  790. ENDIF
  791. #else
  792. ! In case of NMM CORE, the domain only ever runs from ids..ide-1 and jds..jde-1 so
  793. ! adjust decomposition to reflect. 20051020 JM
  794. ips = -1
  795. j = jds
  796. ierr = 0
  797. DO i = ids, ide-1
  798. CALL task_for_point ( i, j, ids, ide-1, jds, jde-1, ntasks_x, ntasks_y, Px, Py, &
  799. 1, 1 , ierr )
  800. IF ( Px .EQ. mytask_x ) THEN
  801. ipe = i
  802. IF ( Px .EQ. ntasks_x-1 ) ipe = ipe + 1
  803. IF ( ips .EQ. -1 ) ips = i
  804. ENDIF
  805. ENDDO
  806. IF ( ierr .NE. 0 ) THEN
  807. CALL tfp_message(__FILE__,__LINE__)
  808. ENDIF
  809. jps = -1
  810. i = ids ;
  811. ierr = 0
  812. DO j = jds, jde-1
  813. CALL task_for_point ( i, j, ids, ide-1, jds, jde-1, ntasks_x, ntasks_y, Px, Py, &
  814. 1 , 1 , ierr )
  815. IF ( Py .EQ. mytask_y ) THEN
  816. jpe = j
  817. IF ( Py .EQ. ntasks_y-1 ) jpe = jpe + 1
  818. IF ( jps .EQ. -1 ) jps = j
  819. ENDIF
  820. ENDDO
  821. IF ( ierr .NE. 0 ) THEN
  822. CALL tfp_message(__FILE__,__LINE__)
  823. ENDIF
  824. #endif
  825. ! extend the patch dimensions out shw along edges of domain
  826. IF ( ips < ipe .and. jps < jpe ) THEN !wig; 11-Mar-2008
  827. IF ( mytask_x .EQ. 0 ) THEN
  828. ips = ips - shw
  829. ipsy = ipsy - shw
  830. ENDIF
  831. IF ( mytask_x .EQ. ntasks_x-1 ) THEN
  832. ipe = ipe + shw
  833. ipey = ipey + shw
  834. ENDIF
  835. IF ( mytask_y .EQ. 0 ) THEN
  836. jps = jps - shw
  837. jpsx = jpsx - shw
  838. ENDIF
  839. IF ( mytask_y .EQ. ntasks_y-1 ) THEN
  840. jpe = jpe + shw
  841. jpex = jpex + shw
  842. ENDIF
  843. ENDIF !wig; 11-Mar-2008
  844. kps = 1
  845. kpe = kde-kds+1
  846. kms = 1
  847. kme = kpe
  848. kmsx = kpsx
  849. kmex = kpex
  850. kmsy = kpsy
  851. kmey = kpey
  852. ! handle setting the memory dimensions where there are no levels assigned to this proc
  853. IF ( kpsx .EQ. 0 .AND. kpex .EQ. -1 ) THEN
  854. kmsx = 0
  855. kmex = 0
  856. ENDIF
  857. IF ( kpsy .EQ. 0 .AND. kpey .EQ. -1 ) THEN
  858. kmsy = 0
  859. kmey = 0
  860. ENDIF
  861. IF ( (jps .EQ. 0 .AND. jpe .EQ. -1) .OR. (ips .EQ. 0 .AND. ipe .EQ. -1) ) THEN
  862. ims = 0
  863. ime = 0
  864. ELSE
  865. ims = max( ips - max(shw,maxhalowidth), ids - bdx ) - 1
  866. ime = min( ipe + max(shw,maxhalowidth), ide + bdx ) + 1
  867. ENDIF
  868. imsx = ids
  869. imex = ide
  870. ipsx = imsx
  871. ipex = imex
  872. ! handle setting the memory dimensions where there are no Y elements assigned to this proc
  873. IF ( ipsy .EQ. 0 .AND. ipey .EQ. -1 ) THEN
  874. imsy = 0
  875. imey = 0
  876. ELSE
  877. imsy = ipsy
  878. imey = ipey
  879. ENDIF
  880. IF ( (jps .EQ. 0 .AND. jpe .EQ. -1) .OR. (ips .EQ. 0 .AND. ipe .EQ. -1) ) THEN
  881. jms = 0
  882. jme = 0
  883. ELSE
  884. jms = max( jps - max(shw,maxhalowidth), jds - bdy ) - 1
  885. jme = min( jpe + max(shw,maxhalowidth), jde + bdy ) + 1
  886. ENDIF
  887. jmsx = jpsx
  888. jmex = jpex
  889. jmsy = jds
  890. jmey = jde
  891. ! handle setting the memory dimensions where there are no X elements assigned to this proc
  892. IF ( jpsx .EQ. 0 .AND. jpex .EQ. -1 ) THEN
  893. jmsx = 0
  894. jmex = 0
  895. ELSE
  896. jpsy = jmsy
  897. jpey = jmey
  898. ENDIF
  899. END SUBROUTINE compute_memory_dims_rsl_lite
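As a worked example of the memory-dimension formulas above (illustrative values: ids = 1, ide = 100, bdx = 4, shw = 0 as in the call from patch_domain_rsl_lite, maxhalowidth = 6): an interior task owning ips = 26 .. ipe = 50 gets ims = max(26-6, 1-4) - 1 = 19 and ime = min(50+6, 100+4) + 1 = 57, i.e. its patch plus a maxhalowidth halo plus one extra point on each side; a task at the domain's low edge (ips = 1) instead gets ims = max(1-6, 1-4) - 1 = -4, so its memory extends bdx+1 points below ids to cover the lateral boundary region.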
  900. ! internal, used below for switching the argument to MPI calls
  901. ! if reals are being autopromoted to doubles in the build of WRF
  902. INTEGER function getrealmpitype()
  903. #ifndef STUBMPI
  904. IMPLICIT NONE
  905. INCLUDE 'mpif.h'
  906. INTEGER rtypesize, dtypesize, ierr
  907. CALL mpi_type_size ( MPI_REAL, rtypesize, ierr )
  908. CALL mpi_type_size ( MPI_DOUBLE_PRECISION, dtypesize, ierr )
  909. IF ( RWORDSIZE .EQ. rtypesize ) THEN
  910. getrealmpitype = MPI_REAL
  911. ELSE IF ( RWORDSIZE .EQ. dtypesize ) THEN
  912. getrealmpitype = MPI_DOUBLE_PRECISION
  913. ELSE
  914. CALL wrf_error_fatal ( 'RWORDSIZE or DWORDSIZE does not match any MPI type' )
  915. ENDIF
  916. #else
  917. ! required dummy initialization for function that is never called
  918. getrealmpitype = 1
  919. #endif
  920. RETURN
  921. END FUNCTION getrealmpitype
  922. REAL FUNCTION wrf_dm_max_real ( inval )
  923. IMPLICIT NONE
  924. #ifndef STUBMPI
  925. INCLUDE 'mpif.h'
  926. REAL inval, retval
  927. INTEGER ierr
  928. CALL mpi_allreduce ( inval, retval , 1, getrealmpitype(), MPI_MAX, local_communicator, ierr )
  929. wrf_dm_max_real = retval
  930. #else
  931. REAL inval
  932. wrf_dm_max_real = inval
  933. #endif
  934. END FUNCTION wrf_dm_max_real
  935. REAL FUNCTION wrf_dm_min_real ( inval )
  936. IMPLICIT NONE
  937. #ifndef STUBMPI
  938. INCLUDE 'mpif.h'
  939. REAL inval, retval
  940. INTEGER ierr
  941. CALL mpi_allreduce ( inval, retval , 1, getrealmpitype(), MPI_MIN, local_communicator, ierr )
  942. wrf_dm_min_real = retval
  943. #else
  944. REAL inval
  945. wrf_dm_min_real = inval
  946. #endif
  947. END FUNCTION wrf_dm_min_real
  948. SUBROUTINE wrf_dm_min_reals ( inval, retval, n )
  949. IMPLICIT NONE
  950. INTEGER n
  951. REAL inval(*)
  952. REAL retval(*)
  953. #ifndef STUBMPI
  954. INCLUDE 'mpif.h'
  955. INTEGER ierr
  956. CALL mpi_allreduce ( inval, retval , n, getrealmpitype(), MPI_MIN, local_communicator, ierr )
  957. #else
  958. retval(1:n) = inval(1:n)
  959. #endif
  960. END SUBROUTINE wrf_dm_min_reals
  961. REAL FUNCTION wrf_dm_sum_real ( inval )
  962. IMPLICIT NONE
  963. #ifndef STUBMPI
  964. INCLUDE 'mpif.h'
  965. REAL inval, retval
  966. INTEGER ierr
  967. CALL mpi_allreduce ( inval, retval , 1, getrealmpitype(), MPI_SUM, local_communicator, ierr )
  968. wrf_dm_sum_real = retval
  969. #else
  970. REAL inval
  971. wrf_dm_sum_real = inval
  972. #endif
  973. END FUNCTION wrf_dm_sum_real
  974. SUBROUTINE wrf_dm_sum_reals (inval, retval)
  975. IMPLICIT NONE
  976. REAL, INTENT(IN) :: inval(:)
  977. REAL, INTENT(OUT) :: retval(:)
  978. #ifndef STUBMPI
  979. INCLUDE 'mpif.h'
  980. INTEGER ierr
  981. CALL mpi_allreduce ( inval, retval, SIZE(inval), getrealmpitype(), MPI_SUM, local_communicator, ierr )
  982. #else
  983. retval = inval
  984. #endif
  985. END SUBROUTINE wrf_dm_sum_reals
  986. INTEGER FUNCTION wrf_dm_sum_integer ( inval )
  987. IMPLICIT NONE
  988. #ifndef STUBMPI
  989. INCLUDE 'mpif.h'
  990. INTEGER inval, retval
  991. INTEGER ierr
  992. CALL mpi_allreduce ( inval, retval , 1, MPI_INTEGER, MPI_SUM, local_communicator, ierr )
  993. wrf_dm_sum_integer = retval
  994. #else
  995. INTEGER inval
  996. wrf_dm_sum_integer = inval
  997. #endif
  998. END FUNCTION wrf_dm_sum_integer
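A hypothetical usage sketch for the reduction wrappers above (the field name, counters, and loop bounds are illustrative; it assumes module_dm is USE-associated and that ips/ipe, jps/jpe are the caller's patch bounds):

  ! accumulate a partial sum over this task's patch, then reduce globally
  local_sum = 0.
  DO j = jps, jpe
    DO i = ips, ipe
      local_sum = local_sum + field(i,j)
    ENDDO
  ENDDO
  global_sum = wrf_dm_sum_real( local_sum )            ! real scalar reduction
  n_points   = wrf_dm_sum_integer( local_point_count ) ! integer variant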
  999. #ifdef HWRF
  1000. SUBROUTINE wrf_dm_minloc_real ( val, lat, lon, z, idex, jdex )
  1001. IMPLICIT NONE
  1002. #ifndef STUBMPI
  1003. INCLUDE 'mpif.h'
  1004. REAL val, lat, lon, z
  1005. INTEGER idex, jdex, ierr
  1006. INTEGER dex(2)
  1007. REAL vll(4)
  1008. INTEGER dex_all (2,ntasks)
  1009. REAL vll_all(4,ntasks)
  1010. INTEGER i
  1011. vll= (/ val, lat, lon, z /)
  1012. dex(1) = idex ; dex(2) = jdex
  1013. CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, local_communicator, ierr )
  1014. CALL mpi_allgather ( vll, 4, getrealmpitype(), vll_all , 4, getrealmpitype(), local_communicator, ierr )
  1015. val = vll_all(1,1) ; lat = vll_all(2,1)
  1016. lon = vll_all(3,1) ; z = vll_all(4,1)
  1017. idex = dex_all(1,1) ; jdex = dex_all(2,1)
  1018. DO i = 2, ntasks
  1019. IF ( vll_all(1,i) .LT. val ) THEN
  1020. val = vll_all(1,i)
  1021. lat = vll_all(2,i)
  1022. lon = vll_all(3,i)
  1023. z = vll_all(4,i)
  1024. idex = dex_all(1,i)
  1025. jdex = dex_all(2,i)
  1026. ENDIF
  1027. ENDDO
  1028. #else
  1029. REAL val,lat,lon,z
  1030. INTEGER idex, jdex, ierr
  1031. #endif
  1032. END SUBROUTINE wrf_dm_minloc_real
  1033. SUBROUTINE wrf_dm_maxloc_real ( val, lat, lon, z, idex, jdex )
  1034. IMPLICIT NONE
  1035. #ifndef STUBMPI
  1036. INCLUDE 'mpif.h'
  1037. REAL val, lat, lon, z
  1038. INTEGER idex, jdex, ierr
  1039. INTEGER dex(2)
  1040. REAL vll(4)
  1041. INTEGER dex_all (2,ntasks)
  1042. REAL vll_all(4,ntasks)
  1043. INTEGER i
  1044. vll= (/ val, lat, lon, z /)
  1045. dex(1) = idex ; dex(2) = jdex
  1046. CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, local_communicator, ierr )
  1047. CALL mpi_allgather ( vll, 4, getrealmpitype(), vll_all , 4, getrealmpitype(), local_communicator, ierr )
  1048. val = vll_all(1,1) ; lat = vll_all(2,1)
  1049. lon = vll_all(3,1) ; z = vll_all(4,1)
  1050. idex = dex_all(1,1) ; jdex = dex_all(2,1)
  1051. DO i = 2, ntasks
  1052. IF ( vll_all(1,i) .GT. val ) THEN
  1053. val = vll_all(1,i)
  1054. lat = vll_all(2,i)
  1055. lon = vll_all(3,i)
  1056. z = vll_all(4,i)
  1057. idex = dex_all(1,i)
  1058. jdex = dex_all(2,i)
  1059. ENDIF
  1060. ENDDO
  1061. #else
  1062. REAL val,lat,lon,z
  1063. INTEGER idex, jdex, ierr
  1064. #endif
  1065. END SUBROUTINE wrf_dm_maxloc_real
  1066. #endif
  1067. INTEGER FUNCTION wrf_dm_bxor_integer ( inval )
  1068. IMPLICIT NONE
  1069. #ifndef STUBMPI
  1070. INCLUDE 'mpif.h'
  1071. INTEGER inval, retval
  1072. INTEGER ierr
  1073. CALL mpi_allreduce ( inval, retval , 1, MPI_INTEGER, MPI_BXOR, local_communicator, ierr )
  1074. wrf_dm_bxor_integer = retval
  1075. #else
  1076. INTEGER inval
  1077. wrf_dm_bxor_integer = inval
  1078. #endif
  1079. END FUNCTION wrf_dm_bxor_integer
  1080. SUBROUTINE wrf_dm_maxval_real ( val, idex, jdex )
  1081. IMPLICIT NONE
  1082. #ifndef STUBMPI
  1083. INCLUDE 'mpif.h'
  1084. REAL val, val_all( ntasks )
  1085. INTEGER idex, jdex, ierr
  1086. INTEGER dex(2)
  1087. INTEGER dex_all (2,ntasks)
  1088. INTEGER i
  1089. dex(1) = idex ; dex(2) = jdex
  1090. CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, local_communicator, ierr )
  1091. CALL mpi_allgather ( val, 1, getrealmpitype(), val_all , 1, getrealmpitype(), local_communicator, ierr )
  1092. val = val_all(1)
  1093. idex = dex_all(1,1) ; jdex = dex_all(2,1)
  1094. DO i = 2, ntasks
  1095. IF ( val_all(i) .GT. val ) THEN
  1096. val = val_all(i)
  1097. idex = dex_all(1,i)
  1098. jdex = dex_all(2,i)
  1099. ENDIF
  1100. ENDDO
  1101. #else
  1102. REAL val
  1103. INTEGER idex, jdex, ierr
  1104. #endif
  1105. END SUBROUTINE wrf_dm_maxval_real
  1106. #ifndef PROMOTE_FLOAT
  1107. SUBROUTINE wrf_dm_maxval_doubleprecision ( val, idex, jdex )
  1108. IMPLICIT NONE
  1109. # ifndef STUBMPI
  1110. INCLUDE 'mpif.h'
  1111. DOUBLE PRECISION val, val_all( ntasks )
  1112. INTEGER idex, jdex, ierr
  1113. INTEGER dex(2)
  1114. INTEGER dex_all (2,ntasks)
  1115. INTEGER i
  1116. dex(1) = idex ; dex(2) = jdex
  1117. CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, local_communicator, ierr )
  1118. CALL mpi_allgather ( val, 1, MPI_DOUBLE_PRECISION, val_all , 1, MPI_DOUBLE_PRECISION, local_communicator, ierr )
  1119. val = val_all(1)
  1120. idex = dex_all(1,1) ; jdex = dex_all(2,1)
  1121. DO i = 2, ntasks
  1122. IF ( val_all(i) .GT. val ) THEN
  1123. val = val_all(i)
  1124. idex = dex_all(1,i)
  1125. jdex = dex_all(2,i)
  1126. ENDIF
  1127. ENDDO
  1128. # else
  1129. DOUBLE PRECISION val
  1130. INTEGER idex, jdex, ierr
  1131. # endif
  1132. END SUBROUTINE wrf_dm_maxval_doubleprecision
  1133. #endif
  1134. SUBROUTINE wrf_dm_maxval_integer ( val, idex, jdex )
  1135. IMPLICIT NONE
  1136. #ifndef STUBMPI
  1137. INCLUDE 'mpif.h'
  1138. INTEGER val, val_all( ntasks )
  1139. INTEGER idex, jdex, ierr
  1140. INTEGER dex(2)
  1141. INTEGER dex_all (2,ntasks)
  1142. INTEGER i
  1143. dex(1) = idex ; dex(2) = jdex
  1144. CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, local_communicator, ierr )
  1145. CALL mpi_allgather ( val, 1, MPI_INTEGER, val_all , 1, MPI_INTEGER, local_communicator, ierr )
  1146. val = val_all(1)
  1147. idex = dex_all(1,1) ; jdex = dex_all(2,1)
  1148. DO i = 2, ntasks
  1149. IF ( val_all(i) .GT. val ) THEN
  1150. val = val_all(i)
  1151. idex = dex_all(1,i)
  1152. jdex = dex_all(2,i)
  1153. ENDIF
  1154. ENDDO
  1155. #else
  1156. INTEGER val
  1157. INTEGER idex, jdex
  1158. #endif
  1159. END SUBROUTINE wrf_dm_maxval_integer
  1160. ! For HWRF some additional computation is required. This is gopal's doing
  1161. SUBROUTINE wrf_dm_minval_real ( val, idex, jdex )
  1162. IMPLICIT NONE
  1163. REAL val, val_all( ntasks )
  1164. INTEGER idex, jdex, ierr
  1165. INTEGER dex(2)
  1166. INTEGER dex_all (2,ntasks)
  1167. ! <DESCRIPTION>
  1168. ! Collective operation. Each processor calls passing a local value and its index; on return
  1169. ! all processors are passed back th

Note: large files are truncated in this listing; the full file is available from the repository linked above.