
/WPS/geogrid/src/parallel_module.F

http://github.com/jbeezley/wrf-fire
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! MODULE PARALLEL_MODULE
!
! This module provides the routines used to decompose the domain over MPI
! tasks, exchange halo regions between neighboring tasks, gather and scatter
! whole fields to and from the I/O node, and broadcast values from one task
! to all others.
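!
! Illustrative usage from a driver program (a sketch only; the program name,
! array name, and domain size below are hypothetical, not part of this file):
!
!    program example
!       use parallel_module
!       implicit none
!       integer, parameter :: nx = 100, ny = 90
!       real, pointer, dimension(:,:,:) :: field
!       call parallel_start()
!       call parallel_get_tile_dims(nx, ny)
!       allocate(field(my_minx-HALO_WIDTH:my_maxx+HALO_WIDTH, &
!                      my_miny-HALO_WIDTH:my_maxy+HALO_WIDTH, 1:1))
!       field = real(my_proc_id)
!       call exchange_halo_r(field, &
!                            my_minx-HALO_WIDTH, my_maxx+HALO_WIDTH, &
!                            my_miny-HALO_WIDTH, my_maxy+HALO_WIDTH, 1, 1, &
!                            my_minx, my_maxx, my_miny, my_maxy, 1, 1)
!       call parallel_finish()
!    end program example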
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
module parallel_module

#ifdef _MPI
   include 'mpif.h'
#endif

   integer, parameter :: IO_NODE = 0
   integer, parameter :: HALO_WIDTH = 3

   integer, pointer, dimension(:,:) :: processors, &
                                       proc_minx, proc_maxx, &
                                       proc_miny, proc_maxy

   integer :: nprocs, &
              my_proc_id, &
              nproc_x, nproc_y, &
              my_x, my_y, &
              my_minx, my_miny, my_maxx, my_maxy, &
              comm

contains

!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! Name: parallel_start
!
! Purpose: For MPI, set up a communicator for a rectangular mesh of tasks and
!   determine how many processors there will be in the x and y directions.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   subroutine parallel_start()

      implicit none

      ! Arguments

      ! Local variables
#ifdef _MPI
      integer :: mpi_rank, mpi_size
      integer :: mpi_ierr
      integer, dimension(2) :: dims, coords
      integer :: rectangle, myleft, myright, mytop, mybottom
      integer :: mini, m, n
      logical, dimension(2) :: periods

      ! Find out our rank and the total number of processors
      call MPI_Init(mpi_ierr)
      call MPI_Comm_rank(MPI_COMM_WORLD, mpi_rank, mpi_ierr)
      call MPI_Comm_size(MPI_COMM_WORLD, mpi_size, mpi_ierr)

      comm = MPI_COMM_WORLD
      nprocs = mpi_size
      my_proc_id = mpi_rank

      ! Code from RSL to get number of procs in m and n directions
      mini = 2*nprocs
      nproc_x = 1
      nproc_y = nprocs
      do m = 1, nprocs
         if ( mod( nprocs, m ) == 0 ) then
            n = nprocs / m
            if ( abs(m-n) < mini ) then
               mini = abs(m-n)
               nproc_x = m
               nproc_y = n
            end if
         end if
      end do
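
      ! For example, with nprocs = 6 the loop above selects nproc_x = 2 and
      ! nproc_y = 3, the factor pair closest to a square.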

      ! Calculate which patch the current processor will work on
      my_x = mod(mpi_rank,nproc_x)
      my_y = mpi_rank / nproc_x
#else
      comm = 0
      my_proc_id = IO_NODE
      nprocs = 1
      my_x = 0
      my_y = 0
      nproc_x = 1
      nproc_y = 1
#endif

      nullify(processors)
      nullify(proc_minx)
      nullify(proc_maxx)
      nullify(proc_miny)
      nullify(proc_maxy)

   end subroutine parallel_start

!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! Name: parallel_get_tile_dims
!
! Purpose: To compute the starting and ending indices of the patch that the
!   calling processor is to work on. When there are multiple processors,
!   appropriate data structures describing the range of indices being
!   worked on by other processors are also allocated and filled
!   (processors, proc_minx, proc_maxx, proc_miny, proc_maxy).
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
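! For example, a 100 x 90 domain decomposed over a 2 x 3 mesh of tasks gives
! each task a 50 x 30 patch; my_minx/my_maxx and my_miny/my_maxy hold the
! calling task's bounds, and the proc_* arrays record everyone else's.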
   subroutine parallel_get_tile_dims(idim, jdim)

      implicit none

      ! Arguments
      integer, intent(in) :: idim, jdim

      ! Local variables
#ifdef _MPI
      integer :: i, j, ix, iy, px, py
      integer, dimension(2) :: buffer
      integer :: mpi_ierr
      integer, dimension(MPI_STATUS_SIZE) :: mpi_stat

      !
      ! Determine the starting and ending grid points in the x and y directions
      ! that we will work on
      !
      ! NOTE: For now, this code is copied from RSL_LITE's module_dm.F until the
      !       build mechanism for linking WRF and WPS code is worked out further.
      !       Eventually, it would probably be best to use the module_dm code
      !       directly rather than copying it.
      !
      my_minx = -1
      j = 1
      do i = 1, idim
         call task_for_point(i, j, 1, idim, 1, jdim, nproc_x, nproc_y, px, py)
         if ( px == my_x ) then
            my_maxx = i
            if ( my_minx == -1 ) my_minx = i
         end if
      end do

      my_miny = -1
      i = 1
      do j = 1, jdim
         call task_for_point(i, j, 1, idim, 1, jdim, nproc_x, nproc_y, px, py)
         if ( py == my_y ) then
            my_maxy = j
            if ( my_miny == -1 ) my_miny = j
         end if
      end do

      ! Create space to hold information about which other processors are
      !   working on which parts of the domain
      allocate(processors(0:nproc_x-1, 0:nproc_y-1))
      allocate(proc_minx(0:nproc_x-1, 0:nproc_y-1))
      allocate(proc_miny(0:nproc_x-1, 0:nproc_y-1))
      allocate(proc_maxx(0:nproc_x-1, 0:nproc_y-1))
      allocate(proc_maxy(0:nproc_x-1, 0:nproc_y-1))

      ! Exchange information with other processors
      if (my_proc_id == IO_NODE) then
         processors(my_x, my_y) = my_proc_id
         do i=1,nprocs-1
            call MPI_Recv(buffer, 2, MPI_INTEGER, i, MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
            processors(buffer(1), buffer(2)) = mpi_stat(MPI_SOURCE)
         end do
      else
         buffer(1) = my_x
         buffer(2) = my_y
         call MPI_Send(buffer, 2, MPI_INTEGER, 0, my_proc_id, comm, mpi_ierr)
      end if

      do ix=0,nproc_x-1
         do iy=0,nproc_y-1
            call parallel_bcast_int(processors(ix,iy), IO_NODE)
         end do
      end do

      proc_minx(my_x, my_y) = my_minx
      proc_maxx(my_x, my_y) = my_maxx
      proc_miny(my_x, my_y) = my_miny
      proc_maxy(my_x, my_y) = my_maxy

      do ix=0,nproc_x-1
         do iy=0,nproc_y-1
            call parallel_bcast_int(proc_minx(ix,iy), processors(ix,iy))
            call parallel_bcast_int(proc_maxx(ix,iy), processors(ix,iy))
            call parallel_bcast_int(proc_miny(ix,iy), processors(ix,iy))
            call parallel_bcast_int(proc_maxy(ix,iy), processors(ix,iy))
         end do
      end do
#else
      allocate(processors(0:nproc_x-1, 0:nproc_y-1))
      allocate(proc_minx(0:nproc_x-1, 0:nproc_y-1))
      allocate(proc_miny(0:nproc_x-1, 0:nproc_y-1))
      allocate(proc_maxx(0:nproc_x-1, 0:nproc_y-1))
      allocate(proc_maxy(0:nproc_x-1, 0:nproc_y-1))
      processors(0,0) = IO_NODE
      proc_minx(0,0) = 1
      proc_miny(0,0) = 1
      proc_maxx(0,0) = idim
      proc_maxy(0,0) = jdim
      my_minx = 1
      my_maxx = idim
      my_miny = 1
      my_maxy = jdim
#endif

   end subroutine parallel_get_tile_dims

!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! Name: task_for_point
!
! Purpose: Compute the (px, py) coordinates of the task that owns global grid
!   point (i_p, j_p). Copied from RSL_LITE's task_for_point.c until a good way
!   can be found to get the build mechanism to use the original code in RSL_LITE.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
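! For example, splitting a 100-point dimension over npx = 3 tasks gives the
! three tasks 33, 33, and 34 points, respectively: remainder points are
! absorbed by slightly larger blocks rather than being dropped.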
   subroutine task_for_point(i_p, j_p, ids_p, ide_p, jds_p, jde_p, npx, npy, px, py)

      implicit none

      ! Arguments
      integer, intent(in) :: i_p, j_p, ids_p, ide_p, jds_p, jde_p, npx, npy
      integer, intent(out) :: px, py

      ! Local variables
      integer :: a, b, rem, idim, jdim, i, j, ids, jds, ide, jde

      i = i_p - 1
      j = j_p - 1
      ids = ids_p - 1
      jds = jds_p - 1
      ide = ide_p - 1
      jde = jde_p - 1

      idim = ide-ids+1
      jdim = jde-jds+1

      i = max(i,ids)
      i = min(i,ide)
      rem = mod(idim, npx)
      a = ( rem / 2 ) * ( (idim / npx) + 1 )
      b = a + ( npx - rem ) * ( idim / npx )
      if ( i-ids < a ) then
         px = (i-ids) / ( (idim / npx) + 1 )
      else if ( i-ids < b ) then
         px = ( a / ( (idim / npx) + 1 ) ) + (i-a-ids) / ( ( b - a ) / ( npx - rem ) )
      else
         px = ( a / ( (idim / npx) + 1 ) ) + (b-a-ids) / ( ( b - a ) / ( npx - rem ) ) + &
              (i-b-ids) / ( ( idim / npx ) + 1 )
      end if

      j = max(j,jds)
      j = min(j,jde)
      rem = mod(jdim, npy)
      a = ( rem / 2 ) * ( (jdim / npy) + 1 )
      b = a + ( npy - rem ) * ( jdim / npy )
      if ( j-jds < a ) then
         py = (j-jds) / ( (jdim / npy) + 1 )
      else if ( j-jds < b ) then
         py = ( a / ( (jdim / npy) + 1 ) ) + (j-a-jds) / ( ( b - a ) / ( npy - rem ) )
      else
         py = ( a / ( (jdim / npy) + 1 ) ) + (b-a-jds) / ( ( b - a ) / ( npy - rem ) ) + &
              (j-b-jds) / ( ( jdim / npy ) + 1 )
      end if

   end subroutine task_for_point

!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! Name: gather_whole_field_i
!
! Purpose: Gather an integer field, distributed as patches across all tasks,
!   onto the I/O node as a single domain-sized array.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
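! Each non-I/O task first sends its patch bounds in the second and first
! dimensions, then sends its patch one vertical level at a time; the I/O node
! receives each message directly into the matching slab of domain_array.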
   subroutine gather_whole_field_i(patch_array, ms1, me1, ms2, me2, ms3, me3, &
                                   ps1, pe1, ps2, pe2, ps3, pe3, &
                                   domain_array, ds1, de1, ds2, de2, ds3, de3)

      implicit none

      ! Arguments
      integer, intent(in) :: ps1, pe1, ps2, pe2, ps3, pe3, &
                             ms1, me1, ms2, me2, ms3, me3, &
                             ds1, de1, ds2, de2, ds3, de3
      integer, dimension(ms1:me1,ms2:me2,ms3:me3), intent(in) :: patch_array
      integer, dimension(ds1:de1,ds2:de2,ds3:de3), intent(inout) :: domain_array

      ! Local variables
#ifdef _MPI
      integer :: i, ii, j, jj, kk
      integer, dimension(2) :: idims, jdims
      integer :: mpi_ierr
      integer, dimension(MPI_STATUS_SIZE) :: mpi_stat

      if (my_proc_id == IO_NODE) then

         do j=0,nproc_y-1
            do i=0,nproc_x-1
               if (processors(i,j) /= IO_NODE) then
                  call MPI_Recv(jdims, 2, MPI_INTEGER, processors(i,j), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
                  call MPI_Recv(idims, 2, MPI_INTEGER, processors(i,j), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
                  do kk=ds3,de3
                     ! BUG: Check on mpi_stat and mpi_ierr
                     call MPI_Recv(domain_array(idims(1):idims(2),jdims(1):jdims(2),kk), &
                                   (idims(2)-idims(1)+1)*(jdims(2)-jdims(1)+1), &
                                   MPI_INTEGER, processors(i,j), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
                  end do
               else
                  domain_array(ps1:pe1,ps2:pe2,ps3:pe3) = patch_array(ps1:pe1,ps2:pe2,ps3:pe3)
               end if
            end do
         end do

      else

         jdims(1) = ps2
         jdims(2) = pe2
         call MPI_Send(jdims, 2, MPI_INTEGER, 0, my_proc_id, comm, mpi_ierr)
         idims(1) = ps1
         idims(2) = pe1
         call MPI_Send(idims, 2, MPI_INTEGER, 0, my_proc_id, comm, mpi_ierr)
         do kk=ps3,pe3
            call MPI_Send(patch_array(ps1:pe1,ps2:pe2,kk), (pe1-ps1+1)*(pe2-ps2+1), MPI_INTEGER, 0, my_proc_id, comm, mpi_ierr)
            ! BUG: Check on mpi_ierr
         end do

      end if
#endif

   end subroutine gather_whole_field_i

!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! Name: gather_whole_field_r
!
! Purpose: Gather a real field, distributed as patches across all tasks,
!   onto the I/O node as a single domain-sized array.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   subroutine gather_whole_field_r(patch_array, ms1, me1, ms2, me2, ms3, me3, &
                                   ps1, pe1, ps2, pe2, ps3, pe3, &
                                   domain_array, ds1, de1, ds2, de2, ds3, de3)

      implicit none

      ! Arguments
      integer, intent(in) :: ps1, pe1, ps2, pe2, ps3, pe3, &
                             ms1, me1, ms2, me2, ms3, me3, &
                             ds1, de1, ds2, de2, ds3, de3
      real, dimension(ms1:me1,ms2:me2,ms3:me3), intent(in) :: patch_array
      real, dimension(ds1:de1,ds2:de2,ds3:de3), intent(inout) :: domain_array

      ! Local variables
#ifdef _MPI
      integer :: i, ii, j, jj, kk
      integer, dimension(2) :: idims, jdims
      integer :: mpi_ierr
      integer, dimension(MPI_STATUS_SIZE) :: mpi_stat

      if (my_proc_id == IO_NODE) then

         do j=0,nproc_y-1
            do i=0,nproc_x-1
               if (processors(i,j) /= IO_NODE) then
                  call MPI_Recv(jdims, 2, MPI_INTEGER, processors(i,j), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
                  call MPI_Recv(idims, 2, MPI_INTEGER, processors(i,j), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
                  do kk=ds3,de3
                     ! BUG: Check on mpi_stat and mpi_ierr
                     call MPI_Recv(domain_array(idims(1):idims(2),jdims(1):jdims(2),kk), &
                                   (idims(2)-idims(1)+1)*(jdims(2)-jdims(1)+1), &
                                   MPI_REAL, processors(i,j), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
                  end do
               else
                  domain_array(ps1:pe1,ps2:pe2,ps3:pe3) = patch_array(ps1:pe1,ps2:pe2,ps3:pe3)
               end if
            end do
         end do

      else

         jdims(1) = ps2
         jdims(2) = pe2
         call MPI_Send(jdims, 2, MPI_INTEGER, 0, my_proc_id, comm, mpi_ierr)
         idims(1) = ps1
         idims(2) = pe1
         call MPI_Send(idims, 2, MPI_INTEGER, 0, my_proc_id, comm, mpi_ierr)
         do kk=ps3,pe3
            call MPI_Send(patch_array(ps1:pe1,ps2:pe2,kk), (pe1-ps1+1)*(pe2-ps2+1), MPI_REAL, 0, my_proc_id, comm, mpi_ierr)
            ! BUG: Check on mpi_ierr
         end do

      end if
#endif

   end subroutine gather_whole_field_r

!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! Name: scatter_whole_field_i
!
! Purpose: Scatter a domain-sized integer array from the I/O node to all
!   tasks, each task receiving the patch it owns.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
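! This is the reverse of the gather routines: each non-I/O task sends its
! patch bounds to the I/O node and then receives its portion of domain_array
! one vertical level at a time, while the I/O node copies its own patch
! directly.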
   subroutine scatter_whole_field_i(patch_array, ms1, me1, ms2, me2, ms3, me3, &
                                    ps1, pe1, ps2, pe2, ps3, pe3, &
                                    domain_array, ds1, de1, ds2, de2, ds3, de3)

      implicit none

      ! Arguments
      integer, intent(in) :: ps1, pe1, ps2, pe2, ps3, pe3, &
                             ms1, me1, ms2, me2, ms3, me3, &
                             ds1, de1, ds2, de2, ds3, de3
      integer, dimension(ms1:me1,ms2:me2,ms3:me3), intent(inout) :: patch_array
      integer, dimension(ds1:de1,ds2:de2,ds3:de3), intent(in) :: domain_array

      ! Local variables
#ifdef _MPI
      integer :: i, ii, j, jj, kk
      integer, dimension(2) :: idims, jdims
      integer :: mpi_ierr
      integer, dimension(MPI_STATUS_SIZE) :: mpi_stat

      if (my_proc_id == IO_NODE) then

         do j=0,nproc_y-1
            do i=0,nproc_x-1
               if (processors(i,j) /= IO_NODE) then
                  call MPI_Recv(jdims, 2, MPI_INTEGER, processors(i,j), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
                  call MPI_Recv(idims, 2, MPI_INTEGER, processors(i,j), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
                  do kk=ds3,de3
                     ! BUG: Check on mpi_stat and mpi_ierr
                     call MPI_Send(domain_array(idims(1):idims(2),jdims(1):jdims(2),kk), &
                                   (idims(2)-idims(1)+1)*(jdims(2)-jdims(1)+1), &
                                   MPI_INTEGER, processors(i,j), my_proc_id, comm, mpi_ierr)
                  end do
               else
                  patch_array(ps1:pe1,ps2:pe2,ps3:pe3) = domain_array(ps1:pe1,ps2:pe2,ps3:pe3)
               end if
            end do
         end do

      else

         jdims(1) = ps2
         jdims(2) = pe2
         call MPI_Send(jdims, 2, MPI_INTEGER, 0, my_proc_id, comm, mpi_ierr)
         idims(1) = ps1
         idims(2) = pe1
         call MPI_Send(idims, 2, MPI_INTEGER, 0, my_proc_id, comm, mpi_ierr)
         do kk=ps3,pe3
            call MPI_Recv(patch_array(ps1:pe1,ps2:pe2,kk), (pe1-ps1+1)*(pe2-ps2+1), &
                          MPI_INTEGER, 0, MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
            ! BUG: Check on mpi_ierr
         end do

      end if
#endif

   end subroutine scatter_whole_field_i

!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! Name: scatter_whole_field_r
!
! Purpose: Scatter a domain-sized real array from the I/O node to all tasks,
!   each task receiving the patch it owns.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   subroutine scatter_whole_field_r(patch_array, ms1, me1, ms2, me2, ms3, me3, &
                                    ps1, pe1, ps2, pe2, ps3, pe3, &
                                    domain_array, ds1, de1, ds2, de2, ds3, de3)

      implicit none

      ! Arguments
      integer, intent(in) :: ps1, pe1, ps2, pe2, ps3, pe3, &
                             ms1, me1, ms2, me2, ms3, me3, &
                             ds1, de1, ds2, de2, ds3, de3
      real, dimension(ms1:me1,ms2:me2,ms3:me3), intent(inout) :: patch_array
      real, dimension(ds1:de1,ds2:de2,ds3:de3), intent(in) :: domain_array

      ! Local variables
#ifdef _MPI
      integer :: i, ii, j, jj, kk
      integer, dimension(2) :: idims, jdims
      integer :: mpi_ierr
      integer, dimension(MPI_STATUS_SIZE) :: mpi_stat

      if (my_proc_id == IO_NODE) then

         do j=0,nproc_y-1
            do i=0,nproc_x-1
               if (processors(i,j) /= IO_NODE) then
                  call MPI_Recv(jdims, 2, MPI_INTEGER, processors(i,j), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
                  call MPI_Recv(idims, 2, MPI_INTEGER, processors(i,j), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
                  do kk=ds3,de3
                     ! BUG: Check on mpi_stat and mpi_ierr
                     call MPI_Send(domain_array(idims(1):idims(2),jdims(1):jdims(2),kk), &
                                   (idims(2)-idims(1)+1)*(jdims(2)-jdims(1)+1), &
                                   MPI_REAL, processors(i,j), my_proc_id, comm, mpi_ierr)
                  end do
               else
                  patch_array(ps1:pe1,ps2:pe2,ps3:pe3) = domain_array(ps1:pe1,ps2:pe2,ps3:pe3)
               end if
            end do
         end do

      else

         jdims(1) = ps2
         jdims(2) = pe2
         call MPI_Send(jdims, 2, MPI_INTEGER, 0, my_proc_id, comm, mpi_ierr)
         idims(1) = ps1
         idims(2) = pe1
         call MPI_Send(idims, 2, MPI_INTEGER, 0, my_proc_id, comm, mpi_ierr)
         do kk=ps3,pe3
            call MPI_Recv(patch_array(ps1:pe1,ps2:pe2,kk), (pe1-ps1+1)*(pe2-ps2+1), &
                          MPI_REAL, 0, MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
            ! BUG: Check on mpi_ierr
         end do

      end if
#endif

   end subroutine scatter_whole_field_r

!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! Name: exchange_halo_r
!
! Purpose: Update the halo (ghost) region of a real patch array by exchanging
!   edge and corner strips of width HALO_WIDTH with neighboring tasks.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
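! The exchange is done with point-to-point sends and receives, one (jj,kk)
! pencil or row at a time, first for the four edges and then for the four
! corners; a task on the domain boundary simply skips the missing neighbor.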
   subroutine exchange_halo_r(patch_array, &
                              ms1, me1, ms2, me2, ms3, me3, &
                              ps1, pe1, ps2, pe2, ps3, pe3)

      implicit none

      ! Arguments
      integer, intent(in) :: ps1, pe1, ps2, pe2, ps3, pe3, &
                             ms1, me1, ms2, me2, ms3, me3
      real, dimension(ms1:me1,ms2:me2,ms3:me3), intent(inout) :: patch_array

      ! Local variables
#ifdef _MPI
      integer :: jj, kk
      integer :: mpi_ierr
      integer, dimension(MPI_STATUS_SIZE) :: mpi_stat

      !
      ! Get left edge of halo
      !
      if (my_x /= (nproc_x - 1)) then
         do kk=ps3,pe3
            do jj=ms2,me2
               call MPI_Send(patch_array(pe1-HALO_WIDTH+1:pe1,jj,kk), HALO_WIDTH, MPI_REAL, &
                             processors(my_x+1,my_y), my_proc_id, comm, mpi_ierr)
            end do
         end do
      end if
      if (my_x /= 0) then
         do kk=ps3,pe3
            do jj=ms2,me2
               call MPI_Recv(patch_array(ms1:ms1+HALO_WIDTH-1,jj,kk), HALO_WIDTH, MPI_REAL, &
                             processors(my_x-1,my_y), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
            end do
         end do
      end if

      !
      ! Get right edge of halo
      !
      if (my_x /= 0) then
         do kk=ps3,pe3
            do jj=ms2,me2
               call MPI_Send(patch_array(ps1:ps1+HALO_WIDTH-1,jj,kk), HALO_WIDTH, MPI_REAL, &
                             processors(my_x-1,my_y), my_proc_id, comm, mpi_ierr)
            end do
         end do
      end if
      if (my_x /= (nproc_x - 1)) then
         do kk=ps3,pe3
            do jj=ms2,me2
               call MPI_Recv(patch_array(me1-HALO_WIDTH+1:me1,jj,kk), HALO_WIDTH, MPI_REAL, &
                             processors(my_x+1,my_y), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
            end do
         end do
      end if

      !
      ! Get bottom edge of halo
      !
      if (my_y /= (nproc_y - 1)) then
         do kk=ps3,pe3
            do jj=pe2-HALO_WIDTH+1,pe2
               call MPI_Send(patch_array(ms1:me1,jj,kk), (me1-ms1+1), MPI_REAL, &
                             processors(my_x,my_y+1), my_proc_id, comm, mpi_ierr)
            end do
         end do
      end if
      if (my_y /= 0) then
         do kk=ps3,pe3
            do jj=ms2,ms2+HALO_WIDTH-1
               call MPI_Recv(patch_array(ms1:me1,jj,kk), (me1-ms1+1), MPI_REAL, &
                             processors(my_x,my_y-1), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
            end do
         end do
      end if

      !
      ! Get top edge of halo
      !
      if (my_y /= 0) then
         do kk=ps3,pe3
            do jj=ps2,ps2+HALO_WIDTH-1
               call MPI_Send(patch_array(ms1:me1,jj,kk), (me1-ms1+1), MPI_REAL, &
                             processors(my_x,my_y-1), my_proc_id, comm, mpi_ierr)
            end do
         end do
      end if
      if (my_y /= (nproc_y - 1)) then
         do kk=ps3,pe3
            do jj=me2-HALO_WIDTH+1,me2
               call MPI_Recv(patch_array(ms1:me1,jj,kk), (me1-ms1+1), MPI_REAL, &
                             processors(my_x,my_y+1), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
            end do
         end do
      end if

      !
      ! Get lower-right corner of halo
      !
      if (my_y /= (nproc_y - 1) .and. my_x /= 0) then
         do kk=ps3,pe3
            do jj=pe2-HALO_WIDTH+1,pe2
               call MPI_Send(patch_array(ps1:ps1+HALO_WIDTH-1,jj,kk), HALO_WIDTH, MPI_REAL, &
                             processors(my_x-1,my_y+1), my_proc_id, comm, mpi_ierr)
            end do
         end do
      end if
      if (my_y /= 0 .and. my_x /= (nproc_x - 1)) then
         do kk=ps3,pe3
            do jj=ms2,ms2+HALO_WIDTH-1
               call MPI_Recv(patch_array(me1-HALO_WIDTH+1:me1,jj,kk), HALO_WIDTH, MPI_REAL, &
                             processors(my_x+1,my_y-1), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
            end do
         end do
      end if

      !
      ! Get upper-left corner of halo
      !
      if (my_y /= 0 .and. my_x /= (nproc_x - 1)) then
         do kk=ps3,pe3
            do jj=ps2,ps2+HALO_WIDTH-1
               call MPI_Send(patch_array(pe1-HALO_WIDTH+1:pe1,jj,kk), HALO_WIDTH, MPI_REAL, &
                             processors(my_x+1,my_y-1), my_proc_id, comm, mpi_ierr)
            end do
         end do
      end if
      if (my_y /= (nproc_y - 1) .and. my_x /= 0) then
         do kk=ps3,pe3
            do jj=me2-HALO_WIDTH+1,me2
               call MPI_Recv(patch_array(ms1:ms1+HALO_WIDTH-1,jj,kk), HALO_WIDTH, MPI_REAL, &
                             processors(my_x-1,my_y+1), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
            end do
         end do
      end if

      !
      ! Get upper-right corner of halo
      !
      if (my_y /= 0 .and. my_x /= 0) then
         do kk=ps3,pe3
            do jj=ps2,ps2+HALO_WIDTH-1
               call MPI_Send(patch_array(ps1:ps1+HALO_WIDTH-1,jj,kk), HALO_WIDTH, MPI_REAL, &
                             processors(my_x-1,my_y-1), my_proc_id, comm, mpi_ierr)
            end do
         end do
      end if
      if (my_y /= (nproc_y - 1) .and. my_x /= (nproc_x - 1)) then
         do kk=ps3,pe3
            do jj=me2-HALO_WIDTH+1,me2
               call MPI_Recv(patch_array(me1-HALO_WIDTH+1:me1,jj,kk), HALO_WIDTH, MPI_REAL, &
                             processors(my_x+1,my_y+1), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
            end do
         end do
      end if

      !
      ! Get lower-left corner of halo
      !
      if (my_y /= (nproc_y - 1) .and. my_x /= (nproc_x - 1)) then
         do kk=ps3,pe3
            do jj=pe2-HALO_WIDTH+1,pe2
               call MPI_Send(patch_array(pe1-HALO_WIDTH+1:pe1,jj,kk), HALO_WIDTH, MPI_REAL, &
                             processors(my_x+1,my_y+1), my_proc_id, comm, mpi_ierr)
            end do
         end do
      end if
      if (my_y /= 0 .and. my_x /= 0) then
         do kk=ps3,pe3
            do jj=ms2,ms2+HALO_WIDTH-1
               call MPI_Recv(patch_array(ms1:ms1+HALO_WIDTH-1,jj,kk), HALO_WIDTH, MPI_REAL, &
                             processors(my_x-1,my_y-1), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
            end do
         end do
      end if
#endif

   end subroutine exchange_halo_r

!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! Name: exchange_halo_i
!
! Purpose: As exchange_halo_r, but for an integer patch array: update the
!   halo (ghost) region by exchanging edge and corner strips of width
!   HALO_WIDTH with neighboring tasks.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   subroutine exchange_halo_i(patch_array, &
                              ms1, me1, ms2, me2, ms3, me3, &
                              ps1, pe1, ps2, pe2, ps3, pe3)

      implicit none

      ! Arguments
      integer, intent(in) :: ps1, pe1, ps2, pe2, ps3, pe3, &
                             ms1, me1, ms2, me2, ms3, me3
      integer, dimension(ms1:me1,ms2:me2,ms3:me3), intent(inout) :: patch_array

      ! Local variables
#ifdef _MPI
      integer :: jj, kk
      integer :: mpi_ierr
      integer, dimension(MPI_STATUS_SIZE) :: mpi_stat

      !
      ! Get left edge of halo
      !
      if (my_x /= (nproc_x - 1)) then
         do kk=ps3,pe3
            do jj=ms2,me2
               call MPI_Send(patch_array(pe1-HALO_WIDTH+1:pe1,jj,kk), HALO_WIDTH, MPI_INTEGER, &
                             processors(my_x+1,my_y), my_proc_id, comm, mpi_ierr)
            end do
         end do
      end if
      if (my_x /= 0) then
         do kk=ps3,pe3
            do jj=ms2,me2
               call MPI_Recv(patch_array(ms1:ms1+HALO_WIDTH-1,jj,kk), HALO_WIDTH, MPI_INTEGER, &
                             processors(my_x-1,my_y), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
            end do
         end do
      end if

      !
      ! Get right edge of halo
      !
      if (my_x /= 0) then
         do kk=ps3,pe3
            do jj=ms2,me2
               call MPI_Send(patch_array(ps1:ps1+HALO_WIDTH-1,jj,kk), HALO_WIDTH, MPI_INTEGER, &
                             processors(my_x-1,my_y), my_proc_id, comm, mpi_ierr)
            end do
         end do
      end if
      if (my_x /= (nproc_x - 1)) then
         do kk=ps3,pe3
            do jj=ms2,me2
               call MPI_Recv(patch_array(me1-HALO_WIDTH+1:me1,jj,kk), HALO_WIDTH, MPI_INTEGER, &
                             processors(my_x+1,my_y), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
            end do
         end do
      end if

      !
      ! Get bottom edge of halo
      !
      if (my_y /= (nproc_y - 1)) then
         do kk=ps3,pe3
            do jj=pe2-HALO_WIDTH+1,pe2
               call MPI_Send(patch_array(ms1:me1,jj,kk), (me1-ms1+1), MPI_INTEGER, &
                             processors(my_x,my_y+1), my_proc_id, comm, mpi_ierr)
            end do
         end do
      end if
      if (my_y /= 0) then
         do kk=ps3,pe3
            do jj=ms2,ms2+HALO_WIDTH-1
               call MPI_Recv(patch_array(ms1:me1,jj,kk), (me1-ms1+1), MPI_INTEGER, &
                             processors(my_x,my_y-1), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
            end do
         end do
      end if

      !
      ! Get top edge of halo
      !
      if (my_y /= 0) then
         do kk=ps3,pe3
            do jj=ps2,ps2+HALO_WIDTH-1
               call MPI_Send(patch_array(ms1:me1,jj,kk), (me1-ms1+1), MPI_INTEGER, &
                             processors(my_x,my_y-1), my_proc_id, comm, mpi_ierr)
            end do
         end do
      end if
      if (my_y /= (nproc_y - 1)) then
         do kk=ps3,pe3
            do jj=me2-HALO_WIDTH+1,me2
               call MPI_Recv(patch_array(ms1:me1,jj,kk), (me1-ms1+1), MPI_INTEGER, &
                             processors(my_x,my_y+1), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
            end do
         end do
      end if

      !
      ! Get lower-right corner of halo
      !
      if (my_y /= (nproc_y - 1) .and. my_x /= 0) then
         do kk=ps3,pe3
            do jj=pe2-HALO_WIDTH+1,pe2
               call MPI_Send(patch_array(ps1:ps1+HALO_WIDTH-1,jj,kk), HALO_WIDTH, MPI_INTEGER, &
                             processors(my_x-1,my_y+1), my_proc_id, comm, mpi_ierr)
            end do
         end do
      end if
      if (my_y /= 0 .and. my_x /= (nproc_x - 1)) then
         do kk=ps3,pe3
            do jj=ms2,ms2+HALO_WIDTH-1
               call MPI_Recv(patch_array(me1-HALO_WIDTH+1:me1,jj,kk), HALO_WIDTH, MPI_INTEGER, &
                             processors(my_x+1,my_y-1), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
            end do
         end do
      end if

      !
      ! Get upper-left corner of halo
      !
      if (my_y /= 0 .and. my_x /= (nproc_x - 1)) then
         do kk=ps3,pe3
            do jj=ps2,ps2+HALO_WIDTH-1
               call MPI_Send(patch_array(pe1-HALO_WIDTH+1:pe1,jj,kk), HALO_WIDTH, MPI_INTEGER, &
                             processors(my_x+1,my_y-1), my_proc_id, comm, mpi_ierr)
            end do
         end do
      end if
      if (my_y /= (nproc_y - 1) .and. my_x /= 0) then
         do kk=ps3,pe3
            do jj=me2-HALO_WIDTH+1,me2
               call MPI_Recv(patch_array(ms1:ms1+HALO_WIDTH-1,jj,kk), HALO_WIDTH, MPI_INTEGER, &
                             processors(my_x-1,my_y+1), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
            end do
         end do
      end if

      !
      ! Get upper-right corner of halo
      !
      if (my_y /= 0 .and. my_x /= 0) then
         do kk=ps3,pe3
            do jj=ps2,ps2+HALO_WIDTH-1
               call MPI_Send(patch_array(ps1:ps1+HALO_WIDTH-1,jj,kk), HALO_WIDTH, MPI_INTEGER, &
                             processors(my_x-1,my_y-1), my_proc_id, comm, mpi_ierr)
            end do
         end do
      end if
      if (my_y /= (nproc_y - 1) .and. my_x /= (nproc_x - 1)) then
         do kk=ps3,pe3
            do jj=me2-HALO_WIDTH+1,me2
               call MPI_Recv(patch_array(me1-HALO_WIDTH+1:me1,jj,kk), HALO_WIDTH, MPI_INTEGER, &
                             processors(my_x+1,my_y+1), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
            end do
         end do
      end if

      !
      ! Get lower-left corner of halo
      !
      if (my_y /= (nproc_y - 1) .and. my_x /= (nproc_x - 1)) then
         do kk=ps3,pe3
            do jj=pe2-HALO_WIDTH+1,pe2
               call MPI_Send(patch_array(pe1-HALO_WIDTH+1:pe1,jj,kk), HALO_WIDTH, MPI_INTEGER, &
                             processors(my_x+1,my_y+1), my_proc_id, comm, mpi_ierr)
            end do
         end do
      end if
      if (my_y /= 0 .and. my_x /= 0) then
         do kk=ps3,pe3
            do jj=ms2,ms2+HALO_WIDTH-1
               call MPI_Recv(patch_array(ms1:ms1+HALO_WIDTH-1,jj,kk), HALO_WIDTH, MPI_INTEGER, &
                             processors(my_x-1,my_y-1), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
            end do
         end do
      end if
#endif

   end subroutine exchange_halo_i

!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! Name: parallel_bcast_logical
!
! Purpose: Broadcast a logical value from the I/O node to all tasks.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   subroutine parallel_bcast_logical(lval)

      implicit none

      ! Argument
      logical, intent(inout) :: lval

      ! Local variables
#ifdef _MPI
      integer :: mpi_ierr

      call MPI_Bcast(lval, 1, MPI_LOGICAL, IO_NODE, comm, mpi_ierr)
#endif

   end subroutine parallel_bcast_logical

!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! Name: parallel_bcast_int
!
! Purpose: Broadcast an integer value to all tasks from from_whom, or from
!   the I/O node if from_whom is not given.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   subroutine parallel_bcast_int(ival, from_whom)

      implicit none

      ! Arguments
      integer, intent(inout) :: ival
      integer, intent(in), optional :: from_whom

      ! Local variables
#ifdef _MPI
      integer :: mpi_ierr

      if (present(from_whom)) then
         call MPI_Bcast(ival, 1, MPI_INTEGER, from_whom, comm, mpi_ierr)
      else
         call MPI_Bcast(ival, 1, MPI_INTEGER, IO_NODE, comm, mpi_ierr)
      end if
#endif

   end subroutine parallel_bcast_int

!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! Name: parallel_bcast_real
!
! Purpose: Broadcast a real value to all tasks from from_whom, or from the
!   I/O node if from_whom is not given.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   subroutine parallel_bcast_real(rval, from_whom)

      implicit none

      ! Arguments
      real, intent(inout) :: rval
      integer, intent(in), optional :: from_whom

      ! Local variables
#ifdef _MPI
      integer :: mpi_ierr

      if (present(from_whom)) then
         call MPI_Bcast(rval, 1, MPI_REAL, from_whom, comm, mpi_ierr)
      else
         call MPI_Bcast(rval, 1, MPI_REAL, IO_NODE, comm, mpi_ierr)
      end if
#endif

   end subroutine parallel_bcast_real

!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! Name: parallel_bcast_char
!
! Purpose: Broadcast a character string of length n to all tasks from
!   from_whom, or from the I/O node if from_whom is not given.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   subroutine parallel_bcast_char(cval, n, from_whom)

      implicit none

      ! Arguments
      integer, intent(in) :: n
      character (len=n), intent(inout) :: cval
      integer, intent(in), optional :: from_whom

      ! Local variables
#ifdef _MPI
      integer :: mpi_ierr

      if (present(from_whom)) then
         call MPI_Bcast(cval, n, MPI_CHARACTER, from_whom, comm, mpi_ierr)
      else
         call MPI_Bcast(cval, n, MPI_CHARACTER, IO_NODE, comm, mpi_ierr)
      end if
#endif

   end subroutine parallel_bcast_char

!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! Name: parallel_finish
!
! Purpose: Deallocate the decomposition arrays and, for MPI, finalize.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   subroutine parallel_finish()

      implicit none

      ! Arguments

      ! Local variables
#ifdef _MPI
      integer :: mpi_ierr

      call MPI_Finalize(mpi_ierr)
#endif

      if (associated(processors)) deallocate(processors)
      if (associated(proc_minx)) deallocate(proc_minx)
      if (associated(proc_maxx)) deallocate(proc_maxx)
      if (associated(proc_miny)) deallocate(proc_miny)
      if (associated(proc_maxy)) deallocate(proc_maxy)

   end subroutine parallel_finish

!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! Name: parallel_abort
!
! Purpose: Abort all tasks and terminate the program.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   subroutine parallel_abort()

      implicit none

      ! Arguments

      ! Local variables
#ifdef _MPI
      integer :: mpi_ierr, mpi_errcode

      call MPI_Abort(MPI_COMM_WORLD, mpi_errcode, mpi_ierr)
#endif

      stop

   end subroutine parallel_abort

end module parallel_module