/wrfv2_fire/external/RSL_LITE/module_dm.F
FORTRAN Legacy | 4562 lines | 3088 code | 503 blank | 971 comment | 19 complexity | f5d2014a5e852efe6c1288f8887dbdf5 MD5 | raw file
Possible License(s): AGPL-1.0
Large files are truncated; view the full file for the remainder
- !WRF:PACKAGE:RSL
- !
- MODULE module_dm
- USE module_machine
- USE module_wrf_error
- USE module_driver_constants
- ! USE module_comm_dm
- IMPLICIT NONE
- #if ( NMM_CORE == 1 ) || defined( WRF_CHEM )
- INTEGER, PARAMETER :: max_halo_width = 6
- #else
- INTEGER, PARAMETER :: max_halo_width = 6 ! 5
- #endif
- INTEGER :: ips_save, ipe_save, jps_save, jpe_save, itrace
- INTEGER ntasks, ntasks_y, ntasks_x, mytask, mytask_x, mytask_y
- INTEGER local_communicator, local_communicator_periodic, local_iocommunicator
- INTEGER local_communicator_x, local_communicator_y ! subcommunicators for rows and cols of mesh
- LOGICAL :: dm_debug_flag = .FALSE.
- INTERFACE wrf_dm_maxval
- #if ( defined(PROMOTE_FLOAT) || ( RWORDSIZE == DWORDSIZE ) )
- MODULE PROCEDURE wrf_dm_maxval_real , wrf_dm_maxval_integer
- #else
- MODULE PROCEDURE wrf_dm_maxval_real , wrf_dm_maxval_integer, wrf_dm_maxval_doubleprecision
- #endif
- END INTERFACE
- INTERFACE wrf_dm_minval ! gopal's doing
- #if ( defined(PROMOTE_FLOAT) || ( RWORDSIZE == DWORDSIZE ) )
- MODULE PROCEDURE wrf_dm_minval_real , wrf_dm_minval_integer
- #else
- MODULE PROCEDURE wrf_dm_minval_real , wrf_dm_minval_integer, wrf_dm_minval_doubleprecision
- #endif
- END INTERFACE
- CONTAINS
!> Factor P tasks into a 2-d processor mesh MINM x MINN, choosing the
!> factorization that minimizes |M-N| (closest to square) subject to the
!> minimum sizes PROCMIN_M and PROCMIN_N in each dimension.
!> Aborts through wrf_error_fatal when no factorization of P satisfies
!> the minima (e.g. P prime and both minima > 1).
SUBROUTINE MPASPECT( P, MINM, MINN, PROCMIN_M, PROCMIN_N )
   IMPLICIT NONE
   INTEGER P, M, N, MINI, MINM, MINN, PROCMIN_M, PROCMIN_N
   ! MINI starts above any attainable |M-N| (which is at most P-1), so the
   ! first feasible factorization found is always accepted.
   MINI = 2*P
   MINM = 1
   MINN = P
   DO M = 1, P
      IF ( MOD( P, M ) .EQ. 0 ) THEN
         N = P / M
         ! keep this (M,N) if it is squarer than the best so far and
         ! still honors both minimum dimensions
         IF ( ABS(M-N) .LT. MINI &
              .AND. M .GE. PROCMIN_M &
              .AND. N .GE. PROCMIN_N &
            ) THEN
            MINI = ABS(M-N)
            MINM = M
            MINN = N
         ENDIF
      ENDIF
   ENDDO
   ! no acceptable factorization found: report inputs and stop
   IF ( MINM .LT. PROCMIN_M .OR. MINN .LT. PROCMIN_N ) THEN
      WRITE( wrf_err_message , * )'MPASPECT: UNABLE TO GENERATE PROCESSOR MESH.  STOPPING.'
      CALL wrf_message ( TRIM ( wrf_err_message ) )
      WRITE( wrf_err_message , * )' PROCMIN_M ', PROCMIN_M
      CALL wrf_message ( TRIM ( wrf_err_message ) )
      WRITE( wrf_err_message , * )' PROCMIN_N ', PROCMIN_N
      CALL wrf_message ( TRIM ( wrf_err_message ) )
      WRITE( wrf_err_message , * )' P         ', P
      CALL wrf_message ( TRIM ( wrf_err_message ) )
      WRITE( wrf_err_message , * )' MINM      ', MINM
      CALL wrf_message ( TRIM ( wrf_err_message ) )
      WRITE( wrf_err_message , * )' MINN      ', MINN
      CALL wrf_message ( TRIM ( wrf_err_message ) )
      CALL wrf_error_fatal ( 'module_dm: mpaspect' )
   ENDIF
   RETURN
END SUBROUTINE MPASPECT
!> Determine the 2-d task mesh (ntasks_x by ntasks_y) for ntasks MPI tasks.
!> Honors nproc_x / nproc_y from the namelist when set (-1 means unset);
!> otherwise computes a near-square mesh with MPASPECT.
SUBROUTINE compute_mesh( ntasks , ntasks_x, ntasks_y )
   IMPLICIT NONE
   INTEGER, INTENT(IN)  :: ntasks
   INTEGER, INTENT(OUT) :: ntasks_x, ntasks_y
   CALL nl_get_nproc_x ( 1, ntasks_x )
   CALL nl_get_nproc_y ( 1, ntasks_y )
   ! check if user has specified in the namelist
   IF ( ntasks_x .GT. 0 .OR. ntasks_y .GT. 0 ) THEN
      ! if only ntasks_x is specified then make it 1-d decomp in i
      IF ( ntasks_x .GT. 0 .AND. ntasks_y .EQ. -1 ) THEN
         ntasks_y = ntasks / ntasks_x
      ! if only ntasks_y is specified then make it 1-d decomp in j
      ELSE IF ( ntasks_x .EQ. -1 .AND. ntasks_y .GT. 0 ) THEN
         ntasks_x = ntasks / ntasks_y
      ENDIF
      ! make sure user knows what they're doing: the product must use
      ! every task exactly (also catches non-divisible cases above,
      ! where the integer division truncated)
      IF ( ntasks_x * ntasks_y .NE. ntasks ) THEN
         WRITE( wrf_err_message , * )'WRF_DM_INITIALIZE (RSL_LITE): nproc_x * nproc_y in namelist ne ',ntasks
         CALL wrf_error_fatal ( wrf_err_message )
      ENDIF
   ELSE
      ! When neither is specified, work out mesh with MPASPECT.
      ! The trailing 1,1 arguments are the minimum tasks per dimension
      ! (PROCMIN_M, PROCMIN_N), so any factorization is acceptable.
      CALL mpaspect ( ntasks, ntasks_x, ntasks_y, 1, 1 )
   ENDIF
END SUBROUTINE compute_mesh
!> Initialize the distributed-memory layer: size the 2-d task mesh,
!> create the cartesian (periodic and non-periodic) communicators, and
!> split row/column sub-communicators for the transpose scheme.
!> Sets the module-level mesh state: ntasks, ntasks_x/y, mytask,
!> mytask_x/y, local_communicator(_periodic/_x/_y).
!> With STUBMPI defined, degenerates to a single-task configuration.
SUBROUTINE wrf_dm_initialize
   IMPLICIT NONE
#ifndef STUBMPI
   INCLUDE 'mpif.h'
   INTEGER :: local_comm, local_comm2, new_local_comm, group, newgroup, p, p1, ierr
   INTEGER, ALLOCATABLE, DIMENSION(:) :: ranks
   INTEGER comdup
   INTEGER, DIMENSION(2) :: dims, coords
   LOGICAL, DIMENSION(2) :: isperiodic
   LOGICAL :: reorder_mesh
   CALL wrf_get_dm_communicator ( local_comm )
   CALL mpi_comm_size( local_comm, ntasks, ierr )
   CALL nl_get_reorder_mesh( 1, reorder_mesh )
   CALL compute_mesh( ntasks, ntasks_x, ntasks_y )
   WRITE( wrf_err_message , * )'Ntasks in X ',ntasks_x,', ntasks in Y ',ntasks_y
   CALL wrf_message( wrf_err_message )
   CALL mpi_comm_rank( local_comm, mytask, ierr )
   ! extra code to reorder the communicator 20051212jm
   ! Optionally permute ranks so that consecutive ranks run down columns
   ! of the mesh instead of across rows (namelist reorder_mesh).
   IF ( reorder_mesh ) THEN
      ALLOCATE (ranks(ntasks))
      CALL mpi_comm_dup ( local_comm , local_comm2, ierr )
      CALL mpi_comm_group ( local_comm2, group, ierr )
      DO p1=1,ntasks
         p = p1 - 1
         ! column-major renumbering of rank p in an ntasks_x x ntasks_y mesh
         ranks(p1) = mod( p , ntasks_x ) * ntasks_y + p / ntasks_x
      ENDDO
      CALL mpi_group_incl( group, ntasks, ranks, newgroup, ierr )
      DEALLOCATE (ranks)
      CALL mpi_comm_create( local_comm2, newgroup, new_local_comm , ierr )
   ELSE
      new_local_comm = local_comm
   ENDIF
   ! end extra code to reorder the communicator 20051212jm
   ! Non-periodic cartesian communicator (used for most halo exchanges)
   dims(1) = ntasks_y  ! rows
   dims(2) = ntasks_x  ! columns
   isperiodic(1) = .false.
   isperiodic(2) = .false.
   CALL mpi_cart_create( new_local_comm, 2, dims, isperiodic, .false., local_communicator, ierr )
   ! Periodic cartesian communicator (for periodic lateral boundaries)
   dims(1) = ntasks_y  ! rows
   dims(2) = ntasks_x  ! columns
   isperiodic(1) = .true.
   isperiodic(2) = .true.
   CALL mpi_cart_create( new_local_comm, 2, dims, isperiodic, .false., local_communicator_periodic, ierr )
   ! debug
   CALL mpi_comm_rank( local_communicator_periodic, mytask, ierr )
   CALL mpi_cart_coords( local_communicator_periodic, mytask, 2, coords, ierr )
   ! write(0,*)'periodic coords ',mytask, coords
   ! The non-periodic communicator's rank/coords are the ones retained
   ! in the module state below (this overwrites mytask from the debug query).
   CALL mpi_comm_rank( local_communicator, mytask, ierr )
   CALL mpi_cart_coords( local_communicator, mytask, 2, coords, ierr )
   ! write(0,*)'non periodic coords ',mytask, coords
   mytask_x = coords(2)   ! col task (x)
   mytask_y = coords(1)   ! row task (y)
   CALL nl_set_nproc_x ( 1, ntasks_x )
   CALL nl_set_nproc_y ( 1, ntasks_y )
   ! 20061228 set up subcommunicators for processors in X, Y coords of mesh
   ! note that local_comm_x has all the processors in a row (X=0:nproc_x-1);
   ! in other words, local_comm_x has all the processes with the same rank in Y
   CALL MPI_Comm_dup( new_local_comm, comdup, ierr )
   IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_Comm_dup fails in 20061228 mod')
   ! split key is mytask so relative ordering within each row/column is preserved
   CALL MPI_Comm_split(comdup,mytask_y,mytask,local_communicator_x,ierr)
   IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_Comm_split fails for x in 20061228 mod')
   CALL MPI_Comm_split(comdup,mytask_x,mytask,local_communicator_y,ierr)
   IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_Comm_split fails for y in 20061228 mod')
   ! end 20061228
   CALL wrf_set_dm_communicator ( local_communicator )
#else
   ! serial (stub MPI) configuration: one task at mesh origin
   ntasks = 1
   ntasks_x = 1
   ntasks_y = 1
   mytask = 0
   mytask_x = 0
   mytask_y = 0
#endif
   RETURN
END SUBROUTINE wrf_dm_initialize
!> Return the widest halo (in grid points) a domain may request.
!> Nested domains get three extra points beyond the coarse-domain maximum.
SUBROUTINE get_dm_max_halo_width( id, width )
   IMPLICIT NONE
   INTEGER, INTENT(IN)  :: id
   INTEGER, INTENT(OUT) :: width
   IF ( id .NE. 1 ) THEN
      ! nested domain: allow extra room for nesting communication
      width = max_halo_width + 3
   ELSE
      ! id 1 is the coarse (top-level) domain
      width = max_halo_width
   ENDIF
   RETURN
END SUBROUTINE get_dm_max_halo_width
!> Compute patch (p), memory (m), and domain (d) dimension bounds for
!> domain id in all three model dimensions, for the primary xy
!> decomposition and for the x- and y- transpose decompositions.
!> For nests (id > 1) this also creates and dimensions the intermediate
!> domain that mediates scatter/gather communication with the parent.
!> The sd/ed inputs are in model_data_order; internally everything is
!> mapped to i/j/k and mapped back on output through the SELECT CASE
!> blocks keyed on model_data_order.
SUBROUTINE patch_domain_rsl_lite( id , parent, parent_id, &
                                  sd1 , ed1 , sp1 , ep1 , sm1 , em1 , &
                                  sd2 , ed2 , sp2 , ep2 , sm2 , em2 , &
                                  sd3 , ed3 , sp3 , ep3 , sm3 , em3 , &
                                        sp1x , ep1x , sm1x , em1x , &
                                        sp2x , ep2x , sm2x , em2x , &
                                        sp3x , ep3x , sm3x , em3x , &
                                        sp1y , ep1y , sm1y , em1y , &
                                        sp2y , ep2y , sm2y , em2y , &
                                        sp3y , ep3y , sm3y , em3y , &
                                  bdx , bdy )
   USE module_domain, ONLY : domain, head_grid, find_grid_by_id, alloc_space_field
   IMPLICIT NONE
   INTEGER, INTENT(IN)   :: sd1 , ed1 , sd2 , ed2 , sd3 , ed3 , bdx , bdy
   INTEGER, INTENT(OUT)  :: sp1 , ep1 , sp2 , ep2 , sp3 , ep3 , &
                            sm1 , em1 , sm2 , em2 , sm3 , em3
   INTEGER, INTENT(OUT)  :: sp1x , ep1x , sp2x , ep2x , sp3x , ep3x , &
                            sm1x , em1x , sm2x , em2x , sm3x , em3x
   INTEGER, INTENT(OUT)  :: sp1y , ep1y , sp2y , ep2y , sp3y , ep3y , &
                            sm1y , em1y , sm2y , em2y , sm3y , em3y
   INTEGER, INTENT(IN)   :: id, parent_id
   TYPE(domain),POINTER  :: parent
   ! Local variables
   ! i/j/k-space images of the dimension-ordered arguments, for the
   ! xy decomposition and the x/y transpose decompositions
   INTEGER               :: ids, ide, jds, jde, kds, kde
   INTEGER               :: ims, ime, jms, jme, kms, kme
   INTEGER               :: ips, ipe, jps, jpe, kps, kpe
   INTEGER               :: imsx, imex, jmsx, jmex, kmsx, kmex
   INTEGER               :: ipsx, ipex, jpsx, jpex, kpsx, kpex
   INTEGER               :: imsy, imey, jmsy, jmey, kmsy, kmey
   INTEGER               :: ipsy, ipey, jpsy, jpey, kpsy, kpey
   ! c_* variables are the corresponding bounds for the intermediate
   ! (coarse-resolution) domain associated with a nest
   INTEGER               :: c_sd1 , c_ed1 , c_sd2 , c_ed2 , c_sd3 , c_ed3
   INTEGER               :: c_sp1 , c_ep1 , c_sp2 , c_ep2 , c_sp3 , c_ep3 , &
                            c_sm1 , c_em1 , c_sm2 , c_em2 , c_sm3 , c_em3
   INTEGER               :: c_sp1x , c_ep1x , c_sp2x , c_ep2x , c_sp3x , c_ep3x , &
                            c_sm1x , c_em1x , c_sm2x , c_em2x , c_sm3x , c_em3x
   INTEGER               :: c_sp1y , c_ep1y , c_sp2y , c_ep2y , c_sp3y , c_ep3y , &
                            c_sm1y , c_em1y , c_sm2y , c_em2y , c_sm3y , c_em3y
   INTEGER               :: c_ids, c_ide, c_jds, c_jde, c_kds, c_kde
   INTEGER               :: c_ims, c_ime, c_jms, c_jme, c_kms, c_kme
   INTEGER               :: c_ips, c_ipe, c_jps, c_jpe, c_kps, c_kpe
   INTEGER               :: idim , jdim , kdim , rem , a, b
   INTEGER               :: i, j, ni, nj, Px, Py, P
   INTEGER               :: parent_grid_ratio, i_parent_start, j_parent_start
   INTEGER               :: shw
   INTEGER               :: idim_cd, jdim_cd, ierr
   INTEGER               :: max_dom
   TYPE(domain), POINTER :: intermediate_grid
   TYPE(domain), POINTER :: nest_grid
   CHARACTER*256         :: mess
   INTEGER               :: parent_max_halo_width
   INTEGER               :: thisdomain_max_halo_width
   ! Map dimension-ordered domain bounds into i/j/k space
   SELECT CASE ( model_data_order )
      ! need to finish other cases
      CASE ( DATA_ORDER_ZXY )
         ids = sd2 ; ide = ed2
         jds = sd3 ; jde = ed3
         kds = sd1 ; kde = ed1
      CASE ( DATA_ORDER_XYZ )
         ids = sd1 ; ide = ed1
         jds = sd2 ; jde = ed2
         kds = sd3 ; kde = ed3
      CASE ( DATA_ORDER_XZY )
         ids = sd1 ; ide = ed1
         jds = sd3 ; jde = ed3
         kds = sd2 ; kde = ed2
      CASE ( DATA_ORDER_YXZ)
         ids = sd2 ; ide = ed2
         jds = sd1 ; jde = ed1
         kds = sd3 ; kde = ed3
   END SELECT
   CALL nl_get_max_dom( 1 , max_dom )
   CALL get_dm_max_halo_width( id , thisdomain_max_halo_width )
   IF ( id .GT. 1 ) THEN
      CALL get_dm_max_halo_width( parent%id , parent_max_halo_width )
   ENDIF
   ! Compute this domain's own patch/memory bounds in all decompositions
   CALL compute_memory_dims_rsl_lite ( id, thisdomain_max_halo_width, 0 , bdx, bdy, &
                                       ids,  ide,  jds,  jde,  kds,  kde, &
                                       ims,  ime,  jms,  jme,  kms,  kme, &
                                       imsx, imex, jmsx, jmex, kmsx, kmex, &
                                       imsy, imey, jmsy, jmey, kmsy, kmey, &
                                       ips,  ipe,  jps,  jpe,  kps,  kpe, &
                                       ipsx, ipex, jpsx, jpex, kpsx, kpex, &
                                       ipsy, ipey, jpsy, jpey, kpsy, kpey )
   ! ensure that the every parent domain point has a full set of nested points under it
   ! even at the borders. Do this by making sure the number of nest points is a multiple of
   ! the nesting ratio. Note that this is important mostly to the intermediate domain, which
   ! is the subject of the scatter gather comms with the parent
   IF ( id .GT. 1 ) THEN
      CALL nl_get_parent_grid_ratio( id, parent_grid_ratio )
      if ( mod(ime,parent_grid_ratio) .NE. 0 ) ime = ime + parent_grid_ratio - mod(ime,parent_grid_ratio)
      if ( mod(jme,parent_grid_ratio) .NE. 0 ) jme = jme + parent_grid_ratio - mod(jme,parent_grid_ratio)
   ENDIF
   ! Map i/j/k bounds back into the caller's dimension ordering
   SELECT CASE ( model_data_order )
      CASE ( DATA_ORDER_ZXY )
         sp2 = ips ; ep2 = ipe ; sm2 = ims ; em2 = ime
         sp3 = jps ; ep3 = jpe ; sm3 = jms ; em3 = jme
         sp1 = kps ; ep1 = kpe ; sm1 = kms ; em1 = kme
         sp2x = ipsx ; ep2x = ipex ; sm2x = imsx ; em2x = imex
         sp3x = jpsx ; ep3x = jpex ; sm3x = jmsx ; em3x = jmex
         sp1x = kpsx ; ep1x = kpex ; sm1x = kmsx ; em1x = kmex
         sp2y = ipsy ; ep2y = ipey ; sm2y = imsy ; em2y = imey
         sp3y = jpsy ; ep3y = jpey ; sm3y = jmsy ; em3y = jmey
         sp1y = kpsy ; ep1y = kpey ; sm1y = kmsy ; em1y = kmey
      CASE ( DATA_ORDER_ZYX )
         sp3 = ips ; ep3 = ipe ; sm3 = ims ; em3 = ime
         sp2 = jps ; ep2 = jpe ; sm2 = jms ; em2 = jme
         sp1 = kps ; ep1 = kpe ; sm1 = kms ; em1 = kme
         sp3x = ipsx ; ep3x = ipex ; sm3x = imsx ; em3x = imex
         sp2x = jpsx ; ep2x = jpex ; sm2x = jmsx ; em2x = jmex
         sp1x = kpsx ; ep1x = kpex ; sm1x = kmsx ; em1x = kmex
         sp3y = ipsy ; ep3y = ipey ; sm3y = imsy ; em3y = imey
         sp2y = jpsy ; ep2y = jpey ; sm2y = jmsy ; em2y = jmey
         sp1y = kpsy ; ep1y = kpey ; sm1y = kmsy ; em1y = kmey
      CASE ( DATA_ORDER_XYZ )
         sp1 = ips ; ep1 = ipe ; sm1 = ims ; em1 = ime
         sp2 = jps ; ep2 = jpe ; sm2 = jms ; em2 = jme
         sp3 = kps ; ep3 = kpe ; sm3 = kms ; em3 = kme
         sp1x = ipsx ; ep1x = ipex ; sm1x = imsx ; em1x = imex
         sp2x = jpsx ; ep2x = jpex ; sm2x = jmsx ; em2x = jmex
         sp3x = kpsx ; ep3x = kpex ; sm3x = kmsx ; em3x = kmex
         sp1y = ipsy ; ep1y = ipey ; sm1y = imsy ; em1y = imey
         sp2y = jpsy ; ep2y = jpey ; sm2y = jmsy ; em2y = jmey
         sp3y = kpsy ; ep3y = kpey ; sm3y = kmsy ; em3y = kmey
      CASE ( DATA_ORDER_YXZ)
         sp2 = ips ; ep2 = ipe ; sm2 = ims ; em2 = ime
         sp1 = jps ; ep1 = jpe ; sm1 = jms ; em1 = jme
         sp3 = kps ; ep3 = kpe ; sm3 = kms ; em3 = kme
         sp2x = ipsx ; ep2x = ipex ; sm2x = imsx ; em2x = imex
         sp1x = jpsx ; ep1x = jpex ; sm1x = jmsx ; em1x = jmex
         sp3x = kpsx ; ep3x = kpex ; sm3x = kmsx ; em3x = kmex
         sp2y = ipsy ; ep2y = ipey ; sm2y = imsy ; em2y = imey
         sp1y = jpsy ; ep1y = jpey ; sm1y = jmsy ; em1y = jmey
         sp3y = kpsy ; ep3y = kpey ; sm3y = kmsy ; em3y = kmey
      CASE ( DATA_ORDER_XZY )
         sp1 = ips ; ep1 = ipe ; sm1 = ims ; em1 = ime
         sp3 = jps ; ep3 = jpe ; sm3 = jms ; em3 = jme
         sp2 = kps ; ep2 = kpe ; sm2 = kms ; em2 = kme
         sp1x = ipsx ; ep1x = ipex ; sm1x = imsx ; em1x = imex
         sp3x = jpsx ; ep3x = jpex ; sm3x = jmsx ; em3x = jmex
         sp2x = kpsx ; ep2x = kpex ; sm2x = kmsx ; em2x = kmex
         sp1y = ipsy ; ep1y = ipey ; sm1y = imsy ; em1y = imey
         sp3y = jpsy ; ep3y = jpey ; sm3y = jmsy ; em3y = jmey
         sp2y = kpsy ; ep2y = kpey ; sm2y = kmsy ; em2y = kmey
      CASE ( DATA_ORDER_YZX )
         sp3 = ips ; ep3 = ipe ; sm3 = ims ; em3 = ime
         sp1 = jps ; ep1 = jpe ; sm1 = jms ; em1 = jme
         sp2 = kps ; ep2 = kpe ; sm2 = kms ; em2 = kme
         sp3x = ipsx ; ep3x = ipex ; sm3x = imsx ; em3x = imex
         sp1x = jpsx ; ep1x = jpex ; sm1x = jmsx ; em1x = jmex
         sp2x = kpsx ; ep2x = kpex ; sm2x = kmsx ; em2x = kmex
         sp3y = ipsy ; ep3y = ipey ; sm3y = imsy ; em3y = imey
         sp1y = jpsy ; ep1y = jpey ; sm1y = jmsy ; em1y = jmey
         sp2y = kpsy ; ep2y = kpey ; sm2y = kmsy ; em2y = kmey
   END SELECT
   ! Report coarse-domain decomposition
   IF ( id.EQ.1 ) THEN
      WRITE(wrf_err_message,*)'*************************************'
      CALL wrf_message( TRIM(wrf_err_message) )
      WRITE(wrf_err_message,*)'Parent domain'
      CALL wrf_message( TRIM(wrf_err_message) )
      WRITE(wrf_err_message,*)'ids,ide,jds,jde ',ids,ide,jds,jde
      CALL wrf_message( TRIM(wrf_err_message) )
      WRITE(wrf_err_message,*)'ims,ime,jms,jme ',ims,ime,jms,jme
      CALL wrf_message( TRIM(wrf_err_message) )
      WRITE(wrf_err_message,*)'ips,ipe,jps,jpe ',ips,ipe,jps,jpe
      CALL wrf_message( TRIM(wrf_err_message) )
      WRITE(wrf_err_message,*)'*************************************'
      CALL wrf_message( TRIM(wrf_err_message) )
   ENDIF
   ! For a nest: build the intermediate domain, a coarse-resolution
   ! region covering the nest's footprint in the parent, decomposed the
   ! same way as the nest so scatter/gather is local where possible.
   IF ( id .GT. 1 ) THEN
      CALL nl_get_shw( id, shw )
      CALL nl_get_i_parent_start( id , i_parent_start )
      CALL nl_get_j_parent_start( id , j_parent_start )
      CALL nl_get_parent_grid_ratio( id, parent_grid_ratio )
      ! nest extents and the k domain bounds in i/j/k space
      SELECT CASE ( model_data_order )
         CASE ( DATA_ORDER_ZXY )
            idim = ed2-sd2+1
            jdim = ed3-sd3+1
            kdim = ed1-sd1+1
            c_kds = sd1                ; c_kde = ed1
         CASE ( DATA_ORDER_ZYX )
            idim = ed3-sd3+1
            jdim = ed2-sd2+1
            kdim = ed1-sd1+1
            c_kds = sd1                ; c_kde = ed1
         CASE ( DATA_ORDER_XYZ )
            idim = ed1-sd1+1
            jdim = ed2-sd2+1
            kdim = ed3-sd3+1
            c_kds = sd3                ; c_kde = ed3
         CASE ( DATA_ORDER_YXZ)
            idim = ed2-sd2+1
            jdim = ed1-sd1+1
            kdim = ed3-sd3+1
            c_kds = sd3                ; c_kde = ed3
         CASE ( DATA_ORDER_XZY )
            idim = ed1-sd1+1
            jdim = ed3-sd3+1
            kdim = ed2-sd2+1
            c_kds = sd2                ; c_kde = ed2
         CASE ( DATA_ORDER_YZX )
            idim = ed3-sd3+1
            jdim = ed1-sd1+1
            kdim = ed2-sd2+1
            c_kds = sd2                ; c_kde = ed2
      END SELECT
      ! intermediate domain size: nest size in parent points plus a
      ! stencil-halo width (shw) ring on each side plus rounding slop
      idim_cd = idim / parent_grid_ratio + 1 + 2*shw + 1
      jdim_cd = jdim / parent_grid_ratio + 1 + 2*shw + 1
      c_ids = i_parent_start-shw ; c_ide = c_ids + idim_cd - 1
      c_jds = j_parent_start-shw ; c_jde = c_jds + jdim_cd - 1
      ! we want the intermediate domain to be decomposed the
      ! the same as the underlying nest. So try this:
      ! For each intermediate column, find the task owning the
      ! corresponding nest point; claim the column if it is ours.
      c_ips = -1
      nj = ( c_jds - j_parent_start ) * parent_grid_ratio + 1 + 1 ;
      ierr = 0
      DO i = c_ids, c_ide
         ni = ( i - i_parent_start ) * parent_grid_ratio + 1 + 1 ;
         CALL task_for_point ( ni, nj, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
                               1, 1, ierr )
         IF ( Px .EQ. mytask_x ) THEN
            c_ipe = i
            IF ( c_ips .EQ. -1 ) c_ips = i
         ENDIF
      ENDDO
      IF ( ierr .NE. 0 ) THEN
         CALL tfp_message(__FILE__,__LINE__)
      ENDIF
      ! no columns on this task: empty patch convention (start 0, end -1)
      IF (c_ips .EQ. -1 ) THEN
         c_ipe = -1
         c_ips = 0
      ENDIF
      ! Same ownership scan for intermediate rows
      c_jps = -1
      ni = ( c_ids - i_parent_start ) * parent_grid_ratio + 1 + 1 ;
      ierr = 0
      DO j = c_jds, c_jde
         nj = ( j - j_parent_start ) * parent_grid_ratio + 1 + 1 ;
         CALL task_for_point ( ni, nj, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
                               1, 1, ierr )
         IF ( Py .EQ. mytask_y ) THEN
            c_jpe = j
            IF ( c_jps .EQ. -1 ) c_jps = j
         ENDIF
      ENDDO
      IF ( ierr .NE. 0 ) THEN
         CALL tfp_message(__FILE__,__LINE__)
      ENDIF
      IF (c_jps .EQ. -1 ) THEN
         c_jpe = -1
         c_jps = 0
      ENDIF
      IF ( c_ips <= c_ipe ) THEN
         ! extend the patch dimensions out shw along edges of domain
         IF ( mytask_x .EQ. 0 ) THEN
            c_ips = c_ips - shw
         ENDIF
         IF ( mytask_x .EQ. ntasks_x-1 ) THEN
            c_ipe = c_ipe + shw
         ENDIF
         ! memory bounds: patch plus the larger of shw and the max halo,
         ! clipped to domain plus boundary width, with one extra point
         c_ims = max( c_ips - max(shw,thisdomain_max_halo_width), c_ids - bdx ) - 1
         c_ime = min( c_ipe + max(shw,thisdomain_max_halo_width), c_ide + bdx ) + 1
      ELSE
         c_ims = 0
         c_ime = 0
      ENDIF
      ! handle j dims
      IF ( c_jps <= c_jpe ) THEN
         ! extend the patch dimensions out shw along edges of domain
         IF ( mytask_y .EQ. 0 ) THEN
            c_jps = c_jps - shw
         ENDIF
         IF ( mytask_y .EQ. ntasks_y-1 ) THEN
            c_jpe = c_jpe + shw
         ENDIF
         ! NOTE(review): j memory bounds are clipped with bdx, not bdy —
         ! looks suspicious but preserved as-is; confirm against upstream WRF
         c_jms = max( c_jps - max(shw,thisdomain_max_halo_width), c_jds - bdx ) - 1
         c_jme = min( c_jpe + max(shw,thisdomain_max_halo_width), c_jde + bdx ) + 1
      ! handle k dims
      ELSE
         c_jms = 0
         c_jme = 0
      ENDIF
      ! k is undecomposed for the intermediate domain
      c_kps = 1
      c_kpe = c_kde
      c_kms = 1
      c_kme = c_kde
      ! Report nest and intermediate-domain decompositions
      WRITE(wrf_err_message,*)'*************************************'
      CALL wrf_message( TRIM(wrf_err_message) )
      WRITE(wrf_err_message,*)'Nesting domain'
      CALL wrf_message( TRIM(wrf_err_message) )
      WRITE(wrf_err_message,*)'ids,ide,jds,jde ',ids,ide,jds,jde
      CALL wrf_message( TRIM(wrf_err_message) )
      WRITE(wrf_err_message,*)'ims,ime,jms,jme ',ims,ime,jms,jme
      CALL wrf_message( TRIM(wrf_err_message) )
      WRITE(wrf_err_message,*)'ips,ipe,jps,jpe ',ips,ipe,jps,jpe
      CALL wrf_message( TRIM(wrf_err_message) )
      WRITE(wrf_err_message,*)'INTERMEDIATE domain'
      CALL wrf_message( TRIM(wrf_err_message) )
      WRITE(wrf_err_message,*)'ids,ide,jds,jde ',c_ids,c_ide,c_jds,c_jde
      CALL wrf_message( TRIM(wrf_err_message) )
      WRITE(wrf_err_message,*)'ims,ime,jms,jme ',c_ims,c_ime,c_jms,c_jme
      CALL wrf_message( TRIM(wrf_err_message) )
      WRITE(wrf_err_message,*)'ips,ipe,jps,jpe ',c_ips,c_ipe,c_jps,c_jpe
      CALL wrf_message( TRIM(wrf_err_message) )
      WRITE(wrf_err_message,*)'*************************************'
      CALL wrf_message( TRIM(wrf_err_message) )
      ! Map the intermediate-domain i/j/k bounds back to dimension order
      SELECT CASE ( model_data_order )
         CASE ( DATA_ORDER_ZXY )
            c_sd2 = c_ids ; c_ed2 = c_ide ; c_sp2 = c_ips ; c_ep2 = c_ipe ; c_sm2 = c_ims ; c_em2 = c_ime
            c_sd3 = c_jds ; c_ed3 = c_jde ; c_sp3 = c_jps ; c_ep3 = c_jpe ; c_sm3 = c_jms ; c_em3 = c_jme
            c_sd1 = c_kds ; c_ed1 = c_kde ; c_sp1 = c_kps ; c_ep1 = c_kpe ; c_sm1 = c_kms ; c_em1 = c_kme
         CASE ( DATA_ORDER_ZYX )
            c_sd3 = c_ids ; c_ed3 = c_ide ; c_sp3 = c_ips ; c_ep3 = c_ipe ; c_sm3 = c_ims ; c_em3 = c_ime
            c_sd2 = c_jds ; c_ed2 = c_jde ; c_sp2 = c_jps ; c_ep2 = c_jpe ; c_sm2 = c_jms ; c_em2 = c_jme
            c_sd1 = c_kds ; c_ed1 = c_kde ; c_sp1 = c_kps ; c_ep1 = c_kpe ; c_sm1 = c_kms ; c_em1 = c_kme
         CASE ( DATA_ORDER_XYZ )
            c_sd1 = c_ids ; c_ed1 = c_ide ; c_sp1 = c_ips ; c_ep1 = c_ipe ; c_sm1 = c_ims ; c_em1 = c_ime
            c_sd2 = c_jds ; c_ed2 = c_jde ; c_sp2 = c_jps ; c_ep2 = c_jpe ; c_sm2 = c_jms ; c_em2 = c_jme
            c_sd3 = c_kds ; c_ed3 = c_kde ; c_sp3 = c_kps ; c_ep3 = c_kpe ; c_sm3 = c_kms ; c_em3 = c_kme
         CASE ( DATA_ORDER_YXZ)
            c_sd2 = c_ids ; c_ed2 = c_ide ; c_sp2 = c_ips ; c_ep2 = c_ipe ; c_sm2 = c_ims ; c_em2 = c_ime
            c_sd1 = c_jds ; c_ed1 = c_jde ; c_sp1 = c_jps ; c_ep1 = c_jpe ; c_sm1 = c_jms ; c_em1 = c_jme
            c_sd3 = c_kds ; c_ed3 = c_kde ; c_sp3 = c_kps ; c_ep3 = c_kpe ; c_sm3 = c_kms ; c_em3 = c_kme
         CASE ( DATA_ORDER_XZY )
            c_sd1 = c_ids ; c_ed1 = c_ide ; c_sp1 = c_ips ; c_ep1 = c_ipe ; c_sm1 = c_ims ; c_em1 = c_ime
            c_sd3 = c_jds ; c_ed3 = c_jde ; c_sp3 = c_jps ; c_ep3 = c_jpe ; c_sm3 = c_jms ; c_em3 = c_jme
            c_sd2 = c_kds ; c_ed2 = c_kde ; c_sp2 = c_kps ; c_ep2 = c_kpe ; c_sm2 = c_kms ; c_em2 = c_kme
         CASE ( DATA_ORDER_YZX )
            c_sd3 = c_ids ; c_ed3 = c_ide ; c_sp3 = c_ips ; c_ep3 = c_ipe ; c_sm3 = c_ims ; c_em3 = c_ime
            c_sd1 = c_jds ; c_ed1 = c_jde ; c_sp1 = c_jps ; c_ep1 = c_jpe ; c_sm1 = c_jms ; c_em1 = c_jme
            c_sd2 = c_kds ; c_ed2 = c_kde ; c_sp2 = c_kps ; c_ep2 = c_kpe ; c_sm2 = c_kms ; c_em2 = c_kme
      END SELECT
      ! Allocate and wire up the intermediate grid structure
      ALLOCATE ( intermediate_grid )
      ALLOCATE ( intermediate_grid%parents( max_parents ) )
      ALLOCATE ( intermediate_grid%nests( max_nests ) )
      intermediate_grid%allocated=.false.
      NULLIFY( intermediate_grid%sibling )
      DO i = 1, max_nests
         NULLIFY( intermediate_grid%nests(i)%ptr )
      ENDDO
      NULLIFY (intermediate_grid%next)
      NULLIFY (intermediate_grid%same_level)
      NULLIFY (intermediate_grid%i_start)
      NULLIFY (intermediate_grid%j_start)
      NULLIFY (intermediate_grid%i_end)
      NULLIFY (intermediate_grid%j_end)
      intermediate_grid%id = id   ! these must be the same. Other parts of code depend on it (see gen_comms.c)
      intermediate_grid%num_nests = 0
      intermediate_grid%num_siblings = 0
      intermediate_grid%num_parents = 1
      intermediate_grid%max_tiles   = 0
      intermediate_grid%num_tiles_spec   = 0
      CALL find_grid_by_id ( id, head_grid, nest_grid )
      nest_grid%intermediate_grid => intermediate_grid  ! nest grid now has a pointer to this baby
      intermediate_grid%parents(1)%ptr => nest_grid     ! the intermediate grid considers nest its parent
      intermediate_grid%num_parents = 1
      intermediate_grid%is_intermediate = .TRUE.
      ! NOTE(review): the nids/nide/njds/njde values set in this SELECT
      ! CASE are unconditionally overwritten by the four assignments that
      ! follow it (nids=ids etc.) — the SELECT CASE is effectively dead;
      ! preserved as-is.  (Also, njde is assigned from sd3x/sd32-style
      ! *start* bounds here, which looks like a typo for the end bounds.)
      SELECT CASE ( model_data_order )
         CASE ( DATA_ORDER_ZXY )
            intermediate_grid%nids = nest_grid%sd32 ; intermediate_grid%njds = nest_grid%sd33
            intermediate_grid%nide = nest_grid%ed32 ; intermediate_grid%njde = nest_grid%sd33
         CASE ( DATA_ORDER_ZYX )
            intermediate_grid%nids = nest_grid%sd33 ; intermediate_grid%njds = nest_grid%sd32
            intermediate_grid%nide = nest_grid%ed33 ; intermediate_grid%njde = nest_grid%sd32
         CASE ( DATA_ORDER_XYZ )
            intermediate_grid%nids = nest_grid%sd31 ; intermediate_grid%njds = nest_grid%sd32
            intermediate_grid%nide = nest_grid%ed31 ; intermediate_grid%njde = nest_grid%sd32
         CASE ( DATA_ORDER_YXZ)
            intermediate_grid%nids = nest_grid%sd32 ; intermediate_grid%njds = nest_grid%sd31
            intermediate_grid%nide = nest_grid%ed32 ; intermediate_grid%njde = nest_grid%sd31
         CASE ( DATA_ORDER_XZY )
            intermediate_grid%nids = nest_grid%sd31 ; intermediate_grid%njds = nest_grid%sd33
            intermediate_grid%nide = nest_grid%ed31 ; intermediate_grid%njde = nest_grid%sd33
         CASE ( DATA_ORDER_YZX )
            intermediate_grid%nids = nest_grid%sd33 ; intermediate_grid%njds = nest_grid%sd31
            intermediate_grid%nide = nest_grid%ed33 ; intermediate_grid%njde = nest_grid%sd31
      END SELECT
      intermediate_grid%nids = ids
      intermediate_grid%nide = ide
      intermediate_grid%njds = jds
      intermediate_grid%njde = jde
      ! transpose decompositions are not used by the intermediate domain;
      ! give them minimal (degenerate) memory bounds
      c_sm1x = 1 ; c_em1x = 1 ; c_sm2x = 1 ; c_em2x = 1 ; c_sm3x = 1 ; c_em3x = 1
      c_sm1y = 1 ; c_em1y = 1 ; c_sm2y = 1 ; c_em2y = 1 ; c_sm3y = 1 ; c_em3y = 1
      intermediate_grid%sm31x                           = c_sm1x
      intermediate_grid%em31x                           = c_em1x
      intermediate_grid%sm32x                           = c_sm2x
      intermediate_grid%em32x                           = c_em2x
      intermediate_grid%sm33x                           = c_sm3x
      intermediate_grid%em33x                           = c_em3x
      intermediate_grid%sm31y                           = c_sm1y
      intermediate_grid%em31y                           = c_em1y
      intermediate_grid%sm32y                           = c_sm2y
      intermediate_grid%em32y                           = c_em2y
      intermediate_grid%sm33y                           = c_sm3y
      intermediate_grid%em33y                           = c_em3y
#if ( defined(SGIALTIX) && (! defined(MOVE_NESTS) ) ) || ( defined(FUJITSU_FX10) && (! defined(MOVE_NESTS) ) )
      ! allocate space for the intermediate domain
      CALL alloc_space_field ( intermediate_grid, intermediate_grid%id , 1, 2 , .TRUE., &   ! use same id as nest
                               c_sd1, c_ed1, c_sd2, c_ed2, c_sd3, c_ed3, &
                               c_sm1,  c_em1,  c_sm2,  c_em2,  c_sm3,  c_em3, &
                               c_sp1,  c_ep1,  c_sp2,  c_ep2,  c_sp3,  c_ep3, &
                               c_sm1x, c_em1x, c_sm2x, c_em2x, c_sm3x, c_em3x, &
                               c_sm1y, c_em1y, c_sm2y, c_em2y, c_sm3y, c_em3y, &
                               c_sm1x, c_em1x, c_sm2x, c_em2x, c_sm3x, c_em3x, &   ! x-xpose
                               c_sm1y, c_em1y, c_sm2y, c_em2y, c_sm3y, c_em3y  )   ! y-xpose
#endif
      ! record the intermediate domain's bounds on the grid structure
      intermediate_grid%sd31                            =   c_sd1
      intermediate_grid%ed31                            =   c_ed1
      intermediate_grid%sp31                            =   c_sp1
      intermediate_grid%ep31                            =   c_ep1
      intermediate_grid%sm31                            =   c_sm1
      intermediate_grid%em31                            =   c_em1
      intermediate_grid%sd32                            =   c_sd2
      intermediate_grid%ed32                            =   c_ed2
      intermediate_grid%sp32                            =   c_sp2
      intermediate_grid%ep32                            =   c_ep2
      intermediate_grid%sm32                            =   c_sm2
      intermediate_grid%em32                            =   c_em2
      intermediate_grid%sd33                            =   c_sd3
      intermediate_grid%ed33                            =   c_ed3
      intermediate_grid%sp33                            =   c_sp3
      intermediate_grid%ep33                            =   c_ep3
      intermediate_grid%sm33                            =   c_sm3
      intermediate_grid%em33                            =   c_em3
      CALL med_add_config_info_to_grid ( intermediate_grid )
      ! intermediate domain runs at the parent's resolution and time step
      intermediate_grid%dx = parent%dx
      intermediate_grid%dy = parent%dy
      intermediate_grid%dt = parent%dt
   ENDIF
   RETURN
END SUBROUTINE patch_domain_rsl_lite
- SUBROUTINE compute_memory_dims_rsl_lite ( &
- id , maxhalowidth , &
- shw , bdx, bdy , &
- ids, ide, jds, jde, kds, kde, &
- ims, ime, jms, jme, kms, kme, &
- imsx, imex, jmsx, jmex, kmsx, kmex, &
- imsy, imey, jmsy, jmey, kmsy, kmey, &
- ips, ipe, jps, jpe, kps, kpe, &
- ipsx, ipex, jpsx, jpex, kpsx, kpex, &
- ipsy, ipey, jpsy, jpey, kpsy, kpey )
- IMPLICIT NONE
- INTEGER, INTENT(IN) :: id , maxhalowidth
- INTEGER, INTENT(IN) :: shw, bdx, bdy
- INTEGER, INTENT(IN) :: ids, ide, jds, jde, kds, kde
- INTEGER, INTENT(OUT) :: ims, ime, jms, jme, kms, kme
- INTEGER, INTENT(OUT) :: imsx, imex, jmsx, jmex, kmsx, kmex
- INTEGER, INTENT(OUT) :: imsy, imey, jmsy, jmey, kmsy, kmey
- INTEGER, INTENT(OUT) :: ips, ipe, jps, jpe, kps, kpe
- INTEGER, INTENT(OUT) :: ipsx, ipex, jpsx, jpex, kpsx, kpex
- INTEGER, INTENT(OUT) :: ipsy, ipey, jpsy, jpey, kpsy, kpey
- INTEGER Px, Py, P, i, j, k, ierr
- #if ( ! NMM_CORE == 1 )
- ! xy decomposition
- ips = -1
- j = jds
- ierr = 0
- DO i = ids, ide
- CALL task_for_point ( i, j, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
- 1, 1, ierr )
- IF ( Px .EQ. mytask_x ) THEN
- ipe = i
- IF ( ips .EQ. -1 ) ips = i
- ENDIF
- ENDDO
- IF ( ierr .NE. 0 ) THEN
- CALL tfp_message(__FILE__,__LINE__)
- ENDIF
- ! handle setting the memory dimensions where there are no X elements assigned to this proc
- IF (ips .EQ. -1 ) THEN
- ipe = -1
- ips = 0
- ENDIF
- jps = -1
- i = ids
- ierr = 0
- DO j = jds, jde
- CALL task_for_point ( i, j, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
- 1, 1, ierr )
- IF ( Py .EQ. mytask_y ) THEN
- jpe = j
- IF ( jps .EQ. -1 ) jps = j
- ENDIF
- ENDDO
- IF ( ierr .NE. 0 ) THEN
- CALL tfp_message(__FILE__,__LINE__)
- ENDIF
- ! handle setting the memory dimensions where there are no Y elements assigned to this proc
- IF (jps .EQ. -1 ) THEN
- jpe = -1
- jps = 0
- ENDIF
- !begin: wig; 12-Mar-2008
- ! This appears redundant with the conditionals above, but we get cases with only
- ! one of the directions being set to "missing" when turning off extra processors.
- ! This may break the handling of setting only one of nproc_x or nproc_y via the namelist.
- IF (ipe .EQ. -1 .or. jpe .EQ. -1) THEN
- ipe = -1
- ips = 0
- jpe = -1
- jps = 0
- ENDIF
- !end: wig; 12-Mar-2008
- !
- ! description of transpose decomposition strategy for RSL LITE. 20061231jm
- !
- ! Here is the tranpose scheme that is implemented for RSL_LITE. Upper-case
- ! XY corresponds to the dimension of the processor mesh, lower-case xyz
- ! corresponds to grid dimension.
- !
- ! xy zy zx
- !
- ! XxYy <--> XzYy <--> XzYx <- note x decomposed over Y procs
- ! ^ ^
- ! | |
- ! +------------------+ <- this edge is costly; see below
- !
- ! The aim is to avoid all-to-all communication over whole
- ! communicator. Instead, when possible, use a transpose scheme that requires
- ! all-to-all within dimensional communicators; that is, communicators
- ! defined for the processes in a rank or column of the processor mesh. Note,
- ! however, it is not possible to create a ring of transposes between
- ! xy-yz-xz decompositions without at least one of the edges in the ring
- ! being fully all-to-all (in other words, one of the tranpose edges must
- ! rotate and not just transpose a plane of the model grid within the
- ! processor mesh). The issue is then, where should we put this costly edge
- ! in the tranpose scheme we chose? To avoid being completely arbitrary,
- ! we chose a scheme most natural for models that use parallel spectral
- ! transforms, where the costly edge is the one that goes from the xz to
- ! the xy decomposition. (May be implemented as just a two step transpose
- ! back through yz).
- !
- ! Additional notational convention, below. The 'x' or 'y' appended to the
- ! dimension start or end variable refers to which grid dimension is all
- ! on-processor in the given decomposition. That is ipsx and ipex are the
- ! start and end for the i-dimension in the zy decomposition where x is
- ! on-processor. ('z' is assumed for xy decomposition and not appended to
- ! the ips, ipe, etc. variable names).
- !
- ! XzYy decomposition
- kpsx = -1
- j = jds ;
- ierr = 0
- DO k = kds, kde
- CALL task_for_point ( k, j, kds, kde, jds, jde, ntasks_x, ntasks_y, Px, Py, &
- 1, 1, ierr )
- IF ( Px .EQ. mytask_x ) THEN
- kpex = k
- IF ( kpsx .EQ. -1 ) kpsx = k
- ENDIF
- ENDDO
- IF ( ierr .NE. 0 ) THEN
- CALL tfp_message(__FILE__,__LINE__)
- ENDIF
-
- ! handle case where no levels are assigned to this process
- ! no iterations. Do same for I and J. Need to handle memory alloc below.
- IF (kpsx .EQ. -1 ) THEN
- kpex = -1
- kpsx = 0
- ENDIF
- jpsx = -1
- k = kds ;
- ierr = 0
- DO j = jds, jde
- CALL task_for_point ( k, j, kds, kde, jds, jde, ntasks_x, ntasks_y, Px, Py, &
- 1, 1, ierr )
- IF ( Py .EQ. mytask_y ) THEN
- jpex = j
- IF ( jpsx .EQ. -1 ) jpsx = j
- ENDIF
- ENDDO
- IF ( ierr .NE. 0 ) THEN
- CALL tfp_message(__FILE__,__LINE__)
- ENDIF
- IF (jpsx .EQ. -1 ) THEN
- jpex = -1
- jpsx = 0
- ENDIF
- !begin: wig; 12-Mar-2008
- ! This appears redundant with the conditionals above, but we get cases with only
- ! one of the directions being set to "missing" when turning off extra processors.
- ! This may break the handling of setting only one of nproc_x or nproc_y via the namelist.
- IF (ipex .EQ. -1 .or. jpex .EQ. -1) THEN
- ipex = -1
- ipsx = 0
- jpex = -1
- jpsx = 0
- ENDIF
- !end: wig; 12-Mar-2008
- ! XzYx decomposition (note, x grid dim is decomposed over Y processor dim)
- kpsy = kpsx ! same as above
- kpey = kpex ! same as above
- ipsy = -1
- k = kds ;
- ierr = 0
- DO i = ids, ide
- CALL task_for_point ( i, k, ids, ide, kds, kde, ntasks_y, ntasks_x, Py, Px, &
- 1, 1, ierr ) ! x and y for proc mesh reversed
- IF ( Py .EQ. mytask_y ) THEN
- ipey = i
- IF ( ipsy .EQ. -1 ) ipsy = i
- ENDIF
- ENDDO
- IF ( ierr .NE. 0 ) THEN
- CALL tfp_message(__FILE__,__LINE__)
- ENDIF
- IF (ipsy .EQ. -1 ) THEN
- ipey = -1
- ipsy = 0
- ENDIF
- #else
- ! In case of NMM CORE, the domain only ever runs from ids..ide-1 and jds..jde-1 so
- ! adjust decomposition to reflect. 20051020 JM
- ips = -1
- j = jds
- ierr = 0
- DO i = ids, ide-1
- CALL task_for_point ( i, j, ids, ide-1, jds, jde-1, ntasks_x, ntasks_y, Px, Py, &
- 1, 1 , ierr )
- IF ( Px .EQ. mytask_x ) THEN
- ipe = i
- IF ( Px .EQ. ntasks_x-1 ) ipe = ipe + 1
- IF ( ips .EQ. -1 ) ips = i
- ENDIF
- ENDDO
- IF ( ierr .NE. 0 ) THEN
- CALL tfp_message(__FILE__,__LINE__)
- ENDIF
- jps = -1
- i = ids ;
- ierr = 0
- DO j = jds, jde-1
- CALL task_for_point ( i, j, ids, ide-1, jds, jde-1, ntasks_x, ntasks_y, Px, Py, &
- 1 , 1 , ierr )
- IF ( Py .EQ. mytask_y ) THEN
- jpe = j
- IF ( Py .EQ. ntasks_y-1 ) jpe = jpe + 1
- IF ( jps .EQ. -1 ) jps = j
- ENDIF
- ENDDO
- IF ( ierr .NE. 0 ) THEN
- CALL tfp_message(__FILE__,__LINE__)
- ENDIF
- #endif
- ! extend the patch dimensions out shw along edges of domain
- IF ( ips < ipe .and. jps < jpe ) THEN !wig; 11-Mar-2008
- IF ( mytask_x .EQ. 0 ) THEN
- ips = ips - shw
- ipsy = ipsy - shw
- ENDIF
- IF ( mytask_x .EQ. ntasks_x-1 ) THEN
- ipe = ipe + shw
- ipey = ipey + shw
- ENDIF
- IF ( mytask_y .EQ. 0 ) THEN
- jps = jps - shw
- jpsx = jpsx - shw
- ENDIF
- IF ( mytask_y .EQ. ntasks_y-1 ) THEN
- jpe = jpe + shw
- jpex = jpex + shw
- ENDIF
- ENDIF !wig; 11-Mar-2008
- kps = 1
- kpe = kde-kds+1
- kms = 1
- kme = kpe
- kmsx = kpsx
- kmex = kpex
- kmsy = kpsy
- kmey = kpey
- ! handle setting the memory dimensions where there are no levels assigned to this proc
- IF ( kpsx .EQ. 0 .AND. kpex .EQ. -1 ) THEN
- kmsx = 0
- kmex = 0
- ENDIF
- IF ( kpsy .EQ. 0 .AND. kpey .EQ. -1 ) THEN
- kmsy = 0
- kmey = 0
- ENDIF
- IF ( (jps .EQ. 0 .AND. jpe .EQ. -1) .OR. (ips .EQ. 0 .AND. ipe .EQ. -1) ) THEN
- ims = 0
- ime = 0
- ELSE
- ims = max( ips - max(shw,maxhalowidth), ids - bdx ) - 1
- ime = min( ipe + max(shw,maxhalowidth), ide + bdx ) + 1
- ENDIF
- imsx = ids
- imex = ide
- ipsx = imsx
- ipex = imex
- ! handle setting the memory dimensions where there are no Y elements assigned to this proc
- IF ( ipsy .EQ. 0 .AND. ipey .EQ. -1 ) THEN
- imsy = 0
- imey = 0
- ELSE
- imsy = ipsy
- imey = ipey
- ENDIF
- IF ( (jps .EQ. 0 .AND. jpe .EQ. -1) .OR. (ips .EQ. 0 .AND. ipe .EQ. -1) ) THEN
- jms = 0
- jme = 0
- ELSE
- jms = max( jps - max(shw,maxhalowidth), jds - bdy ) - 1
- jme = min( jpe + max(shw,maxhalowidth), jde + bdy ) + 1
- ENDIF
- jmsx = jpsx
- jmex = jpex
- jmsy = jds
- jmey = jde
- ! handle setting the memory dimensions where there are no X elements assigned to this proc
- IF ( jpsx .EQ. 0 .AND. jpex .EQ. -1 ) THEN
- jmsx = 0
- jmex = 0
- ELSE
- jpsy = jmsy
- jpey = jmey
- ENDIF
- END SUBROUTINE compute_memory_dims_rsl_lite
! internal, used below for switching the argument to MPI calls
! if reals are being autopromoted to doubles in the build of WRF
INTEGER function getrealmpitype()
! Returns the MPI datatype handle whose size matches the build-time
! WRF default REAL size (the RWORDSIZE cpp macro).  It queries MPI for
! the byte sizes of MPI_REAL and MPI_DOUBLE_PRECISION and picks the one
! that matches; if neither matches the run is aborted, since every
! real-valued collective in this module depends on this choice.
#ifndef STUBMPI
      IMPLICIT NONE
      INCLUDE 'mpif.h'
      INTEGER rtypesize, dtypesize, ierr
      CALL mpi_type_size ( MPI_REAL, rtypesize, ierr )
      CALL mpi_type_size ( MPI_DOUBLE_PRECISION, dtypesize, ierr )
      IF ( RWORDSIZE .EQ. rtypesize ) THEN
        getrealmpitype = MPI_REAL
      ELSE IF ( RWORDSIZE .EQ. dtypesize ) THEN
        getrealmpitype = MPI_DOUBLE_PRECISION
      ELSE
        CALL wrf_error_fatal ( 'RWORDSIZE or DWORDSIZE does not match any MPI type' )
      ENDIF
#else
! required dummy initialization for function that is never called
      getrealmpitype = 1
#endif
      RETURN
      END FUNCTION getrealmpitype
REAL FUNCTION wrf_dm_max_real ( inval )
! Collective reduction: return the maximum of inval taken over every
! task on the model communicator.  In a serial (STUBMPI) build the
! local value is already the global maximum.
      IMPLICIT NONE
#ifndef STUBMPI
      INCLUDE 'mpif.h'
      REAL inval
      REAL global_max
      INTEGER mpierr
      CALL mpi_allreduce ( inval, global_max, 1, getrealmpitype(), MPI_MAX, local_communicator, mpierr )
      wrf_dm_max_real = global_max
#else
      REAL inval
      wrf_dm_max_real = inval
#endif
END FUNCTION wrf_dm_max_real
REAL FUNCTION wrf_dm_min_real ( inval )
! Collective reduction: return the minimum of inval taken over every
! task on the model communicator.  In a serial (STUBMPI) build the
! local value is already the global minimum.
      IMPLICIT NONE
#ifndef STUBMPI
      INCLUDE 'mpif.h'
      REAL inval
      REAL global_min
      INTEGER mpierr
      CALL mpi_allreduce ( inval, global_min, 1, getrealmpitype(), MPI_MIN, local_communicator, mpierr )
      wrf_dm_min_real = global_min
#else
      REAL inval
      wrf_dm_min_real = inval
#endif
END FUNCTION wrf_dm_min_real
SUBROUTINE wrf_dm_min_reals ( inval, retval, n )
! Collective elementwise minimum of an n-element real array across all
! tasks on the model communicator; on return every task holds the
! reduced values in retval.  Serial (STUBMPI) builds just copy the
! input through.
      IMPLICIT NONE
      INTEGER n
      REAL inval(*)
      REAL retval(*)
#ifndef STUBMPI
      INCLUDE 'mpif.h'
      INTEGER mpierr
      CALL mpi_allreduce ( inval, retval, n, getrealmpitype(), MPI_MIN, local_communicator, mpierr )
#else
      retval(1:n) = inval(1:n)
#endif
END SUBROUTINE wrf_dm_min_reals
REAL FUNCTION wrf_dm_sum_real ( inval )
! Collective reduction: return the sum of inval over every task on the
! model communicator.  In a serial (STUBMPI) build the local value is
! the whole sum.
      IMPLICIT NONE
#ifndef STUBMPI
      INCLUDE 'mpif.h'
      REAL inval
      REAL global_sum
      INTEGER mpierr
      CALL mpi_allreduce ( inval, global_sum, 1, getrealmpitype(), MPI_SUM, local_communicator, mpierr )
      wrf_dm_sum_real = global_sum
#else
      REAL inval
      wrf_dm_sum_real = inval
#endif
END FUNCTION wrf_dm_sum_real
SUBROUTINE wrf_dm_sum_reals ( inval, retval )
! Collective elementwise sum of a real array across all tasks on the
! model communicator; retval receives the reduced values on every task.
! The count is taken from the shape of the assumed-shape input.
      IMPLICIT NONE
      REAL, INTENT(IN)  :: inval(:)
      REAL, INTENT(OUT) :: retval(:)
#ifndef STUBMPI
      INCLUDE 'mpif.h'
      INTEGER mpierr
      CALL mpi_allreduce ( inval, retval, SIZE(inval), getrealmpitype(), MPI_SUM, local_communicator, mpierr )
#else
      retval = inval
#endif
END SUBROUTINE wrf_dm_sum_reals
INTEGER FUNCTION wrf_dm_sum_integer ( inval )
! Collective reduction: return the integer sum of inval over every task
! on the model communicator.  Serial (STUBMPI) builds return the local
! value unchanged.
      IMPLICIT NONE
#ifndef STUBMPI
      INCLUDE 'mpif.h'
      INTEGER inval
      INTEGER global_sum
      INTEGER mpierr
      CALL mpi_allreduce ( inval, global_sum, 1, MPI_INTEGER, MPI_SUM, local_communicator, mpierr )
      wrf_dm_sum_integer = global_sum
#else
      INTEGER inval
      wrf_dm_sum_integer = inval
#endif
END FUNCTION wrf_dm_sum_integer
- #ifdef HWRF
SUBROUTINE wrf_dm_minloc_real ( val, lat, lon, z, idex, jdex )
! Collective operation.  Each task passes in its local minimum val
! along with the associated lat/lon/z and (i,j) index; on return every
! task holds the values belonging to the global minimum.  Implemented
! as two allgathers (indices, then the packed val/lat/lon/z quadruple)
! followed by an identical linear scan on every task, so no broadcast
! is needed afterwards.  Ties go to the lowest-ranked task because the
! scan replaces the running value only on a strictly smaller candidate.
IMPLICIT NONE
#ifndef STUBMPI
INCLUDE 'mpif.h'
REAL val, lat, lon, z
INTEGER idex, jdex, ierr
INTEGER dex(2)
REAL vll(4)
INTEGER dex_all (2,ntasks)
REAL vll_all(4,ntasks)
INTEGER i
! pack this task's candidate into contiguous buffers for the gathers
vll= (/ val, lat, lon, z /)
dex(1) = idex ; dex(2) = jdex
CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, local_communicator, ierr )
CALL mpi_allgather ( vll, 4, getrealmpitype(), vll_all , 4, getrealmpitype(), local_communicator, ierr )
! seed the scan with task 0's candidate, then sweep the rest
val = vll_all(1,1) ; lat = vll_all(2,1)
lon = vll_all(3,1) ; z = vll_all(4,1)
idex = dex_all(1,1) ; jdex = dex_all(2,1)
DO i = 2, ntasks
  IF ( vll_all(1,i) .LT. val ) THEN
    val = vll_all(1,i)
    lat = vll_all(2,i)
    lon = vll_all(3,i)
    z = vll_all(4,i)
    idex = dex_all(1,i)
    jdex = dex_all(2,i)
  ENDIF
ENDDO
#else
! serial build: the local candidate is already the global minimum
REAL val,lat,lon,z
INTEGER idex, jdex, ierr
#endif
END SUBROUTINE wrf_dm_minloc_real
SUBROUTINE wrf_dm_maxloc_real ( val, lat, lon, z, idex, jdex )
! Collective operation.  Mirror image of wrf_dm_minloc_real: each task
! passes its local maximum val with the associated lat/lon/z and (i,j)
! index, and on return every task holds the values belonging to the
! global maximum.  Two allgathers plus an identical per-task linear
! scan; ties resolve to the lowest-ranked task since the comparison is
! strictly greater-than.
IMPLICIT NONE
#ifndef STUBMPI
INCLUDE 'mpif.h'
REAL val, lat, lon, z
INTEGER idex, jdex, ierr
INTEGER dex(2)
REAL vll(4)
INTEGER dex_all (2,ntasks)
REAL vll_all(4,ntasks)
INTEGER i
! pack this task's candidate into contiguous buffers for the gathers
vll= (/ val, lat, lon, z /)
dex(1) = idex ; dex(2) = jdex
CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, local_communicator, ierr )
CALL mpi_allgather ( vll, 4, getrealmpitype(), vll_all , 4, getrealmpitype(), local_communicator, ierr )
! seed the scan with task 0's candidate, then sweep the rest
val = vll_all(1,1) ; lat = vll_all(2,1)
lon = vll_all(3,1) ; z = vll_all(4,1)
idex = dex_all(1,1) ; jdex = dex_all(2,1)
DO i = 2, ntasks
  IF ( vll_all(1,i) .GT. val ) THEN
    val = vll_all(1,i)
    lat = vll_all(2,i)
    lon = vll_all(3,i)
    z = vll_all(4,i)
    idex = dex_all(1,i)
    jdex = dex_all(2,i)
  ENDIF
ENDDO
#else
! serial build: the local candidate is already the global maximum
REAL val,lat,lon,z
INTEGER idex, jdex, ierr
#endif
END SUBROUTINE wrf_dm_maxloc_real
- #endif
INTEGER FUNCTION wrf_dm_bxor_integer ( inval )
! Collective reduction: bitwise exclusive-OR of inval across every task
! on the model communicator.  Serial (STUBMPI) builds return the local
! value unchanged.
      IMPLICIT NONE
#ifndef STUBMPI
      INCLUDE 'mpif.h'
      INTEGER inval
      INTEGER global_xor
      INTEGER mpierr
      CALL mpi_allreduce ( inval, global_xor, 1, MPI_INTEGER, MPI_BXOR, local_communicator, mpierr )
      wrf_dm_bxor_integer = global_xor
#else
      INTEGER inval
      wrf_dm_bxor_integer = inval
#endif
END FUNCTION wrf_dm_bxor_integer
SUBROUTINE wrf_dm_maxval_real ( val, idex, jdex )
! Collective operation.  Each task passes in its local maximum val and
! that value's (i,j) index; on return every task holds the global
! maximum and the indices from the task that owned it.  Two allgathers
! (index pair, then value) followed by an identical linear scan on
! every task; ties resolve to the lowest-ranked task because only a
! strictly greater candidate replaces the running maximum.
IMPLICIT NONE
#ifndef STUBMPI
INCLUDE 'mpif.h'
REAL val, val_all( ntasks )
INTEGER idex, jdex, ierr
INTEGER dex(2)
INTEGER dex_all (2,ntasks)
INTEGER i
! gather each task's candidate value and its index pair
dex(1) = idex ; dex(2) = jdex
CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, local_communicator, ierr )
CALL mpi_allgather ( val, 1, getrealmpitype(), val_all , 1, getrealmpitype(), local_communicator, ierr )
! seed with task 0's candidate, then sweep the remaining tasks
val = val_all(1)
idex = dex_all(1,1) ; jdex = dex_all(2,1)
DO i = 2, ntasks
  IF ( val_all(i) .GT. val ) THEN
    val = val_all(i)
    idex = dex_all(1,i)
    jdex = dex_all(2,i)
  ENDIF
ENDDO
#else
! serial build: the local candidate is already the global maximum
REAL val
INTEGER idex, jdex, ierr
#endif
END SUBROUTINE wrf_dm_maxval_real
- #ifndef PROMOTE_FLOAT
SUBROUTINE wrf_dm_maxval_doubleprecision ( val, idex, jdex )
! Double-precision variant of wrf_dm_maxval_real, compiled only when
! reals are not promoted (see the PROMOTE_FLOAT guard around this
! routine).  Same algorithm: allgather every task's candidate value and
! (i,j) index, then each task scans identically for the global maximum;
! ties resolve to the lowest-ranked task.
IMPLICIT NONE
# ifndef STUBMPI
INCLUDE 'mpif.h'
DOUBLE PRECISION val, val_all( ntasks )
INTEGER idex, jdex, ierr
INTEGER dex(2)
INTEGER dex_all (2,ntasks)
INTEGER i
! gather each task's candidate value and its index pair
dex(1) = idex ; dex(2) = jdex
CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, local_communicator, ierr )
CALL mpi_allgather ( val, 1, MPI_DOUBLE_PRECISION, val_all , 1, MPI_DOUBLE_PRECISION, local_communicator, ierr )
! seed with task 0's candidate, then sweep the remaining tasks
val = val_all(1)
idex = dex_all(1,1) ; jdex = dex_all(2,1)
DO i = 2, ntasks
  IF ( val_all(i) .GT. val ) THEN
    val = val_all(i)
    idex = dex_all(1,i)
    jdex = dex_all(2,i)
  ENDIF
ENDDO
# else
! serial build: the local candidate is already the global maximum
DOUBLE PRECISION val
INTEGER idex, jdex, ierr
# endif
END SUBROUTINE wrf_dm_maxval_doubleprecision
- #endif
SUBROUTINE wrf_dm_maxval_integer ( val, idex, jdex )
! Integer variant of wrf_dm_maxval_real.  Each task passes its local
! maximum val and that value's (i,j) index; on return every task holds
! the global maximum and the owning task's indices.  Allgather of the
! index pair and value, then an identical linear scan on every task;
! ties resolve to the lowest-ranked task.
IMPLICIT NONE
#ifndef STUBMPI
INCLUDE 'mpif.h'
INTEGER val, val_all( ntasks )
INTEGER idex, jdex, ierr
INTEGER dex(2)
INTEGER dex_all (2,ntasks)
INTEGER i
! gather each task's candidate value and its index pair
dex(1) = idex ; dex(2) = jdex
CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, local_communicator, ierr )
CALL mpi_allgather ( val, 1, MPI_INTEGER, val_all , 1, MPI_INTEGER, local_communicator, ierr )
! seed with task 0's candidate, then sweep the remaining tasks
val = val_all(1)
idex = dex_all(1,1) ; jdex = dex_all(2,1)
DO i = 2, ntasks
  IF ( val_all(i) .GT. val ) THEN
    val = val_all(i)
    idex = dex_all(1,i)
    jdex = dex_all(2,i)
  ENDIF
ENDDO
#else
! serial build: the local candidate is already the global maximum
INTEGER val
INTEGER idex, jdex
#endif
END SUBROUTINE wrf_dm_maxval_integer
- ! For HWRF some additional computation is required. This is gopal's doing
- SUBROUTINE wrf_dm_minval_real ( val, idex, jdex )
- IMPLICIT NONE
- REAL val, val_all( ntasks )
- INTEGER idex, jdex, ierr
- INTEGER dex(2)
- INTEGER dex_all (2,ntasks)
- ! <DESCRIPTION>
- ! Collective operation. Each processor calls passing a local value and its index; on return
- ! all processors are passed back th…
Large files files are truncated, but you can click here to view the full file