PageRenderTime 104ms CodeModel.GetById 33ms RepoModel.GetById 0ms app.codeStats 1ms

/wrfv2_fire/dyn_em/solve_em.F

http://github.com/jbeezley/wrf-fire
FORTRAN Legacy | 4014 lines | 2574 code | 461 blank | 979 comment | 12 complexity | b11609b3a9b0c8f951dfd34819ba8f0b MD5 | raw file
Possible License(s): AGPL-1.0
  1. !WRF:MEDIATION_LAYER:SOLVER
  2. SUBROUTINE solve_em ( grid , config_flags &
  3. ! Arguments generated from Registry
  4. #include "dummy_new_args.inc"
  5. !
  6. )
  7. ! Driver layer modules
  8. USE module_state_description
  9. USE module_domain, ONLY : &
  10. domain, get_ijk_from_grid, get_ijk_from_subgrid &
  11. ,domain_get_current_time, domain_get_start_time &
  12. ,domain_get_sim_start_time, domain_clock_get
  13. USE module_domain_type, ONLY : history_alarm, restart_alarm
  14. USE module_configure, ONLY : grid_config_rec_type
  15. USE module_driver_constants
  16. USE module_machine
  17. USE module_tiles, ONLY : set_tiles
  18. #ifdef DM_PARALLEL
  19. USE module_dm, ONLY : &
  20. local_communicator, mytask, ntasks, ntasks_x, ntasks_y &
  21. ,local_communicator_periodic, wrf_dm_maxval
  22. USE module_comm_dm, ONLY : &
  23. halo_em_a_sub,halo_em_b_sub,halo_em_c2_sub,halo_em_chem_e_3_sub &
  24. ,halo_em_chem_e_5_sub,halo_em_chem_e_7_sub,halo_em_chem_old_e_5_sub &
  25. ,halo_em_chem_old_e_7_sub,halo_em_c_sub,halo_em_d2_3_sub &
  26. ,halo_em_d2_5_sub,halo_em_d3_3_sub,halo_em_d3_5_sub,halo_em_d_sub &
  27. ,halo_em_e_3_sub,halo_em_e_5_sub,halo_em_hydro_uv_sub &
  28. ,halo_em_moist_e_3_sub,halo_em_moist_e_5_sub,halo_em_moist_e_7_sub &
  29. ,halo_em_moist_old_e_5_sub,halo_em_moist_old_e_7_sub &
  30. ,halo_em_scalar_e_3_sub,halo_em_scalar_e_5_sub,halo_em_scalar_e_7_sub &
  31. ,halo_em_scalar_old_e_5_sub,halo_em_scalar_old_e_7_sub,halo_em_tke_3_sub &
  32. ,halo_em_tke_5_sub,halo_em_tke_7_sub,halo_em_tke_advect_3_sub &
  33. ,halo_em_tke_advect_5_sub,halo_em_tke_old_e_5_sub &
  34. ,halo_em_tke_old_e_7_sub,halo_em_tracer_e_3_sub,halo_em_tracer_e_5_sub &
  35. ,halo_em_tracer_e_7_sub,halo_em_tracer_old_e_5_sub &
  36. ,halo_em_tracer_old_e_7_sub,period_bdy_em_a_sub &
  37. ,period_bdy_em_b3_sub,period_bdy_em_b_sub,period_bdy_em_chem2_sub &
  38. ,period_bdy_em_chem_old_sub,period_bdy_em_chem_sub,period_bdy_em_d3_sub &
  39. ,period_bdy_em_d_sub,period_bdy_em_e_sub,period_bdy_em_moist2_sub &
  40. ,period_bdy_em_moist_old_sub,period_bdy_em_moist_sub &
  41. ,period_bdy_em_scalar2_sub,period_bdy_em_scalar_old_sub &
  42. ,period_bdy_em_scalar_sub,period_bdy_em_tke_old_sub &
  43. ,period_bdy_em_tracer2_sub,period_bdy_em_tracer_old_sub &
  44. ,period_bdy_em_tracer_sub,period_em_da_sub,period_em_hydro_uv_sub
  45. #endif
  46. USE module_utility
  47. ! Mediation layer modules
  48. ! Model layer modules
  49. USE module_model_constants
  50. USE module_small_step_em
  51. USE module_em
  52. USE module_big_step_utilities_em
  53. USE module_bc
  54. USE module_bc_em
  55. USE module_solvedebug_em
  56. USE module_physics_addtendc
  57. USE module_diffusion_em
  58. USE module_polarfft
  59. USE module_microphysics_driver
  60. USE module_microphysics_zero_out
  61. USE module_fddaobs_driver
  62. USE module_diagnostics
  63. #ifdef WRF_CHEM
  64. USE module_input_chem_data
  65. USE module_input_tracer
  66. USE module_chem_utilities
  67. #endif
  68. USE module_first_rk_step_part1
  69. USE module_first_rk_step_part2
  70. USE module_llxy, ONLY : proj_cassini
  71. USE module_avgflx_em, ONLY : zero_avgflx, upd_avgflx
  72. IMPLICIT NONE
  73. ! Input data.
  74. TYPE(domain) , TARGET :: grid
  75. ! Definitions of dummy arguments to this routine (generated from Registry).
  76. #include "dummy_new_decl.inc"
  77. ! Structure that contains run-time configuration (namelist) data for domain
  78. TYPE (grid_config_rec_type) , INTENT(IN) :: config_flags
  79. ! Local data
  80. INTEGER :: k_start , k_end, its, ite, jts, jte
  81. INTEGER :: ids , ide , jds , jde , kds , kde , &
  82. ims , ime , jms , jme , kms , kme , &
  83. ips , ipe , jps , jpe , kps , kpe
  84. INTEGER :: sids , side , sjds , sjde , skds , skde , &
  85. sims , sime , sjms , sjme , skms , skme , &
  86. sips , sipe , sjps , sjpe , skps , skpe
  87. INTEGER :: imsx, imex, jmsx, jmex, kmsx, kmex, &
  88. ipsx, ipex, jpsx, jpex, kpsx, kpex, &
  89. imsy, imey, jmsy, jmey, kmsy, kmey, &
  90. ipsy, ipey, jpsy, jpey, kpsy, kpey
  91. INTEGER :: ij , iteration
  92. INTEGER :: im , num_3d_m , ic , num_3d_c , is , num_3d_s
  93. INTEGER :: loop
  94. INTEGER :: sz
  95. INTEGER :: iswater
  96. LOGICAL :: specified_bdy, channel_bdy
  97. REAL :: t_new
  98. ! Changes in tendency at this timestep
  99. real ,DIMENSION(grid%sm31:grid%em31,grid%sm32:grid%em32,grid%sm33:grid%em33) :: h_tendency, &
  100. z_tendency
  101. ! Whether advection should produce decoupled horizontal and vertical advective tendency outputs
  102. LOGICAL :: tenddec
  103. ! Flag for microphysics routines to produce diagnostic fields (e.g., radar reflectivity)
  104. LOGICAL :: diagflag
  105. #ifdef WRF_CHEM
  106. ! Index cross-referencing array for tendency accumulation
  107. INTEGER, DIMENSION( num_chem ) :: adv_ct_indices
  108. #endif
  109. ! storage for tendencies and decoupled state (generated from Registry)
  110. #include <i1_decl.inc>
  111. ! Previous time level of tracer arrays now defined as i1 variables;
  112. ! the state 4d arrays now redefined as 1-time level arrays in Registry.
  113. ! Benefit: save memory in nested runs, since only 1 domain is active at a
  114. ! time. Potential problem on stack-limited architectures: increases
  115. ! amount of data on program stack by making these automatic arrays.
  116. INTEGER :: rc
  117. INTEGER :: number_of_small_timesteps, rk_step
  118. INTEGER :: klevel,ijm,ijp,i,j,k,size1,size2 ! for prints/plots only
  119. INTEGER :: idum1, idum2, dynamics_option
  120. INTEGER :: rk_order, iwmax, jwmax, kwmax
  121. REAL :: dt_rk, dts_rk, dts, dtm, wmax
  122. REAL , ALLOCATABLE , DIMENSION(:) :: max_vert_cfl_tmp, max_horiz_cfl_tmp
  123. LOGICAL :: leapfrog
  124. INTEGER :: l,kte,kk
  125. LOGICAL :: f_flux ! flag for computing averaged fluxes in cu_gd
  126. REAL :: curr_secs
  127. INTEGER :: num_sound_steps
  128. INTEGER :: idex, jdex
  129. REAL :: max_msft
  130. REAL :: spacing
  131. INTEGER :: ii, jj !kk is above after l,kte
  132. REAL :: dclat
  133. INTEGER :: debug_level
  134. ! urban related variables
  135. INTEGER :: NUM_ROOF_LAYERS, NUM_WALL_LAYERS, NUM_ROAD_LAYERS ! urban
  136. TYPE(WRFU_TimeInterval) :: tmpTimeInterval
  137. REAL :: real_time
  138. LOGICAL :: adapt_step_flag
  139. LOGICAL :: fill_w_flag
  140. ! variables for flux-averaging code 20091223
  141. CHARACTER*256 :: message, message2
  142. REAL :: old_dt
  143. TYPE(WRFU_Time) :: temp_time, CurrTime, restart_time
  144. INTEGER, PARAMETER :: precision = 100
  145. INTEGER :: num, den
  146. TYPE(WRFU_TimeInterval) :: dtInterval, intervaltime,restartinterval
  147. ! Define benchmarking timers if -DBENCH is compiled
  148. #include <bench_solve_em_def.h>
  149. !----------------------
  150. ! Executable statements
  151. !----------------------
  152. !<DESCRIPTION>
  153. !<pre>
  154. ! solve_em is the main driver for advancing a grid a single timestep.
  155. ! It is a mediation-layer routine -> DM and SM calls are made where
  156. ! needed for parallel processing.
  157. !
  158. ! solve_em can integrate the equations using 3 time-integration methods
  159. !
  160. ! - 3rd order Runge-Kutta time integration (recommended)
  161. !
  162. ! - 2nd order Runge-Kutta time integration
  163. !
  164. ! The main sections of solve_em are
  165. !
  166. ! (1) Runge-Kutta (RK) loop
  167. !
  168. ! (2) Non-timesplit physics (i.e., tendencies computed for updating
  169. ! model state variables during the first RK sub-step (loop)
  170. !
  171. ! (3) Small (acoustic, sound) timestep loop - within the RK sub-steps
  172. !
  173. ! (4) scalar advance for moist and chem scalar variables (and TKE)
  174. ! within the RK sub-steps.
  175. !
  176. ! (5) time-split physics (after the RK step), currently this includes
  177. ! only microphysics
  178. !
  179. ! A more detailed description of these sections follows.
  180. !</pre>
  181. !</DESCRIPTION>
  182. ! Initialize timers if compiled with -DBENCH
  183. #include <bench_solve_em_init.h>
  184. ! set runge-kutta solver (2nd or 3rd order)
  185. dynamics_option = config_flags%rk_ord
  186. ! Obtain dimension information stored in the grid data structure.
  187. CALL get_ijk_from_grid ( grid , &
  188. ids, ide, jds, jde, kds, kde, &
  189. ims, ime, jms, jme, kms, kme, &
  190. ips, ipe, jps, jpe, kps, kpe, &
  191. imsx, imex, jmsx, jmex, kmsx, kmex, &
  192. ipsx, ipex, jpsx, jpex, kpsx, kpex, &
  193. imsy, imey, jmsy, jmey, kmsy, kmey, &
  194. ipsy, ipey, jpsy, jpey, kpsy, kpey )
  195. CALL get_ijk_from_subgrid ( grid , &
  196. sids, side, sjds, sjde, skds, skde, &
  197. sims, sime, sjms, sjme, skms, skme, &
  198. sips, sipe, sjps, sjpe, skps, skpe )
  199. k_start = kps
  200. k_end = kpe
  201. num_3d_m = num_moist
  202. num_3d_c = num_chem
  203. num_3d_s = num_scalar
  204. f_flux = config_flags%do_avgflx_cugd .EQ. 1
  205. ! Compute these starting and stopping locations for each tile and number of tiles.
  206. ! See: http://www.mmm.ucar.edu/wrf/WG2/topics/settiles
  207. CALL set_tiles ( grid , ids , ide , jds , jde , ips , ipe , jps , jpe )
  208. ! Max values of CFL for adaptive time step scheme
  209. ALLOCATE (max_vert_cfl_tmp(grid%num_tiles))
  210. ALLOCATE (max_horiz_cfl_tmp(grid%num_tiles))
  211. !
  212. ! Calculate current time in seconds since beginning of model run.
  213. ! Unfortunately, ESMF does not seem to have a way to return
  214. ! floating point seconds based on a TimeInterval. So, we will
  215. ! calculate it here--but, this is not clean!!
  216. !
  217. tmpTimeInterval = domain_get_current_time ( grid ) - domain_get_sim_start_time ( grid )
  218. curr_secs = real_time(tmpTimeInterval)
  219. old_dt = grid%dt ! store old time step for flux averaging code at end of RK loop
  220. !-----------------------------------------------------------------------------
  221. ! Adaptive time step: Added by T. Hutchinson, WSI 3/5/07
  222. ! In this call, we do the time-step adaptation and set time-dependent lateral
  223. ! boundary condition nudging weights.
  224. !
  225. IF ( (config_flags%use_adaptive_time_step) .and. &
  226. ( (.not. grid%nested) .or. &
  227. ( (grid%nested) .and. (abs(grid%dtbc) < 0.0001) ) ) )THEN
  228. CALL adapt_timestep(grid, config_flags)
  229. adapt_step_flag = .TRUE.
  230. ELSE
  231. adapt_step_flag = .FALSE.
  232. ENDIF
  233. ! End of adaptive time step modifications
  234. !-----------------------------------------------------------------------------
  235. grid%itimestep = grid%itimestep + 1
  236. IF (config_flags%polar) dclat = 90./REAL(jde-jds) !(0.5 * 180/ny)
  237. #ifdef WRF_CHEM
  238. kte=min(k_end,kde-1)
  239. # ifdef DM_PARALLEL
  240. if ( num_chem >= PARAM_FIRST_SCALAR ) then
  241. !-----------------------------------------------------------------------
  242. ! see matching halo calls below for stencils
  243. !--------------------------------------------------------------
  244. CALL wrf_debug ( 200 , ' call HALO_RK_CHEM' )
  245. IF ( config_flags%h_mom_adv_order <= 4 ) THEN
  246. # include "HALO_EM_CHEM_E_3.inc"
  247. IF( config_flags%progn > 0 ) THEN
  248. # include "HALO_EM_SCALAR_E_3.inc"
  249. ENDIF
  250. ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
  251. # include "HALO_EM_CHEM_E_5.inc"
  252. IF( config_flags%progn > 0 ) THEN
  253. # include "HALO_EM_SCALAR_E_5.inc"
  254. ENDIF
  255. ELSE
  256. WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
  257. CALL wrf_error_fatal(TRIM(wrf_err_message))
  258. ENDIF
  259. ENDIF
  260. if ( num_tracer >= PARAM_FIRST_SCALAR ) then
  261. !-----------------------------------------------------------------------
  262. ! see matching halo calls below for stencils
  263. !--------------------------------------------------------------
  264. CALL wrf_debug ( 200 , ' call HALO_RK_tracer' )
  265. IF ( config_flags%h_mom_adv_order <= 4 ) THEN
  266. # include "HALO_EM_TRACER_E_3.inc"
  267. ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
  268. # include "HALO_EM_TRACER_E_5.inc"
  269. ELSE
  270. WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
  271. CALL wrf_error_fatal(TRIM(wrf_err_message))
  272. ENDIF
  273. ENDIF
  274. # endif
  275. !--------------------------------------------------------------
  276. adv_ct_indices( : ) = 1
  277. IF ( config_flags%chemdiag == USECHEMDIAG ) THEN
  278. ! modify tendency list here
  279. ! note that the referencing direction here is opposite of that in chem_driver
  280. adv_ct_indices(p_co ) = p_advh_co
  281. adv_ct_indices(p_o3 ) = p_advh_o3
  282. adv_ct_indices(p_no ) = p_advh_no
  283. adv_ct_indices(p_no2 ) = p_advh_no2
  284. adv_ct_indices(p_hno3) = p_advh_hno3
  285. adv_ct_indices(p_iso ) = p_advh_iso
  286. adv_ct_indices(p_ho ) = p_advh_ho
  287. adv_ct_indices(p_ho2 ) = p_advh_ho2
  288. END IF
  289. #endif
  290. rk_order = config_flags%rk_ord
  291. IF ( grid%time_step_sound == 0 ) THEN
  292. ! This function will give 4 for 6*dx and 6 for 10*dx and returns even numbers only
  293. spacing = min(grid%dx, grid%dy)
  294. IF ( ( config_flags%use_adaptive_time_step ) .AND. ( config_flags%map_proj == PROJ_CASSINI ) ) THEN
  295. max_msft=MIN ( MAX(grid%max_msftx, grid%max_msfty) , &
  296. 1.0/COS(config_flags%fft_filter_lat*degrad) )
  297. num_sound_steps = max ( 2 * ( INT (300. * grid%dt / (spacing / max_msft) - 0.01 ) + 1 ), 4 )
  298. ELSE IF ( config_flags%use_adaptive_time_step ) THEN
  299. max_msft= MAX(grid%max_msftx, grid%max_msfty)
  300. num_sound_steps = max ( 2 * ( INT (300. * grid%dt / (spacing / max_msft) - 0.01 ) + 1 ), 4 )
  301. ELSE
  302. num_sound_steps = max ( 2 * ( INT (300. * grid%dt / spacing - 0.01 ) + 1 ), 4 )
  303. END IF
  304. WRITE(wrf_err_message,*)'grid spacing, dt, time_step_sound=',spacing,grid%dt,num_sound_steps
  305. CALL wrf_debug ( 50 , wrf_err_message )
  306. ELSE
  307. num_sound_steps = grid%time_step_sound
  308. ENDIF
  309. dts = grid%dt/float(num_sound_steps)
  310. IF (config_flags%use_adaptive_time_step) THEN
  311. CALL get_wrf_debug_level( debug_level )
  312. IF ((config_flags%time_step < 0) .AND. (debug_level.GE.50)) THEN
  313. #ifdef DM_PARALLEL
  314. CALL wrf_dm_maxval(grid%max_vert_cfl, idex, jdex)
  315. #endif
  316. WRITE(wrf_err_message,*)'variable dt, max horiz cfl, max vert cfl: ',&
  317. grid%dt, grid%max_horiz_cfl, grid%max_vert_cfl
  318. CALL wrf_debug ( 0 , wrf_err_message )
  319. ENDIF
  320. grid%max_cfl_val = 0
  321. grid%max_horiz_cfl = 0
  322. grid%max_vert_cfl = 0
  323. ENDIF
  324. ! setting bdy tendencies to zero for DFI if constant_bc = true
  325. !$OMP PARALLEL DO &
  326. !$OMP PRIVATE ( ij )
  327. DO ij = 1 , grid%num_tiles
  328. ! IF( config_flags%specified .AND. grid%dfi_opt .NE. DFI_NODFI &
  329. ! .AND. config_flags%constant_bc .AND. (grid%dfi_stage .EQ. DFI_BCK .OR. grid%dfi_stage .EQ. DFI_FWD) ) THEN
  330. IF( config_flags%specified .AND. config_flags%constant_bc ) THEN
  331. CALL zero_bdytend (grid%u_btxs,grid%u_btxe,grid%u_btys,grid%u_btye, &
  332. grid%v_btxs,grid%v_btxe,grid%v_btys,grid%v_btye, &
  333. grid%ph_btxs,grid%ph_btxe,grid%ph_btys,grid%ph_btye, &
  334. grid%t_btxs,grid%t_btxe,grid%t_btys,grid%t_btye, &
  335. grid%w_btxs,grid%w_btxe,grid%w_btys,grid%w_btye, &
  336. grid%mu_btxs,grid%mu_btxe,grid%mu_btys,grid%mu_btye, &
  337. moist_btxs,moist_btxe, &
  338. moist_btys,moist_btye, &
  339. grid%spec_bdy_width,num_3d_m, &
  340. ids,ide, jds,jde, kds,kde, &
  341. ims,ime, jms,jme, kms,kme, &
  342. ips,ipe, jps,jpe, kps,kpe, &
  343. grid%i_start(ij), grid%i_end(ij), &
  344. grid%j_start(ij), grid%j_end(ij), &
  345. k_start, k_end )
  346. ENDIF
  347. ENDDO
  348. !$OMP END PARALLEL DO
  349. !**********************************************************************
  350. !
  351. ! LET US BEGIN.......
  352. !
  353. !<DESCRIPTION>
  354. !<pre>
  355. ! (1) RK integration loop is named the "Runge_Kutta_loop:"
  356. !
  357. ! Predictor-corrector type time integration.
  358. ! Advection terms are evaluated at time t for the predictor step,
  359. ! and advection is re-evaluated with the latest predicted value for
  360. ! each succeeding time corrector step
  361. !
  362. ! 2nd order Runge Kutta (rk_order = 2):
  363. ! Step 1 is taken to the midpoint predictor, step 2 is the full step.
  364. !
  365. ! 3rd order Runge Kutta (rk_order = 3):
  366. ! Step 1 is taken to from t to dt/3, step 2 is from t to dt/2,
  367. ! and step 3 is from t to dt.
  368. !
  369. ! non-timesplit physics are evaluated during first RK step and
  370. ! these physics tendencies are stored for use in each RK pass.
  371. !</pre>
  372. !</DESCRIPTION>
  373. !**********************************************************************
  374. Runge_Kutta_loop: DO rk_step = 1, rk_order
  375. ! Set the step size and number of small timesteps for
  376. ! each part of the timestep
  377. dtm = grid%dt
  378. IF ( rk_order == 1 ) THEN
  379. write(wrf_err_message,*)' leapfrog removed, error exit for dynamics_option = ',dynamics_option
  380. CALL wrf_error_fatal( wrf_err_message )
  381. ELSE IF ( rk_order == 2 ) THEN ! 2nd order Runge-Kutta timestep
  382. IF ( rk_step == 1) THEN
  383. dt_rk = 0.5*grid%dt
  384. dts_rk = dts
  385. number_of_small_timesteps = num_sound_steps/2
  386. ELSE
  387. dt_rk = grid%dt
  388. dts_rk = dts
  389. number_of_small_timesteps = num_sound_steps
  390. ENDIF
  391. ELSE IF ( rk_order == 3 ) THEN ! third order Runge-Kutta
  392. IF ( rk_step == 1) THEN
  393. dt_rk = grid%dt/3.
  394. dts_rk = dt_rk
  395. number_of_small_timesteps = 1
  396. ELSE IF (rk_step == 2) THEN
  397. dt_rk = 0.5*grid%dt
  398. dts_rk = dts
  399. number_of_small_timesteps = num_sound_steps/2
  400. ELSE
  401. dt_rk = grid%dt
  402. dts_rk = dts
  403. number_of_small_timesteps = num_sound_steps
  404. ENDIF
  405. ELSE
  406. write(wrf_err_message,*)' unknown solver, error exit for dynamics_option = ',dynamics_option
  407. CALL wrf_error_fatal( wrf_err_message )
  408. END IF
  409. ! Ensure that polar meridional velocity is zero
  410. IF (config_flags%polar) THEN
  411. !$OMP PARALLEL DO &
  412. !$OMP PRIVATE ( ij )
  413. DO ij = 1 , grid%num_tiles
  414. CALL zero_pole ( grid%v_1, &
  415. ids, ide, jds, jde, kds, kde, &
  416. ims, ime, jms, jme, kms, kme, &
  417. grid%i_start(ij), grid%i_end(ij), &
  418. grid%j_start(ij), grid%j_end(ij), &
  419. k_start, k_end )
  420. CALL zero_pole ( grid%v_2, &
  421. ids, ide, jds, jde, kds, kde, &
  422. ims, ime, jms, jme, kms, kme, &
  423. grid%i_start(ij), grid%i_end(ij), &
  424. grid%j_start(ij), grid%j_end(ij), &
  425. k_start, k_end )
  426. END DO
  427. !$OMP END PARALLEL DO
  428. END IF
  429. !
  430. ! Time level t is in the *_2 variable in the first part
  431. ! of the step, and in the *_1 variable after the predictor.
  432. ! the latest predicted values are stored in the *_2 variables.
  433. !
  434. CALL wrf_debug ( 200 , ' call rk_step_prep ' )
  435. BENCH_START(step_prep_tim)
  436. !$OMP PARALLEL DO &
  437. !$OMP PRIVATE ( ij )
  438. DO ij = 1 , grid%num_tiles
  439. CALL rk_step_prep ( config_flags, rk_step, &
  440. grid%u_2, grid%v_2, grid%w_2, grid%t_2, grid%ph_2, grid%mu_2, &
  441. moist, &
  442. grid%ru, grid%rv, grid%rw, grid%ww, grid%php, grid%alt, grid%muu, grid%muv, &
  443. grid%mub, grid%mut, grid%phb, grid%pb, grid%p, grid%al, grid%alb, &
  444. cqu, cqv, cqw, &
  445. grid%msfux, grid%msfuy, grid%msfvx, grid%msfvx_inv, &
  446. grid%msfvy, grid%msftx, grid%msfty, &
  447. grid%fnm, grid%fnp, grid%dnw, grid%rdx, grid%rdy, &
  448. num_3d_m, &
  449. ids, ide, jds, jde, kds, kde, &
  450. ims, ime, jms, jme, kms, kme, &
  451. grid%i_start(ij), grid%i_end(ij), &
  452. grid%j_start(ij), grid%j_end(ij), &
  453. k_start, k_end )
  454. END DO
  455. !$OMP END PARALLEL DO
  456. BENCH_END(step_prep_tim)
  457. #ifdef DM_PARALLEL
  458. !-----------------------------------------------------------------------
  459. ! Stencils for patch communications (WCS, 29 June 2001)
  460. ! Note: the small size of this halo exchange reflects the
  461. ! fact that we are carrying the uncoupled variables
  462. ! as state variables in the mass coordinate model, as
  463. ! opposed to the coupled variables as in the height
  464. ! coordinate model.
  465. !
  466. ! * * * * *
  467. ! * * * * * * * * *
  468. ! * + * * + * * * + * *
  469. ! * * * * * * * * *
  470. ! * * * * *
  471. !
  472. ! 3D variables - note staggering! ru(X), rv(Y), ww(Z), php(Z)
  473. !
  474. ! ru x
  475. ! rv x
  476. ! ww x
  477. ! php x
  478. ! alt x
  479. ! ph_2 x
  480. ! phb x
  481. !
  482. ! the following are 2D (xy) variables
  483. !
  484. ! muu x
  485. ! muv x
  486. ! mut x
  487. !--------------------------------------------------------------
  488. # include "HALO_EM_A.inc"
  489. #endif
  490. ! set boundary conditions on variables
  491. ! from big_step_prep for use in big_step_proc
  492. #ifdef DM_PARALLEL
  493. # include "PERIOD_BDY_EM_A.inc"
  494. #endif
  495. BENCH_START(set_phys_bc_tim)
  496. !$OMP PARALLEL DO &
  497. !$OMP PRIVATE ( ij, ii, jj, kk )
  498. DO ij = 1 , grid%num_tiles
  499. CALL wrf_debug ( 200 , ' call rk_phys_bc_dry_1' )
  500. CALL rk_phys_bc_dry_1( config_flags, grid%ru, grid%rv, grid%rw, grid%ww, &
  501. grid%muu, grid%muv, grid%mut, grid%php, grid%alt, grid%p, &
  502. ids, ide, jds, jde, kds, kde, &
  503. ims, ime, jms, jme, kms, kme, &
  504. ips, ipe, jps, jpe, kps, kpe, &
  505. grid%i_start(ij), grid%i_end(ij), &
  506. grid%j_start(ij), grid%j_end(ij), &
  507. k_start, k_end )
  508. CALL set_physical_bc3d( grid%al, 'p', config_flags, &
  509. ids, ide, jds, jde, kds, kde, &
  510. ims, ime, jms, jme, kms, kme, &
  511. ips, ipe, jps, jpe, kps, kpe, &
  512. grid%i_start(ij), grid%i_end(ij), &
  513. grid%j_start(ij), grid%j_end(ij), &
  514. k_start , k_end )
  515. CALL set_physical_bc3d( grid%ph_2, 'w', config_flags, &
  516. ids, ide, jds, jde, kds, kde, &
  517. ims, ime, jms, jme, kms, kme, &
  518. ips, ipe, jps, jpe, kps, kpe, &
  519. grid%i_start(ij), grid%i_end(ij), &
  520. grid%j_start(ij), grid%j_end(ij), &
  521. k_start, k_end )
  522. IF (config_flags%polar) THEN
  523. !-------------------------------------------------------
  524. ! lat-lon grid pole-point (v) specification (extrapolate v, rv to the pole)
  525. !-------------------------------------------------------
  526. CALL pole_point_bc ( grid%v_1, &
  527. ids, ide, jds, jde, kds, kde, &
  528. ims, ime, jms, jme, kms, kme, &
  529. grid%i_start(ij), grid%i_end(ij), &
  530. grid%j_start(ij), grid%j_end(ij), &
  531. k_start, k_end )
  532. CALL pole_point_bc ( grid%v_2, &
  533. ids, ide, jds, jde, kds, kde, &
  534. ims, ime, jms, jme, kms, kme, &
  535. grid%i_start(ij), grid%i_end(ij), &
  536. grid%j_start(ij), grid%j_end(ij), &
  537. k_start, k_end )
  538. !-------------------------------------------------------
  539. ! end lat-lon grid pole-point (v) specification
  540. !-------------------------------------------------------
  541. ENDIF
  542. END DO
  543. !$OMP END PARALLEL DO
  544. BENCH_END(set_phys_bc_tim)
  545. rk_step_is_one : IF (rk_step == 1) THEN ! only need to initialize diffusion tendencies
  546. !<DESCRIPTION>
  547. !<pre>
  548. !(2) The non-timesplit physics begins with a call to "phy_prep"
  549. ! (which computes some diagnostic variables such as temperature,
  550. ! pressure, u and v at p points, etc). This is followed by
  551. ! calls to the physics drivers:
  552. !
  553. ! radiation,
  554. ! surface,
  555. ! pbl,
  556. ! cumulus,
  557. ! fddagd,
  558. ! 3D TKE and mixing.
  559. !</pre>
  560. !</DESCRIPTION>
  561. CALL first_rk_step_part1 ( grid, config_flags &
  562. , moist , moist_tend &
  563. , chem , chem_tend &
  564. , tracer, tracer_tend &
  565. , scalar , scalar_tend &
  566. , fdda3d, fdda2d &
  567. , ru_tendf, rv_tendf &
  568. , rw_tendf, t_tendf &
  569. , ph_tendf, mu_tendf &
  570. , tke_tend &
  571. , config_flags%use_adaptive_time_step &
  572. , curr_secs &
  573. , psim , psih , wspd , gz1oz0 &
  574. , br , chklowq &
  575. , cu_act_flag , hol , th_phy &
  576. , pi_phy , p_phy , grid%t_phy &
  577. , u_phy , v_phy &
  578. , dz8w , p8w , t8w , rho_phy , rho &
  579. , ids, ide, jds, jde, kds, kde &
  580. , ims, ime, jms, jme, kms, kme &
  581. , ips, ipe, jps, jpe, kps, kpe &
  582. , imsx, imex, jmsx, jmex, kmsx, kmex &
  583. , ipsx, ipex, jpsx, jpex, kpsx, kpex &
  584. , imsy, imey, jmsy, jmey, kmsy, kmey &
  585. , ipsy, ipey, jpsy, jpey, kpsy, kpey &
  586. , k_start , k_end &
  587. , f_flux &
  588. )
  589. #ifdef DM_PARALLEL
  590. IF ( config_flags%bl_pbl_physics == MYNNPBLSCHEME2 .OR. &
  591. config_flags%bl_pbl_physics == MYNNPBLSCHEME3 ) THEN
  592. # include "HALO_EM_SCALAR_E_5.inc"
  593. ENDIF
  594. #endif
  595. CALL first_rk_step_part2 ( grid, config_flags &
  596. , moist , moist_tend &
  597. , chem , chem_tend &
  598. , tracer, tracer_tend &
  599. , scalar , scalar_tend &
  600. , fdda3d, fdda2d &
  601. , ru_tendf, rv_tendf &
  602. , rw_tendf, t_tendf &
  603. , ph_tendf, mu_tendf &
  604. , tke_tend &
  605. , adapt_step_flag , curr_secs &
  606. , psim , psih , wspd , gz1oz0 &
  607. , br , chklowq &
  608. , cu_act_flag , hol , th_phy &
  609. , pi_phy , p_phy , grid%t_phy &
  610. , u_phy , v_phy &
  611. , dz8w , p8w , t8w , rho_phy , rho &
  612. , nba_mij, num_nba_mij & !JDM
  613. , nba_rij, num_nba_rij & !JDM
  614. , ids, ide, jds, jde, kds, kde &
  615. , ims, ime, jms, jme, kms, kme &
  616. , ips, ipe, jps, jpe, kps, kpe &
  617. , imsx, imex, jmsx, jmex, kmsx, kmex &
  618. , ipsx, ipex, jpsx, jpex, kpsx, kpex &
  619. , imsy, imey, jmsy, jmey, kmsy, kmey &
  620. , ipsy, ipey, jpsy, jpey, kpsy, kpey &
  621. , k_start , k_end &
  622. )
  623. END IF rk_step_is_one
  624. BENCH_START(rk_tend_tim)
  625. !$OMP PARALLEL DO &
  626. !$OMP PRIVATE ( ij )
  627. DO ij = 1 , grid%num_tiles
  628. CALL wrf_debug ( 200 , ' call rk_tendency' )
  629. CALL rk_tendency ( config_flags, rk_step &
  630. ,grid%ru_tend, grid%rv_tend, rw_tend, ph_tend, t_tend &
  631. ,ru_tendf, rv_tendf, rw_tendf, ph_tendf, t_tendf &
  632. ,mu_tend, grid%u_save, grid%v_save, w_save, ph_save &
  633. ,grid%t_save, mu_save, grid%rthften &
  634. ,grid%ru, grid%rv, grid%rw, grid%ww &
  635. ,grid%u_2, grid%v_2, grid%w_2, grid%t_2, grid%ph_2 &
  636. ,grid%u_1, grid%v_1, grid%w_1, grid%t_1, grid%ph_1 &
  637. ,grid%h_diabatic, grid%phb, grid%t_init &
  638. ,grid%mu_2, grid%mut, grid%muu, grid%muv, grid%mub &
  639. ,grid%al, grid%alt, grid%p, grid%pb, grid%php, cqu, cqv, cqw &
  640. ,grid%u_base, grid%v_base, grid%t_base, grid%qv_base, grid%z_base &
  641. ,grid%msfux,grid%msfuy, grid%msfvx, grid%msfvx_inv &
  642. ,grid%msfvy, grid%msftx,grid%msfty, grid%clat, grid%f, grid%e, grid%sina, grid%cosa &
  643. ,grid%fnm, grid%fnp, grid%rdn, grid%rdnw &
  644. ,grid%dt, grid%rdx, grid%rdy, grid%khdif, grid%kvdif, grid%xkmh, grid%xkhh &
  645. ,grid%diff_6th_opt, grid%diff_6th_factor &
  646. ,config_flags%momentum_adv_opt &
  647. ,grid%dampcoef,grid%zdamp,config_flags%damp_opt,config_flags%rad_nudge &
  648. ,grid%cf1, grid%cf2, grid%cf3, grid%cfn, grid%cfn1, num_3d_m &
  649. ,config_flags%non_hydrostatic, config_flags%top_lid &
  650. ,grid%u_frame, grid%v_frame &
  651. ,ids, ide, jds, jde, kds, kde &
  652. ,ims, ime, jms, jme, kms, kme &
  653. ,grid%i_start(ij), grid%i_end(ij) &
  654. ,grid%j_start(ij), grid%j_end(ij) &
  655. ,k_start, k_end &
  656. ,max_vert_cfl_tmp(ij), max_horiz_cfl_tmp(ij) )
  657. END DO
  658. !$OMP END PARALLEL DO
  659. BENCH_END(rk_tend_tim)
  660. IF (config_flags%use_adaptive_time_step) THEN
  661. DO ij = 1 , grid%num_tiles
  662. IF (max_horiz_cfl_tmp(ij) .GT. grid%max_horiz_cfl) THEN
  663. grid%max_horiz_cfl = max_horiz_cfl_tmp(ij)
  664. ENDIF
  665. IF (max_vert_cfl_tmp(ij) .GT. grid%max_vert_cfl) THEN
  666. grid%max_vert_cfl = max_vert_cfl_tmp(ij)
  667. ENDIF
  668. END DO
  669. IF (grid%max_horiz_cfl .GT. grid%max_cfl_val) THEN
  670. grid%max_cfl_val = grid%max_horiz_cfl
  671. ENDIF
  672. IF (grid%max_vert_cfl .GT. grid%max_cfl_val) THEN
  673. grid%max_cfl_val = grid%max_vert_cfl
  674. ENDIF
  675. ENDIF
  676. BENCH_START(relax_bdy_dry_tim)
  677. !$OMP PARALLEL DO &
  678. !$OMP PRIVATE ( ij )
  679. DO ij = 1 , grid%num_tiles
  680. IF( (config_flags%specified .or. config_flags%nested) .and. rk_step == 1 ) THEN
  681. CALL relax_bdy_dry ( config_flags, &
  682. grid%u_save, grid%v_save, ph_save, grid%t_save, &
  683. w_save, mu_tend, &
  684. grid%ru, grid%rv, grid%ph_2, grid%t_2, &
  685. grid%w_2, grid%mu_2, grid%mut, &
  686. grid%u_bxs,grid%u_bxe,grid%u_bys,grid%u_bye, &
  687. grid%v_bxs,grid%v_bxe,grid%v_bys,grid%v_bye, &
  688. grid%ph_bxs,grid%ph_bxe,grid%ph_bys,grid%ph_bye, &
  689. grid%t_bxs,grid%t_bxe,grid%t_bys,grid%t_bye, &
  690. grid%w_bxs,grid%w_bxe,grid%w_bys,grid%w_bye, &
  691. grid%mu_bxs,grid%mu_bxe,grid%mu_bys,grid%mu_bye, &
  692. grid%u_btxs,grid%u_btxe,grid%u_btys,grid%u_btye, &
  693. grid%v_btxs,grid%v_btxe,grid%v_btys,grid%v_btye, &
  694. grid%ph_btxs,grid%ph_btxe,grid%ph_btys,grid%ph_btye, &
  695. grid%t_btxs,grid%t_btxe,grid%t_btys,grid%t_btye, &
  696. grid%w_btxs,grid%w_btxe,grid%w_btys,grid%w_btye, &
  697. grid%mu_btxs,grid%mu_btxe,grid%mu_btys,grid%mu_btye, &
  698. config_flags%spec_bdy_width, grid%spec_zone, grid%relax_zone, &
  699. grid%dtbc, grid%fcx, grid%gcx, &
  700. ids,ide, jds,jde, kds,kde, &
  701. ims,ime, jms,jme, kms,kme, &
  702. ips,ipe, jps,jpe, kps,kpe, &
  703. grid%i_start(ij), grid%i_end(ij), &
  704. grid%j_start(ij), grid%j_end(ij), &
  705. k_start, k_end )
  706. ENDIF
  707. CALL rk_addtend_dry( grid%ru_tend, grid%rv_tend, rw_tend, ph_tend, t_tend, &
  708. ru_tendf, rv_tendf, rw_tendf, ph_tendf, t_tendf, &
  709. grid%u_save, grid%v_save, w_save, ph_save, grid%t_save, &
  710. mu_tend, mu_tendf, rk_step, &
  711. grid%h_diabatic, grid%mut, grid%msftx, &
  712. grid%msfty, grid%msfux,grid%msfuy, &
  713. grid%msfvx, grid%msfvx_inv, grid%msfvy, &
  714. ids,ide, jds,jde, kds,kde, &
  715. ims,ime, jms,jme, kms,kme, &
  716. ips,ipe, jps,jpe, kps,kpe, &
  717. grid%i_start(ij), grid%i_end(ij), &
  718. grid%j_start(ij), grid%j_end(ij), &
  719. k_start, k_end )
  720. IF( config_flags%specified .or. config_flags%nested ) THEN
  721. CALL spec_bdy_dry ( config_flags, &
  722. grid%ru_tend, grid%rv_tend, ph_tend, t_tend, &
  723. rw_tend, mu_tend, &
  724. grid%u_bxs,grid%u_bxe,grid%u_bys,grid%u_bye, &
  725. grid%v_bxs,grid%v_bxe,grid%v_bys,grid%v_bye, &
  726. grid%ph_bxs,grid%ph_bxe,grid%ph_bys,grid%ph_bye, &
  727. grid%t_bxs,grid%t_bxe,grid%t_bys,grid%t_bye, &
  728. grid%w_bxs,grid%w_bxe,grid%w_bys,grid%w_bye, &
  729. grid%mu_bxs,grid%mu_bxe,grid%mu_bys,grid%mu_bye, &
  730. grid%u_btxs,grid%u_btxe,grid%u_btys,grid%u_btye, &
  731. grid%v_btxs,grid%v_btxe,grid%v_btys,grid%v_btye, &
  732. grid%ph_btxs,grid%ph_btxe,grid%ph_btys,grid%ph_btye, &
  733. grid%t_btxs,grid%t_btxe,grid%t_btys,grid%t_btye, &
  734. grid%w_btxs,grid%w_btxe,grid%w_btys,grid%w_btye, &
  735. grid%mu_btxs,grid%mu_btxe,grid%mu_btys,grid%mu_btye, &
  736. config_flags%spec_bdy_width, grid%spec_zone, &
  737. ids,ide, jds,jde, kds,kde, & ! domain dims
  738. ims,ime, jms,jme, kms,kme, & ! memory dims
  739. ips,ipe, jps,jpe, kps,kpe, & ! patch dims
  740. grid%i_start(ij), grid%i_end(ij), &
  741. grid%j_start(ij), grid%j_end(ij), &
  742. k_start, k_end )
  743. ENDIF
  744. END DO
  745. !$OMP END PARALLEL DO
  746. BENCH_END(relax_bdy_dry_tim)
  747. !<DESCRIPTION>
  748. !<pre>
  749. ! (3) Small (acoustic,sound) steps.
  750. !
  751. ! Several acoustic steps are taken each RK pass. A small step
  752. ! sequence begins with calculating perturbation variables
  753. ! and coupling them to the column dry-air-mass mu
  754. ! (call to small_step_prep). This is followed by computing
  755. ! coefficients for the vertically implicit part of the
  756. ! small timestep (call to calc_coef_w).
  757. !
  758. ! The small steps are taken
  759. ! in the named loop "small_steps:". In the small_steps loop, first
  760. ! the horizontal momentum (u and v) are advanced (call to advance_uv),
  761. ! next mu and theta are advanced (call to advance_mu_t) followed by
  762. ! advancing w and the geopotential (call to advance_w). Diagnostic
  763. ! values for pressure and inverse density are updated at the end of
  764. ! each small_step.
  765. !
  766. ! The small-step section ends with the change of the perturbation variables
  767. ! back to full variables (call to small_step_finish).
  768. !</pre>
  769. !</DESCRIPTION>
  770. BENCH_START(small_step_prep_tim)
  771. !$OMP PARALLEL DO &
  772. !$OMP PRIVATE ( ij )
  773. DO ij = 1 , grid%num_tiles
  774. ! Calculate coefficients for the vertically implicit acoustic/gravity wave
  775. ! integration. We only need calculate these for the first pass through -
  776. ! the predictor step. They are reused as is for the corrector step.
  777. ! For third-order RK, we need to recompute these after the first
  778. ! predictor because we may have changed the small timestep -> grid%dts.
  779. CALL wrf_debug ( 200 , ' call small_step_prep ' )
  780. CALL small_step_prep( grid%u_1,grid%u_2,grid%v_1,grid%v_2,grid%w_1,grid%w_2, &
  781. grid%t_1,grid%t_2,grid%ph_1,grid%ph_2, &
  782. grid%mub, grid%mu_1, grid%mu_2, &
  783. grid%muu, muus, grid%muv, muvs, &
  784. grid%mut, grid%muts, grid%mudf, &
  785. grid%u_save, grid%v_save, w_save, &
  786. grid%t_save, ph_save, mu_save, &
  787. grid%ww, ww1, &
  788. grid%dnw, c2a, grid%pb, grid%p, grid%alt, &
  789. grid%msfux,grid%msfuy, grid%msfvx, grid%msfvx_inv, &
  790. grid%msfvy, grid%msftx,grid%msfty, &
  791. grid%rdx, grid%rdy, rk_step, &
  792. ids, ide, jds, jde, kds, kde, &
  793. ims, ime, jms, jme, kms, kme, &
  794. grid%i_start(ij), grid%i_end(ij), &
  795. grid%j_start(ij), grid%j_end(ij), &
  796. k_start , k_end )
  797. CALL calc_p_rho( grid%al, grid%p, grid%ph_2, &
  798. grid%alt, grid%t_2, grid%t_save, c2a, pm1, &
  799. grid%mu_2, grid%muts, grid%znu, t0, &
  800. grid%rdnw, grid%dnw, grid%smdiv, &
  801. config_flags%non_hydrostatic, 0, &
  802. ids, ide, jds, jde, kds, kde, &
  803. ims, ime, jms, jme, kms, kme, &
  804. grid%i_start(ij), grid%i_end(ij), &
  805. grid%j_start(ij), grid%j_end(ij), &
  806. k_start , k_end )
! Compute the tridiagonal coefficients (a, alpha, gamma) for the
! vertically implicit part of the acoustic step (w / geopotential
! solve).  Only needed in non-hydrostatic mode; the hydrostatic
! option has no prognostic w equation on the small steps.
! NOTE(review): per the comment above this tile loop, these
! coefficients are computed on the first RK pass and reused on later
! passes unless dts_rk changes — confirm against calc_coef_w itself.
IF (config_flags%non_hydrostatic) THEN
   CALL calc_coef_w( a,alpha,gamma, &
                     grid%mut, cqw, &
                     grid%rdn, grid%rdnw, c2a, &
                     dts_rk, g, grid%epssm, &
                     config_flags%top_lid, &
                     ids, ide, jds, jde, kds, kde, &
                     ims, ime, jms, jme, kms, kme, &
                     grid%i_start(ij), grid%i_end(ij), &
                     grid%j_start(ij), grid%j_end(ij), &
                     k_start , k_end )
ENDIF
  819. ENDDO
  820. !$OMP END PARALLEL DO
  821. BENCH_END(small_step_prep_tim)
  822. #ifdef DM_PARALLEL
  823. !-----------------------------------------------------------------------
  824. ! Stencils for patch communications (WCS, 29 June 2001)
  825. ! Note: the small size of this halo exchange reflects the
  826. ! fact that we are carrying the uncoupled variables
  827. ! as state variables in the mass coordinate model, as
  828. ! opposed to the coupled variables as in the height
  829. ! coordinate model.
  830. !
  831. ! * * * * *
  832. ! * * * * * * * * *
  833. ! * + * * + * * * + * *
  834. ! * * * * * * * * *
  835. ! * * * * *
  836. !
  837. ! 3D variables - note staggering! ph_2(Z), u_save(X), v_save(Y)
  838. !
  839. ! ph_2 x
  840. ! al x
  841. ! p x
  842. ! t_1 x
  843. ! t_save x
  844. ! u_save x
  845. ! v_save x
  846. !
  847. ! the following are 2D (xy) variables
  848. !
  849. ! mu_1 x
  850. ! mu_2 x
  851. ! mudf x
  852. ! php x
  853. ! alt x
  854. ! pb x
  855. !--------------------------------------------------------------
  856. # include "HALO_EM_B.inc"
  857. # include "PERIOD_BDY_EM_B.inc"
  858. #endif
  859. BENCH_START(set_phys_bc2_tim)
  860. !$OMP PARALLEL DO &
  861. !$OMP PRIVATE ( ij )
  862. DO ij = 1 , grid%num_tiles
  863. CALL set_physical_bc3d( grid%ru_tend, 'u', config_flags, &
  864. ids, ide, jds, jde, kds, kde, &
  865. ims, ime, jms, jme, kms, kme, &
  866. ips, ipe, jps, jpe, kps, kpe, &
  867. grid%i_start(ij), grid%i_end(ij), &
  868. grid%j_start(ij), grid%j_end(ij), &
  869. k_start , k_end )
  870. CALL set_physical_bc3d( grid%rv_tend, 'v', config_flags, &
  871. ids, ide, jds, jde, kds, kde, &
  872. ims, ime, jms, jme, kms, kme, &
  873. ips, ipe, jps, jpe, kps, kpe, &
  874. grid%i_start(ij), grid%i_end(ij), &
  875. grid%j_start(ij), grid%j_end(ij), &
  876. k_start , k_end )
  877. CALL set_physical_bc3d( grid%ph_2, 'w', config_flags, &
  878. ids, ide, jds, jde, kds, kde, &
  879. ims, ime, jms, jme, kms, kme, &
  880. ips, ipe, jps, jpe, kps, kpe, &
  881. grid%i_start(ij), grid%i_end(ij), &
  882. grid%j_start(ij), grid%j_end(ij), &
  883. k_start , k_end )
  884. CALL set_physical_bc3d( grid%al, 'p', config_flags, &
  885. ids, ide, jds, jde, kds, kde, &
  886. ims, ime, jms, jme, kms, kme, &
  887. ips, ipe, jps, jpe, kps, kpe, &
  888. grid%i_start(ij), grid%i_end(ij), &
  889. grid%j_start(ij), grid%j_end(ij), &
  890. k_start , k_end )
  891. CALL set_physical_bc3d( grid%p, 'p', config_flags, &
  892. ids, ide, jds, jde, kds, kde, &
  893. ims, ime, jms, jme, kms, kme, &
  894. ips, ipe, jps, jpe, kps, kpe, &
  895. grid%i_start(ij), grid%i_end(ij), &
  896. grid%j_start(ij), grid%j_end(ij), &
  897. k_start , k_end )
  898. CALL set_physical_bc3d( grid%t_1, 'p', config_flags, &
  899. ids, ide, jds, jde, kds, kde, &
  900. ims, ime, jms, jme, kms, kme, &
  901. ips, ipe, jps, jpe, kps, kpe, &
  902. grid%i_start(ij), grid%i_end(ij), &
  903. grid%j_start(ij), grid%j_end(ij), &
  904. k_start , k_end )
  905. CALL set_physical_bc3d( grid%t_save, 't', config_flags, &
  906. ids, ide, jds, jde, kds, kde, &
  907. ims, ime, jms, jme, kms, kme, &
  908. ips, ipe, jps, jpe, kps, kpe, &
  909. grid%i_start(ij), grid%i_end(ij), &
  910. grid%j_start(ij), grid%j_end(ij), &
  911. k_start , k_end )
  912. CALL set_physical_bc2d( grid%mu_1, 't', config_flags, &
  913. ids, ide, jds, jde, &
  914. ims, ime, jms, jme, &
  915. ips, ipe, jps, jpe, &
  916. grid%i_start(ij), grid%i_end(ij), &
  917. grid%j_start(ij), grid%j_end(ij) )
  918. CALL set_physical_bc2d( grid%mu_2, 't', config_flags, &
  919. ids, ide, jds, jde, &
  920. ims, ime, jms, jme, &
  921. ips, ipe, jps, jpe, &
  922. grid%i_start(ij), grid%i_end(ij), &
  923. grid%j_start(ij), grid%j_end(ij) )
  924. CALL set_physical_bc2d( grid%mudf, 't', config_flags, &
  925. ids, ide, jds, jde, &
  926. ims, ime, jms, jme, &
  927. ips, ipe, jps, jpe, &
  928. grid%i_start(ij), grid%i_end(ij), &
  929. grid%j_start(ij), grid%j_end(ij) )
  930. END DO
  931. !$OMP END PARALLEL DO
  932. BENCH_END(set_phys_bc2_tim)
  933. small_steps : DO iteration = 1 , number_of_small_timesteps
  934. ! Boundary condition time (or communication time).
  935. #ifdef DM_PARALLEL
  936. # include "PERIOD_BDY_EM_B.inc"
  937. #endif
  938. !$OMP PARALLEL DO &
  939. !$OMP PRIVATE ( ij )
  940. DO ij = 1 , grid%num_tiles
  941. BENCH_START(advance_uv_tim)
  942. CALL advance_uv ( grid%u_2, grid%ru_tend, grid%v_2, grid%rv_tend, &
  943. grid%p, grid%pb, &
  944. grid%ph_2, grid%php, grid%alt, grid%al, &
  945. grid%mu_2, &
  946. grid%muu, cqu, grid%muv, cqv, grid%mudf, &
  947. grid%msfux, grid%msfuy, grid%msfvx, &
  948. grid%msfvx_inv, grid%msfvy, &
  949. grid%rdx, grid%rdy, dts_rk, &
  950. grid%cf1, grid%cf2, grid%cf3, grid%fnm, grid%fnp, &
  951. grid%emdiv, &
  952. grid%rdnw, config_flags,grid%spec_zone, &
  953. config_flags%non_hydrostatic, config_flags%top_lid, &
  954. ids, ide, jds, jde, kds, kde, &
  955. ims, ime, jms, jme, kms, kme, &
  956. grid%i_start(ij), grid%i_end(ij), &
  957. grid%j_start(ij), grid%j_end(ij), &
  958. k_start , k_end )
  959. BENCH_END(advance_uv_tim)
  960. END DO
  961. !$OMP END PARALLEL DO
  962. !-----------------------------------------------------------
  963. ! acoustic integration polar filter for smallstep u, v
  964. !-----------------------------------------------------------
  965. IF (config_flags%polar) THEN
  966. CALL pxft ( grid=grid &
  967. ,lineno=__LINE__ &
  968. ,flag_uv = 1 &
  969. ,flag_rurv = 0 &
  970. ,flag_wph = 0 &
  971. ,flag_ww = 0 &
  972. ,flag_t = 0 &
  973. ,flag_mu = 0 &
  974. ,flag_mut = 0 &
  975. ,flag_moist = 0 &
  976. ,flag_chem = 0 &
  977. ,flag_tracer = 0 &
  978. ,flag_scalar = 0 &
  979. ,positive_definite = .FALSE. &
  980. ,moist=moist,chem=chem,tracer=tracer,scalar=scalar &
  981. ,fft_filter_lat = config_flags%fft_filter_lat &
  982. ,dclat = dclat &
  983. ,ids=ids,ide=ide,jds=jds,jde=jde,kds=kds,kde=kde &
  984. ,ims=ims,ime=ime,jms=jms,jme=jme,kms=kms,kme=kme &
  985. ,ips=ips,ipe=ipe,jps=jps,jpe=jpe,kps=kps,kpe=kpe &
  986. ,imsx=imsx,imex=imex,jmsx=jmsx,jmex=jmex,kmsx=kmsx,kmex=kmex &
  987. ,ipsx=ipsx,ipex=ipex,jpsx=jmsx,jpex=jpex,kpsx=kpsx,kpex=kpex )
  988. END IF
  989. !-----------------------------------------------------------
  990. ! end acoustic integration polar filter for smallstep u, v
  991. !-----------------------------------------------------------
  992. !$OMP PARALLEL DO &
  993. !$OMP PRIVATE ( ij )
  994. DO ij = 1 , grid%num_tiles
  995. BENCH_START(spec_bdy_uv_tim)
  996. IF( config_flags%specified .or. config_flags%nested ) THEN
  997. CALL spec_bdyupdate(grid%u_2, grid%ru_tend, dts_rk, &
  998. 'u' , config_flags, &
  999. grid%spec_zone, &
  1000. ids,ide, jds,jde, kds,kde, & ! domain dims
  1001. ims,ime, jms,jme, kms,kme, & ! memory dims
  1002. ips,ipe, jps,jpe, kps,kpe, & ! patch dims
  1003. grid%i_start(ij), grid%i_end(ij), &
  1004. grid%j_start(ij), grid%j_end(ij), &
  1005. k_start , k_end )
  1006. CALL spec_bdyupdate(grid%v_2, grid%rv_tend, dts_rk, &
  1007. 'v' , config_flags, &
  1008. grid%spec_zone, &
  1009. ids,ide, jds,jde, kds,kde, & ! domain dims
  1010. ims,ime, jms,jme, kms,kme, & ! memory dims
  1011. ips,ipe, jps,jpe, kps,kpe, & ! patch dims
  1012. grid%i_start(ij), grid%i_end(ij), &
  1013. grid%j_start(ij), grid%j_end(ij), &
  1014. k_start , k_end )
  1015. ENDIF
  1016. BENCH_END(spec_bdy_uv_tim)
  1017. END DO
  1018. !$OMP END PARALLEL DO
  1019. #ifdef DM_PARALLEL
  1020. !
  1021. ! Stencils for patch communications (WCS, 29 June 2001)
  1022. !
  1023. ! * *
  1024. ! * + * * + * +
  1025. ! * *
  1026. !
  1027. ! u_2 x
  1028. ! v_2 x
  1029. !
  1030. # include "HALO_EM_C.inc"
  1031. #endif
  1032. !$OMP PARALLEL DO &
  1033. !$OMP PRIVATE ( ij )
  1034. DO ij = 1 , grid%num_tiles
  1035. ! advance the mass in the column, theta, and calculate ww
  1036. BENCH_START(advance_mu_t_tim)
  1037. CALL advance_mu_t( grid%ww, ww1, grid%u_2, grid%u_save, grid%v_2, grid%v_save, &
  1038. grid%mu_2, grid%mut, muave, grid%muts, grid%muu, grid%muv, &
  1039. grid%mudf, grid%ru_m, grid%rv_m, grid%ww_m, &
  1040. grid%t_2, grid%t_save, t_2save, t_tend, &
  1041. mu_tend, &
  1042. grid%rdx, grid%rdy, dts_rk, grid%epssm, &
  1043. grid%dnw, grid%fnm, grid%fnp, grid%rdnw, &
  1044. grid%msfux,grid%msfuy, grid%msfvx, grid%msfvx_inv, &
  1045. grid%msfvy, grid%msftx,grid%msfty, &
  1046. iteration, config_flags, &
  1047. ids, ide, jds, jde, kds, kde, &
  1048. ims, ime, jms, jme, kms, kme, &
  1049. grid%i_start(ij), grid%i_end(ij), &
  1050. grid%j_start(ij), grid%j_end(ij), &
  1051. k_start , k_end )
  1052. BENCH_END(advance_mu_t_tim)
  1053. ENDDO
  1054. !$OMP END PARALLEL DO
  1055. !-----------------------------------------------------------
  1056. ! acoustic integration polar filter for smallstep mu, t
  1057. !-----------------------------------------------------------
  1058. IF ( (config_flags%polar) ) THEN
  1059. CALL pxft ( grid=grid &
  1060. ,lineno=__LINE__ &
  1061. ,flag_uv = 0 &
  1062. ,flag_rurv = 0 &
  1063. ,flag_wph = 0 &
  1064. ,flag_ww = 0 &
  1065. ,flag_t = 1 &
  1066. ,flag_mu = 1 &
  1067. ,flag_mut = 0 &
  1068. ,flag_moist = 0 &
  1069. ,flag_chem = 0 &
  1070. ,flag_tracer = 0 &
  1071. ,flag_scalar = 0 &
  1072. ,positive_definite = .FALSE. &
  1073. ,moist=moist,chem=chem,tracer=tracer,scalar=scalar &
  1074. ,fft_filter_lat = config_flags%fft_filter_lat &
  1075. ,dclat = dclat &
  1076. ,ids=ids,ide=ide,jds=jds,jde=jde,kds=kds,kde=kde &
  1077. ,ims=ims,ime=ime,jms=jms,jme=jme,kms=kms,kme=kme &
  1078. ,ips=ips,ipe=ipe,jps=jps,jpe=jpe,kps=kps,kpe=kpe &
  1079. ,imsx=imsx,imex=imex,jmsx=jmsx,jmex=jmex,kmsx=kmsx,kmex=kmex &
  1080. ,ipsx=ipsx,ipex=ipex,jpsx=jmsx,jpex=jpex,kpsx=kpsx,kpex=kpex )
  1081. grid%muts = grid%mut + grid%mu_2 ! reset muts using filtered mu_2
  1082. END IF
  1083. !-----------------------------------------------------------
  1084. ! end acoustic integration polar filter for smallstep mu, t
  1085. !-----------------------------------------------------------
  1086. BENCH_START(spec_bdy_t_tim)
  1087. !$OMP PARALLEL DO &
  1088. !$OMP PRIVATE ( ij )
  1089. DO ij = 1 , grid%num_tiles
  1090. IF( config_flags%specified .or. config_flags%nested ) THEN
  1091. CALL spec_bdyupdate(grid%t_2, t_tend, dts_rk, &
  1092. 't' , config_flags, &
  1093. grid%spec_zone, &
  1094. ids,ide, jds,jde, kds,kde, &
  1095. ims,ime, jms,jme, kms,kme, &
  1096. ips,ipe, jps,jpe, kps,kpe, &
  1097. grid%i_start(ij), grid%i_end(ij),&
  1098. grid%j_start(ij), grid%j_end(ij),&
  1099. k_start , k_end )
  1100. CALL spec_bdyupdate(grid%mu_2, mu_tend, dts_rk, &
  1101. 'm' , config_flags, &
  1102. grid%spec_zone, &
  1103. ids,ide, jds,jde, 1 ,1 , &
  1104. ims,ime, jms,jme, 1 ,1 , &
  1105. ips,ipe, jps,jpe, 1 ,1 , &
  1106. grid%i_start(ij), grid%i_end(ij),&
  1107. grid%j_start(ij), grid%j_end(ij),&
  1108. 1 , 1 )
  1109. CALL spec_bdyupdate(grid%muts, mu_tend, dts_rk, &
  1110. 'm' , config_flags, &
  1111. grid%spec_zone, &
  1112. ids,ide, jds,jde, 1 ,1 , & ! domain dims
  1113. ims,ime, jms,jme, 1 ,1 , & ! memory dims
  1114. ips,ipe, jps,jpe, 1 ,1 , & ! patch dims
  1115. grid%i_start(ij), grid%i_end(ij), &
  1116. grid%j_start(ij), grid%j_end(ij), &
  1117. 1 , 1 )
  1118. ENDIF
  1119. BENCH_END(spec_bdy_t_tim)
  1120. ! small (acoustic) step for the vertical momentum,
  1121. ! density and coupled potential temperature.
  1122. BENCH_START(advance_w_tim)
  1123. IF ( config_flags%non_hydrostatic ) THEN
  1124. CALL advance_w( grid%w_2, rw_tend, grid%ww, w_save, &
  1125. grid%u_2, grid%v_2, &
  1126. grid%mu_2, grid%mut, muave, grid%muts, &
  1127. t_2save, grid%t_2, grid%t_save, &
  1128. grid%ph_2, ph_save, grid%phb, ph_tend, &
  1129. grid%ht, c2a, cqw, grid%alt, grid%alb, &
  1130. a, alpha, gamma, &
  1131. grid%rdx, grid%rdy, dts_rk, t0, grid%epssm, &
  1132. grid%dnw, grid%fnm, grid%fnp, grid%rdnw, &
  1133. grid%rdn, grid%cf1, grid%cf2, grid%cf3, &
  1134. grid%msftx, grid%msfty, &
  1135. config_flags, config_flags%top_lid, &
  1136. ids,ide, jds,jde, kds,kde, &
  1137. ims,ime, jms,jme, kms,kme, &
  1138. grid%i_start(ij), grid%i_end(ij), &
  1139. grid%j_start(ij), grid%j_end(ij), &
  1140. k_start , k_end )
  1141. ENDIF
  1142. BENCH_END(advance_w_tim)
  1143. ENDDO
  1144. !$OMP END PARALLEL DO
  1145. !-----------------------------------------------------------
  1146. ! acoustic integration polar filter for smallstep w, geopotential
  1147. !-----------------------------------------------------------
  1148. IF ( (config_flags%polar) .AND. (config_flags%non_hydrostatic) ) THEN
  1149. CALL pxft ( grid=grid &
  1150. ,lineno=__LINE__ &
  1151. ,flag_uv = 0 &
  1152. ,flag_rurv = 0 &
  1153. ,flag_wph = 1 &
  1154. ,flag_ww = 0 &
  1155. ,flag_t = 0 &
  1156. ,flag_mu = 0 &
  1157. ,flag_mut = 0 &
  1158. ,flag_moist = 0 &
  1159. ,flag_chem = 0 &
  1160. ,flag_tracer = 0 &
  1161. ,flag_scalar = 0 &
  1162. ,positive_definite = .FALSE. &
  1163. ,moist=moist,chem=chem,tracer=tracer,scalar=scalar &
  1164. ,fft_filter_lat = config_flags%fft_filter_lat &
  1165. ,dclat = dclat &
  1166. ,ids=ids,ide=ide,jds=jds,jde=jde,kds=kds,kde=kde &
  1167. ,ims=ims,ime=ime,jms=jms,jme=jme,kms=kms,kme=kme &
  1168. ,ips=ips,ipe=ipe,jps=jps,jpe=jpe,kps=kps,kpe=kpe &
  1169. ,imsx=imsx,imex=imex,jmsx=jmsx,jmex=jmex,kmsx=kmsx,kmex=kmex &
  1170. ,ipsx=ipsx,ipex=ipex,jpsx=jmsx,jpex=jpex,kpsx=kpsx,kpex=kpex )
  1171. END IF
  1172. !-----------------------------------------------------------
  1173. ! end acoustic integration polar filter for smallstep w, geopotential
  1174. !-----------------------------------------------------------
  1175. !$OMP PARALLEL DO &
  1176. !$OMP PRIVATE ( ij )
  1177. DO ij = 1 , grid%num_tiles
  1178. BENCH_START(sumflux_tim)
  1179. CALL sumflux ( grid%u_2, grid%v_2, grid%ww, &
  1180. grid%u_save, grid%v_save, ww1, &
  1181. grid%muu, grid%muv, &
  1182. grid%ru_m, grid%rv_m, grid%ww_m, grid%epssm, &
  1183. grid%msfux, grid% msfuy, grid%msfvx, &
  1184. grid%msfvx_inv, grid%msfvy, &
  1185. iteration, number_of_small_timesteps, &
  1186. ids, ide, jds, jde, kds, kde, &
  1187. ims, ime, jms, jme, kms, kme, &
  1188. grid%i_start(ij), grid%i_end(ij), &
  1189. grid%j_start(ij), grid%j_end(ij), &
  1190. k_start , k_end )
  1191. BENCH_END(sumflux_tim)
  1192. IF( config_flags%specified .or. config_flags%nested ) THEN
  1193. BENCH_START(spec_bdynhyd_tim)
  1194. IF (config_flags%non_hydrostatic) THEN
  1195. CALL spec_bdyupdate_ph( ph_save, grid%ph_2, ph_tend, &
  1196. mu_tend, grid%muts, dts_rk, &
  1197. 'h' , config_flags, &
  1198. grid%spec_zone, &
  1199. ids,ide, jds,jde, kds,kde, &
  1200. ims,ime, jms,jme, kms,kme, &
  1201. ips,ipe, jps,jpe, kps,kpe, &
  1202. grid%i_start(ij), grid%i_end(ij),&
  1203. grid%j_start(ij), grid%j_end(ij),&
  1204. k_start , k_end )
  1205. IF( config_flags%specified ) THEN
  1206. CALL zero_grad_bdy ( grid%w_2, &
  1207. 'w' , config_flags, &
  1208. grid%spec_zone, &
  1209. ids,ide, jds,jde, kds,kde, &
  1210. ims,ime, jms,jme, kms,kme, &
  1211. ips,ipe, jps,jpe, kps,kpe, &
  1212. grid%i_start(ij), grid%i_end(ij), &
  1213. grid%j_start(ij), grid%j_end(ij), &
  1214. k_start , k_end )
  1215. ELSE
  1216. CALL spec_bdyupdate ( grid%w_2, rw_tend, dts_rk, &
  1217. 'h' , config_flags, &
  1218. grid%spec_zone, &
  1219. ids,ide, jds,jde, kds,kde, &
  1220. ims,ime, jms,jme, kms,kme, &
  1221. ips,ipe, jps,jpe, kps,kpe, &
  1222. grid%i_start(ij), grid%i_end(ij),&
  1223. grid%j_start(ij), grid%j_end(ij),&
  1224. k_start , k_end )
  1225. ENDIF
  1226. ENDIF
  1227. BENCH_END(spec_bdynhyd_tim)
  1228. ENDIF
  1229. BENCH_START(cald_p_rho_tim)
  1230. CALL calc_p_rho( grid%al, grid%p, grid%ph_2, &
  1231. grid%alt, grid%t_2, grid%t_save, c2a, pm1, &
  1232. grid%mu_2, grid%muts, grid%znu, t0, &
  1233. grid%rdnw, grid%dnw, grid%smdiv, &
  1234. config_flags%non_hydrostatic, iteration, &
  1235. ids, ide, jds, jde, kds, kde, &
  1236. ims, ime, jms, jme, kms, kme, &
  1237. grid%i_start(ij), grid%i_end(ij), &
  1238. grid%j_start(ij), grid%j_end(ij), &
  1239. k_start , k_end )
  1240. BENCH_END(cald_p_rho_tim)
  1241. ENDDO
  1242. !$OMP END PARALLEL DO
  1243. #ifdef DM_PARALLEL
  1244. !
  1245. ! Stencils for patch communications (WCS, 29 June 2001)
  1246. !
  1247. ! * *
  1248. ! * + * * + * +
  1249. ! * *
  1250. !
  1251. ! ph_2 x
  1252. ! al x
  1253. ! p x
  1254. !
  1255. ! 2D variables (x,y)
  1256. !
  1257. ! mu_2 x
  1258. ! muts x
  1259. ! mudf x
  1260. # include "HALO_EM_C2.inc"
  1261. # include "PERIOD_BDY_EM_B3.inc"
  1262. #endif
  1263. BENCH_START(phys_bc_tim)
  1264. !$OMP PARALLEL DO &
  1265. !$OMP PRIVATE ( ij )
  1266. DO ij = 1 , grid%num_tiles
  1267. ! boundary condition set for next small timestep
  1268. CALL set_physical_bc3d( grid%ph_2, 'w', config_flags, &
  1269. ids, ide, jds, jde, kds, kde, &
  1270. ims, ime, jms, jme, kms, kme, &
  1271. ips, ipe, jps, jpe, kps, kpe, &
  1272. grid%i_start(ij), grid%i_end(ij), &
  1273. grid%j_start(ij), grid%j_end(ij), &
  1274. k_start , k_end )
  1275. CALL set_physical_bc3d( grid%al, 'p', config_flags, &
  1276. ids, ide, jds, jde, kds, kde, &
  1277. ims, ime, jms, jme, kms, kme, &
  1278. ips, ipe, jps, jpe, kps, kpe, &
  1279. grid%i_start(ij), grid%i_end(ij), &
  1280. grid%j_start(ij), grid%j_end(ij), &
  1281. k_start , k_end )
  1282. CALL set_physical_bc3d( grid%p, 'p', config_flags, &
  1283. ids, ide, jds, jde, kds, kde, &
  1284. ims, ime, jms, jme, kms, kme, &
  1285. ips, ipe, jps, jpe, kps, kpe, &
  1286. grid%i_start(ij), grid%i_end(ij), &
  1287. grid%j_start(ij), grid%j_end(ij), &
  1288. k_start , k_end )
  1289. CALL set_physical_bc2d( grid%muts, 't', config_flags, &
  1290. ids, ide, jds, jde, &
  1291. ims, ime, jms, jme, &
  1292. ips, ipe, jps, jpe, &
  1293. grid%i_start(ij), grid%i_end(ij), &
  1294. grid%j_start(ij), grid%j_end(ij) )
  1295. CALL set_physical_bc2d( grid%mu_2, 't', config_flags, &
  1296. ids, ide, jds, jde, &
  1297. ims, ime, jms, jme, &
  1298. ips, ipe, jps, jpe, &
  1299. grid%i_start(ij), grid%i_end(ij), &
  1300. grid%j_start(ij), grid%j_end(ij) )
  1301. CALL set_physical_bc2d( grid%mudf, 't', config_flags, &
  1302. ids, ide, jds, jde, &
  1303. ims, ime, jms, jme, &
  1304. ips, ipe, jps, jpe, &
  1305. grid%i_start(ij), grid%i_end(ij), &
  1306. grid%j_start(ij), grid%j_end(ij) )
  1307. END DO
  1308. !$OMP END PARALLEL DO
  1309. BENCH_END(phys_bc_tim)
  1310. END DO small_steps
  1311. !$OMP PARALLEL DO &
  1312. !$OMP PRIVATE ( ij )
  1313. DO ij = 1 , grid%num_tiles
  1314. CALL wrf_debug ( 200 , ' call rk_small_finish' )
  1315. ! change time-perturbation variables back to
  1316. ! full perturbation variables.
  1317. ! first get updated mu at u and v points
  1318. BENCH_START(calc_mu_uv_tim)
  1319. CALL calc_mu_uv_1 ( config_flags, &
  1320. grid%muts, muus, muvs, &
  1321. ids, ide, jds, jde, kds, kde, &
  1322. ims, ime, jms, jme, kms, kme, &
  1323. grid%i_start(ij), grid%i_end(ij), &
  1324. grid%j_start(ij), grid%j_end(ij), &
  1325. k_start , k_end )
  1326. BENCH_END(calc_mu_uv_tim)
  1327. BENCH_START(small_step_finish_tim)
  1328. CALL small_step_finish( grid%u_2, grid%u_1, grid%v_2, grid%v_1, grid%w_2, grid%w_1, &
  1329. grid%t_2, grid%t_1, grid%ph_2, grid%ph_1, grid%ww, ww1, &
  1330. grid%mu_2, grid%mu_1, &
  1331. grid%mut, grid%muts, grid%muu, muus, grid%muv, muvs, &
  1332. grid%u_save, grid%v_save, w_save, &
  1333. grid%t_save, ph_save, mu_save, &
  1334. grid%msfux,grid%msfuy, grid%msfvx,grid%msfvy, grid%msftx,grid%msfty, &
  1335. grid%h_diabatic, &
  1336. number_of_small_timesteps,dts_rk, &
  1337. rk_step, rk_order, &
  1338. ids, ide, jds, jde, kds, kde, &
  1339. ims, ime, jms, jme, kms, kme, &
  1340. grid%i_start(ij), grid%i_end(ij), &
  1341. grid%j_start(ij), grid%j_end(ij), &
  1342. k_start , k_end )
  1343. ! call to set ru_m, rv_m and ww_m b.c's for PD advection
  1344. IF (rk_step == rk_order) THEN
  1345. CALL set_physical_bc3d( grid%ru_m, 'u', config_flags, &
  1346. ids, ide, jds, jde, kds, kde, &
  1347. ims, ime, jms, jme, kms, kme, &
  1348. ips, ipe, jps, jpe, kps, kpe, &
  1349. grid%i_start(ij), grid%i_end(ij), &
  1350. grid%j_start(ij), grid%j_end(ij), &
  1351. k_start , k_end )
  1352. CALL set_physical_bc3d( grid%rv_m, 'v', config_flags, &
  1353. ids, ide, jds, jde, kds, kde, &
  1354. ims, ime, jms, jme, kms, kme, &
  1355. ips, ipe, jps, jpe, kps, kpe, &
  1356. grid%i_start(ij), grid%i_end(ij), &
  1357. grid%j_start(ij), grid%j_end(ij), &
  1358. k_start , k_end )
  1359. CALL set_physical_bc3d( grid%ww_m, 'w', config_flags, &
  1360. ids, ide, jds, jde, kds, kde, &
  1361. ims, ime, jms, jme, kms, kme, &
  1362. ips, ipe, jps, jpe, kps, kpe, &
  1363. grid%i_start(ij), grid%i_end(ij), &
  1364. grid%j_start(ij), grid%j_end(ij), &
  1365. k_start , k_end )
  1366. CALL set_physical_bc2d( grid%mut, 't', config_flags, &
  1367. ids, ide, jds, jde, &
  1368. ims, ime, jms, jme, &
  1369. ips, ipe, jps, jpe, &
  1370. grid%i_start(ij), grid%i_end(ij), &
  1371. grid%j_start(ij), grid%j_end(ij) )
  1372. CALL set_physical_bc2d( grid%muts, 't', config_flags, &
  1373. ids, ide, jds, jde, &
  1374. ims, ime, jms, jme, &
  1375. ips, ipe, jps, jpe, &
  1376. grid%i_start(ij), grid%i_end(ij), &
  1377. grid%j_start(ij), grid%j_end(ij) )
  1378. END IF
  1379. BENCH_END(small_step_finish_tim)
  1380. END DO
  1381. !$OMP END PARALLEL DO
  1382. !-----------------------------------------------------------
  1383. ! polar filter for full dynamics variables and time-averaged mass fluxes
  1384. !-----------------------------------------------------------
  1385. IF (config_flags%polar) THEN
  1386. CALL pxft ( grid=grid &
  1387. ,lineno=__LINE__ &
  1388. ,flag_uv = 1 &
  1389. ,flag_rurv = 1 &
  1390. ,flag_wph = 1 &
  1391. ,flag_ww = 1 &
  1392. ,flag_t = 1 &
  1393. ,flag_mu = 1 &
  1394. ,flag_mut = 1 &
  1395. ,flag_moist = 0 &
  1396. ,flag_chem = 0 &
  1397. ,flag_tracer = 0 &
  1398. ,flag_scalar = 0 &
  1399. ,positive_definite = .FALSE. &
  1400. ,moist=moist,chem=chem,tracer=tracer,scalar=scalar &
  1401. ,fft_filter_lat = config_flags%fft_filter_lat &
  1402. ,dclat = dclat &
  1403. ,ids=ids,ide=ide,jds=jds,jde=jde,kds=kds,kde=kde &
  1404. ,ims=ims,ime=ime,jms=jms,jme=jme,kms=kms,kme=kme &
  1405. ,ips=ips,ipe=ipe,jps=jps,jpe=jpe,kps=kps,kpe=kpe &
  1406. ,imsx=imsx,imex=imex,jmsx=jmsx,jmex=jmex,kmsx=kmsx,kmex=kmex &
  1407. ,ipsx=ipsx,ipex=ipex,jpsx=jmsx,jpex=jpex,kpsx=kpsx,kpex=kpex )
  1408. END IF
  1409. !-----------------------------------------------------------
  1410. ! end polar filter for full dynamics variables and time-averaged mass fluxes
  1411. !-----------------------------------------------------------
  1412. !-----------------------------------------------------------------------
  1413. ! add in physics tendency first if positive definite advection is used.
  1414. ! pd advection applies advective flux limiter on last runge-kutta step
  1415. !-----------------------------------------------------------------------
  1416. ! first moisture
  1417. IF ((config_flags%moist_adv_opt /= ORIGINAL .and. config_flags%moist_adv_opt /= WENO_SCALAR) .and. (rk_step == rk_order)) THEN
  1418. !$OMP PARALLEL DO &
  1419. !$OMP PRIVATE ( ij )
  1420. DO ij = 1 , grid%num_tiles
  1421. CALL wrf_debug ( 200 , ' call rk_update_scalar_pd' )
  1422. DO im = PARAM_FIRST_SCALAR, num_3d_m
  1423. CALL rk_update_scalar_pd( im, im, &
  1424. moist_old(ims,kms,jms,im), &
  1425. moist_tend(ims,kms,jms,im), &
  1426. grid%mu_1, grid%mu_1, grid%mub, &
  1427. rk_step, dt_rk, grid%spec_zone, &
  1428. config_flags, &
  1429. ids, ide, jds, jde, kds, kde, &
  1430. ims, ime, jms, jme, kms, kme, &
  1431. grid%i_start(ij), grid%i_end(ij), &
  1432. grid%j_start(ij), grid%j_end(ij), &
  1433. k_start , k_end )
  1434. ENDDO
  1435. END DO
  1436. !$OMP END PARALLEL DO
  1437. !---------------------- positive definite bc call
  1438. #ifdef DM_PARALLEL
  1439. IF (config_flags%moist_adv_opt /= ORIGINAL .and. config_flags%moist_adv_opt /= WENO_SCALAR) THEN
  1440. IF ( config_flags%h_sca_adv_order <= 4 ) THEN
  1441. # include "HALO_EM_MOIST_OLD_E_5.inc"
  1442. ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
  1443. # include "HALO_EM_MOIST_OLD_E_7.inc"
  1444. ELSE
  1445. WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
  1446. CALL wrf_error_fatal(TRIM(wrf_err_message))
  1447. ENDIF
  1448. ENDIF
  1449. #endif
  1450. #ifdef DM_PARALLEL
  1451. # include "PERIOD_BDY_EM_MOIST_OLD.inc"
  1452. #endif
  1453. !$OMP PARALLEL DO &
  1454. !$OMP PRIVATE ( ij )
  1455. DO ij = 1 , grid%num_tiles
  1456. IF (num_3d_m >= PARAM_FIRST_SCALAR) THEN
  1457. DO im = PARAM_FIRST_SCALAR , num_3d_m
  1458. CALL set_physical_bc3d( moist_old(ims,kms,jms,im), 'p', config_flags, &
  1459. ids, ide, jds, jde, kds, kde, &
  1460. ims, ime, jms, jme, kms, kme, &
  1461. ips, ipe, jps, jpe, kps, kpe, &
  1462. grid%i_start(ij), grid%i_end(ij), &
  1463. grid%j_start(ij), grid%j_end(ij), &
  1464. k_start , k_end )
  1465. END DO
  1466. ENDIF
  1467. END DO
  1468. !$OMP END PARALLEL DO
  1469. END IF ! end if for moist_adv_opt
  1470. ! scalars
  1471. IF ((config_flags%scalar_adv_opt /= ORIGINAL .and. config_flags%scalar_adv_opt /= WENO_SCALAR) .and. (rk_step == rk_order)) THEN
  1472. !$OMP PARALLEL DO &
  1473. !$OMP PRIVATE ( ij )
  1474. DO ij = 1 , grid%num_tiles
  1475. CALL wrf_debug ( 200 , ' call rk_update_scalar_pd' )
  1476. DO im = PARAM_FIRST_SCALAR, num_3d_s
  1477. CALL rk_update_scalar_pd( im, im, &
  1478. scalar_old(ims,kms,jms,im), &
  1479. scalar_tend(ims,kms,jms,im), &
  1480. grid%mu_1, grid%mu_1, grid%mub, &
  1481. rk_step, dt_rk, grid%spec_zone, &
  1482. config_flags, &
  1483. ids, ide, jds, jde, kds, kde, &
  1484. ims, ime, jms, jme, kms, kme, &
  1485. grid%i_start(ij), grid%i_end(ij), &
  1486. grid%j_start(ij), grid%j_end(ij), &
  1487. k_start , k_end )
  1488. ENDDO
  1489. ENDDO
  1490. !$OMP END PARALLEL DO
  1491. !---------------------- positive definite bc call
  1492. #ifdef DM_PARALLEL
  1493. IF (config_flags%scalar_adv_opt /= ORIGINAL .and. config_flags%scalar_adv_opt /= WENO_SCALAR) THEN
  1494. #ifndef RSL
  1495. IF ( config_flags%h_sca_adv_order <= 4 ) THEN
  1496. # include "HALO_EM_SCALAR_OLD_E_5.inc"
  1497. ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
  1498. # include "HALO_EM_SCALAR_OLD_E_7.inc"
  1499. ELSE
  1500. WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
  1501. CALL wrf_error_fatal(TRIM(wrf_err_message))
  1502. ENDIF
  1503. #else
  1504. WRITE(wrf_err_message,*)'cannot use pd scheme with RSL - use RSL-LITE'
  1505. CALL wrf_error_fatal(TRIM(wrf_err_message))
  1506. #endif
  1507. endif
  1508. #endif
  1509. #ifdef DM_PARALLEL
  1510. # include "PERIOD_BDY_EM_SCALAR_OLD.inc"
  1511. #endif
  1512. !$OMP PARALLEL DO &
  1513. !$OMP PRIVATE ( ij )
  1514. DO ij = 1 , grid%num_tiles
  1515. IF (num_3d_s >= PARAM_FIRST_SCALAR) THEN
  1516. DO im = PARAM_FIRST_SCALAR , num_3d_s
  1517. CALL set_physical_bc3d( scalar_old(ims,kms,jms,im), 'p', config_flags, &
  1518. ids, ide, jds, jde, kds, kde, &
  1519. ims, ime, jms, jme, kms, kme, &
  1520. ips, ipe, jps, jpe, kps, kpe, &
  1521. grid%i_start(ij), grid%i_end(ij), &
  1522. grid%j_start(ij), grid%j_end(ij), &
  1523. k_start , k_end )
  1524. END DO
  1525. ENDIF
  1526. END DO
  1527. !$OMP END PARALLEL DO
  1528. END IF ! end if for scalar_adv_opt
  1529. ! chem
! Positive-definite physics-tendency update for chem species: on the final
! RK substep (rk_step == rk_order), when a PD/monotonic advection option is
! selected, fold the accumulated physics tendencies into chem_old via
! rk_update_scalar_pd, then refresh halos and boundary conditions.
IF ((config_flags%chem_adv_opt /= ORIGINAL .and. config_flags%chem_adv_opt /= WENO_SCALAR) .and. (rk_step == rk_order)) THEN
!$OMP PARALLEL DO &
!$OMP PRIVATE ( ij )
DO ij = 1 , grid%num_tiles
CALL wrf_debug ( 200 , ' call rk_update_scalar_pd' )
DO im = PARAM_FIRST_SCALAR, num_3d_c
CALL rk_update_scalar_pd( im, im, &
chem_old(ims,kms,jms,im), &
chem_tend(ims,kms,jms,im), &
grid%mu_1, grid%mu_1, grid%mub, &
rk_step, dt_rk, grid%spec_zone, &
config_flags, &
ids, ide, jds, jde, kds, kde, &
ims, ime, jms, jme, kms, kme, &
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start , k_end )
ENDDO
END DO
!$OMP END PARALLEL DO
!---------------------- positive definite bc call
! Halo width follows the horizontal scalar advection stencil:
! orders <= 4 need the 5-point exchange, orders <= 6 the 7-point one.
#ifdef DM_PARALLEL
IF (config_flags%chem_adv_opt /= ORIGINAL .and. config_flags%chem_adv_opt /= WENO_SCALAR) THEN
IF ( config_flags%h_sca_adv_order <= 4 ) THEN
# include "HALO_EM_CHEM_OLD_E_5.inc"
ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
# include "HALO_EM_CHEM_OLD_E_7.inc"
ELSE
WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
CALL wrf_error_fatal(TRIM(wrf_err_message))
ENDIF
ENDIF
#endif
#ifdef DM_PARALLEL
# include "PERIOD_BDY_EM_CHEM_OLD.inc"
#endif
! Physical boundary conditions for each updated chem_old species.
!$OMP PARALLEL DO &
!$OMP PRIVATE ( ij )
DO ij = 1 , grid%num_tiles
IF (num_3d_c >= PARAM_FIRST_SCALAR) THEN
DO im = PARAM_FIRST_SCALAR , num_3d_c
CALL set_physical_bc3d( chem_old(ims,kms,jms,im), 'p', config_flags, &
ids, ide, jds, jde, kds, kde, &
ims, ime, jms, jme, kms, kme, &
ips, ipe, jps, jpe, kps, kpe, &
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start , k_end )
END DO
ENDIF
END DO
!$OMP END PARALLEL DO
ENDIF ! end if for chem_adv_opt
  1583. ! tracer
! Positive-definite physics-tendency update for tracer species; same
! structure as the chem block above (update tracer_old on the final RK
! substep, then halo exchange sized by the advection order, then BCs).
IF ((config_flags%tracer_adv_opt /= ORIGINAL .and. config_flags%tracer_adv_opt /= WENO_SCALAR) .and. (rk_step == rk_order)) THEN
!$OMP PARALLEL DO &
!$OMP PRIVATE ( ij )
DO ij = 1 , grid%num_tiles
CALL wrf_debug ( 200 , ' call rk_update_scalar_pd' )
DO im = PARAM_FIRST_SCALAR, num_tracer
CALL rk_update_scalar_pd( im, im, &
tracer_old(ims,kms,jms,im), &
tracer_tend(ims,kms,jms,im), &
grid%mu_1, grid%mu_1, grid%mub, &
rk_step, dt_rk, grid%spec_zone, &
config_flags, &
ids, ide, jds, jde, kds, kde, &
ims, ime, jms, jme, kms, kme, &
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start , k_end )
ENDDO
END DO
!$OMP END PARALLEL DO
!---------------------- positive definite bc call
#ifdef DM_PARALLEL
IF (config_flags%tracer_adv_opt /= ORIGINAL .and. config_flags%tracer_adv_opt /= WENO_SCALAR) THEN
IF ( config_flags%h_sca_adv_order <= 4 ) THEN
# include "HALO_EM_TRACER_OLD_E_5.inc"
ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
# include "HALO_EM_TRACER_OLD_E_7.inc"
ELSE
WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
CALL wrf_error_fatal(TRIM(wrf_err_message))
ENDIF
ENDIF
#endif
#ifdef DM_PARALLEL
# include "PERIOD_BDY_EM_TRACER_OLD.inc"
#endif
! Physical boundary conditions for each updated tracer_old species.
!$OMP PARALLEL DO &
!$OMP PRIVATE ( ij )
DO ij = 1 , grid%num_tiles
IF (num_tracer >= PARAM_FIRST_SCALAR) THEN
DO im = PARAM_FIRST_SCALAR , num_tracer
CALL set_physical_bc3d( tracer_old(ims,kms,jms,im), 'p', config_flags, &
ids, ide, jds, jde, kds, kde, &
ims, ime, jms, jme, kms, kme, &
ips, ipe, jps, jpe, kps, kpe, &
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start , k_end )
END DO
ENDIF
END DO
!$OMP END PARALLEL DO
ENDIF ! end if for tracer_adv_opt
  1637. ! tke
! Positive-definite physics-tendency update for TKE (single field, no
! species loop). Only active when the TKE-based eddy viscosity scheme is
! selected (km_opt == 2) and this is the final RK substep.
IF ((config_flags%tke_adv_opt /= ORIGINAL .and. config_flags%tke_adv_opt /= WENO_SCALAR) .and. (rk_step == rk_order) &
.and. (config_flags%km_opt .eq. 2) ) THEN
!$OMP PARALLEL DO &
!$OMP PRIVATE ( ij )
DO ij = 1 , grid%num_tiles
CALL wrf_debug ( 200 , ' call rk_update_scalar_pd' )
CALL rk_update_scalar_pd( 1, 1, &
grid%tke_1, &
tke_tend(ims,kms,jms), &
grid%mu_1, grid%mu_1, grid%mub, &
rk_step, dt_rk, grid%spec_zone, &
config_flags, &
ids, ide, jds, jde, kds, kde, &
ims, ime, jms, jme, kms, kme, &
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start , k_end )
ENDDO
!$OMP END PARALLEL DO
!---------------------- positive definite bc call
#ifdef DM_PARALLEL
IF (config_flags%tke_adv_opt /= ORIGINAL .and. config_flags%tke_adv_opt /= WENO_SCALAR) THEN
IF ( config_flags%h_sca_adv_order <= 4 ) THEN
# include "HALO_EM_TKE_OLD_E_5.inc"
ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
# include "HALO_EM_TKE_OLD_E_7.inc"
ELSE
WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
CALL wrf_error_fatal(TRIM(wrf_err_message))
ENDIF
ENDIF
#endif
#ifdef DM_PARALLEL
# include "PERIOD_BDY_EM_TKE_OLD.inc"
#endif
! Physical boundary conditions for the updated tke_1 field.
!$OMP PARALLEL DO &
!$OMP PRIVATE ( ij )
DO ij = 1 , grid%num_tiles
CALL set_physical_bc3d( grid%tke_1, 'p', config_flags, &
ids, ide, jds, jde, kds, kde, &
ims, ime, jms, jme, kms, kme, &
ips, ipe, jps, jpe, kps, kpe, &
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start , k_end )
END DO
!$OMP END PARALLEL DO
!--- end of positive definite physics tendency update
END IF ! end if for tke_adv_opt
  1687. #ifdef DM_PARALLEL
  1688. !
  1689. ! Stencils for patch communications (WCS, 29 June 2001)
  1690. !
  1691. ! * * * * *
  1692. ! * * * * *
  1693. ! * * + * *
  1694. ! * * * * *
  1695. ! * * * * *
  1696. !
  1697. ! ru_m x
  1698. ! rv_m x
  1699. ! ww_m x
  1700. ! mut x
  1701. !
  1702. !--------------------------------------------------------------
  1703. # include "HALO_EM_D.inc"
  1704. ! WCS addition 11/19/08
  1705. # include "PERIOD_EM_DA.inc"
  1706. #endif
  1707. !<DESCRIPTION>
  1708. !<pre>
  1709. ! (4) Still within the RK loop, the scalar variables are advanced.
  1710. !
  1711. ! For the moist and chem variables, each one is advanced
  1712. ! individually, using named loops "moist_variable_loop:"
  1713. ! and "chem_variable_loop:". Each RK substep begins by
  1714. ! calculating the advective tendency, and, for the first RK step,
  1715. ! 3D mixing (calling rk_scalar_tend) followed by an update
  1716. ! of the scalar (calling rk_update_scalar).
  1717. !</pre>
  1718. !</DESCRIPTION>
! Advance the moisture species for this RK substep:
!   pass 1 (moist_tile_loop_1): compute the advective (and on step 1, mixing)
!     tendency with rk_scalar_tend, and on the first RK step apply the
!     relaxation/specified lateral-boundary tendencies;
!   pass 2 (moist_tile_loop_2): integrate the species forward with
!     rk_update_scalar and overwrite the specified-boundary zone with
!     flow-dependent values.
moist_scalar_advance: IF (num_3d_m >= PARAM_FIRST_SCALAR ) THEN
moist_variable_loop: DO im = PARAM_FIRST_SCALAR, num_3d_m
! adv_moist_cond is set in module_physics_init based on mp_physics choice
! true except for Ferrier scheme; qv is always advected
IF (grid%adv_moist_cond .or. im==p_qv ) THEN
!$OMP PARALLEL DO &
!$OMP PRIVATE ( ij )
moist_tile_loop_1: DO ij = 1 , grid%num_tiles
CALL wrf_debug ( 200 , ' call rk_scalar_tend' )
tenddec = .false.
BENCH_START(rk_scalar_tend_tim)
CALL rk_scalar_tend ( im, im, config_flags, tenddec, &
rk_step, dt_rk, &
grid%ru_m, grid%rv_m, grid%ww_m, &
grid%muts, grid%mub, grid%mu_1, &
grid%alt, &
moist_old(ims,kms,jms,im), &
moist(ims,kms,jms,im), &
moist_tend(ims,kms,jms,im), &
advect_tend,h_tendency,z_tendency,grid%rqvften, &
grid%qv_base, .true., grid%fnm, grid%fnp, &
grid%msfux,grid%msfuy, grid%msfvx, grid%msfvx_inv,&
grid%msfvy, grid%msftx,grid%msfty, &
grid%rdx, grid%rdy, grid%rdn, grid%rdnw, grid%khdif, &
grid%kvdif, grid%xkhh, &
grid%diff_6th_opt, grid%diff_6th_factor, &
config_flags%moist_adv_opt, &
ids, ide, jds, jde, kds, kde, &
ims, ime, jms, jme, kms, kme, &
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start , k_end )
BENCH_END(rk_scalar_tend_tim)
BENCH_START(rlx_bdy_scalar_tim)
! Lateral-boundary tendencies are applied once per timestep (rk_step==1);
! for specified (non-nested) boundaries only qv is relaxed.
IF( ( config_flags%specified .or. config_flags%nested ) .and. rk_step == 1 ) THEN
IF ( im .EQ. P_QV .OR. config_flags%nested ) THEN
CALL relax_bdy_scalar ( moist_tend(ims,kms,jms,im), &
moist(ims,kms,jms,im), grid%mut, &
moist_bxs(jms,kms,1,im),moist_bxe(jms,kms,1,im), &
moist_bys(ims,kms,1,im),moist_bye(ims,kms,1,im), &
moist_btxs(jms,kms,1,im),moist_btxe(jms,kms,1,im), &
moist_btys(ims,kms,1,im),moist_btye(ims,kms,1,im), &
config_flags%spec_bdy_width, grid%spec_zone, grid%relax_zone, &
grid%dtbc, grid%fcx, grid%gcx, &
config_flags, &
ids,ide, jds,jde, kds,kde, & ! domain dims
ims,ime, jms,jme, kms,kme, & ! memory dims
ips,ipe, jps,jpe, kps,kpe, & ! patch dims
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start, k_end )
CALL spec_bdy_scalar ( moist_tend(ims,kms,jms,im), &
moist_bxs(jms,kms,1,im),moist_bxe(jms,kms,1,im), &
moist_bys(ims,kms,1,im),moist_bye(ims,kms,1,im), &
moist_btxs(jms,kms,1,im),moist_btxe(jms,kms,1,im), &
moist_btys(ims,kms,1,im),moist_btye(ims,kms,1,im), &
config_flags%spec_bdy_width, grid%spec_zone, &
config_flags, &
ids,ide, jds,jde, kds,kde, & ! domain dims
ims,ime, jms,jme, kms,kme, & ! memory dims
ips,ipe, jps,jpe, kps,kpe, & ! patch dims
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start, k_end )
ENDIF
ENDIF
BENCH_END(rlx_bdy_scalar_tim)
ENDDO moist_tile_loop_1
!$OMP END PARALLEL DO
!$OMP PARALLEL DO &
!$OMP PRIVATE ( ij )
moist_tile_loop_2: DO ij = 1 , grid%num_tiles
CALL wrf_debug ( 200 , ' call rk_update_scalar' )
tenddec = .false.
BENCH_START(update_scal_tim)
CALL rk_update_scalar( scs=im, sce=im, &
scalar_1=moist_old(ims,kms,jms,im), &
scalar_2=moist(ims,kms,jms,im), &
sc_tend=moist_tend(ims,kms,jms,im), &
advect_tend=advect_tend, &
h_tendency=h_tendency, z_tendency=z_tendency, &
msftx=grid%msftx,msfty=grid%msfty, &
mu_old=grid%mu_1, mu_new=grid%mu_2, mu_base=grid%mub, &
rk_step=rk_step, dt=dt_rk, spec_zone=grid%spec_zone, &
config_flags=config_flags, tenddec=tenddec, &
ids=ids, ide=ide, jds=jds, jde=jde, kds=kds, kde=kde, &
ims=ims, ime=ime, jms=jms, jme=jme, kms=kms, kme=kme, &
its=grid%i_start(ij), ite=grid%i_end(ij), &
jts=grid%j_start(ij), jte=grid%j_end(ij), &
kts=k_start , kte=k_end )
BENCH_END(update_scal_tim)
BENCH_START(flow_depbdy_tim)
! For specified boundaries, non-qv species get flow-dependent (inflow/
! outflow) boundary values in the spec zone.
IF( config_flags%specified ) THEN
IF(im .ne. P_QV)THEN
CALL flow_dep_bdy ( moist(ims,kms,jms,im), &
grid%ru_m, grid%rv_m, config_flags, &
grid%spec_zone, &
ids,ide, jds,jde, kds,kde, &
ims,ime, jms,jme, kms,kme, &
ips,ipe, jps,jpe, kps,kpe, &
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start, k_end )
ENDIF
ENDIF
BENCH_END(flow_depbdy_tim)
ENDDO moist_tile_loop_2
!$OMP END PARALLEL DO
ENDIF !-- if (grid%adv_moist_cond .or. im==p_qv ) then
ENDDO moist_variable_loop
ENDIF moist_scalar_advance
! Advance TKE for this RK substep (km_opt == 2 only): halo exchange sized
! by the momentum advection order, tendency (tke_tile_loop_1), update plus
! clipping to [0, tke_upper_bound] and flow-dependent boundaries
! (tke_tile_loop_2).
BENCH_START(tke_adv_tim)
TKE_advance: IF (config_flags%km_opt .eq. 2) then
#ifdef DM_PARALLEL
IF ( config_flags%h_mom_adv_order <= 4 ) THEN
# include "HALO_EM_TKE_ADVECT_3.inc"
ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
# include "HALO_EM_TKE_ADVECT_5.inc"
ELSE
WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
CALL wrf_error_fatal(TRIM(wrf_err_message))
ENDIF
#endif
!$OMP PARALLEL DO &
!$OMP PRIVATE ( ij )
tke_tile_loop_1: DO ij = 1 , grid%num_tiles
CALL wrf_debug ( 200 , ' call rk_scalar_tend for tke' )
tenddec = .false.
CALL rk_scalar_tend ( 1, 1, config_flags, tenddec, &
rk_step, dt_rk, &
grid%ru_m, grid%rv_m, grid%ww_m, &
grid%muts, grid%mub, grid%mu_1, &
grid%alt, &
grid%tke_1, &
grid%tke_2, &
tke_tend(ims,kms,jms), &
advect_tend,h_tendency,z_tendency,grid%rqvften, &
grid%qv_base, .false., grid%fnm, grid%fnp, &
grid%msfux,grid%msfuy, grid%msfvx, grid%msfvx_inv, &
grid%msfvy, grid%msftx,grid%msfty, &
grid%rdx, grid%rdy, grid%rdn, grid%rdnw, grid%khdif, &
grid%kvdif, grid%xkhh, &
grid%diff_6th_opt, grid%diff_6th_factor, &
config_flags%tke_adv_opt, &
ids, ide, jds, jde, kds, kde, &
ims, ime, jms, jme, kms, kme, &
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start , k_end )
ENDDO tke_tile_loop_1
!$OMP END PARALLEL DO
!$OMP PARALLEL DO &
!$OMP PRIVATE ( ij )
tke_tile_loop_2: DO ij = 1 , grid%num_tiles
CALL wrf_debug ( 200 , ' call rk_update_scalar' )
tenddec = .false.
CALL rk_update_scalar( scs=1, sce=1, &
scalar_1=grid%tke_1, &
scalar_2=grid%tke_2, &
sc_tend=tke_tend(ims,kms,jms), &
advect_tend=advect_tend, &
h_tendency=h_tendency, z_tendency=z_tendency, &
msftx=grid%msftx,msfty=grid%msfty, &
mu_old=grid%mu_1, mu_new=grid%mu_2, mu_base=grid%mub, &
rk_step=rk_step, dt=dt_rk, spec_zone=grid%spec_zone, &
config_flags=config_flags, tenddec=tenddec, &
ids=ids, ide=ide, jds=jds, jde=jde, kds=kds, kde=kde, &
ims=ims, ime=ime, jms=jms, jme=jme, kms=kms, kme=kme, &
its=grid%i_start(ij), ite=grid%i_end(ij), &
jts=grid%j_start(ij), jte=grid%j_end(ij), &
kts=k_start , kte=k_end )
! bound the tke (greater than 0, less than tke_upper_bound)
CALL bound_tke( grid%tke_2, grid%tke_upper_bound, &
ids, ide, jds, jde, kds, kde, &
ims, ime, jms, jme, kms, kme, &
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start , k_end )
IF( config_flags%specified .or. config_flags%nested ) THEN
CALL flow_dep_bdy ( grid%tke_2, &
grid%ru_m, grid%rv_m, config_flags, &
grid%spec_zone, &
ids,ide, jds,jde, kds,kde, & ! domain dims
ims,ime, jms,jme, kms,kme, & ! memory dims
ips,ipe, jps,jpe, kps,kpe, & ! patch dims
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start, k_end )
ENDIF
ENDDO tke_tile_loop_2
!$OMP END PARALLEL DO
ENDIF TKE_advance
BENCH_END(tke_adv_tim)
#ifdef WRF_CHEM
! next the chemical species: tendency + nested-boundary relaxation
! (chem_tile_loop_1), then update + specified-boundary overwrite via
! flow_dep_bdy_chem (chem_tile_loop_2). tenddec enables per-species
! advection-tendency diagnostics when chemdiag == USECHEMDIAG.
BENCH_START(chem_adv_tim)
chem_scalar_advance: IF (num_3d_c >= PARAM_FIRST_SCALAR) THEN
chem_variable_loop: DO ic = PARAM_FIRST_SCALAR, num_3d_c
!$OMP PARALLEL DO &
!$OMP PRIVATE ( ij )
chem_tile_loop_1: DO ij = 1 , grid%num_tiles
CALL wrf_debug ( 200 , ' call rk_scalar_tend in chem_tile_loop_1' )
tenddec = (( config_flags%chemdiag == USECHEMDIAG ) .and. &
( adv_ct_indices(ic) >= PARAM_FIRST_SCALAR ))
CALL rk_scalar_tend ( ic, ic, config_flags, tenddec, &
rk_step, dt_rk, &
grid%ru_m, grid%rv_m, grid%ww_m, &
grid%muts, grid%mub, grid%mu_1, &
grid%alt, &
chem_old(ims,kms,jms,ic), &
chem(ims,kms,jms,ic), &
chem_tend(ims,kms,jms,ic), &
advect_tend,h_tendency,z_tendency,grid%rqvften, &
grid%qv_base, .false., grid%fnm, grid%fnp, &
grid%msfux,grid%msfuy, grid%msfvx, grid%msfvx_inv, &
grid%msfvy, grid%msftx,grid%msfty, &
grid%rdx, grid%rdy, grid%rdn, grid%rdnw, &
grid%khdif, grid%kvdif, grid%xkhh, &
grid%diff_6th_opt, grid%diff_6th_factor, &
config_flags%chem_adv_opt, &
ids, ide, jds, jde, kds, kde, &
ims, ime, jms, jme, kms, kme, &
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start , k_end )
!
! Currently, chemistry species with specified boundaries (i.e. the mother
! domain) are being over written by flow_dep_bdy_chem. So, relax_bdy and
! spec_bdy are only called for nests. For boundary conditions from global model or larger domain,
! chem is uncoupled, and only used for one row/column on inflow (if have_bcs_chem=.true.)
!
IF( ( config_flags%nested ) .and. rk_step == 1 ) THEN
IF(ic.eq.1)CALL wrf_debug ( 10 , ' have_bcs_chem' )
CALL relax_bdy_scalar ( chem_tend(ims,kms,jms,ic), &
chem(ims,kms,jms,ic), grid%mut, &
chem_bxs(jms,kms,1,ic),chem_bxe(jms,kms,1,ic), &
chem_bys(ims,kms,1,ic),chem_bye(ims,kms,1,ic), &
chem_btxs(jms,kms,1,ic),chem_btxe(jms,kms,1,ic), &
chem_btys(ims,kms,1,ic),chem_btye(ims,kms,1,ic), &
config_flags%spec_bdy_width, grid%spec_zone, grid%relax_zone, &
grid%dtbc, grid%fcx, grid%gcx, &
config_flags, &
ids,ide, jds,jde, kds,kde, &
ims,ime, jms,jme, kms,kme, &
ips,ipe, jps,jpe, kps,kpe, &
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start, k_end )
CALL spec_bdy_scalar ( chem_tend(ims,kms,jms,ic), &
chem_bxs(jms,kms,1,ic),chem_bxe(jms,kms,1,ic), &
chem_bys(ims,kms,1,ic),chem_bye(ims,kms,1,ic), &
chem_btxs(jms,kms,1,ic),chem_btxe(jms,kms,1,ic), &
chem_btys(ims,kms,1,ic),chem_btye(ims,kms,1,ic), &
config_flags%spec_bdy_width, grid%spec_zone, &
config_flags, &
ids,ide, jds,jde, kds,kde, &
ims,ime, jms,jme, kms,kme, &
ips,ipe, jps,jpe, kps,kpe, &
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start, k_end )
ENDIF
ENDDO chem_tile_loop_1
!$OMP END PARALLEL DO
!$OMP PARALLEL DO &
!$OMP PRIVATE ( ij )
chem_tile_loop_2: DO ij = 1 , grid%num_tiles
CALL wrf_debug ( 200 , ' call rk_update_scalar' )
tenddec = (( config_flags%chemdiag == USECHEMDIAG ) .and. &
( adv_ct_indices(ic) >= PARAM_FIRST_SCALAR ))
CALL rk_update_scalar( scs=ic, sce=ic, &
scalar_1=chem_old(ims,kms,jms,ic), &
scalar_2=chem(ims,kms,jms,ic), &
sc_tend=chem_tend(ims,kms,jms,ic), &
advh_t=advh_ct(ims,kms,jms,adv_ct_indices(ic)), &
advz_t=advz_ct(ims,kms,jms,adv_ct_indices(ic)), &
advect_tend=advect_tend, &
h_tendency=h_tendency, z_tendency=z_tendency, &
msftx=grid%msftx,msfty=grid%msfty, &
mu_old=grid%mu_1, mu_new=grid%mu_2, mu_base=grid%mub, &
rk_step=rk_step, dt=dt_rk, spec_zone=grid%spec_zone, &
config_flags=config_flags, tenddec=tenddec, &
ids=ids, ide=ide, jds=jds, jde=jde, kds=kds, kde=kde, &
ims=ims, ime=ime, jms=jms, jme=jme, kms=kms, kme=kme, &
its=grid%i_start(ij), ite=grid%i_end(ij), &
jts=grid%j_start(ij), jte=grid%j_end(ij), &
kts=k_start , kte=k_end )
IF( config_flags%specified ) THEN
CALL flow_dep_bdy_chem( chem(ims,kms,jms,ic), &
chem_bxs(jms,kms,1,ic), chem_btxs(jms,kms,1,ic), &
chem_bxe(jms,kms,1,ic), chem_btxe(jms,kms,1,ic), &
chem_bys(ims,kms,1,ic), chem_btys(ims,kms,1,ic), &
chem_bye(ims,kms,1,ic), chem_btye(ims,kms,1,ic), &
dt_rk+grid%dtbc, &
config_flags%spec_bdy_width,grid%z, &
grid%have_bcs_chem, &
grid%ru_m, grid%rv_m, config_flags,grid%alt, &
grid%t_1,grid%pb,grid%p,t0,p1000mb,rcp,grid%ph_2,grid%phb,g, &
grid%spec_zone,ic, &
ids,ide, jds,jde, kds,kde, & ! domain dims
ims,ime, jms,jme, kms,kme, & ! memory dims
ips,ipe, jps,jpe, kps,kpe, & ! patch dims
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start, k_end )
ENDIF
ENDDO chem_tile_loop_2
!$OMP END PARALLEL DO
ENDDO chem_variable_loop
ENDIF chem_scalar_advance
BENCH_END(chem_adv_tim)
#endif
! next the tracer species (structure mirrors the chem advance above):
! tendency + nested-boundary relaxation (tracer_tile_loop_1), then update
! + specified-boundary treatment (tracer_tile_loop_2).
BENCH_START(tracer_adv_tim)
tracer_advance: IF (num_tracer >= PARAM_FIRST_SCALAR) THEN
tracer_variable_loop: DO ic = PARAM_FIRST_SCALAR, num_tracer
!$OMP PARALLEL DO &
!$OMP PRIVATE ( ij )
tracer_tile_loop_1: DO ij = 1 , grid%num_tiles
CALL wrf_debug ( 15 , ' call rk_scalar_tend in tracer_tile_loop_1' )
tenddec = .false.
CALL rk_scalar_tend ( ic, ic, config_flags, tenddec, &
rk_step, dt_rk, &
grid%ru_m, grid%rv_m, grid%ww_m, &
grid%muts, grid%mub, grid%mu_1, &
grid%alt, &
tracer_old(ims,kms,jms,ic), &
tracer(ims,kms,jms,ic), &
tracer_tend(ims,kms,jms,ic), &
advect_tend,h_tendency,z_tendency,grid%rqvften, &
grid%qv_base, .false., grid%fnm, grid%fnp, &
grid%msfux,grid%msfuy, grid%msfvx, grid%msfvx_inv, &
grid%msfvy, grid%msftx,grid%msfty, &
grid%rdx, grid%rdy, grid%rdn, grid%rdnw, &
grid%khdif, grid%kvdif, grid%xkhh, &
grid%diff_6th_opt, grid%diff_6th_factor, &
config_flags%tracer_adv_opt, &
ids, ide, jds, jde, kds, kde, &
ims, ime, jms, jme, kms, kme, &
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start , k_end )
!
! As with chem, tracer species with specified boundaries (i.e. the mother
! domain) are over written by the flow-dependent boundary call below, so
! relax_bdy and spec_bdy are only called for nests.
!
IF( ( config_flags%nested ) .and. rk_step == 1 ) THEN
IF(ic.eq.1)CALL wrf_debug ( 10 , ' have_bcs_tracer' )
CALL relax_bdy_scalar ( tracer_tend(ims,kms,jms,ic), &
tracer(ims,kms,jms,ic), grid%mut, &
tracer_bxs(jms,kms,1,ic),tracer_bxe(jms,kms,1,ic), &
tracer_bys(ims,kms,1,ic),tracer_bye(ims,kms,1,ic), &
tracer_btxs(jms,kms,1,ic),tracer_btxe(jms,kms,1,ic), &
tracer_btys(ims,kms,1,ic),tracer_btye(ims,kms,1,ic), &
config_flags%spec_bdy_width, grid%spec_zone, grid%relax_zone, &
grid%dtbc, grid%fcx, grid%gcx, &
config_flags, &
ids,ide, jds,jde, kds,kde, &
ims,ime, jms,jme, kms,kme, &
ips,ipe, jps,jpe, kps,kpe, &
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start, k_end )
CALL spec_bdy_scalar ( tracer_tend(ims,kms,jms,ic), &
tracer_bxs(jms,kms,1,ic),tracer_bxe(jms,kms,1,ic), &
tracer_bys(ims,kms,1,ic),tracer_bye(ims,kms,1,ic), &
tracer_btxs(jms,kms,1,ic),tracer_btxe(jms,kms,1,ic), &
tracer_btys(ims,kms,1,ic),tracer_btye(ims,kms,1,ic), &
config_flags%spec_bdy_width, grid%spec_zone, &
config_flags, &
ids,ide, jds,jde, kds,kde, &
ims,ime, jms,jme, kms,kme, &
ips,ipe, jps,jpe, kps,kpe, &
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start, k_end )
ENDIF
ENDDO tracer_tile_loop_1
!$OMP END PARALLEL DO
!$OMP PARALLEL DO &
!$OMP PRIVATE ( ij )
tracer_tile_loop_2: DO ij = 1 , grid%num_tiles
CALL wrf_debug ( 200 , ' call rk_update_scalar' )
tenddec = .false.
CALL rk_update_scalar( scs=ic, sce=ic, &
scalar_1=tracer_old(ims,kms,jms,ic), &
scalar_2=tracer(ims,kms,jms,ic), &
sc_tend=tracer_tend(ims,kms,jms,ic), &
! advh_t=advh_t(ims,kms,jms,1), &
! advz_t=advz_t(ims,kms,jms,1), &
advect_tend=advect_tend, &
h_tendency=h_tendency, z_tendency=z_tendency, &
msftx=grid%msftx,msfty=grid%msfty, &
mu_old=grid%mu_1, mu_new=grid%mu_2, mu_base=grid%mub, &
rk_step=rk_step, dt=dt_rk, spec_zone=grid%spec_zone, &
config_flags=config_flags, tenddec=tenddec, &
ids=ids, ide=ide, jds=jds, jde=jde, kds=kds, kde=kde, &
ims=ims, ime=ime, jms=jms, jme=jme, kms=kms, kme=kme, &
its=grid%i_start(ij), ite=grid%i_end(ij), &
jts=grid%j_start(ij), jte=grid%j_end(ij), &
kts=k_start , kte=k_end )
! Chem builds use the chem-aware flow-dependent boundary routine;
! otherwise the generic one is used.
IF( config_flags%specified ) THEN
#ifdef WRF_CHEM
CALL flow_dep_bdy_tracer( tracer(ims,kms,jms,ic), &
tracer_bxs(jms,kms,1,ic), tracer_btxs(jms,kms,1,ic), &
tracer_bxe(jms,kms,1,ic), tracer_btxe(jms,kms,1,ic), &
tracer_bys(ims,kms,1,ic), tracer_btys(ims,kms,1,ic), &
tracer_bye(ims,kms,1,ic), tracer_btye(ims,kms,1,ic), &
dt_rk+grid%dtbc, &
config_flags%spec_bdy_width,grid%z, &
grid%have_bcs_tracer, &
grid%ru_m, grid%rv_m, config_flags%tracer_opt,grid%alt, &
grid%t_1,grid%pb,grid%p,t0,p1000mb,rcp,grid%ph_2,grid%phb,g, &
grid%spec_zone,ic, &
ids,ide, jds,jde, kds,kde, & ! domain dims
ims,ime, jms,jme, kms,kme, & ! memory dims
ips,ipe, jps,jpe, kps,kpe, & ! patch dims
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start, k_end )
#else
CALL flow_dep_bdy ( tracer(ims,kms,jms,ic), &
grid%ru_m, grid%rv_m, config_flags, &
grid%spec_zone, &
ids,ide, jds,jde, kds,kde, & ! domain dims
ims,ime, jms,jme, kms,kme, & ! memory dims
ips,ipe, jps,jpe, kps,kpe, & ! patch dims
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start, k_end )
#endif
ENDIF
ENDDO tracer_tile_loop_2
!$OMP END PARALLEL DO
ENDDO tracer_variable_loop
ENDIF tracer_advance
BENCH_END(tracer_adv_tim)
! next the other scalar species: tendency + nested-boundary relaxation
! (scalar_tile_loop_1), then update + specified-boundary treatment
! (scalar_tile_loop_2; P_QNN gets its own flow-dependent routine).
other_scalar_advance: IF (num_3d_s >= PARAM_FIRST_SCALAR) THEN
scalar_variable_loop: do is = PARAM_FIRST_SCALAR, num_3d_s
!$OMP PARALLEL DO &
!$OMP PRIVATE ( ij )
scalar_tile_loop_1: DO ij = 1 , grid%num_tiles
CALL wrf_debug ( 200 , ' call rk_scalar_tend' )
tenddec = .false.
CALL rk_scalar_tend ( is, is, config_flags, tenddec, &
rk_step, dt_rk, &
grid%ru_m, grid%rv_m, grid%ww_m, &
grid%muts, grid%mub, grid%mu_1, &
grid%alt, &
scalar_old(ims,kms,jms,is), &
scalar(ims,kms,jms,is), &
scalar_tend(ims,kms,jms,is), &
advect_tend,h_tendency,z_tendency,grid%rqvften, &
grid%qv_base, .false., grid%fnm, grid%fnp, &
grid%msfux,grid%msfuy, grid%msfvx, grid%msfvx_inv, &
grid%msfvy, grid%msftx,grid%msfty, &
grid%rdx, grid%rdy, grid%rdn, grid%rdnw, &
grid%khdif, grid%kvdif, grid%xkhh, &
grid%diff_6th_opt, grid%diff_6th_factor, &
config_flags%scalar_adv_opt, &
ids, ide, jds, jde, kds, kde, &
ims, ime, jms, jme, kms, kme, &
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start , k_end )
! Boundary tendencies only for nests, once per timestep (rk_step==1).
IF( config_flags%nested .and. (rk_step == 1) ) THEN
CALL relax_bdy_scalar ( scalar_tend(ims,kms,jms,is), &
scalar(ims,kms,jms,is), grid%mut, &
scalar_bxs(jms,kms,1,is),scalar_bxe(jms,kms,1,is), &
scalar_bys(ims,kms,1,is),scalar_bye(ims,kms,1,is), &
scalar_btxs(jms,kms,1,is),scalar_btxe(jms,kms,1,is), &
scalar_btys(ims,kms,1,is),scalar_btye(ims,kms,1,is), &
config_flags%spec_bdy_width, grid%spec_zone, grid%relax_zone, &
grid%dtbc, grid%fcx, grid%gcx, &
config_flags, &
ids,ide, jds,jde, kds,kde, &
ims,ime, jms,jme, kms,kme, &
ips,ipe, jps,jpe, kps,kpe, &
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start, k_end )
CALL spec_bdy_scalar ( scalar_tend(ims,kms,jms,is), &
scalar_bxs(jms,kms,1,is),scalar_bxe(jms,kms,1,is), &
scalar_bys(ims,kms,1,is),scalar_bye(ims,kms,1,is), &
scalar_btxs(jms,kms,1,is),scalar_btxe(jms,kms,1,is), &
scalar_btys(ims,kms,1,is),scalar_btye(ims,kms,1,is), &
config_flags%spec_bdy_width, grid%spec_zone, &
config_flags, &
ids,ide, jds,jde, kds,kde, &
ims,ime, jms,jme, kms,kme, &
ips,ipe, jps,jpe, kps,kpe, &
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start, k_end )
ENDIF ! b.c test for chem nested boundary condition
ENDDO scalar_tile_loop_1
!$OMP END PARALLEL DO
!$OMP PARALLEL DO &
!$OMP PRIVATE ( ij )
scalar_tile_loop_2: DO ij = 1 , grid%num_tiles
CALL wrf_debug ( 200 , ' call rk_update_scalar' )
tenddec = .false.
CALL rk_update_scalar( scs=is, sce=is, &
scalar_1=scalar_old(ims,kms,jms,is), &
scalar_2=scalar(ims,kms,jms,is), &
sc_tend=scalar_tend(ims,kms,jms,is), &
! advh_t=advh_t(ims,kms,jms,1), &
! advz_t=advz_t(ims,kms,jms,1), &
advect_tend=advect_tend, &
h_tendency=h_tendency, z_tendency=z_tendency, &
msftx=grid%msftx,msfty=grid%msfty, &
mu_old=grid%mu_1, mu_new=grid%mu_2, mu_base=grid%mub, &
rk_step=rk_step, dt=dt_rk, spec_zone=grid%spec_zone, &
config_flags=config_flags, tenddec=tenddec, &
ids=ids, ide=ide, jds=jds, jde=jde, kds=kds, kde=kde, &
ims=ims, ime=ime, jms=jms, jme=jme, kms=kms, kme=kme, &
its=grid%i_start(ij), ite=grid%i_end(ij), &
jts=grid%j_start(ij), jte=grid%j_end(ij), &
kts=k_start , kte=k_end )
IF( config_flags%specified ) THEN
IF(is .ne. P_QNN)THEN
CALL flow_dep_bdy ( scalar(ims,kms,jms,is), &
grid%ru_m, grid%rv_m, config_flags, &
grid%spec_zone, &
ids,ide, jds,jde, kds,kde, & ! domain dims
ims,ime, jms,jme, kms,kme, & ! memory dims
ips,ipe, jps,jpe, kps,kpe, & ! patch dims
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start, k_end )
ELSE
CALL flow_dep_bdy_qnn ( scalar(ims,kms,jms,is), &
grid%ru_m, grid%rv_m, config_flags, &
grid%spec_zone, &
ids,ide, jds,jde, kds,kde, & ! domain dims
ims,ime, jms,jme, kms,kme, & ! memory dims
ips,ipe, jps,jpe, kps,kpe, & ! patch dims
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start, k_end )
ENDIF
ENDIF
ENDDO scalar_tile_loop_2
!$OMP END PARALLEL DO
ENDDO scalar_variable_loop
ENDIF other_scalar_advance
! update the pressure and density at the new time level
! (diagnostic recomputation from the just-advanced moisture, mass and
! geopotential fields; tile-parallel).
!$OMP PARALLEL DO &
!$OMP PRIVATE ( ij )
DO ij = 1 , grid%num_tiles
BENCH_START(calc_p_rho_tim)
CALL calc_p_rho_phi( moist, num_3d_m, config_flags%hypsometric_opt, &
grid%al, grid%alb, grid%mu_2, grid%muts, &
grid%ph_2, grid%phb, grid%p, grid%pb, grid%t_2, &
p0, t0, grid%p_top, grid%znu, grid%znw, grid%dnw, grid%rdnw, &
grid%rdn, config_flags%non_hydrostatic, &
ids, ide, jds, jde, kds, kde, &
ims, ime, jms, jme, kms, kme, &
grid%i_start(ij), grid%i_end(ij), &
grid%j_start(ij), grid%j_end(ij), &
k_start , k_end )
BENCH_END(calc_p_rho_tim)
ENDDO
!$OMP END PARALLEL DO
  2286. ! Reset the boundary conditions if there is another corrector step.
  2287. ! (rk_step < rk_order), else we'll handle it at the end of everything
  2288. ! (after the split physics, before exiting the timestep).
  2289. rk_step_1_check: IF ( rk_step < rk_order ) THEN
  2290. !-----------------------------------------------------------
  2291. ! rk3 substep polar filter for scalars (moist,chem,scalar)
  2292. !-----------------------------------------------------------
  2293. IF (config_flags%polar) THEN
  2294. IF ( num_3d_m >= PARAM_FIRST_SCALAR ) THEN
  2295. CALL wrf_debug ( 200 , ' call filter moist ' )
  2296. DO im = PARAM_FIRST_SCALAR, num_3d_m
  2297. CALL couple_scalars_for_filter ( FIELD=moist(ims,kms,jms,im) &
  2298. ,MU=grid%mu_2 , MUB=grid%mub &
  2299. ,ids=ids,ide=ide,jds=jds,jde=jde,kds=kds,kde=kde &
  2300. ,ims=ims,ime=ime,jms=jms,jme=jme,kms=kms,kme=kme &
  2301. ,ips=ips,ipe=ipe,jps=jps,jpe=jpe,kps=kps,kpe=kpe )
  2302. CALL pxft ( grid=grid &
  2303. ,lineno=__LINE__ &
  2304. ,flag_uv = 0 &
  2305. ,flag_rurv = 0 &
  2306. ,flag_wph = 0 &
  2307. ,flag_ww = 0 &
  2308. ,flag_t = 0 &
  2309. ,flag_mu = 0 &
  2310. ,flag_mut = 0 &
  2311. ,flag_moist = im &
  2312. ,flag_chem = 0 &
  2313. ,flag_scalar = 0 &
  2314. ,flag_tracer = 0 &
  2315. ,positive_definite=.FALSE. &
  2316. ,moist=moist,chem=chem,tracer=tracer,scalar=scalar &
  2317. ,fft_filter_lat = config_flags%fft_filter_lat &
  2318. ,dclat = dclat &
  2319. ,ids=ids,ide=ide,jds=jds,jde=jde,kds=kds,kde=kde &
  2320. ,ims=ims,ime=ime,jms=jms,jme=jme,kms=kms,kme=kme &
  2321. ,ips=ips,ipe=ipe,jps=jps,jpe=jpe,kps=kps,kpe=kpe &
  2322. ,imsx=imsx,imex=imex,jmsx=jmsx,jmex=jmex,kmsx=kmsx,kmex=kmex &
  2323. ,ipsx=ipsx,ipex=ipex,jpsx=jmsx,jpex=jpex,kpsx=kpsx,kpex=kpex )
  2324. CALL uncouple_scalars_for_filter ( FIELD=moist(ims,kms,jms,im) &
  2325. ,MU=grid%mu_2 , MUB=grid%mub &
  2326. ,ids=ids,ide=ide,jds=jds,jde=jde,kds=kds,kde=kde &
  2327. ,ims=ims,ime=ime,jms=jms,jme=jme,kms=kms,kme=kme &
  2328. ,ips=ips,ipe=ipe,jps=jps,jpe=jpe,kps=kps,kpe=kpe )
  2329. END DO
  2330. END IF
  2331. IF ( num_3d_c >= PARAM_FIRST_SCALAR ) THEN
  2332. CALL wrf_debug ( 200 , ' call filter chem ' )
  2333. DO im = PARAM_FIRST_SCALAR, num_3d_c
  2334. CALL couple_scalars_for_filter ( FIELD=chem(ims,kms,jms,im) &
  2335. ,MU=grid%mu_2 , MUB=grid%mub &
  2336. ,ids=ids,ide=ide,jds=jds,jde=jde,kds=kds,kde=kde &
  2337. ,ims=ims,ime=ime,jms=jms,jme=jme,kms=kms,kme=kme &
  2338. ,ips=ips,ipe=ipe,jps=jps,jpe=jpe,kps=kps,kpe=kpe )
  2339. CALL pxft ( grid=grid &
  2340. ,lineno=__LINE__ &
  2341. ,flag_uv = 0 &
  2342. ,flag_rurv = 0 &
  2343. ,flag_wph = 0 &
  2344. ,flag_ww = 0 &
  2345. ,flag_t = 0 &
  2346. ,flag_mu = 0 &
  2347. ,flag_mut = 0 &
  2348. ,flag_moist = 0 &
  2349. ,flag_chem = im &
  2350. ,flag_tracer = 0 &
  2351. ,flag_scalar = 0 &
  2352. ,positive_definite=.FALSE. &
  2353. ,moist=moist,chem=chem,tracer=tracer,scalar=scalar &
  2354. ,fft_filter_lat = config_flags%fft_filter_lat &
  2355. ,dclat = dclat &
  2356. ,ids=ids,ide=ide,jds=jds,jde=jde,kds=kds,kde=kde &
  2357. ,ims=ims,ime=ime,jms=jms,jme=jme,kms=kms,kme=kme &
  2358. ,ips=ips,ipe=ipe,jps=jps,jpe=jpe,kps=kps,kpe=kpe &
  2359. ,imsx=imsx,imex=imex,jmsx=jmsx,jmex=jmex,kmsx=kmsx,kmex=kmex &
  2360. ,ipsx=ipsx,ipex=ipex,jpsx=jmsx,jpex=jpex,kpsx=kpsx,kpex=kpex )
  2361. CALL uncouple_scalars_for_filter ( FIELD=chem(ims,kms,jms,im) &
  2362. ,MU=grid%mu_2 , MUB=grid%mub &
  2363. ,ids=ids,ide=ide,jds=jds,jde=jde,kds=kds,kde=kde &
  2364. ,ims=ims,ime=ime,jms=jms,jme=jme,kms=kms,kme=kme &
  2365. ,ips=ips,ipe=ipe,jps=jps,jpe=jpe,kps=kps,kpe=kpe )
  2366. END DO
  2367. END IF
  2368. IF ( num_tracer >= PARAM_FIRST_SCALAR ) THEN
  2369. CALL wrf_debug ( 200 , ' call filter tracer ' )
  2370. DO im = PARAM_FIRST_SCALAR, num_tracer
  2371. CALL couple_scalars_for_filter ( FIELD=tracer(ims,kms,jms,im) &
  2372. ,MU=grid%mu_2 , MUB=grid%mub &
  2373. ,ids=ids,ide=ide,jds=jds,jde=jde,kds=kds,kde=kde &
  2374. ,ims=ims,ime=ime,jms=jms,jme=jme,kms=kms,kme=kme &
  2375. ,ips=ips,ipe=ipe,jps=jps,jpe=jpe,kps=kps,kpe=kpe )
  2376. CALL pxft ( grid=grid &
  2377. ,lineno=__LINE__ &
  2378. ,flag_uv = 0 &
  2379. ,flag_rurv = 0 &
  2380. ,flag_wph = 0 &
  2381. ,flag_ww = 0 &
  2382. ,flag_t = 0 &
  2383. ,flag_mu = 0 &
  2384. ,flag_mut = 0 &
  2385. ,flag_moist = 0 &
  2386. ,flag_chem = 0 &
  2387. ,flag_tracer = im &
  2388. ,flag_scalar = 0 &
  2389. ,positive_definite=.FALSE. &
  2390. ,moist=moist,chem=chem,tracer=tracer,scalar=scalar &
  2391. ,fft_filter_lat = config_flags%fft_filter_lat &
  2392. ,dclat = dclat &
  2393. ,ids=ids,ide=ide,jds=jds,jde=jde,kds=kds,kde=kde &
  2394. ,ims=ims,ime=ime,jms=jms,jme=jme,kms=kms,kme=kme &
  2395. ,ips=ips,ipe=ipe,jps=jps,jpe=jpe,kps=kps,kpe=kpe &
  2396. ,imsx=imsx,imex=imex,jmsx=jmsx,jmex=jmex,kmsx=kmsx,kmex=kmex &
  2397. ,ipsx=ipsx,ipex=ipex,jpsx=jmsx,jpex=jpex,kpsx=kpsx,kpex=kpex )
  2398. CALL uncouple_scalars_for_filter ( FIELD=tracer(ims,kms,jms,im) &
  2399. ,MU=grid%mu_2 , MUB=grid%mub &
  2400. ,ids=ids,ide=ide,jds=jds,jde=jde,kds=kds,kde=kde &
  2401. ,ims=ims,ime=ime,jms=jms,jme=jme,kms=kms,kme=kme &
  2402. ,ips=ips,ipe=ipe,jps=jps,jpe=jpe,kps=kps,kpe=kpe )
  2403. END DO
  2404. END IF
  2405. IF ( num_3d_s >= PARAM_FIRST_SCALAR ) THEN
  2406. CALL wrf_debug ( 200 , ' call filter scalar ' )
  2407. DO im = PARAM_FIRST_SCALAR, num_3d_s
  2408. CALL couple_scalars_for_filter ( FIELD=scalar(ims,kms,jms,im) &
  2409. ,MU=grid%mu_2 , MUB=grid%mub &
  2410. ,ids=ids,ide=ide,jds=jds,jde=jde,kds=kds,kde=kde &
  2411. ,ims=ims,ime=ime,jms=jms,jme=jme,kms=kms,kme=kme &
  2412. ,ips=ips,ipe=ipe,jps=jps,jpe=jpe,kps=kps,kpe=kpe )
  2413. CALL pxft ( grid=grid &
  2414. ,lineno=__LINE__ &
  2415. ,flag_uv = 0 &
  2416. ,flag_rurv = 0 &
  2417. ,flag_wph = 0 &
  2418. ,flag_ww = 0 &
  2419. ,flag_t = 0 &
  2420. ,flag_mu = 0 &
  2421. ,flag_mut = 0 &
  2422. ,flag_moist = 0 &
  2423. ,flag_chem = 0 &
  2424. ,flag_tracer = 0 &
  2425. ,flag_scalar = im &
  2426. ,positive_definite=.FALSE. &
  2427. ,moist=moist,chem=chem,tracer=tracer,scalar=scalar &
  2428. ,fft_filter_lat = config_flags%fft_filter_lat &
  2429. ,dclat = dclat &
  2430. ,ids=ids,ide=ide,jds=jds,jde=jde,kds=kds,kde=kde &
  2431. ,ims=ims,ime=ime,jms=jms,jme=jme,kms=kms,kme=kme &
  2432. ,ips=ips,ipe=ipe,jps=jps,jpe=jpe,kps=kps,kpe=kpe &
  2433. ,imsx=imsx,imex=imex,jmsx=jmsx,jmex=jmex,kmsx=kmsx,kmex=kmex &
  2434. ,ipsx=ipsx,ipex=ipex,jpsx=jmsx,jpex=jpex,kpsx=kpsx,kpex=kpex )
  2435. CALL uncouple_scalars_for_filter ( FIELD=scalar(ims,kms,jms,im) &
  2436. ,MU=grid%mu_2 , MUB=grid%mub &
  2437. ,ids=ids,ide=ide,jds=jds,jde=jde,kds=kds,kde=kde &
  2438. ,ims=ims,ime=ime,jms=jms,jme=jme,kms=kms,kme=kme &
  2439. ,ips=ips,ipe=ipe,jps=jps,jpe=jpe,kps=kps,kpe=kpe )
  2440. END DO
  2441. END IF
  2442. END IF ! polar filter test
  2443. !-----------------------------------------------------------
  2444. ! END rk3 substep polar filter for scalars (moist,chem,scalar)
  2445. !-----------------------------------------------------------
  2446. !-----------------------------------------------------------
  2447. ! Stencils for patch communications (WCS, 29 June 2001)
  2448. !
  2449. ! here's where we need a wide comm stencil - these are the
  2450. ! uncoupled variables so are used for high order calc in
! advection and mixing routines.
  2452. !
  2453. !
  2454. ! * * * * * * *
  2455. ! * * * * * * * * * * * *
  2456. ! * * * * * * * * * * * * *
  2457. ! * + * * * + * * * * * + * * *
  2458. ! * * * * * * * * * * * * *
  2459. ! * * * * * * * * * * * *
  2460. ! * * * * * * *
  2461. !
  2462. ! al x
  2463. !
  2464. ! 2D variable
  2465. ! mu_2 x
  2466. !
  2467. ! (adv order <=4)
  2468. ! u_2 x
  2469. ! v_2 x
  2470. ! w_2 x
  2471. ! t_2 x
  2472. ! ph_2 x
  2473. !
  2474. ! (adv order <=6)
  2475. ! u_2 x
  2476. ! v_2 x
  2477. ! w_2 x
  2478. ! t_2 x
  2479. ! ph_2 x
  2480. !
  2481. ! 4D variable
  2482. ! moist x
  2483. ! chem x
  2484. ! scalar x
  2485. #ifdef DM_PARALLEL
  2486. IF ( config_flags%h_mom_adv_order <= 4 ) THEN
  2487. # include "HALO_EM_D2_3.inc"
  2488. ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
  2489. # include "HALO_EM_D2_5.inc"
  2490. ELSE
  2491. WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
  2492. CALL wrf_error_fatal(TRIM(wrf_err_message))
  2493. ENDIF
  2494. # include "PERIOD_BDY_EM_D.inc"
  2495. # include "PERIOD_BDY_EM_MOIST2.inc"
  2496. # include "PERIOD_BDY_EM_CHEM2.inc"
  2497. # include "PERIOD_BDY_EM_TRACER2.inc"
  2498. # include "PERIOD_BDY_EM_SCALAR2.inc"
  2499. #endif
BENCH_START(bc_end_tim)
! Re-apply physical (non-periodic) boundary conditions to the fields just
! advanced by this RK substep, tile-by-tile.  Only ij is private; tiles are
! disjoint so the shared arrays are written without contention.
!$OMP PARALLEL DO   &
!$OMP PRIVATE ( ij )
tile_bc_loop_1: DO ij = 1 , grid%num_tiles

   CALL wrf_debug ( 200 , ' call rk_phys_bc_dry_2' )

   ! Dry dynamical variables (u, v, w, theta, geopotential, mu).
   CALL rk_phys_bc_dry_2( config_flags,                         &
                          grid%u_2, grid%v_2, grid%w_2,         &
                          grid%t_2, grid%ph_2, grid%mu_2,       &
                          ids, ide, jds, jde, kds, kde,         &
                          ims, ime, jms, jme, kms, kme,         &
                          ips, ipe, jps, jpe, kps, kpe,         &
                          grid%i_start(ij), grid%i_end(ij),     &
                          grid%j_start(ij), grid%j_end(ij),     &
                          k_start    , k_end                   )

   BENCH_START(diag_w_tim)
   ! Hydrostatic option carries no prognostic w; diagnose it from the
   ! updated geopotential tendency and horizontal winds.
   IF (.not. config_flags%non_hydrostatic) THEN
      CALL diagnose_w( ph_tend, grid%ph_2, grid%ph_1, grid%w_2, grid%muts, dt_rk, &
                       grid%u_2, grid%v_2, grid%ht,                               &
                       grid%cf1, grid%cf2, grid%cf3, grid%rdx, grid%rdy, grid%msftx, grid%msfty, &
                       ids, ide, jds, jde, kds, kde,     &
                       ims, ime, jms, jme, kms, kme,     &
                       grid%i_start(ij), grid%i_end(ij), &
                       grid%j_start(ij), grid%j_end(ij), &
                       k_start    , k_end               )
   ENDIF
   BENCH_END(diag_w_tim)

   ! Moist species BCs over the full k range.
   IF (num_3d_m >= PARAM_FIRST_SCALAR) THEN
      moisture_loop_bdy_1 : DO im = PARAM_FIRST_SCALAR , num_3d_m
         CALL set_physical_bc3d( moist(ims,kms,jms,im), 'p', config_flags,   &
                                 ids, ide, jds, jde, kds, kde,               &
                                 ims, ime, jms, jme, kms, kme,               &
                                 ips, ipe, jps, jpe, kps, kpe,               &
                                 grid%i_start(ij), grid%i_end(ij),           &
                                 grid%j_start(ij), grid%j_end(ij),           &
                                 k_start    , k_end                         )
      END DO moisture_loop_bdy_1
   ENDIF

   ! NOTE(review): chem/tracer/scalar below stop at k_end-1 while moist above
   ! uses k_end — presumably intentional (mass levels only) but worth confirming.
   IF (num_3d_c >= PARAM_FIRST_SCALAR) THEN
      chem_species_bdy_loop_1 : DO ic = PARAM_FIRST_SCALAR , num_3d_c
         CALL set_physical_bc3d( chem(ims,kms,jms,ic), 'p', config_flags,   &
                                 ids, ide, jds, jde, kds, kde,              &
                                 ims, ime, jms, jme, kms, kme,              &
                                 ips, ipe, jps, jpe, kps, kpe,              &
                                 grid%i_start(ij), grid%i_end(ij),          &
                                 grid%j_start(ij), grid%j_end(ij),          &
                                 k_start    , k_end-1                      )
      END DO chem_species_bdy_loop_1
   END IF

   IF (num_tracer >= PARAM_FIRST_SCALAR) THEN
      tracer_species_bdy_loop_1 : DO ic = PARAM_FIRST_SCALAR , num_tracer
         CALL set_physical_bc3d( tracer(ims,kms,jms,ic), 'p', config_flags,  &
                                 ids, ide, jds, jde, kds, kde,               &
                                 ims, ime, jms, jme, kms, kme,               &
                                 ips, ipe, jps, jpe, kps, kpe,               &
                                 grid%i_start(ij), grid%i_end(ij),           &
                                 grid%j_start(ij), grid%j_end(ij),           &
                                 k_start    , k_end-1                       )
      END DO tracer_species_bdy_loop_1
   END IF

   IF (num_3d_s >= PARAM_FIRST_SCALAR) THEN
      scalar_species_bdy_loop_1 : DO is = PARAM_FIRST_SCALAR , num_3d_s
         CALL set_physical_bc3d( scalar(ims,kms,jms,is), 'p', config_flags,  &
                                 ids, ide, jds, jde, kds, kde,               &
                                 ims, ime, jms, jme, kms, kme,               &
                                 ips, ipe, jps, jpe, kps, kpe,               &
                                 grid%i_start(ij), grid%i_end(ij),           &
                                 grid%j_start(ij), grid%j_end(ij),           &
                                 k_start    , k_end-1                       )
      END DO scalar_species_bdy_loop_1
   END IF

   ! Prognostic TKE is carried only for km_opt 2 (1.5-order closure).
   IF (config_flags%km_opt .eq. 2) THEN
      CALL set_physical_bc3d( grid%tke_2 , 'p', config_flags,  &
                              ids, ide, jds, jde, kds, kde,    &
                              ims, ime, jms, jme, kms, kme,    &
                              ips, ipe, jps, jpe, kps, kpe,    &
                              grid%i_start(ij), grid%i_end(ij),&
                              grid%j_start(ij), grid%j_end(ij),&
                              k_start    , k_end              )
   END IF

END DO tile_bc_loop_1
!$OMP END PARALLEL DO
BENCH_END(bc_end_tim)
  2582. #ifdef DM_PARALLEL
  2583. ! * * * * *
  2584. ! * * * * * * * * *
  2585. ! * + * * + * * * + * *
  2586. ! * * * * * * * * *
  2587. ! * * * * *
  2588. ! moist, chem, scalar, tke x
  2589. IF ( config_flags%h_mom_adv_order <= 4 ) THEN
  2590. IF ( (config_flags%tke_adv_opt /= ORIGINAL .and. config_flags%tke_adv_opt /= WENO_SCALAR) .and. (rk_step == rk_order-1) ) THEN
  2591. # include "HALO_EM_TKE_5.inc"
  2592. ELSE
  2593. # include "HALO_EM_TKE_3.inc"
  2594. ENDIF
  2595. ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
  2596. IF ( (config_flags%tke_adv_opt /= ORIGINAL .and. config_flags%tke_adv_opt /= WENO_SCALAR) .and. (rk_step == rk_order-1) ) THEN
  2597. # include "HALO_EM_TKE_7.inc"
  2598. ELSE
  2599. # include "HALO_EM_TKE_5.inc"
  2600. ENDIF
  2601. ELSE
  2602. WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
  2603. CALL wrf_error_fatal(TRIM(wrf_err_message))
  2604. ENDIF
  2605. IF ( num_moist .GE. PARAM_FIRST_SCALAR ) THEN
  2606. IF ( config_flags%h_sca_adv_order <= 4 ) THEN
  2607. IF ( (config_flags%moist_adv_opt /= ORIGINAL .and. config_flags%moist_adv_opt /= WENO_SCALAR) .and. (rk_step == rk_order-1) ) THEN
  2608. # include "HALO_EM_MOIST_E_5.inc"
  2609. ELSE
  2610. # include "HALO_EM_MOIST_E_3.inc"
  2611. END IF
  2612. ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
  2613. IF ( (config_flags%moist_adv_opt /= ORIGINAL .and. config_flags%moist_adv_opt /= WENO_SCALAR) .and. (rk_step == rk_order-1) ) THEN
  2614. # include "HALO_EM_MOIST_E_7.inc"
  2615. ELSE
  2616. # include "HALO_EM_MOIST_E_5.inc"
  2617. END IF
  2618. ELSE
  2619. WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
  2620. CALL wrf_error_fatal(TRIM(wrf_err_message))
  2621. ENDIF
  2622. ENDIF
  2623. IF ( num_chem >= PARAM_FIRST_SCALAR ) THEN
  2624. IF ( config_flags%h_sca_adv_order <= 4 ) THEN
  2625. IF ( (config_flags%chem_adv_opt /= ORIGINAL .and. config_flags%chem_adv_opt /= WENO_SCALAR) .and. (rk_step == rk_order-1) ) THEN
  2626. # include "HALO_EM_CHEM_E_5.inc"
  2627. ELSE
  2628. # include "HALO_EM_CHEM_E_3.inc"
  2629. ENDIF
  2630. ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
  2631. IF ( (config_flags%chem_adv_opt /= ORIGINAL .and. config_flags%chem_adv_opt /= WENO_SCALAR) .and. (rk_step == rk_order-1) ) THEN
  2632. # include "HALO_EM_CHEM_E_7.inc"
  2633. ELSE
  2634. # include "HALO_EM_CHEM_E_5.inc"
  2635. ENDIF
  2636. ELSE
  2637. WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
  2638. CALL wrf_error_fatal(TRIM(wrf_err_message))
  2639. ENDIF
  2640. ENDIF
  2641. IF ( num_tracer >= PARAM_FIRST_SCALAR ) THEN
  2642. IF ( config_flags%h_sca_adv_order <= 4 ) THEN
  2643. IF ( (config_flags%tracer_adv_opt /= ORIGINAL .and. config_flags%tracer_adv_opt /= WENO_SCALAR) .and. (rk_step == rk_order-1) ) THEN
  2644. # include "HALO_EM_TRACER_E_5.inc"
  2645. ELSE
  2646. # include "HALO_EM_TRACER_E_3.inc"
  2647. ENDIF
  2648. ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
  2649. IF ( (config_flags%tracer_adv_opt /= ORIGINAL .and. config_flags%tracer_adv_opt /= WENO_SCALAR) .and. (rk_step == rk_order-1) ) THEN
  2650. # include "HALO_EM_TRACER_E_7.inc"
  2651. ELSE
  2652. # include "HALO_EM_TRACER_E_5.inc"
  2653. ENDIF
  2654. ELSE
  2655. WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
  2656. CALL wrf_error_fatal(TRIM(wrf_err_message))
  2657. ENDIF
  2658. ENDIF
  2659. IF ( num_scalar >= PARAM_FIRST_SCALAR ) THEN
  2660. IF ( config_flags%h_sca_adv_order <= 4 ) THEN
  2661. IF ( (config_flags%scalar_adv_opt /= ORIGINAL .and. config_flags%scalar_adv_opt /= WENO_SCALAR) .and. (rk_step == rk_order-1) ) THEN
  2662. # include "HALO_EM_SCALAR_E_5.inc"
  2663. ELSE
  2664. # include "HALO_EM_SCALAR_E_3.inc"
  2665. ENDIF
  2666. ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
  2667. IF ( (config_flags%scalar_adv_opt /= ORIGINAL .and. config_flags%scalar_adv_opt /= WENO_SCALAR) .and. (rk_step == rk_order-1) ) THEN
  2668. # include "HALO_EM_SCALAR_E_7.inc"
  2669. ELSE
  2670. # include "HALO_EM_SCALAR_E_5.inc"
  2671. ENDIF
  2672. ELSE
  2673. WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
  2674. CALL wrf_error_fatal(TRIM(wrf_err_message))
  2675. ENDIF
  2676. ENDIF
  2677. #endif
  2678. ENDIF rk_step_1_check
  2679. !**********************************************************
  2680. !
  2681. ! end of RK predictor-corrector loop
  2682. !
  2683. !**********************************************************
  2684. END DO Runge_Kutta_loop
IF (config_flags%do_avgflx_em .EQ. 1) THEN
   ! Reinitialize time-averaged fluxes if history output was written after the previous time step:
   CALL WRFU_ALARMGET(grid%alarms( HISTORY_ALARM ),prevringtime=temp_time)
   CALL domain_clock_get ( grid, current_time=CurrTime, &
                           current_timestr=message2 )
   ! use overloaded -, .LT. operator to check whether to initialize avgflx:
   ! reinitialize after each history output (detect this here by comparing current time
   ! against last history time and time step - this code follows what's done in adapt_timestep_em):
   WRITE ( message , FMT = '("solve_em: old_dt =",g15.6,", dt=",g15.6," on domain ",I3)' ) &
        & old_dt,grid%dt,grid%id
   CALL wrf_debug(200,message)
   ! Use the smaller of the previous and current step lengths so the
   ! comparison below is conservative under adaptive time stepping.
   old_dt=min(old_dt,grid%dt)
   ! Convert old_dt (seconds, real) to a rational WRFU time interval.
   num = INT(old_dt * precision)
   den = precision
   CALL WRFU_TimeIntervalSet(dtInterval, Sn=num, Sd=den)
   IF (CurrTime .lt. temp_time + dtInterval) THEN
      ! First step after a history write: zero the accumulators.
      WRITE ( message , FMT = '("solve_em: initializing avgflx at time ",A," on domain ",I3)' ) &
           & TRIM(message2), grid%id
      CALL wrf_message(trim(message))
      grid%avgflx_count = 0
      !tile-loop for zero_avgflx
      !$OMP PARALLEL DO   &
      !$OMP PRIVATE ( ij )
      DO ij = 1 , grid%num_tiles
         CALL wrf_debug(200,'In solve_em, before zero_avgflx call')
         CALL zero_avgflx(grid%avgflx_rum,grid%avgflx_rvm,grid%avgflx_wwm, &
              &   ids, ide, jds, jde, kds, kde,           &
              &   ims, ime, jms, jme, kms, kme,           &
              &   grid%i_start(ij), grid%i_end(ij), grid%j_start(ij), grid%j_end(ij), &
              &   k_start  , k_end, f_flux,               &
              &   grid%avgflx_cfu1,grid%avgflx_cfd1,grid%avgflx_dfu1, &
              &   grid%avgflx_efu1,grid%avgflx_dfd1,grid%avgflx_efd1 )
         CALL wrf_debug(200,'In solve_em, after zero_avgflx call')
      ENDDO
   ENDIF

   ! Update avgflx quantities
   ! Accumulate this step's mass-coupled fluxes into the running averages.
   !tile-loop for upd_avgflx
   !$OMP PARALLEL DO   &
   !$OMP PRIVATE ( ij )
   DO ij = 1 , grid%num_tiles
      CALL wrf_debug(200,'In solve_em, before upd_avgflx call')
      CALL upd_avgflx(grid%avgflx_count,grid%avgflx_rum,grid%avgflx_rvm,grid%avgflx_wwm, &
           &   grid%ru_m, grid%rv_m, grid%ww_m,           &
           &   ids, ide, jds, jde, kds, kde,              &
           &   ims, ime, jms, jme, kms, kme,              &
           &   grid%i_start(ij), grid%i_end(ij), grid%j_start(ij), grid%j_end(ij), &
           &   k_start  , k_end, f_flux,                  &
           &   grid%cfu1,grid%cfd1,grid%dfu1,grid%efu1,grid%dfd1,grid%efd1, &
           &   grid%avgflx_cfu1,grid%avgflx_cfd1,grid%avgflx_dfu1, &
           &   grid%avgflx_efu1,grid%avgflx_dfd1,grid%avgflx_efd1 )
      CALL wrf_debug(200,'In solve_em, after upd_avgflx call')
   ENDDO

   grid%avgflx_count = grid%avgflx_count + 1
ENDIF
!
! Advance accumulated precipitation and related cumulus bookkeeping
! (rainc, raincv, nca countdown, cloud top/bottom) by one model step,
! tile-by-tile under OpenMP.
!$OMP PARALLEL DO   &
!$OMP PRIVATE ( ij )
DO ij = 1 , grid%num_tiles

   BENCH_START(advance_ppt_tim)
   CALL wrf_debug ( 200 , ' call advance_ppt' )
   CALL advance_ppt(grid%rthcuten,grid%rqvcuten,grid%rqccuten,grid%rqrcuten, &
                    grid%rqicuten,grid%rqscuten,                             &
                    grid%rainc,grid%raincv,grid%rainsh,grid%pratec,grid%pratesh, &
                    grid%nca,grid%htop,grid%hbot,grid%cutop,grid%cubot,      &
                    grid%cuppt, grid%dt, config_flags,                       &
                    ids,ide, jds,jde, kds,kde,           &
                    ims,ime, jms,jme, kms,kme,           &
                    grid%i_start(ij), grid%i_end(ij),    &
                    grid%j_start(ij), grid%j_end(ij),    &
                    k_start    , k_end                  )
   BENCH_END(advance_ppt_tim)

ENDDO
!$OMP END PARALLEL DO
  2758. !<DESCRIPTION>
  2759. !<pre>
  2760. ! (5) time-split physics.
  2761. !
  2762. ! Microphysics are the only time split physics in the WRF model
  2763. ! at this time. Split-physics begins with the calculation of
  2764. ! needed diagnostic quantities (pressure, temperature, etc.)
  2765. ! followed by a call to the microphysics driver,
  2766. ! and finishes with a clean-up, storing off of a diabatic tendency
  2767. ! from the moist physics, and a re-calulation of the diagnostic
  2768. ! quantities pressure and density.
  2769. !</pre>
  2770. !</DESCRIPTION>
! Width of the specified/relaxation boundary zone to shave off the
! microphysics tile extents below; zero for domains without specified
! or nested lateral boundaries.
IF( config_flags%specified .or. config_flags%nested ) THEN
   sz = grid%spec_zone
ELSE
   sz = 0
ENDIF
  2776. IF (config_flags%mp_physics /= 0) then
  2777. !$OMP PARALLEL DO &
  2778. !$OMP PRIVATE ( ij, its, ite, jts, jte )
  2779. scalar_tile_loop_1a: DO ij = 1 , grid%num_tiles
  2780. IF ( config_flags%periodic_x ) THEN
  2781. its = max(grid%i_start(ij),ids)
  2782. ite = min(grid%i_end(ij),ide-1)
  2783. ELSE
  2784. its = max(grid%i_start(ij),ids+sz)
  2785. ite = min(grid%i_end(ij),ide-1-sz)
  2786. ENDIF
  2787. jts = max(grid%j_start(ij),jds+sz)
  2788. jte = min(grid%j_end(ij),jde-1-sz)
  2789. CALL wrf_debug ( 200 , ' call moist_physics_prep' )
  2790. BENCH_START(moist_physics_prep_tim)
  2791. CALL moist_physics_prep_em( grid%t_2, grid%t_1, t0, rho, &
  2792. grid%al, grid%alb, grid%p, p8w, p0, grid%pb, &
  2793. grid%ph_2, grid%phb, th_phy, pi_phy, p_phy, &
  2794. grid%z, grid%z_at_w, dz8w, &
  2795. dtm, grid%h_diabatic, &
  2796. config_flags,grid%fnm, grid%fnp, &
  2797. ids, ide, jds, jde, kds, kde, &
  2798. ims, ime, jms, jme, kms, kme, &
  2799. its, ite, jts, jte, &
  2800. k_start , k_end )
  2801. BENCH_END(moist_physics_prep_tim)
  2802. END DO scalar_tile_loop_1a
  2803. !$OMP END PARALLEL DO
  2804. CALL wrf_debug ( 200 , ' call microphysics_driver' )
  2805. grid%sr = 0.
  2806. specified_bdy = config_flags%specified .OR. config_flags%nested
  2807. channel_bdy = config_flags%specified .AND. config_flags%periodic_x
  2808. BENCH_START(micro_driver_tim)
  2809. !
  2810. ! WRFU_AlarmIsRinging always returned false, so using an alternate method to find out if it is time
  2811. ! to dump history/restart files so microphysics can be told to calculate things like radar reflectivity.
  2812. !
! Decide whether this step immediately precedes a history or restart write,
! so microphysics can compute output-only diagnostics (e.g. reflectivity).
! Derived from alarm previous-ring times because WRFU_AlarmIsRinging was
! unreliable here (see comment above).
diagflag = .false.
CALL WRFU_ALARMGET(grid%alarms( HISTORY_ALARM ),prevringtime=temp_time,RingInterval=intervaltime)
CALL WRFU_ALARMGET(grid%alarms( RESTART_ALARM ),prevringtime=restart_time,RingInterval=restartinterval)
CALL domain_clock_get ( grid, current_time=CurrTime )
! Conservative step length for the "within one step of the next ring" test.
old_dt=min(old_dt,grid%dt)
num = INT(old_dt * precision)
den = precision
CALL WRFU_TimeIntervalSet(dtInterval, Sn=num, Sd=den)
! True when the next history or restart ring falls within this time step.
IF (CurrTime .ge. temp_time + intervaltime - dtInterval .or. &
    CurrTime .ge. restart_time + restartinterval - dtInterval ) THEN
   diagflag = .true.
ENDIF
  2825. CALL microphysics_driver( &
  2826. & DT=dtm ,DX=grid%dx ,DY=grid%dy &
  2827. & ,DZ8W=dz8w ,F_ICE_PHY=grid%f_ice_phy &
  2828. & ,ITIMESTEP=grid%itimestep ,LOWLYR=grid%lowlyr &
  2829. & ,P8W=p8w ,P=p_phy ,PI_PHY=pi_phy &
  2830. & ,RHO=rho ,SPEC_ZONE=grid%spec_zone &
  2831. & ,SR=grid%sr ,TH=th_phy &
  2832. & ,refl_10cm=grid%refl_10cm & ! hm, 9/22/09 for refl
  2833. & ,WARM_RAIN=grid%warm_rain &
  2834. & ,T8W=t8w &
  2835. & ,CLDFRA=grid%cldfra, EXCH_H=grid%exch_h &
  2836. & ,NSOURCE=grid%qndropsource &
  2837. #ifdef WRF_CHEM
  2838. & ,QLSINK=grid%qlsink,CLDFRA_OLD=grid%cldfra_old &
  2839. & ,PRECR=grid%precr, PRECI=grid%preci, PRECS=grid%precs, PRECG=grid%precg &
  2840. & ,CHEM_OPT=config_flags%chem_opt, PROGN=config_flags%progn &
  2841. #endif
  2842. & ,XLAND=grid%xland &
  2843. & ,SPECIFIED=specified_bdy, CHANNEL_SWITCH=channel_bdy &
  2844. & ,F_RAIN_PHY=grid%f_rain_phy &
  2845. & ,F_RIMEF_PHY=grid%f_rimef_phy &
  2846. & ,MP_PHYSICS=config_flags%mp_physics &
  2847. & ,ID=grid%id &
  2848. & ,IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde &
  2849. & ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme &
  2850. #ifdef RUN_ON_GPU
  2851. & ,IPS=ips,IPE=ipe, JPS=jps,JPE=jpe, KPS=kps,KPE=kpe &
  2852. #endif
  2853. & ,I_START=grid%i_start,I_END=min(grid%i_end, ide-1) &
  2854. & ,J_START=grid%j_start,J_END=min(grid%j_end, jde-1) &
  2855. & ,KTS=k_start, KTE=min(k_end,kde-1) &
  2856. & ,NUM_TILES=grid%num_tiles &
  2857. & ,NAER=grid%naer &
  2858. ! Optional
  2859. & , RAINNC=grid%rainnc, RAINNCV=grid%rainncv &
  2860. & , SNOWNC=grid%snownc, SNOWNCV=grid%snowncv &
  2861. & , GRAUPELNC=grid%graupelnc, GRAUPELNCV=grid%graupelncv & ! for milbrandt2mom
  2862. & , HAILNC=grid%hailnc, HAILNCV=grid%hailncv &
  2863. & , W=grid%w_2, Z=grid%z, HT=grid%ht &
  2864. & , MP_RESTART_STATE=grid%mp_restart_state &
  2865. & , TBPVS_STATE=grid%tbpvs_state & ! etampnew
  2866. & , TBPVS0_STATE=grid%tbpvs0_state & ! etampnew
  2867. & , QV_CURR=moist(ims,kms,jms,P_QV), F_QV=F_QV &
  2868. & , QC_CURR=moist(ims,kms,jms,P_QC), F_QC=F_QC &
  2869. & , QR_CURR=moist(ims,kms,jms,P_QR), F_QR=F_QR &
  2870. & , QI_CURR=moist(ims,kms,jms,P_QI), F_QI=F_QI &
  2871. & , QS_CURR=moist(ims,kms,jms,P_QS), F_QS=F_QS &
  2872. & , QG_CURR=moist(ims,kms,jms,P_QG), F_QG=F_QG &
  2873. & , QH_CURR=moist(ims,kms,jms,P_QH), F_QH=F_QH & ! for milbrandt2mom
  2874. & , QNDROP_CURR=scalar(ims,kms,jms,P_QNDROP), F_QNDROP=F_QNDROP &
  2875. #ifdef WRF_CHEM
  2876. & , RAINPROD=grid%rainprod, EVAPPROD=grid%evapprod &
  2877. & , QV_B4MP=grid%qv_b4mp,QC_B4MP=grid%qc_b4mp &
  2878. & , QI_B4MP=grid%qi_b4mp, QS_B4MP=grid%qs_b4mp &
  2879. #endif
  2880. & , QT_CURR=scalar(ims,kms,jms,P_QT), F_QT=F_QT &
  2881. & , QNN_CURR=scalar(ims,kms,jms,P_QNN), F_QNN=F_QNN &
  2882. & , QNI_CURR=scalar(ims,kms,jms,P_QNI), F_QNI=F_QNI &
  2883. & , QNC_CURR=scalar(ims,kms,jms,P_QNC), F_QNC=F_QNC &
  2884. & , QNR_CURR=scalar(ims,kms,jms,P_QNR), F_QNR=F_QNR &
  2885. & , QNS_CURR=scalar(ims,kms,jms,P_QNS), F_QNS=F_QNS &
  2886. & , QNG_CURR=scalar(ims,kms,jms,P_QNG), F_QNG=F_QNG &
  2887. & , QNH_CURR=scalar(ims,kms,jms,P_QNH), F_QNH=F_QNH & ! for milbrandt2mom and nssl_2mom
  2888. ! & , QZR_CURR=scalar(ims,kms,jms,P_QZR), F_QZR=F_QZR & ! for milbrandt3mom
  2889. ! & , QZI_CURR=scalar(ims,kms,jms,P_QZI), F_QZI=F_QZI & ! "
  2890. ! & , QZS_CURR=scalar(ims,kms,jms,P_QZS), F_QZS=F_QZS & ! "
  2891. ! & , QZG_CURR=scalar(ims,kms,jms,P_QZG), F_QZG=F_QZG & ! "
  2892. ! & , QZH_CURR=scalar(ims,kms,jms,P_QZH), F_QZH=F_QZH & ! "
  2893. & , QVOLG_CURR=scalar(ims,kms,jms,P_QVOLG), F_QVOLG=F_QVOLG & ! for nssl_2mom
  2894. & , qrcuten=grid%rqrcuten, qscuten=grid%rqscuten &
  2895. & , qicuten=grid%rqicuten,mu=grid%mut &
  2896. & , HAIL=config_flags%gsfcgce_hail & ! for gsfcgce
  2897. & , ICE2=config_flags%gsfcgce_2ice & ! for gsfcgce
  2898. ! & , ccntype=config_flags%milbrandt_ccntype & ! for milbrandt (2mom)
  2899. ! YLIN
  2900. ! RI_CURR INPUT
  2901. & , RI_CURR=grid%rimi &
  2902. & , diagflag=diagflag &
  2903. )
  2904. BENCH_END(micro_driver_tim)
  2905. #if 0
  2906. BENCH_START(microswap_2)
  2907. ! for load balancing; communication to redistribute the points
  2908. IF ( config_flags%mp_physics .EQ. ETAMPNEW ) THEN
  2909. #include "SWAP_ETAMP_NEW.inc"
  2910. ELSE IF ( config_flags%mp_physics .EQ. WSM3SCHEME ) THEN
  2911. #include "SWAP_WSM3.inc"
  2912. ENDIF
  2913. BENCH_END(microswap_2)
  2914. #endif
  2915. CALL wrf_debug ( 200 , ' call moist_physics_finish' )
  2916. BENCH_START(moist_phys_end_tim)
  2917. !$OMP PARALLEL DO &
  2918. !$OMP PRIVATE ( ij, its, ite, jts, jte, im, ii, jj, kk )
  2919. DO ij = 1 , grid%num_tiles
  2920. its = max(grid%i_start(ij),ids)
  2921. ite = min(grid%i_end(ij),ide-1)
  2922. jts = max(grid%j_start(ij),jds)
  2923. jte = min(grid%j_end(ij),jde-1)
  2924. CALL microphysics_zero_outb ( &
  2925. moist , num_moist , config_flags , &
  2926. ids, ide, jds, jde, kds, kde, &
  2927. ims, ime, jms, jme, kms, kme, &
  2928. its, ite, jts, jte, &
  2929. k_start , k_end )
  2930. CALL microphysics_zero_outb ( &
  2931. scalar , num_scalar , config_flags , &
  2932. ids, ide, jds, jde, kds, kde, &
  2933. ims, ime, jms, jme, kms, kme, &
  2934. its, ite, jts, jte, &
  2935. k_start , k_end )
  2936. CALL microphysics_zero_outb ( &
  2937. chem , num_chem , config_flags , &
  2938. ids, ide, jds, jde, kds, kde, &
  2939. ims, ime, jms, jme, kms, kme, &
  2940. its, ite, jts, jte, &
  2941. k_start , k_end )
  2942. CALL microphysics_zero_outb ( &
  2943. tracer , num_tracer , config_flags , &
  2944. ids, ide, jds, jde, kds, kde, &
  2945. ims, ime, jms, jme, kms, kme, &
  2946. its, ite, jts, jte, &
  2947. k_start , k_end )
  2948. IF ( config_flags%periodic_x ) THEN
  2949. its = max(grid%i_start(ij),ids)
  2950. ite = min(grid%i_end(ij),ide-1)
  2951. ELSE
  2952. its = max(grid%i_start(ij),ids+sz)
  2953. ite = min(grid%i_end(ij),ide-1-sz)
  2954. ENDIF
  2955. jts = max(grid%j_start(ij),jds+sz)
  2956. jte = min(grid%j_end(ij),jde-1-sz)
  2957. CALL microphysics_zero_outa ( &
  2958. moist , num_moist , config_flags , &
  2959. ids, ide, jds, jde, kds, kde, &
  2960. ims, ime, jms, jme, kms, kme, &
  2961. its, ite, jts, jte, &
  2962. k_start , k_end )
  2963. CALL microphysics_zero_outa ( &
  2964. scalar , num_scalar , config_flags , &
  2965. ids, ide, jds, jde, kds, kde, &
  2966. ims, ime, jms, jme, kms, kme, &
  2967. its, ite, jts, jte, &
  2968. k_start , k_end )
  2969. CALL microphysics_zero_outa ( &
  2970. chem , num_chem , config_flags , &
  2971. ids, ide, jds, jde, kds, kde, &
  2972. ims, ime, jms, jme, kms, kme, &
  2973. its, ite, jts, jte, &
  2974. k_start , k_end )
  2975. CALL microphysics_zero_outa ( &
  2976. tracer , num_tracer , config_flags , &
  2977. ids, ide, jds, jde, kds, kde, &
  2978. ims, ime, jms, jme, kms, kme, &
  2979. its, ite, jts, jte, &
  2980. k_start , k_end )
  2981. CALL moist_physics_finish_em( grid%t_2, grid%t_1, t0, grid%muts, th_phy, &
  2982. grid%h_diabatic, dtm, config_flags, &
  2983. #if ( WRF_DFI_RADAR == 1 )
  2984. grid%dfi_tten_rad,grid%dfi_stage, &
  2985. #endif
  2986. ids, ide, jds, jde, kds, kde, &
  2987. ims, ime, jms, jme, kms, kme, &
  2988. its, ite, jts, jte, &
  2989. k_start , k_end )
  2990. END DO
  2991. !$OMP END PARALLEL DO
  2992. ENDIF ! microphysics test
  2993. !-----------------------------------------------------------
  2994. ! filter for moist variables post-microphysics and end of timestep
  2995. !-----------------------------------------------------------
  2996. IF (config_flags%polar) THEN
  2997. IF ( num_3d_m >= PARAM_FIRST_SCALAR ) THEN
  2998. CALL wrf_debug ( 200 , ' call filter moist' )
  2999. DO im = PARAM_FIRST_SCALAR, num_3d_m
  3000. DO jj = jps, MIN(jpe,jde-1)
  3001. DO kk = kps, MIN(kpe,kde-1)
  3002. DO ii = ips, MIN(ipe,ide-1)
  3003. moist(ii,kk,jj,im)=moist(ii,kk,jj,im)*(grid%mu_2(ii,jj)+grid%mub(ii,jj))
  3004. ENDDO
  3005. ENDDO
  3006. ENDDO
  3007. CALL pxft ( grid=grid &
  3008. ,lineno=__LINE__ &
  3009. ,flag_uv = 0 &
  3010. ,flag_rurv = 0 &
  3011. ,flag_wph = 0 &
  3012. ,flag_ww = 0 &
  3013. ,flag_t = 0 &
  3014. ,flag_mu = 0 &
  3015. ,flag_mut = 0 &
  3016. ,flag_moist = im &
  3017. ,flag_chem = 0 &
  3018. ,flag_tracer = 0 &
  3019. ,flag_scalar = 0 &
  3020. ,positive_definite=.FALSE. &
  3021. ,moist=moist,chem=chem,tracer=tracer,scalar=scalar &
  3022. ,fft_filter_lat = config_flags%fft_filter_lat &
  3023. ,dclat = dclat &
  3024. ,ids=ids,ide=ide,jds=jds,jde=jde,kds=kds,kde=kde &
  3025. ,ims=ims,ime=ime,jms=jms,jme=jme,kms=kms,kme=kme &
  3026. ,ips=ips,ipe=ipe,jps=jps,jpe=jpe,kps=kps,kpe=kpe &
  3027. ,imsx=imsx,imex=imex,jmsx=jmsx,jmex=jmex,kmsx=kmsx,kmex=kmex &
  3028. ,ipsx=ipsx,ipex=ipex,jpsx=jmsx,jpex=jpex,kpsx=kpsx,kpex=kpex )
  3029. DO jj = jps, MIN(jpe,jde-1)
  3030. DO kk = kps, MIN(kpe,kde-1)
  3031. DO ii = ips, MIN(ipe,ide-1)
  3032. moist(ii,kk,jj,im)=moist(ii,kk,jj,im)/(grid%mu_2(ii,jj)+grid%mub(ii,jj))
  3033. ENDDO
  3034. ENDDO
  3035. ENDDO
  3036. ENDDO
  3037. ENDIF
  3038. ENDIF
  3039. !-----------------------------------------------------------
  3040. ! end filter for moist variables post-microphysics and end of timestep
  3041. !-----------------------------------------------------------
      ! Recompute pressure, inverse density (al) and geopotential so the
      ! thermodynamic state is consistent with the post-microphysics
      ! moisture fields.  Tile interiors are trimmed by sz grid points
      ! next to non-periodic domain boundaries (periodic x keeps the full
      ! extent); the j direction is always trimmed here.
!$OMP PARALLEL DO   &
!$OMP PRIVATE ( ij, its, ite, jts, jte, im, ii, jj, kk )
      scalar_tile_loop_1ba: DO ij = 1 , grid%num_tiles
        IF ( config_flags%periodic_x ) THEN
          its = max(grid%i_start(ij),ids)
          ite = min(grid%i_end(ij),ide-1)
        ELSE
          its = max(grid%i_start(ij),ids+sz)
          ite = min(grid%i_end(ij),ide-1-sz)
        ENDIF
        jts = max(grid%j_start(ij),jds+sz)
        jte = min(grid%j_end(ij),jde-1-sz)
        CALL calc_p_rho_phi( moist, num_3d_m, config_flags%hypsometric_opt,       &
                             grid%al, grid%alb, grid%mu_2, grid%muts,             &
                             grid%ph_2, grid%phb, grid%p, grid%pb, grid%t_2,      &
                             p0, t0, grid%p_top, grid%znu, grid%znw, grid%dnw,    &
                             grid%rdnw,                                           &
                             grid%rdn, config_flags%non_hydrostatic,              &
                             ids, ide, jds, jde, kds, kde,                        &
                             ims, ime, jms, jme, kms, kme,                        &
                             its, ite, jts, jte,                                  &
                             k_start , k_end )
      END DO scalar_tile_loop_1ba
!$OMP END PARALLEL DO
      BENCH_END(moist_phys_end_tim)
      ! Hydrostatic option: w is diagnosed (routine diagnose_w) from the
      ! updated geopotential and horizontal winds rather than integrated
      ! prognostically.  u/v halos and periodic BCs are refreshed first so
      ! the diagnosis sees consistent neighbor values.
      IF (.not. config_flags%non_hydrostatic) THEN
#ifdef DM_PARALLEL
# include "HALO_EM_HYDRO_UV.inc"
# include "PERIOD_EM_HYDRO_UV.inc"
#endif
!$OMP PARALLEL DO   &
!$OMP PRIVATE ( ij )
        DO ij = 1 , grid%num_tiles
          CALL diagnose_w( ph_tend, grid%ph_2, grid%ph_1, grid%w_2, grid%muts, dt_rk,          &
                           grid%u_2, grid%v_2, grid%ht,                                        &
                           grid%cf1, grid%cf2, grid%cf3, grid%rdx, grid%rdy, grid%msftx, grid%msfty, &
                           ids, ide, jds, jde, kds, kde,                                       &
                           ims, ime, jms, jme, kms, kme,                                       &
                           grid%i_start(ij), grid%i_end(ij),                                   &
                           grid%j_start(ij), grid%j_end(ij),                                   &
                           k_start , k_end )
        END DO
!$OMP END PARALLEL DO
      END IF
  3085. CALL wrf_debug ( 200 , ' call chem polar filter ' )
  3086. !-----------------------------------------------------------
  3087. ! filter for chem and scalar variables at end of timestep
  3088. !-----------------------------------------------------------
  3089. IF (config_flags%polar) THEN
  3090. IF ( num_3d_c >= PARAM_FIRST_SCALAR ) then
  3091. chem_filter_loop: DO im = PARAM_FIRST_SCALAR, num_3d_c
  3092. DO jj = jps, MIN(jpe,jde-1)
  3093. DO kk = kps, MIN(kpe,kde-1)
  3094. DO ii = ips, MIN(ipe,ide-1)
  3095. chem(ii,kk,jj,im)=chem(ii,kk,jj,im)*(grid%mu_2(ii,jj)+grid%mub(ii,jj))
  3096. ENDDO
  3097. ENDDO
  3098. ENDDO
  3099. CALL pxft ( grid=grid &
  3100. ,lineno=__LINE__ &
  3101. ,flag_uv = 0 &
  3102. ,flag_rurv = 0 &
  3103. ,flag_wph = 0 &
  3104. ,flag_ww = 0 &
  3105. ,flag_t = 0 &
  3106. ,flag_mu = 0 &
  3107. ,flag_mut = 0 &
  3108. ,flag_moist = 0 &
  3109. ,flag_chem = im &
  3110. ,flag_tracer = 0 &
  3111. ,flag_scalar = 0 &
  3112. ,positive_definite=.FALSE. &
  3113. ,moist=moist,chem=chem,tracer=tracer,scalar=scalar &
  3114. ,fft_filter_lat = config_flags%fft_filter_lat &
  3115. ,dclat = dclat &
  3116. ,ids=ids,ide=ide,jds=jds,jde=jde,kds=kds,kde=kde &
  3117. ,ims=ims,ime=ime,jms=jms,jme=jme,kms=kms,kme=kme &
  3118. ,ips=ips,ipe=ipe,jps=jps,jpe=jpe,kps=kps,kpe=kpe &
  3119. ,imsx=imsx,imex=imex,jmsx=jmsx,jmex=jmex,kmsx=kmsx,kmex=kmex &
  3120. ,ipsx=ipsx,ipex=ipex,jpsx=jmsx,jpex=jpex,kpsx=kpsx,kpex=kpex )
  3121. DO jj = jps, MIN(jpe,jde-1)
  3122. DO kk = kps, MIN(kpe,kde-1)
  3123. DO ii = ips, MIN(ipe,ide-1)
  3124. chem(ii,kk,jj,im)=chem(ii,kk,jj,im)/(grid%mu_2(ii,jj)+grid%mub(ii,jj))
  3125. ENDDO
  3126. ENDDO
  3127. ENDDO
  3128. ENDDO chem_filter_loop
  3129. ENDIF
  3130. IF ( num_tracer >= PARAM_FIRST_SCALAR ) then
  3131. tracer_filter_loop: DO im = PARAM_FIRST_SCALAR, num_tracer
  3132. DO jj = jps, MIN(jpe,jde-1)
  3133. DO kk = kps, MIN(kpe,kde-1)
  3134. DO ii = ips, MIN(ipe,ide-1)
  3135. tracer(ii,kk,jj,im)=tracer(ii,kk,jj,im)*(grid%mu_2(ii,jj)+grid%mub(ii,jj))
  3136. ENDDO
  3137. ENDDO
  3138. ENDDO
  3139. CALL pxft ( grid=grid &
  3140. ,lineno=__LINE__ &
  3141. ,flag_uv = 0 &
  3142. ,flag_rurv = 0 &
  3143. ,flag_wph = 0 &
  3144. ,flag_ww = 0 &
  3145. ,flag_t = 0 &
  3146. ,flag_mu = 0 &
  3147. ,flag_mut = 0 &
  3148. ,flag_moist = 0 &
  3149. ,flag_chem = 0 &
  3150. ,flag_tracer = im &
  3151. ,flag_scalar = 0 &
  3152. ,positive_definite=.FALSE. &
  3153. ,moist=moist,chem=chem,tracer=tracer,scalar=scalar &
  3154. ,fft_filter_lat = config_flags%fft_filter_lat &
  3155. ,dclat = dclat &
  3156. ,ids=ids,ide=ide,jds=jds,jde=jde,kds=kds,kde=kde &
  3157. ,ims=ims,ime=ime,jms=jms,jme=jme,kms=kms,kme=kme &
  3158. ,ips=ips,ipe=ipe,jps=jps,jpe=jpe,kps=kps,kpe=kpe &
  3159. ,imsx=imsx,imex=imex,jmsx=jmsx,jmex=jmex,kmsx=kmsx,kmex=kmex &
  3160. ,ipsx=ipsx,ipex=ipex,jpsx=jmsx,jpex=jpex,kpsx=kpsx,kpex=kpex )
  3161. DO jj = jps, MIN(jpe,jde-1)
  3162. DO kk = kps, MIN(kpe,kde-1)
  3163. DO ii = ips, MIN(ipe,ide-1)
  3164. tracer(ii,kk,jj,im)=tracer(ii,kk,jj,im)/(grid%mu_2(ii,jj)+grid%mub(ii,jj))
  3165. ENDDO
  3166. ENDDO
  3167. ENDDO
  3168. ENDDO tracer_filter_loop
  3169. ENDIF
  3170. IF ( num_3d_s >= PARAM_FIRST_SCALAR ) then
  3171. scalar_filter_loop: DO im = PARAM_FIRST_SCALAR, num_3d_s
  3172. DO jj = jps, MIN(jpe,jde-1)
  3173. DO kk = kps, MIN(kpe,kde-1)
  3174. DO ii = ips, MIN(ipe,ide-1)
  3175. scalar(ii,kk,jj,im)=scalar(ii,kk,jj,im)*(grid%mu_2(ii,jj)+grid%mub(ii,jj))
  3176. ENDDO
  3177. ENDDO
  3178. ENDDO
  3179. CALL pxft ( grid=grid &
  3180. ,lineno=__LINE__ &
  3181. ,flag_uv = 0 &
  3182. ,flag_rurv = 0 &
  3183. ,flag_wph = 0 &
  3184. ,flag_ww = 0 &
  3185. ,flag_t = 0 &
  3186. ,flag_mu = 0 &
  3187. ,flag_mut = 0 &
  3188. ,flag_moist = 0 &
  3189. ,flag_chem = 0 &
  3190. ,flag_tracer = 0 &
  3191. ,flag_scalar = im &
  3192. ,positive_definite=.FALSE. &
  3193. ,moist=moist,chem=chem,tracer=tracer,scalar=scalar &
  3194. ,fft_filter_lat = config_flags%fft_filter_lat &
  3195. ,dclat = dclat &
  3196. ,ids=ids,ide=ide,jds=jds,jde=jde,kds=kds,kde=kde &
  3197. ,ims=ims,ime=ime,jms=jms,jme=jme,kms=kms,kme=kme &
  3198. ,ips=ips,ipe=ipe,jps=jps,jpe=jpe,kps=kps,kpe=kpe &
  3199. ,imsx=imsx,imex=imex,jmsx=jmsx,jmex=jmex,kmsx=kmsx,kmex=kmex &
  3200. ,ipsx=ipsx,ipex=ipex,jpsx=jmsx,jpex=jpex,kpsx=kpsx,kpex=kpex )
  3201. DO jj = jps, MIN(jpe,jde-1)
  3202. DO kk = kps, MIN(kpe,kde-1)
  3203. DO ii = ips, MIN(ipe,ide-1)
  3204. scalar(ii,kk,jj,im)=scalar(ii,kk,jj,im)/(grid%mu_2(ii,jj)+grid%mub(ii,jj))
  3205. ENDDO
  3206. ENDDO
  3207. ENDDO
  3208. ENDDO scalar_filter_loop
  3209. ENDIF
  3210. ENDIF
  3211. !-----------------------------------------------------------
  3212. ! end filter for chem and scalar variables at end of timestep
  3213. !-----------------------------------------------------------
  3214. ! We're finished except for boundary condition (and patch) update
  3215. ! Boundary condition time (or communication time). At this time, we have
  3216. ! implemented periodic and symmetric physical boundary conditions.
  3217. ! b.c. routine for data within patch.
  3218. ! we need to do both time levels of
  3219. ! data because the time filter only works in the physical solution space.
  3220. ! First, do patch communications for boundary conditions (periodicity)
  3221. !-----------------------------------------------------------
  3222. ! Stencils for patch communications (WCS, 29 June 2001)
  3223. !
  3224. ! here's where we need a wide comm stencil - these are the
  3225. ! uncoupled variables so are used for high order calc in
! advection and mixing routines.
  3227. !
  3228. ! * * * * *
  3229. ! * * * * * * * * *
  3230. ! * + * * + * * * + * *
  3231. ! * * * * * * * * *
  3232. ! * * * * *
  3233. !
  3234. ! grid%u_1 x
  3235. ! grid%u_2 x
  3236. ! grid%v_1 x
  3237. ! grid%v_2 x
  3238. ! grid%w_1 x
  3239. ! grid%w_2 x
  3240. ! grid%t_1 x
  3241. ! grid%t_2 x
  3242. ! grid%ph_1 x
  3243. ! grid%ph_2 x
  3244. ! grid%tke_1 x
  3245. ! grid%tke_2 x
  3246. !
  3247. ! 2D variables
  3248. ! grid%mu_1 x
  3249. ! grid%mu_2 x
  3250. !
  3251. ! 4D variables
  3252. ! moist x
  3253. ! chem x
  3254. ! scalar x
  3255. !----------------------------------------------------------
  3256. #ifdef DM_PARALLEL
  3257. IF ( config_flags%h_mom_adv_order <= 4 ) THEN
  3258. # include "HALO_EM_D3_3.inc"
  3259. ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
  3260. # include "HALO_EM_D3_5.inc"
  3261. ELSE
  3262. WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
  3263. CALL wrf_error_fatal(TRIM(wrf_err_message))
  3264. ENDIF
  3265. # include "PERIOD_BDY_EM_D3.inc"
  3266. # include "PERIOD_BDY_EM_MOIST.inc"
  3267. # include "PERIOD_BDY_EM_CHEM.inc"
  3268. # include "PERIOD_BDY_EM_TRACER.inc"
  3269. # include "PERIOD_BDY_EM_SCALAR.inc"
  3270. #endif
  3271. ! now set physical b.c on a patch
      BENCH_START(bc_2d_tim)
      ! Apply physical lateral boundary conditions on each tile for both
      ! time levels of the dry prognostic fields, TKE, and all 4-d
      ! tracer families ('p' = full 3-d fields at p-points).
!$OMP PARALLEL DO   &
!$OMP PRIVATE ( ij )
      tile_bc_loop_2: DO ij = 1 , grid%num_tiles
        CALL wrf_debug ( 200 , ' call set_phys_bc_dry_2' )
        ! Dry dynamics fields: u, v, w, theta, geopotential, mu (both levels).
        CALL set_phys_bc_dry_2( config_flags,                                         &
                                grid%u_1, grid%u_2, grid%v_1, grid%v_2, grid%w_1, grid%w_2,     &
                                grid%t_1, grid%t_2, grid%ph_1, grid%ph_2, grid%mu_1, grid%mu_2, &
                                ids, ide, jds, jde, kds, kde,                         &
                                ims, ime, jms, jme, kms, kme,                         &
                                ips, ipe, jps, jpe, kps, kpe,                         &
                                grid%i_start(ij), grid%i_end(ij),                     &
                                grid%j_start(ij), grid%j_end(ij),                     &
                                k_start , k_end )
        ! NOTE(review): tke_1 uses k_end-1 while tke_2 uses k_end -- looks
        ! intentional but asymmetric; confirm against set_physical_bc3d usage.
        CALL set_physical_bc3d( grid%tke_1, 'p', config_flags,        &
                                ids, ide, jds, jde, kds, kde,         &
                                ims, ime, jms, jme, kms, kme,         &
                                ips, ipe, jps, jpe, kps, kpe,         &
                                grid%i_start(ij), grid%i_end(ij),     &
                                grid%j_start(ij), grid%j_end(ij),     &
                                k_start , k_end-1 )
        CALL set_physical_bc3d( grid%tke_2 , 'p', config_flags,       &
                                ids, ide, jds, jde, kds, kde,         &
                                ims, ime, jms, jme, kms, kme,         &
                                ips, ipe, jps, jpe, kps, kpe,         &
                                grid%i_start(ij), grid%i_end(ij),     &
                                grid%j_start(ij), grid%j_end(ij),     &
                                k_start , k_end )
        ! 4-d families: one BC sweep per species.
        moisture_loop_bdy_2 : DO im = PARAM_FIRST_SCALAR , num_3d_m
          CALL set_physical_bc3d( moist(ims,kms,jms,im), 'p',           &
                                  config_flags,                         &
                                  ids, ide, jds, jde, kds, kde,         &
                                  ims, ime, jms, jme, kms, kme,         &
                                  ips, ipe, jps, jpe, kps, kpe,         &
                                  grid%i_start(ij), grid%i_end(ij),     &
                                  grid%j_start(ij), grid%j_end(ij),     &
                                  k_start , k_end )
        END DO moisture_loop_bdy_2
        chem_species_bdy_loop_2 : DO ic = PARAM_FIRST_SCALAR , num_3d_c
          CALL set_physical_bc3d( chem(ims,kms,jms,ic) , 'p', config_flags,   &
                                  ids, ide, jds, jde, kds, kde,               &
                                  ims, ime, jms, jme, kms, kme,               &
                                  ips, ipe, jps, jpe, kps, kpe,               &
                                  grid%i_start(ij), grid%i_end(ij),           &
                                  grid%j_start(ij), grid%j_end(ij),           &
                                  k_start , k_end )
        END DO chem_species_bdy_loop_2
        tracer_species_bdy_loop_2 : DO ic = PARAM_FIRST_SCALAR , num_tracer
          CALL set_physical_bc3d( tracer(ims,kms,jms,ic) , 'p', config_flags,   &
                                  ids, ide, jds, jde, kds, kde,                 &
                                  ims, ime, jms, jme, kms, kme,                 &
                                  ips, ipe, jps, jpe, kps, kpe,                 &
                                  grid%i_start(ij), grid%i_end(ij),             &
                                  grid%j_start(ij), grid%j_end(ij),             &
                                  k_start , k_end )
        END DO tracer_species_bdy_loop_2
        scalar_species_bdy_loop_2 : DO is = PARAM_FIRST_SCALAR , num_3d_s
          CALL set_physical_bc3d( scalar(ims,kms,jms,is) , 'p', config_flags,   &
                                  ids, ide, jds, jde, kds, kde,                 &
                                  ims, ime, jms, jme, kms, kme,                 &
                                  ips, ipe, jps, jpe, kps, kpe,                 &
                                  grid%i_start(ij), grid%i_end(ij),             &
                                  grid%j_start(ij), grid%j_end(ij),             &
                                  k_start , k_end )
        END DO scalar_species_bdy_loop_2
      END DO tile_bc_loop_2
!$OMP END PARALLEL DO
      BENCH_END(bc_2d_tim)
      ! Accumulate elapsed time since the last lateral-boundary update for
      ! specified/nested boundary conditions.
      IF( config_flags%specified .or. config_flags%nested ) THEN
        grid%dtbc = grid%dtbc + grid%dt
      ENDIF
  3343. ! reset surface w for consistency
  3344. #ifdef DM_PARALLEL
  3345. # include "HALO_EM_C.inc"
  3346. # include "PERIOD_BDY_EM_E.inc"
  3347. #endif
      CALL wrf_debug ( 10 , ' call set_w_surface' )
      ! Reset w at the surface for consistency with the end-of-step u,v and
      ! the terrain (ht).  fill_w_flag = .false. here; presumably only the
      ! surface value is reset rather than the whole column -- see
      ! set_w_surface for the flag's exact semantics.
      fill_w_flag = .false.
!$OMP PARALLEL DO   &
!$OMP PRIVATE ( ij )
      DO ij = 1 , grid%num_tiles
        CALL set_w_surface( config_flags, grid%znw, fill_w_flag,          &
                            grid%w_2, grid%ht, grid%u_2, grid%v_2,        &
                            grid%cf1, grid%cf2, grid%cf3, grid%rdx, grid%rdy, &
                            grid%msftx, grid%msfty,                       &
                            ids, ide, jds, jde, kds, kde,                 &
                            ims, ime, jms, jme, kms, kme,                 &
                            grid%i_start(ij), grid%i_end(ij),             &
                            grid%j_start(ij), grid%j_end(ij),             &
                            k_start, k_end )
!                           its, ite, jts, jte, k_start, min(k_end,kde-1), &
      END DO
!$OMP END PARALLEL DO
! calculate some model diagnostics.
      ! Standard per-step diagnostics: surface-pressure/mu tendencies,
      ! accumulated precipitation (with overflow "bucket" integer counters
      ! I_*), and accumulated radiation fluxes.
      CALL wrf_debug ( 200 , ' call diagnostic_driver' )
      CALL diagnostic_output_calc(                                      &
     &              DPSDT=grid%dpsdt   ,DMUDT=grid%dmudt                &
     &             ,P8W=p8w   ,PK1M=grid%pk1m                           &
     &             ,MU_2=grid%mu_2  ,MU_2M=grid%mu_2m                   &
     &             ,U=grid%u_2    ,V=grid%v_2                           &
     &             ,RAINCV=grid%raincv    ,RAINNCV=grid%rainncv         &
     &             ,RAINC=grid%rainc    ,RAINNC=grid%rainnc             &
     &             ,I_RAINC=grid%i_rainc    ,I_RAINNC=grid%i_rainnc     &
     &             ,HFX=grid%hfx   ,SFCEVP=grid%sfcevp     ,LH=grid%lh  &
     &             ,DT=grid%dt      ,SBW=config_flags%spec_bdy_width    &
     &             ,XTIME=grid%xtime  ,T2=grid%t2                       &
     &             ,ACSWUPT=grid%acswupt    ,ACSWUPTC=grid%acswuptc     &
     &             ,ACSWDNT=grid%acswdnt    ,ACSWDNTC=grid%acswdntc     &
     &             ,ACSWUPB=grid%acswupb    ,ACSWUPBC=grid%acswupbc     &
     &             ,ACSWDNB=grid%acswdnb    ,ACSWDNBC=grid%acswdnbc     &
     &             ,ACLWUPT=grid%aclwupt    ,ACLWUPTC=grid%aclwuptc     &
     &             ,ACLWDNT=grid%aclwdnt    ,ACLWDNTC=grid%aclwdntc     &
     &             ,ACLWUPB=grid%aclwupb    ,ACLWUPBC=grid%aclwupbc     &
     &             ,ACLWDNB=grid%aclwdnb    ,ACLWDNBC=grid%aclwdnbc     &
     &             ,I_ACSWUPT=grid%i_acswupt    ,I_ACSWUPTC=grid%i_acswuptc &
     &             ,I_ACSWDNT=grid%i_acswdnt    ,I_ACSWDNTC=grid%i_acswdntc &
     &             ,I_ACSWUPB=grid%i_acswupb    ,I_ACSWUPBC=grid%i_acswupbc &
     &             ,I_ACSWDNB=grid%i_acswdnb    ,I_ACSWDNBC=grid%i_acswdnbc &
     &             ,I_ACLWUPT=grid%i_aclwupt    ,I_ACLWUPTC=grid%i_aclwuptc &
     &             ,I_ACLWDNT=grid%i_aclwdnt    ,I_ACLWDNTC=grid%i_aclwdntc &
     &             ,I_ACLWUPB=grid%i_aclwupb    ,I_ACLWUPBC=grid%i_aclwupbc &
     &             ,I_ACLWDNB=grid%i_aclwdnb    ,I_ACLWDNBC=grid%i_aclwdnbc &
! Selection flag
     &             ,DIAG_PRINT=config_flags%diag_print                  &
     &             ,BUCKET_MM=config_flags%bucket_mm                    &
     &             ,BUCKET_J =config_flags%bucket_J                     &
     &             ,SNOWNCV=grid%snowncv, SNOW_ACC_NC=grid%snow_acc_nc  &
     &             ,PREC_ACC_C=grid%prec_acc_c                          &
     &             ,PREC_ACC_NC=grid%prec_acc_nc                        &
     &             ,PREC_ACC_DT=config_flags%prec_acc_dt                &
     &             ,CURR_SECS=curr_secs                                 &
! Dimension arguments
     &             ,IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde   &
     &             ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme   &
     &             ,IPS=ips,IPE=ipe, JPS=jps,JPE=jpe, KPS=kps,KPE=kpe   &
     &             ,I_START=grid%i_start,I_END=min(grid%i_end, ide-1)   &
     &             ,J_START=grid%j_start,J_END=min(grid%j_end, jde-1)   &
     &             ,KTS=k_start, KTE=min(k_end,kde-1)                   &
     &             ,NUM_TILES=grid%num_tiles                            &
     &                                                                  )
      ! CLWRF climate-extremes diagnostics (min/max/mean/std of T2, Q2,
      ! 10-m wind, precip, skin temperature over the auxhist3 interval).
      ! Requires a nonzero auxhist3_interval in the namelist.
      IF (config_flags%output_diagnostics == 1) THEN
        IF ((config_flags%auxhist3_interval == 0 ) ) THEN
          WRITE (wrf_err_message , * )"CLWRF: ERROR -- error -- ERROR -- error : NO 'auxhist3_interval' has been defined in 'namelist.input'"
          CALL wrf_error_fatal ( TRIM(wrf_err_message) )
        END IF
        CALL wrf_debug ( 200 , ' CLWRF: call diagnostic_calc' )
        CALL clwrf_output_calc(                                         &
     &              DPSDT=grid%dpsdt   ,DMUDT=grid%dmudt                &
     &             ,P8W=p8w   ,PK1M=grid%pk1m                           &
     &             ,MU_2=grid%mu_2  ,MU_2M=grid%mu_2m                   &
     &             ,U=grid%u_2    ,V=grid%v_2                           &
     &             ,is_restart=config_flags%restart                     &
     &             ,clwrfH=config_flags%auxhist3_interval               &
     &             ,T2=grid%t2, Q2=grid%q2, U10=grid%u10, V10=grid%v10  &
     &             ,SKINTEMP=grid%tsk                                   &
     &             ,T2CLMIN=grid%t2min, T2CLMAX=grid%t2max              &
     &             ,TT2CLMIN=grid%tt2min, TT2CLMAX=grid%tt2max          &
     &             ,T2CLMEAN=grid%t2mean, T2CLSTD=grid%t2std            &
     &             ,Q2CLMIN=grid%q2min, Q2CLMAX=grid%q2max              &
     &             ,TQ2CLMIN=grid%tq2min, TQ2CLMAX=grid%tq2max          &
     &             ,Q2CLMEAN=grid%q2mean, Q2CLSTD=grid%q2std            &
     &             ,U10CLMAX=grid%u10max, V10CLMAX=grid%v10max          &
     &             ,SPDUV10CLMAX=grid%spduv10max                        &
     &             ,TSPDUV10CLMAX=grid%tspduv10max                      &
     &             ,U10CLMEAN=grid%u10mean, V10CLMEAN=grid%v10mean      &
     &             ,SPDUV10CLMEAN=grid%spduv10mean                      &
     &             ,U10CLSTD=grid%u10std, V10CLSTD=grid%v10std          &
     &             ,SPDUV10CLSTD=grid%spduv10std                        &
     &             ,RAINCCLMAX=grid%raincvmax                           &
     &             ,RAINNCCLMAX=grid%rainncvmax                         &
     &             ,TRAINCCLMAX=grid%traincvmax                         &
     &             ,TRAINNCCLMAX=grid%trainncvmax                       &
     &             ,RAINCCLMEAN=grid%raincvmean                         &
     &             ,RAINNCCLMEAN=grid%rainncvmean                       &
     &             ,RAINCCLSTD=grid%raincvstd                           &
     &             ,RAINNCCLSTD=grid%rainncvstd                         &
     &             ,SKINTEMPCLMIN=grid%skintempmin                      &
     &             ,SKINTEMPCLMAX=grid%skintempmax                      &
     &             ,TSKINTEMPCLMIN=grid%tskintempmin                    &
     &             ,TSKINTEMPCLMAX=grid%tskintempmax                    &
     &             ,SKINTEMPCLMEAN=grid%skintempmean                    &
     &             ,SKINTEMPCLSTD=grid%skintempstd                      &
     &             ,RAINCV=grid%raincv    ,RAINNCV=grid%rainncv         &
     &             ,RAINC=grid%rainc    ,RAINNC=grid%rainnc             &
     &             ,I_RAINC=grid%i_rainc    ,I_RAINNC=grid%i_rainnc     &
     &             ,HFX=grid%hfx   ,SFCEVP=grid%sfcevp     ,LH=grid%lh  &
     &             ,DT=grid%dt      ,SBW=config_flags%spec_bdy_width    &
     &             ,XTIME=grid%xtime                                    &
! Selection flag
     &             ,DIAG_PRINT=config_flags%diag_print                  &
     &             ,BUCKET_MM=config_flags%bucket_mm                    &
     &             ,BUCKET_J =config_flags%bucket_J                     &
! Dimension arguments
     &             ,IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde   &
     &             ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme   &
     &             ,IPS=ips,IPE=ipe, JPS=jps,JPE=jpe, KPS=kps,KPE=kpe   &
     &             ,I_START=grid%i_start,I_END=min(grid%i_end, ide-1)   &
     &             ,J_START=grid%j_start,J_END=min(grid%j_end, jde-1)   &
     &             ,KTS=k_start, KTE=min(k_end,kde-1)                   &
     &             ,NUM_TILES=grid%num_tiles                            &
     &                                                                  )
      ENDIF
  3474. #ifdef DM_PARALLEL
  3475. !-----------------------------------------------------------------------
  3476. ! see above
  3477. !--------------------------------------------------------------
  3478. CALL wrf_debug ( 200 , ' call HALO_RK_E' )
  3479. IF ( config_flags%h_mom_adv_order <= 4 ) THEN
  3480. # include "HALO_EM_E_3.inc"
  3481. ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
  3482. # include "HALO_EM_E_5.inc"
  3483. ELSE
  3484. WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
  3485. CALL wrf_error_fatal(TRIM(wrf_err_message))
  3486. ENDIF
  3487. #endif
  3488. #ifdef DM_PARALLEL
  3489. IF ( num_moist >= PARAM_FIRST_SCALAR ) THEN
  3490. !-----------------------------------------------------------------------
  3491. ! see above
  3492. !--------------------------------------------------------------
  3493. CALL wrf_debug ( 200 , ' call HALO_RK_MOIST' )
  3494. IF ( config_flags%h_mom_adv_order <= 4 ) THEN
  3495. # include "HALO_EM_MOIST_E_3.inc"
  3496. ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
  3497. # include "HALO_EM_MOIST_E_5.inc"
  3498. ELSE
  3499. WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
  3500. CALL wrf_error_fatal(TRIM(wrf_err_message))
  3501. ENDIF
  3502. ENDIF
  3503. IF ( num_chem >= PARAM_FIRST_SCALAR ) THEN
  3504. !-----------------------------------------------------------------------
  3505. ! see above
  3506. !--------------------------------------------------------------
  3507. CALL wrf_debug ( 200 , ' call HALO_RK_CHEM' )
  3508. IF ( config_flags%h_mom_adv_order <= 4 ) THEN
  3509. # include "HALO_EM_CHEM_E_3.inc"
  3510. ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
  3511. # include "HALO_EM_CHEM_E_5.inc"
  3512. ELSE
  3513. WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
  3514. CALL wrf_error_fatal(TRIM(wrf_err_message))
  3515. ENDIF
  3516. ENDIF
  3517. IF ( num_tracer >= PARAM_FIRST_SCALAR ) THEN
  3518. !-----------------------------------------------------------------------
  3519. ! see above
  3520. !--------------------------------------------------------------
  3521. CALL wrf_debug ( 200 , ' call HALO_RK_TRACER' )
  3522. IF ( config_flags%h_mom_adv_order <= 4 ) THEN
  3523. # include "HALO_EM_TRACER_E_3.inc"
  3524. ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
  3525. # include "HALO_EM_TRACER_E_5.inc"
  3526. ELSE
  3527. WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
  3528. CALL wrf_error_fatal(TRIM(wrf_err_message))
  3529. ENDIF
  3530. ENDIF
  3531. IF ( num_scalar >= PARAM_FIRST_SCALAR ) THEN
  3532. !-----------------------------------------------------------------------
  3533. ! see above
  3534. !--------------------------------------------------------------
  3535. CALL wrf_debug ( 200 , ' call HALO_RK_SCALAR' )
  3536. IF ( config_flags%h_mom_adv_order <= 4 ) THEN
  3537. # include "HALO_EM_SCALAR_E_3.inc"
  3538. ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
  3539. # include "HALO_EM_SCALAR_E_5.inc"
  3540. ELSE
  3541. WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
  3542. CALL wrf_error_fatal(TRIM(wrf_err_message))
  3543. ENDIF
  3544. ENDIF
  3545. #endif
! Max values of CFL for adaptive time step scheme
      ! Release the per-step work arrays used to reduce max vertical and
      ! horizontal CFL across tiles (allocated earlier in solve_em).
      DEALLOCATE(max_vert_cfl_tmp)
      DEALLOCATE(max_horiz_cfl_tmp)
      CALL wrf_debug ( 200 , ' call end of solve_em' )
  3550. ! Finish timers if compiled with -DBENCH.
  3551. #include <bench_solve_em_end.h>
  3552. RETURN
  3553. END SUBROUTINE solve_em