/wrfv2_fire/dyn_em/solve_em.F
FORTRAN Legacy | 4014 lines | 2574 code | 461 blank | 979 comment | 12 complexity | b11609b3a9b0c8f951dfd34819ba8f0b MD5 | raw file
Possible License(s): AGPL-1.0
Large files are truncated, but you can click here to view the full file
- !WRF:MEDIATION_LAYER:SOLVER
- SUBROUTINE solve_em ( grid , config_flags &
- ! Arguments generated from Registry
- #include "dummy_new_args.inc"
- !
- )
- ! Driver layer modules
- USE module_state_description
- USE module_domain, ONLY : &
- domain, get_ijk_from_grid, get_ijk_from_subgrid &
- ,domain_get_current_time, domain_get_start_time &
- ,domain_get_sim_start_time, domain_clock_get
- USE module_domain_type, ONLY : history_alarm, restart_alarm
- USE module_configure, ONLY : grid_config_rec_type
- USE module_driver_constants
- USE module_machine
- USE module_tiles, ONLY : set_tiles
- #ifdef DM_PARALLEL
- USE module_dm, ONLY : &
- local_communicator, mytask, ntasks, ntasks_x, ntasks_y &
- ,local_communicator_periodic, wrf_dm_maxval
- USE module_comm_dm, ONLY : &
- halo_em_a_sub,halo_em_b_sub,halo_em_c2_sub,halo_em_chem_e_3_sub &
- ,halo_em_chem_e_5_sub,halo_em_chem_e_7_sub,halo_em_chem_old_e_5_sub &
- ,halo_em_chem_old_e_7_sub,halo_em_c_sub,halo_em_d2_3_sub &
- ,halo_em_d2_5_sub,halo_em_d3_3_sub,halo_em_d3_5_sub,halo_em_d_sub &
- ,halo_em_e_3_sub,halo_em_e_5_sub,halo_em_hydro_uv_sub &
- ,halo_em_moist_e_3_sub,halo_em_moist_e_5_sub,halo_em_moist_e_7_sub &
- ,halo_em_moist_old_e_5_sub,halo_em_moist_old_e_7_sub &
- ,halo_em_scalar_e_3_sub,halo_em_scalar_e_5_sub,halo_em_scalar_e_7_sub &
- ,halo_em_scalar_old_e_5_sub,halo_em_scalar_old_e_7_sub,halo_em_tke_3_sub &
- ,halo_em_tke_5_sub,halo_em_tke_7_sub,halo_em_tke_advect_3_sub &
- ,halo_em_tke_advect_5_sub,halo_em_tke_old_e_5_sub &
- ,halo_em_tke_old_e_7_sub,halo_em_tracer_e_3_sub,halo_em_tracer_e_5_sub &
- ,halo_em_tracer_e_7_sub,halo_em_tracer_old_e_5_sub &
- ,halo_em_tracer_old_e_7_sub,period_bdy_em_a_sub &
- ,period_bdy_em_b3_sub,period_bdy_em_b_sub,period_bdy_em_chem2_sub &
- ,period_bdy_em_chem_old_sub,period_bdy_em_chem_sub,period_bdy_em_d3_sub &
- ,period_bdy_em_d_sub,period_bdy_em_e_sub,period_bdy_em_moist2_sub &
- ,period_bdy_em_moist_old_sub,period_bdy_em_moist_sub &
- ,period_bdy_em_scalar2_sub,period_bdy_em_scalar_old_sub &
- ,period_bdy_em_scalar_sub,period_bdy_em_tke_old_sub &
- ,period_bdy_em_tracer2_sub,period_bdy_em_tracer_old_sub &
- ,period_bdy_em_tracer_sub,period_em_da_sub,period_em_hydro_uv_sub
- #endif
- USE module_utility
- ! Mediation layer modules
- ! Model layer modules
- USE module_model_constants
- USE module_small_step_em
- USE module_em
- USE module_big_step_utilities_em
- USE module_bc
- USE module_bc_em
- USE module_solvedebug_em
- USE module_physics_addtendc
- USE module_diffusion_em
- USE module_polarfft
- USE module_microphysics_driver
- USE module_microphysics_zero_out
- USE module_fddaobs_driver
- USE module_diagnostics
- #ifdef WRF_CHEM
- USE module_input_chem_data
- USE module_input_tracer
- USE module_chem_utilities
- #endif
- USE module_first_rk_step_part1
- USE module_first_rk_step_part2
- USE module_llxy, ONLY : proj_cassini
- USE module_avgflx_em, ONLY : zero_avgflx, upd_avgflx
- IMPLICIT NONE
- ! Input data.
- TYPE(domain) , TARGET :: grid
- ! Definitions of dummy arguments to this routine (generated from Registry).
- #include "dummy_new_decl.inc"
- ! Structure that contains run-time configuration (namelist) data for domain
- TYPE (grid_config_rec_type) , INTENT(IN) :: config_flags
- ! Local data
- INTEGER :: k_start , k_end, its, ite, jts, jte
- INTEGER :: ids , ide , jds , jde , kds , kde , &
- ims , ime , jms , jme , kms , kme , &
- ips , ipe , jps , jpe , kps , kpe
- INTEGER :: sids , side , sjds , sjde , skds , skde , &
- sims , sime , sjms , sjme , skms , skme , &
- sips , sipe , sjps , sjpe , skps , skpe
- INTEGER :: imsx, imex, jmsx, jmex, kmsx, kmex, &
- ipsx, ipex, jpsx, jpex, kpsx, kpex, &
- imsy, imey, jmsy, jmey, kmsy, kmey, &
- ipsy, ipey, jpsy, jpey, kpsy, kpey
- INTEGER :: ij , iteration
- INTEGER :: im , num_3d_m , ic , num_3d_c , is , num_3d_s
- INTEGER :: loop
- INTEGER :: sz
- INTEGER :: iswater
- LOGICAL :: specified_bdy, channel_bdy
- REAL :: t_new
-
- ! Changes in tendency at this timestep
- real ,DIMENSION(grid%sm31:grid%em31,grid%sm32:grid%em32,grid%sm33:grid%em33) :: h_tendency, &
- z_tendency
-
- ! Whether advection should produce decoupled horizontal and vertical advective tendency outputs
- LOGICAL :: tenddec
-
- ! Flag for microphysics routines to produce diagnostic fields (e.g., radar reflectivity)
- LOGICAL :: diagflag
-
- #ifdef WRF_CHEM
- ! Index cross-referencing array for tendency accumulation
- INTEGER, DIMENSION( num_chem ) :: adv_ct_indices
- #endif
- ! storage for tendencies and decoupled state (generated from Registry)
- #include <i1_decl.inc>
- ! Previous time level of tracer arrays now defined as i1 variables;
- ! the state 4d arrays now redefined as 1-time level arrays in Registry.
- ! Benefit: save memory in nested runs, since only 1 domain is active at a
- ! time. Potential problem on stack-limited architectures: increases
- ! amount of data on program stack by making these automatic arrays.
- INTEGER :: rc
- INTEGER :: number_of_small_timesteps, rk_step
- INTEGER :: klevel,ijm,ijp,i,j,k,size1,size2 ! for prints/plots only
- INTEGER :: idum1, idum2, dynamics_option
- INTEGER :: rk_order, iwmax, jwmax, kwmax
- REAL :: dt_rk, dts_rk, dts, dtm, wmax
- REAL , ALLOCATABLE , DIMENSION(:) :: max_vert_cfl_tmp, max_horiz_cfl_tmp
- LOGICAL :: leapfrog
- INTEGER :: l,kte,kk
- LOGICAL :: f_flux ! flag for computing averaged fluxes in cu_gd
- REAL :: curr_secs
- INTEGER :: num_sound_steps
- INTEGER :: idex, jdex
- REAL :: max_msft
- REAL :: spacing
- INTEGER :: ii, jj !kk is above after l,kte
- REAL :: dclat
- INTEGER :: debug_level
- ! urban related variables
- INTEGER :: NUM_ROOF_LAYERS, NUM_WALL_LAYERS, NUM_ROAD_LAYERS ! urban
- TYPE(WRFU_TimeInterval) :: tmpTimeInterval
- REAL :: real_time
- LOGICAL :: adapt_step_flag
- LOGICAL :: fill_w_flag
- ! variables for flux-averaging code 20091223
- CHARACTER*256 :: message, message2
- REAL :: old_dt
- TYPE(WRFU_Time) :: temp_time, CurrTime, restart_time
- INTEGER, PARAMETER :: precision = 100
- INTEGER :: num, den
- TYPE(WRFU_TimeInterval) :: dtInterval, intervaltime,restartinterval
- ! Define benchmarking timers if -DBENCH is compiled
- #include <bench_solve_em_def.h>
- !----------------------
- ! Executable statements
- !----------------------
- !<DESCRIPTION>
- !<pre>
- ! solve_em is the main driver for advancing a grid a single timestep.
- ! It is a mediation-layer routine -> DM and SM calls are made where
- ! needed for parallel processing.
- !
- ! solve_em can integrate the equations using 3 time-integration methods
- !
- ! - 3rd order Runge-Kutta time integration (recommended)
- !
- ! - 2nd order Runge-Kutta time integration
- !
- ! The main sections of solve_em are
- !
- ! (1) Runge-Kutta (RK) loop
- !
- ! (2) Non-timesplit physics (i.e., tendencies computed for updating
- ! model state variables during the first RK sub-step (loop)
- !
- ! (3) Small (acoustic, sound) timestep loop - within the RK sub-steps
- !
- ! (4) scalar advance for moist and chem scalar variables (and TKE)
- ! within the RK sub-steps.
- !
- ! (5) time-split physics (after the RK step), currently this includes
- ! only microphyics
- !
- ! A more detailed description of these sections follows.
- !</pre>
- !</DESCRIPTION>
- ! Initialize timers if compiled with -DBENCH
- #include <bench_solve_em_init.h>
- ! set runge-kutta solver (2nd or 3rd order)
- dynamics_option = config_flags%rk_ord
- ! Obtain dimension information stored in the grid data structure.
- CALL get_ijk_from_grid ( grid , &
- ids, ide, jds, jde, kds, kde, &
- ims, ime, jms, jme, kms, kme, &
- ips, ipe, jps, jpe, kps, kpe, &
- imsx, imex, jmsx, jmex, kmsx, kmex, &
- ipsx, ipex, jpsx, jpex, kpsx, kpex, &
- imsy, imey, jmsy, jmey, kmsy, kmey, &
- ipsy, ipey, jpsy, jpey, kpsy, kpey )
-
- CALL get_ijk_from_subgrid ( grid , &
- sids, side, sjds, sjde, skds, skde, &
- sims, sime, sjms, sjme, skms, skme, &
- sips, sipe, sjps, sjpe, skps, skpe )
- k_start = kps
- k_end = kpe
- num_3d_m = num_moist
- num_3d_c = num_chem
- num_3d_s = num_scalar
- f_flux = config_flags%do_avgflx_cugd .EQ. 1
- ! Compute these starting and stopping locations for each tile and number of tiles.
- ! See: http://www.mmm.ucar.edu/wrf/WG2/topics/settiles
- CALL set_tiles ( grid , ids , ide , jds , jde , ips , ipe , jps , jpe )
- ! Max values of CFL for adaptive time step scheme
- ALLOCATE (max_vert_cfl_tmp(grid%num_tiles))
- ALLOCATE (max_horiz_cfl_tmp(grid%num_tiles))
- !
- ! Calculate current time in seconds since beginning of model run.
- ! Unfortunately, ESMF does not seem to have a way to return
- ! floating point seconds based on a TimeInterval. So, we will
- ! calculate it here--but, this is not clean!!
- !
- tmpTimeInterval = domain_get_current_time ( grid ) - domain_get_sim_start_time ( grid )
- curr_secs = real_time(tmpTimeInterval)
- old_dt = grid%dt ! store old time step for flux averaging code at end of RK loop
- !-----------------------------------------------------------------------------
- ! Adaptive time step: Added by T. Hutchinson, WSI 3/5/07
- ! In this call, we do the time-step adaptation and set time-dependent lateral
- ! boundary condition nudging weights.
- !
- IF ( (config_flags%use_adaptive_time_step) .and. &
- ( (.not. grid%nested) .or. &
- ( (grid%nested) .and. (abs(grid%dtbc) < 0.0001) ) ) )THEN
- CALL adapt_timestep(grid, config_flags)
- adapt_step_flag = .TRUE.
- ELSE
- adapt_step_flag = .FALSE.
- ENDIF
- ! End of adaptive time step modifications
- !-----------------------------------------------------------------------------
- grid%itimestep = grid%itimestep + 1
- IF (config_flags%polar) dclat = 90./REAL(jde-jds) !(0.5 * 180/ny)
- #ifdef WRF_CHEM
- kte=min(k_end,kde-1)
- # ifdef DM_PARALLEL
- if ( num_chem >= PARAM_FIRST_SCALAR ) then
- !-----------------------------------------------------------------------
- ! see matching halo calls below for stencils
- !--------------------------------------------------------------
- CALL wrf_debug ( 200 , ' call HALO_RK_CHEM' )
- IF ( config_flags%h_mom_adv_order <= 4 ) THEN
- # include "HALO_EM_CHEM_E_3.inc"
- IF( config_flags%progn > 0 ) THEN
- # include "HALO_EM_SCALAR_E_3.inc"
- ENDIF
- ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
- # include "HALO_EM_CHEM_E_5.inc"
- IF( config_flags%progn > 0 ) THEN
- # include "HALO_EM_SCALAR_E_5.inc"
- ENDIF
- ELSE
- WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
- CALL wrf_error_fatal(TRIM(wrf_err_message))
- ENDIF
- ENDIF
- if ( num_tracer >= PARAM_FIRST_SCALAR ) then
- !-----------------------------------------------------------------------
- ! see matching halo calls below for stencils
- !--------------------------------------------------------------
- CALL wrf_debug ( 200 , ' call HALO_RK_tracer' )
- IF ( config_flags%h_mom_adv_order <= 4 ) THEN
- # include "HALO_EM_TRACER_E_3.inc"
- ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
- # include "HALO_EM_TRACER_E_5.inc"
- ELSE
- WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
- CALL wrf_error_fatal(TRIM(wrf_err_message))
- ENDIF
- ENDIF
- # endif
- !--------------------------------------------------------------
- adv_ct_indices( : ) = 1
- IF ( config_flags%chemdiag == USECHEMDIAG ) THEN
- ! modify tendency list here
- ! note that the referencing direction here is opposite of that in chem_driver
- adv_ct_indices(p_co ) = p_advh_co
- adv_ct_indices(p_o3 ) = p_advh_o3
- adv_ct_indices(p_no ) = p_advh_no
- adv_ct_indices(p_no2 ) = p_advh_no2
- adv_ct_indices(p_hno3) = p_advh_hno3
- adv_ct_indices(p_iso ) = p_advh_iso
- adv_ct_indices(p_ho ) = p_advh_ho
- adv_ct_indices(p_ho2 ) = p_advh_ho2
- END IF
- #endif
- rk_order = config_flags%rk_ord
- IF ( grid%time_step_sound == 0 ) THEN
- ! This function will give 4 for 6*dx and 6 for 10*dx and returns even numbers only
- spacing = min(grid%dx, grid%dy)
- IF ( ( config_flags%use_adaptive_time_step ) .AND. ( config_flags%map_proj == PROJ_CASSINI ) ) THEN
- max_msft=MIN ( MAX(grid%max_msftx, grid%max_msfty) , &
- 1.0/COS(config_flags%fft_filter_lat*degrad) )
- num_sound_steps = max ( 2 * ( INT (300. * grid%dt / (spacing / max_msft) - 0.01 ) + 1 ), 4 )
- ELSE IF ( config_flags%use_adaptive_time_step ) THEN
- max_msft= MAX(grid%max_msftx, grid%max_msfty)
- num_sound_steps = max ( 2 * ( INT (300. * grid%dt / (spacing / max_msft) - 0.01 ) + 1 ), 4 )
- ELSE
- num_sound_steps = max ( 2 * ( INT (300. * grid%dt / spacing - 0.01 ) + 1 ), 4 )
- END IF
- WRITE(wrf_err_message,*)'grid spacing, dt, time_step_sound=',spacing,grid%dt,num_sound_steps
- CALL wrf_debug ( 50 , wrf_err_message )
- ELSE
- num_sound_steps = grid%time_step_sound
- ENDIF
- dts = grid%dt/float(num_sound_steps)
- IF (config_flags%use_adaptive_time_step) THEN
-
- CALL get_wrf_debug_level( debug_level )
- IF ((config_flags%time_step < 0) .AND. (debug_level.GE.50)) THEN
- #ifdef DM_PARALLEL
- CALL wrf_dm_maxval(grid%max_vert_cfl, idex, jdex)
- #endif
- WRITE(wrf_err_message,*)'variable dt, max horiz cfl, max vert cfl: ',&
- grid%dt, grid%max_horiz_cfl, grid%max_vert_cfl
- CALL wrf_debug ( 0 , wrf_err_message )
- ENDIF
- grid%max_cfl_val = 0
- grid%max_horiz_cfl = 0
- grid%max_vert_cfl = 0
- ENDIF
- ! setting bdy tendencies to zero for DFI if constant_bc = true
- !$OMP PARALLEL DO &
- !$OMP PRIVATE ( ij )
- DO ij = 1 , grid%num_tiles
- ! IF( config_flags%specified .AND. grid%dfi_opt .NE. DFI_NODFI &
- ! .AND. config_flags%constant_bc .AND. (grid%dfi_stage .EQ. DFI_BCK .OR. grid%dfi_stage .EQ. DFI_FWD) ) THEN
- IF( config_flags%specified .AND. config_flags%constant_bc ) THEN
- CALL zero_bdytend (grid%u_btxs,grid%u_btxe,grid%u_btys,grid%u_btye, &
- grid%v_btxs,grid%v_btxe,grid%v_btys,grid%v_btye, &
- grid%ph_btxs,grid%ph_btxe,grid%ph_btys,grid%ph_btye, &
- grid%t_btxs,grid%t_btxe,grid%t_btys,grid%t_btye, &
- grid%w_btxs,grid%w_btxe,grid%w_btys,grid%w_btye, &
- grid%mu_btxs,grid%mu_btxe,grid%mu_btys,grid%mu_btye, &
- moist_btxs,moist_btxe, &
- moist_btys,moist_btye, &
- grid%spec_bdy_width,num_3d_m, &
- ids,ide, jds,jde, kds,kde, &
- ims,ime, jms,jme, kms,kme, &
- ips,ipe, jps,jpe, kps,kpe, &
- grid%i_start(ij), grid%i_end(ij), &
- grid%j_start(ij), grid%j_end(ij), &
- k_start, k_end )
- ENDIF
- ENDDO
- !$OMP END PARALLEL DO
- !**********************************************************************
- !
- ! LET US BEGIN.......
- !
- !<DESCRIPTION>
- !<pre>
- ! (1) RK integration loop is named the "Runge_Kutta_loop:"
- !
- ! Predictor-corrector type time integration.
- ! Advection terms are evaluated at time t for the predictor step,
- ! and advection is re-evaluated with the latest predicted value for
- ! each succeeding time corrector step
- !
- ! 2nd order Runge Kutta (rk_order = 2):
- ! Step 1 is taken to the midpoint predictor, step 2 is the full step.
- !
- ! 3rd order Runge Kutta (rk_order = 3):
- ! Step 1 is taken to from t to dt/3, step 2 is from t to dt/2,
- ! and step 3 is from t to dt.
- !
- ! non-timesplit physics are evaluated during first RK step and
- ! these physics tendencies are stored for use in each RK pass.
- !</pre>
- !</DESCRIPTION>
- !**********************************************************************
- Runge_Kutta_loop: DO rk_step = 1, rk_order
- ! Set the step size and number of small timesteps for
- ! each part of the timestep
- dtm = grid%dt
- IF ( rk_order == 1 ) THEN
- write(wrf_err_message,*)' leapfrog removed, error exit for dynamics_option = ',dynamics_option
- CALL wrf_error_fatal( wrf_err_message )
- ELSE IF ( rk_order == 2 ) THEN ! 2nd order Runge-Kutta timestep
- IF ( rk_step == 1) THEN
- dt_rk = 0.5*grid%dt
- dts_rk = dts
- number_of_small_timesteps = num_sound_steps/2
- ELSE
- dt_rk = grid%dt
- dts_rk = dts
- number_of_small_timesteps = num_sound_steps
- ENDIF
- ELSE IF ( rk_order == 3 ) THEN ! third order Runge-Kutta
- IF ( rk_step == 1) THEN
- dt_rk = grid%dt/3.
- dts_rk = dt_rk
- number_of_small_timesteps = 1
- ELSE IF (rk_step == 2) THEN
- dt_rk = 0.5*grid%dt
- dts_rk = dts
- number_of_small_timesteps = num_sound_steps/2
- ELSE
- dt_rk = grid%dt
- dts_rk = dts
- number_of_small_timesteps = num_sound_steps
- ENDIF
- ELSE
- write(wrf_err_message,*)' unknown solver, error exit for dynamics_option = ',dynamics_option
- CALL wrf_error_fatal( wrf_err_message )
- END IF
- ! Ensure that polar meridional velocity is zero
- IF (config_flags%polar) THEN
- !$OMP PARALLEL DO &
- !$OMP PRIVATE ( ij )
- DO ij = 1 , grid%num_tiles
- CALL zero_pole ( grid%v_1, &
- ids, ide, jds, jde, kds, kde, &
- ims, ime, jms, jme, kms, kme, &
- grid%i_start(ij), grid%i_end(ij), &
- grid%j_start(ij), grid%j_end(ij), &
- k_start, k_end )
- CALL zero_pole ( grid%v_2, &
- ids, ide, jds, jde, kds, kde, &
- ims, ime, jms, jme, kms, kme, &
- grid%i_start(ij), grid%i_end(ij), &
- grid%j_start(ij), grid%j_end(ij), &
- k_start, k_end )
- END DO
- !$OMP END PARALLEL DO
- END IF
- !
- ! Time level t is in the *_2 variable in the first part
- ! of the step, and in the *_1 variable after the predictor.
- ! the latest predicted values are stored in the *_2 variables.
- !
- CALL wrf_debug ( 200 , ' call rk_step_prep ' )
- BENCH_START(step_prep_tim)
- !$OMP PARALLEL DO &
- !$OMP PRIVATE ( ij )
- DO ij = 1 , grid%num_tiles
- CALL rk_step_prep ( config_flags, rk_step, &
- grid%u_2, grid%v_2, grid%w_2, grid%t_2, grid%ph_2, grid%mu_2, &
- moist, &
- grid%ru, grid%rv, grid%rw, grid%ww, grid%php, grid%alt, grid%muu, grid%muv, &
- grid%mub, grid%mut, grid%phb, grid%pb, grid%p, grid%al, grid%alb, &
- cqu, cqv, cqw, &
- grid%msfux, grid%msfuy, grid%msfvx, grid%msfvx_inv, &
- grid%msfvy, grid%msftx, grid%msfty, &
- grid%fnm, grid%fnp, grid%dnw, grid%rdx, grid%rdy, &
- num_3d_m, &
- ids, ide, jds, jde, kds, kde, &
- ims, ime, jms, jme, kms, kme, &
- grid%i_start(ij), grid%i_end(ij), &
- grid%j_start(ij), grid%j_end(ij), &
- k_start, k_end )
- END DO
- !$OMP END PARALLEL DO
- BENCH_END(step_prep_tim)
- #ifdef DM_PARALLEL
- !-----------------------------------------------------------------------
- ! Stencils for patch communications (WCS, 29 June 2001)
- ! Note: the small size of this halo exchange reflects the
- ! fact that we are carrying the uncoupled variables
- ! as state variables in the mass coordinate model, as
- ! opposed to the coupled variables as in the height
- ! coordinate model.
- !
- ! * * * * *
- ! * * * * * * * * *
- ! * + * * + * * * + * *
- ! * * * * * * * * *
- ! * * * * *
- !
- ! 3D variables - note staggering! ru(X), rv(Y), ww(Z), php(Z)
- !
- ! ru x
- ! rv x
- ! ww x
- ! php x
- ! alt x
- ! ph_2 x
- ! phb x
- !
- ! the following are 2D (xy) variables
- !
- ! muu x
- ! muv x
- ! mut x
- !--------------------------------------------------------------
- # include "HALO_EM_A.inc"
- #endif
- ! set boundary conditions on variables
- ! from big_step_prep for use in big_step_proc
- #ifdef DM_PARALLEL
- # include "PERIOD_BDY_EM_A.inc"
- #endif
- BENCH_START(set_phys_bc_tim)
- !$OMP PARALLEL DO &
- !$OMP PRIVATE ( ij, ii, jj, kk )
- DO ij = 1 , grid%num_tiles
- CALL wrf_debug ( 200 , ' call rk_phys_bc_dry_1' )
- CALL rk_phys_bc_dry_1( config_flags, grid%ru, grid%rv, grid%rw, grid%ww, &
- grid%muu, grid%muv, grid%mut, grid%php, grid%alt, grid%p, &
- ids, ide, jds, jde, kds, kde, &
- ims, ime, jms, jme, kms, kme, &
- ips, ipe, jps, jpe, kps, kpe, &
- grid%i_start(ij), grid%i_end(ij), &
- grid%j_start(ij), grid%j_end(ij), &
- k_start, k_end )
- CALL set_physical_bc3d( grid%al, 'p', config_flags, &
- ids, ide, jds, jde, kds, kde, &
- ims, ime, jms, jme, kms, kme, &
- ips, ipe, jps, jpe, kps, kpe, &
- grid%i_start(ij), grid%i_end(ij), &
- grid%j_start(ij), grid%j_end(ij), &
- k_start , k_end )
- CALL set_physical_bc3d( grid%ph_2, 'w', config_flags, &
- ids, ide, jds, jde, kds, kde, &
- ims, ime, jms, jme, kms, kme, &
- ips, ipe, jps, jpe, kps, kpe, &
- grid%i_start(ij), grid%i_end(ij), &
- grid%j_start(ij), grid%j_end(ij), &
- k_start, k_end )
- IF (config_flags%polar) THEN
- !-------------------------------------------------------
- ! lat-lon grid pole-point (v) specification (extrapolate v, rv to the pole)
- !-------------------------------------------------------
- CALL pole_point_bc ( grid%v_1, &
- ids, ide, jds, jde, kds, kde, &
- ims, ime, jms, jme, kms, kme, &
- grid%i_start(ij), grid%i_end(ij), &
- grid%j_start(ij), grid%j_end(ij), &
- k_start, k_end )
-
- CALL pole_point_bc ( grid%v_2, &
- ids, ide, jds, jde, kds, kde, &
- ims, ime, jms, jme, kms, kme, &
- grid%i_start(ij), grid%i_end(ij), &
- grid%j_start(ij), grid%j_end(ij), &
- k_start, k_end )
-
- !-------------------------------------------------------
- ! end lat-lon grid pole-point (v) specification
- !-------------------------------------------------------
- ENDIF
- END DO
- !$OMP END PARALLEL DO
- BENCH_END(set_phys_bc_tim)
- rk_step_is_one : IF (rk_step == 1) THEN ! only need to initialize diffusion tendencies
- !<DESCRIPTION>
- !<pre>
- !(2) The non-timesplit physics begins with a call to "phy_prep"
- ! (which computes some diagnostic variables such as temperature,
- ! pressure, u and v at p points, etc). This is followed by
- ! calls to the physics drivers:
- !
- ! radiation,
- ! surface,
- ! pbl,
- ! cumulus,
- ! fddagd,
- ! 3D TKE and mixing.
- !<pre>
- !</DESCRIPTION>
- CALL first_rk_step_part1 ( grid, config_flags &
- , moist , moist_tend &
- , chem , chem_tend &
- , tracer, tracer_tend &
- , scalar , scalar_tend &
- , fdda3d, fdda2d &
- , ru_tendf, rv_tendf &
- , rw_tendf, t_tendf &
- , ph_tendf, mu_tendf &
- , tke_tend &
- , config_flags%use_adaptive_time_step &
- , curr_secs &
- , psim , psih , wspd , gz1oz0 &
- , br , chklowq &
- , cu_act_flag , hol , th_phy &
- , pi_phy , p_phy , grid%t_phy &
- , u_phy , v_phy &
- , dz8w , p8w , t8w , rho_phy , rho &
- , ids, ide, jds, jde, kds, kde &
- , ims, ime, jms, jme, kms, kme &
- , ips, ipe, jps, jpe, kps, kpe &
- , imsx, imex, jmsx, jmex, kmsx, kmex &
- , ipsx, ipex, jpsx, jpex, kpsx, kpex &
- , imsy, imey, jmsy, jmey, kmsy, kmey &
- , ipsy, ipey, jpsy, jpey, kpsy, kpey &
- , k_start , k_end &
- , f_flux &
- )
- #ifdef DM_PARALLEL
- IF ( config_flags%bl_pbl_physics == MYNNPBLSCHEME2 .OR. &
- config_flags%bl_pbl_physics == MYNNPBLSCHEME3 ) THEN
- # include "HALO_EM_SCALAR_E_5.inc"
- ENDIF
- #endif
- CALL first_rk_step_part2 ( grid, config_flags &
- , moist , moist_tend &
- , chem , chem_tend &
- , tracer, tracer_tend &
- , scalar , scalar_tend &
- , fdda3d, fdda2d &
- , ru_tendf, rv_tendf &
- , rw_tendf, t_tendf &
- , ph_tendf, mu_tendf &
- , tke_tend &
- , adapt_step_flag , curr_secs &
- , psim , psih , wspd , gz1oz0 &
- , br , chklowq &
- , cu_act_flag , hol , th_phy &
- , pi_phy , p_phy , grid%t_phy &
- , u_phy , v_phy &
- , dz8w , p8w , t8w , rho_phy , rho &
- , nba_mij, num_nba_mij & !JDM
- , nba_rij, num_nba_rij & !JDM
- , ids, ide, jds, jde, kds, kde &
- , ims, ime, jms, jme, kms, kme &
- , ips, ipe, jps, jpe, kps, kpe &
- , imsx, imex, jmsx, jmex, kmsx, kmex &
- , ipsx, ipex, jpsx, jpex, kpsx, kpex &
- , imsy, imey, jmsy, jmey, kmsy, kmey &
- , ipsy, ipey, jpsy, jpey, kpsy, kpey &
- , k_start , k_end &
- )
- END IF rk_step_is_one
- BENCH_START(rk_tend_tim)
- !$OMP PARALLEL DO &
- !$OMP PRIVATE ( ij )
- DO ij = 1 , grid%num_tiles
- CALL wrf_debug ( 200 , ' call rk_tendency' )
- CALL rk_tendency ( config_flags, rk_step &
- ,grid%ru_tend, grid%rv_tend, rw_tend, ph_tend, t_tend &
- ,ru_tendf, rv_tendf, rw_tendf, ph_tendf, t_tendf &
- ,mu_tend, grid%u_save, grid%v_save, w_save, ph_save &
- ,grid%t_save, mu_save, grid%rthften &
- ,grid%ru, grid%rv, grid%rw, grid%ww &
- ,grid%u_2, grid%v_2, grid%w_2, grid%t_2, grid%ph_2 &
- ,grid%u_1, grid%v_1, grid%w_1, grid%t_1, grid%ph_1 &
- ,grid%h_diabatic, grid%phb, grid%t_init &
- ,grid%mu_2, grid%mut, grid%muu, grid%muv, grid%mub &
- ,grid%al, grid%alt, grid%p, grid%pb, grid%php, cqu, cqv, cqw &
- ,grid%u_base, grid%v_base, grid%t_base, grid%qv_base, grid%z_base &
- ,grid%msfux,grid%msfuy, grid%msfvx, grid%msfvx_inv &
- ,grid%msfvy, grid%msftx,grid%msfty, grid%clat, grid%f, grid%e, grid%sina, grid%cosa &
- ,grid%fnm, grid%fnp, grid%rdn, grid%rdnw &
- ,grid%dt, grid%rdx, grid%rdy, grid%khdif, grid%kvdif, grid%xkmh, grid%xkhh &
- ,grid%diff_6th_opt, grid%diff_6th_factor &
- ,config_flags%momentum_adv_opt &
- ,grid%dampcoef,grid%zdamp,config_flags%damp_opt,config_flags%rad_nudge &
- ,grid%cf1, grid%cf2, grid%cf3, grid%cfn, grid%cfn1, num_3d_m &
- ,config_flags%non_hydrostatic, config_flags%top_lid &
- ,grid%u_frame, grid%v_frame &
- ,ids, ide, jds, jde, kds, kde &
- ,ims, ime, jms, jme, kms, kme &
- ,grid%i_start(ij), grid%i_end(ij) &
- ,grid%j_start(ij), grid%j_end(ij) &
- ,k_start, k_end &
- ,max_vert_cfl_tmp(ij), max_horiz_cfl_tmp(ij) )
- END DO
- !$OMP END PARALLEL DO
- BENCH_END(rk_tend_tim)
- IF (config_flags%use_adaptive_time_step) THEN
- DO ij = 1 , grid%num_tiles
- IF (max_horiz_cfl_tmp(ij) .GT. grid%max_horiz_cfl) THEN
- grid%max_horiz_cfl = max_horiz_cfl_tmp(ij)
- ENDIF
- IF (max_vert_cfl_tmp(ij) .GT. grid%max_vert_cfl) THEN
- grid%max_vert_cfl = max_vert_cfl_tmp(ij)
- ENDIF
- END DO
-
- IF (grid%max_horiz_cfl .GT. grid%max_cfl_val) THEN
- grid%max_cfl_val = grid%max_horiz_cfl
- ENDIF
- IF (grid%max_vert_cfl .GT. grid%max_cfl_val) THEN
- grid%max_cfl_val = grid%max_vert_cfl
- ENDIF
- ENDIF
- BENCH_START(relax_bdy_dry_tim)
- !$OMP PARALLEL DO &
- !$OMP PRIVATE ( ij )
- DO ij = 1 , grid%num_tiles
- IF( (config_flags%specified .or. config_flags%nested) .and. rk_step == 1 ) THEN
- CALL relax_bdy_dry ( config_flags, &
- grid%u_save, grid%v_save, ph_save, grid%t_save, &
- w_save, mu_tend, &
- grid%ru, grid%rv, grid%ph_2, grid%t_2, &
- grid%w_2, grid%mu_2, grid%mut, &
- grid%u_bxs,grid%u_bxe,grid%u_bys,grid%u_bye, &
- grid%v_bxs,grid%v_bxe,grid%v_bys,grid%v_bye, &
- grid%ph_bxs,grid%ph_bxe,grid%ph_bys,grid%ph_bye, &
- grid%t_bxs,grid%t_bxe,grid%t_bys,grid%t_bye, &
- grid%w_bxs,grid%w_bxe,grid%w_bys,grid%w_bye, &
- grid%mu_bxs,grid%mu_bxe,grid%mu_bys,grid%mu_bye, &
- grid%u_btxs,grid%u_btxe,grid%u_btys,grid%u_btye, &
- grid%v_btxs,grid%v_btxe,grid%v_btys,grid%v_btye, &
- grid%ph_btxs,grid%ph_btxe,grid%ph_btys,grid%ph_btye, &
- grid%t_btxs,grid%t_btxe,grid%t_btys,grid%t_btye, &
- grid%w_btxs,grid%w_btxe,grid%w_btys,grid%w_btye, &
- grid%mu_btxs,grid%mu_btxe,grid%mu_btys,grid%mu_btye, &
- config_flags%spec_bdy_width, grid%spec_zone, grid%relax_zone, &
- grid%dtbc, grid%fcx, grid%gcx, &
- ids,ide, jds,jde, kds,kde, &
- ims,ime, jms,jme, kms,kme, &
- ips,ipe, jps,jpe, kps,kpe, &
- grid%i_start(ij), grid%i_end(ij), &
- grid%j_start(ij), grid%j_end(ij), &
- k_start, k_end )
- ENDIF
- CALL rk_addtend_dry( grid%ru_tend, grid%rv_tend, rw_tend, ph_tend, t_tend, &
- ru_tendf, rv_tendf, rw_tendf, ph_tendf, t_tendf, &
- grid%u_save, grid%v_save, w_save, ph_save, grid%t_save, &
- mu_tend, mu_tendf, rk_step, &
- grid%h_diabatic, grid%mut, grid%msftx, &
- grid%msfty, grid%msfux,grid%msfuy, &
- grid%msfvx, grid%msfvx_inv, grid%msfvy, &
- ids,ide, jds,jde, kds,kde, &
- ims,ime, jms,jme, kms,kme, &
- ips,ipe, jps,jpe, kps,kpe, &
- grid%i_start(ij), grid%i_end(ij), &
- grid%j_start(ij), grid%j_end(ij), &
- k_start, k_end )
- IF( config_flags%specified .or. config_flags%nested ) THEN
- CALL spec_bdy_dry ( config_flags, &
- grid%ru_tend, grid%rv_tend, ph_tend, t_tend, &
- rw_tend, mu_tend, &
- grid%u_bxs,grid%u_bxe,grid%u_bys,grid%u_bye, &
- grid%v_bxs,grid%v_bxe,grid%v_bys,grid%v_bye, &
- grid%ph_bxs,grid%ph_bxe,grid%ph_bys,grid%ph_bye, &
- grid%t_bxs,grid%t_bxe,grid%t_bys,grid%t_bye, &
- grid%w_bxs,grid%w_bxe,grid%w_bys,grid%w_bye, &
- grid%mu_bxs,grid%mu_bxe,grid%mu_bys,grid%mu_bye, &
- grid%u_btxs,grid%u_btxe,grid%u_btys,grid%u_btye, &
- grid%v_btxs,grid%v_btxe,grid%v_btys,grid%v_btye, &
- grid%ph_btxs,grid%ph_btxe,grid%ph_btys,grid%ph_btye, &
- grid%t_btxs,grid%t_btxe,grid%t_btys,grid%t_btye, &
- grid%w_btxs,grid%w_btxe,grid%w_btys,grid%w_btye, &
- grid%mu_btxs,grid%mu_btxe,grid%mu_btys,grid%mu_btye, &
- config_flags%spec_bdy_width, grid%spec_zone, &
- ids,ide, jds,jde, kds,kde, & ! domain dims
- ims,ime, jms,jme, kms,kme, & ! memory dims
- ips,ipe, jps,jpe, kps,kpe, & ! patch dims
- grid%i_start(ij), grid%i_end(ij), &
- grid%j_start(ij), grid%j_end(ij), &
- k_start, k_end )
-
- ENDIF
- END DO
- !$OMP END PARALLEL DO
- BENCH_END(relax_bdy_dry_tim)
- !<DESCRIPTION>
- !<pre>
- ! (3) Small (acoustic,sound) steps.
- !
- ! Several acoustic steps are taken each RK pass. A small step
- ! sequence begins with calculating perturbation variables
- ! and coupling them to the column dry-air-mass mu
- ! (call to small_step_prep). This is followed by computing
- ! coefficients for the vertically implicit part of the
- ! small timestep (call to calc_coef_w).
- !
- ! The small steps are taken
- ! in the named loop "small_steps:". In the small_steps loop, first
- ! the horizontal momentum (u and v) are advanced (call to advance_uv),
- ! next mu and theta are advanced (call to advance_mu_t) followed by
- ! advancing w and the geopotential (call to advance_w). Diagnostic
- ! values for pressure and inverse density are updated at the end of
- ! each small_step.
- !
- ! The small-step section ends with the change of the perturbation variables
- ! back to full variables (call to small_step_finish).
- !</pre>
- !</DESCRIPTION>
- BENCH_START(small_step_prep_tim)
- !$OMP PARALLEL DO &
- !$OMP PRIVATE ( ij )
- DO ij = 1 , grid%num_tiles
- ! Calculate coefficients for the vertically implicit acoustic/gravity wave
- ! integration. We only need calculate these for the first pass through -
- ! the predictor step. They are reused as is for the corrector step.
- ! For third-order RK, we need to recompute these after the first
- ! predictor because we may have changed the small timestep -> grid%dts.
- CALL wrf_debug ( 200 , ' call small_step_prep ' )
- CALL small_step_prep( grid%u_1,grid%u_2,grid%v_1,grid%v_2,grid%w_1,grid%w_2, &
- grid%t_1,grid%t_2,grid%ph_1,grid%ph_2, &
- grid%mub, grid%mu_1, grid%mu_2, &
- grid%muu, muus, grid%muv, muvs, &
- grid%mut, grid%muts, grid%mudf, &
- grid%u_save, grid%v_save, w_save, &
- grid%t_save, ph_save, mu_save, &
- grid%ww, ww1, &
- grid%dnw, c2a, grid%pb, grid%p, grid%alt, &
- grid%msfux,grid%msfuy, grid%msfvx, grid%msfvx_inv, &
- grid%msfvy, grid%msftx,grid%msfty, &
- grid%rdx, grid%rdy, rk_step, &
- ids, ide, jds, jde, kds, kde, &
- ims, ime, jms, jme, kms, kme, &
- grid%i_start(ij), grid%i_end(ij), &
- grid%j_start(ij), grid%j_end(ij), &
- k_start , k_end )
-
- CALL calc_p_rho( grid%al, grid%p, grid%ph_2, &
- grid%alt, grid%t_2, grid%t_save, c2a, pm1, &
- grid%mu_2, grid%muts, grid%znu, t0, &
- grid%rdnw, grid%dnw, grid%smdiv, &
- config_flags%non_hydrostatic, 0, &
- ids, ide, jds, jde, kds, kde, &
- ims, ime, jms, jme, kms, kme, &
- grid%i_start(ij), grid%i_end(ij), &
- grid%j_start(ij), grid%j_end(ij), &
- k_start , k_end )
- IF (config_flags%non_hydrostatic) THEN
- CALL calc_coef_w( a,alpha,gamma, &
- grid%mut, cqw, &
- grid%rdn, grid%rdnw, c2a, &
- dts_rk, g, grid%epssm, &
- config_flags%top_lid, &
- ids, ide, jds, jde, kds, kde, &
- ims, ime, jms, jme, kms, kme, &
- grid%i_start(ij), grid%i_end(ij), &
- grid%j_start(ij), grid%j_end(ij), &
- k_start , k_end )
- ENDIF
- ENDDO
- !$OMP END PARALLEL DO
- BENCH_END(small_step_prep_tim)
- #ifdef DM_PARALLEL
- !-----------------------------------------------------------------------
- ! Stencils for patch communications (WCS, 29 June 2001)
- ! Note: the small size of this halo exchange reflects the
- ! fact that we are carrying the uncoupled variables
- ! as state variables in the mass coordinate model, as
- ! opposed to the coupled variables as in the height
- ! coordinate model.
- !
- ! * * * * *
- ! * * * * * * * * *
- ! * + * * + * * * + * *
- ! * * * * * * * * *
- ! * * * * *
- !
- ! 3D variables - note staggering! ph_2(Z), u_save(X), v_save(Y)
- !
- ! ph_2 x
- ! al x
- ! p x
- ! t_1 x
- ! t_save x
- ! u_save x
- ! v_save x
- !
- ! the following are 2D (xy) variables
- !
- ! mu_1 x
- ! mu_2 x
- ! mudf x
- ! php x
- ! alt x
- ! pb x
- !--------------------------------------------------------------
- # include "HALO_EM_B.inc"
- # include "PERIOD_BDY_EM_B.inc"
- #endif
- BENCH_START(set_phys_bc2_tim)
- !$OMP PARALLEL DO &
- !$OMP PRIVATE ( ij )
- DO ij = 1 , grid%num_tiles
- CALL set_physical_bc3d( grid%ru_tend, 'u', config_flags, &
- ids, ide, jds, jde, kds, kde, &
- ims, ime, jms, jme, kms, kme, &
- ips, ipe, jps, jpe, kps, kpe, &
- grid%i_start(ij), grid%i_end(ij), &
- grid%j_start(ij), grid%j_end(ij), &
- k_start , k_end )
- CALL set_physical_bc3d( grid%rv_tend, 'v', config_flags, &
- ids, ide, jds, jde, kds, kde, &
- ims, ime, jms, jme, kms, kme, &
- ips, ipe, jps, jpe, kps, kpe, &
- grid%i_start(ij), grid%i_end(ij), &
- grid%j_start(ij), grid%j_end(ij), &
- k_start , k_end )
- CALL set_physical_bc3d( grid%ph_2, 'w', config_flags, &
- ids, ide, jds, jde, kds, kde, &
- ims, ime, jms, jme, kms, kme, &
- ips, ipe, jps, jpe, kps, kpe, &
- grid%i_start(ij), grid%i_end(ij), &
- grid%j_start(ij), grid%j_end(ij), &
- k_start , k_end )
- CALL set_physical_bc3d( grid%al, 'p', config_flags, &
- ids, ide, jds, jde, kds, kde, &
- ims, ime, jms, jme, kms, kme, &
- ips, ipe, jps, jpe, kps, kpe, &
- grid%i_start(ij), grid%i_end(ij), &
- grid%j_start(ij), grid%j_end(ij), &
- k_start , k_end )
- CALL set_physical_bc3d( grid%p, 'p', config_flags, &
- ids, ide, jds, jde, kds, kde, &
- ims, ime, jms, jme, kms, kme, &
- ips, ipe, jps, jpe, kps, kpe, &
- grid%i_start(ij), grid%i_end(ij), &
- grid%j_start(ij), grid%j_end(ij), &
- k_start , k_end )
- CALL set_physical_bc3d( grid%t_1, 'p', config_flags, &
- ids, ide, jds, jde, kds, kde, &
- ims, ime, jms, jme, kms, kme, &
- ips, ipe, jps, jpe, kps, kpe, &
- grid%i_start(ij), grid%i_end(ij), &
- grid%j_start(ij), grid%j_end(ij), &
- k_start , k_end )
- CALL set_physical_bc3d( grid%t_save, 't', config_flags, &
- ids, ide, jds, jde, kds, kde, &
- ims, ime, jms, jme, kms, kme, &
- ips, ipe, jps, jpe, kps, kpe, &
- grid%i_start(ij), grid%i_end(ij), &
- grid%j_start(ij), grid%j_end(ij), &
- k_start , k_end )
- CALL set_physical_bc2d( grid%mu_1, 't', config_flags, &
- ids, ide, jds, jde, &
- ims, ime, jms, jme, &
- ips, ipe, jps, jpe, &
- grid%i_start(ij), grid%i_end(ij), &
- grid%j_start(ij), grid%j_end(ij) )
- CALL set_physical_bc2d( grid%mu_2, 't', config_flags, &
- ids, ide, jds, jde, &
- ims, ime, jms, jme, &
- ips, ipe, jps, jpe, &
- grid%i_start(ij), grid%i_end(ij), &
- grid%j_start(ij), grid%j_end(ij) )
- CALL set_physical_bc2d( grid%mudf, 't', config_flags, &
- ids, ide, jds, jde, …
Large files are truncated, but you can click here to view the full file