diff --git a/GFDL_tools/fv_ada_nudge.F90 b/GFDL_tools/fv_ada_nudge.F90
index 414737a3c..72b04da6c 100644
--- a/GFDL_tools/fv_ada_nudge.F90
+++ b/GFDL_tools/fv_ada_nudge.F90
@@ -41,7 +41,6 @@ module fv_ada_nudge_mod
 use fms_mod, only: write_version_number, check_nml_error
 use mpp_mod, only: mpp_error, FATAL, stdlog, get_unit, mpp_pe, input_nml_file
 use mpp_mod, only: mpp_root_pe, stdout ! snz
-use mpp_mod, only: mpp_clock_id, mpp_clock_begin, mpp_clock_end
 use mpp_mod, only: CLOCK_COMPONENT, CLOCK_SUBCOMPONENT, CLOCK_MODULE, CLOCK_ROUTINE
 use mpp_domains_mod, only: mpp_update_domains, domain2d
 use mpp_domains_mod, only: mpp_get_data_domain ! snz
@@ -233,7 +232,6 @@ module fv_ada_nudge_mod
 integer :: id_u_adj, id_v_adj, id_t_adj, id_q_adj, id_ps_adj ! snz
 integer :: id_u_a, id_v_a, id_t_a, id_q_a, id_ps_a ! snz
 integer :: id_u_da, id_v_da, id_t_da, id_q_da, id_ps_da ! snz
-integer :: id_ada
 type(FmsNetcdfDomainFile_t) :: ada_driver_restart ! snz
 character(len=*), parameter :: restart_file="INPUT/ada_driver.res.nc" ! snz
@@ -439,7 +437,6 @@ subroutine fv_ada_nudge ( Time, dt, npx, npy, npz, ps_dt, u_dt, v_dt, t_dt, q_dt
 ! *t_obs* is virtual temperature
 #ifdef ENABLE_ADA ! snz
-    call mpp_clock_begin(id_ada)
 !   call get_time (time, seconds, days)
     if (mod(seconds, 21600) == 0) then
@@ -662,7 +659,6 @@ subroutine fv_ada_nudge ( Time, dt, npx, npy, npz, ps_dt, u_dt, v_dt, t_dt, q_dt
     call mpp_update_domains(pt(:,:,:), domain, complete=.true.)
 end if
-    call mpp_clock_end(id_ada)
 #endif ! snz
@@ -3801,9 +3797,9 @@ subroutine del2_scalar(qdt, cd, kmd, nmax, bd, npx, npy, gridstruct, domain)
     enddo
     enddo
     enddo
-    call timing_on('COMM_TOTAL')
+    call timing_on('COMM_TOTAL')
     call mpp_update_domains(q, domain, complete=.true.)
-    call timing_off('COMM_TOTAL')
+    call timing_off('COMM_TOTAL')
     do n=1,ntimes
diff --git a/README.md b/README.md
index 3f77c8d2e..578bfe421 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
 # GFDL_atmos_cubed_sphere
-The source contained herein reflects the 202210 release of the Finite Volume Cubed-Sphere Dynamical Core (FV3) from GFDL
+The source contained herein reflects the 202305 release of the Finite Volume Cubed-Sphere Dynamical Core (FV3) from GFDL
 The GFDL Microphysics is also available within this repository.
diff --git a/RELEASE.md b/RELEASE.md
index 3dd4f450a..f07465102 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -1,3 +1,45 @@
+# RELEASE NOTES for FV3 202305: Summary
+FV3-202305-public --- May 2023
+Lucas Harris, GFDL lucas.harris@noaa.gov
+
+This version has been tested with SHiELD physics release 202305
+and with FMS release 2023.01 from https://github.com/NOAA-GFDL/FMS
+
+This release includes the following:
+- Revised Vertical Remapping Operators (Lucas)
+  - kord=10 reverted to the AM4 version.
+  - Post-AM4 version of kord=10 is now kord=12.
+  - do_am4_remap no longer does anything and is deprecated.
+  - New strictly-monotone operators kord=14, 15 for improving tracer correlations, and kord=13 without subgrid limiting.
+  - kord <= 7 now deprecated; may be removed in a future release.
+- New Test Cases: (Joseph, Kun, Lucas)
+  - Idealized TC test case with SHiELD physics
+  - Zurita-Gotor et al. 2022 Held-Suarez variant
+  - New Stable Boundary Layer (Beale et al.) doubly-periodic test case
+- New nesting updates: (Joseph)
+  - Enable nesting in the solo core and add a new idealized test case (58)
+  - Enable adding multiple nests in doubly-periodic test cases using absolute coordinates
+- Additional idealized capability (Linjiong, Kun, Lucas)
+  - Added namelist variable is_ideal_case, which must be used for runs starting (or re-starting) from idealized states.
+  - Begin saving the initial wind fields (u0 and v0) to the restart files
+- GFDL MP and Integrated Physics (Linjiong):
+  - Added options to sub-cycle condensation and evaporation (nconds), control the timescale of evaporation (do_evap_timescale), and delay condensation and evaporation (delay_cond_evap)
+  - Removed unused 3d microphysics diagnostics to save time and memory
+  - Optimized the mpp domain updates for fast physics
+  - Updated the gfdl_mp_nml reading code to avoid a model crash when gfdl_mp_nml is absent
+  - Added an option (do_intermediate_phys) to disable the intermediate physics
+  - Removed the grid size from the GFDL MP energy and mass calculation
+  - Updated to use dry_cp instead of moist_cp in the hydrostatic case
+- Added a function to use O3 data from IFS ICs (Jan-Huey)
+  - Namelist parameter: "use_gfsO3" with the default value "false"
+  - This function only works when ecmwf_ic = T
+  - If the IFS IC does not include O3 data, or if GFS O3 should be used with other IFS ICs, set use_gfsO3 = T
+- Solver Updates (Lucas)
+  - Revised semi-implicit solver to partially linearize vertical sound wave propagation about the hydrostatic state. This removes a specific instability causing deep "columnar" modes in the vertical velocity field due to the equation for the pressure perturbation being updated partially forward-in-time. The revision eliminates these spurious modes, reduces vertical velocities, and makes the solver slightly more stable.
+  - MPI bug fix for tracer diffusion
+  - Fast Rayleigh Damping on w controlled by fast_tau_w_sec.
+
+
 # RELEASE NOTES for FV3 202210: Summary
 FV3-202210-public --- October 2022
 Lucas Harris, GFDL lucas.harris@noaa.gov
diff --git a/driver/GFDL/atmosphere.F90 b/driver/GFDL/atmosphere.F90
index 800a8359b..9a79bb95c 100644
--- a/driver/GFDL/atmosphere.F90
+++ b/driver/GFDL/atmosphere.F90
@@ -75,7 +75,7 @@ module atmosphere_mod
 use fv_diagnostics_mod, only: fv_diag_init, fv_diag, fv_time, prt_maxmin, prt_height, Mw_air!_3d
 use fv_cmip_diag_mod, only: fv_cmip_diag_init, fv_cmip_diag, fv_cmip_diag_end
 use fv_restart_mod, only: fv_restart, fv_write_restart
-use fv_timing_mod, only: timing_on, timing_off
+use fv_timing_mod, only: timing_on, timing_off, timing_init, timing_prt
 use fv_mp_mod, only: is_master
 use fv_sg_mod, only: fv_subgrid_z
 use fv_update_phys_mod, only: fv_update_phys
@@ -137,7 +137,7 @@ module atmosphere_mod
 integer :: isd, ied, jsd, jed
 integer :: nq ! transported tracers
 integer :: sec, seconds, days
- integer :: id_dynam, id_fv_diag, id_subgridz
+ integer :: id_dycore, id_fv_diag, id_update, id_dynam, id_subgrid
 logical :: cold_start = .false. !
read in initial condition integer, dimension(:), allocatable :: id_tracerdt_dyn, id_tracer, id_tracerdt_dyn_dp @@ -199,12 +199,15 @@ subroutine atmosphere_init (Time_init, Time, Time_step, Surf_diff, Grid_box) integer :: nlunit = 9999 character (len = 64) :: fn_nml = 'input.nml' + call timing_init + call timing_on('ATMOS_TOTAL') + call timing_on('ATMOS_INIT') + !For regional a_step = 0 current_time_in_seconds = time_type_to_real( Time - Time_init ) if (mpp_pe() == 0) write(0,"('atmosphere_init: current_time_seconds = ',f9.1)")current_time_in_seconds - call timing_on('ATMOS_INIT') allocate(pelist(mpp_npes())) call mpp_get_current_pelist(pelist) @@ -313,8 +316,10 @@ subroutine atmosphere_init (Time_init, Time, Time_step, Surf_diff, Grid_box) call gfdl_mp_init(input_nml_file, stdlog(), Atm(mygrid)%flagstruct%hydrostatic) endif + call timing_on('FV_RESTART') call fv_restart(Atm(mygrid)%domain, Atm, dt_atmos, seconds, days, cold_start, & Atm(mygrid)%gridstruct%grid_type, mygrid) + call timing_off('FV_RESTART') fv_time = Time @@ -344,9 +349,12 @@ subroutine atmosphere_init (Time_init, Time, Time_step, Surf_diff, Grid_box) call get_eta_level ( npz, ps2, pref(1,2), dum1d, Atm(mygrid)%ak, Atm(mygrid)%bk ) ! --- initialize clocks for dynamics, physics_down and physics_up - id_dynam = mpp_clock_id ('FV dy-core', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) - id_subgridz = mpp_clock_id ('FV subgrid_z',flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) - id_fv_diag = mpp_clock_id ('FV Diag', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_dycore = mpp_clock_id ('---FV Dycore',flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_update = mpp_clock_id ('---FV Update',flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_fv_diag = mpp_clock_id ('---FV Diag', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_dynam = mpp_clock_id ('----FV Dynamics',flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_subgrid = mpp_clock_id ('----FV Subgrid_z',flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + !---- initialize cmip diagnostic output ---- call atmos_cmip_diag_init ( Atm(mygrid)%ak, Atm(mygrid)%bk, pref(1,1), Atm(mygrid)%atmos_axes, Time ) call atmos_global_diag_init ( Atm(mygrid)%atmos_axes, Atm(mygrid)%gridstruct%area(isc:iec,jsc:jec) ) @@ -488,12 +496,7 @@ subroutine atmosphere_init (Time_init, Time, Time_step, Surf_diff, Grid_box) if (cld_amt > size(qtend,4)) id_qadt_dyn = 0 !miz -! --- initialize clocks for dynamics, physics_down and physics_up - id_dynam = mpp_clock_id ('FV dy-core', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) - id_subgridz = mpp_clock_id ('FV subgrid_z',flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) - id_fv_diag = mpp_clock_id ('FV Diag', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) - - call timing_off('ATMOS_INIT') + call timing_off('ATMOS_INIT') end subroutine atmosphere_init @@ -565,6 +568,9 @@ subroutine atmosphere_dynamics ( Time, Surf_diff ) real :: rdt !---- Call FV dynamics ----- + call timing_on('ATMOS_DYNAMICS') + + call mpp_clock_begin (id_dycore) call mpp_clock_begin (id_dynam) Surf_diff%ddp_dyn(:,:,:) = Atm(mygrid)%delp(isc:iec, jsc:jec, :) @@ -615,15 +621,16 @@ subroutine atmosphere_dynamics ( Time, Surf_diff ) do psc=1,abs(p_split) p_step = psc - call timing_on('fv_dynamics') + + call timing_on('FV_DYNAMICS') !uc/vc only need be same on coarse grid? 
However BCs do need to be the same call fv_dynamics(npx, npy, npz, nq, Atm(n)%ng, dt_atmos/real(abs(p_split)),& Atm(n)%flagstruct%consv_te, Atm(n)%flagstruct%fill, & Atm(n)%flagstruct%reproduce_sum, kappa, cp_air, zvir,& Atm(n)%ptop, Atm(n)%ks, nq, & Atm(n)%flagstruct%n_split, Atm(n)%flagstruct%q_split,& - Atm(n)%u, Atm(n)%v, Atm(n)%w, Atm(n)%delz, & - Atm(n)%flagstruct%hydrostatic, & + Atm(n)%u0, Atm(n)%v0, Atm(n)%u, Atm(n)%v, Atm(n)%w, & + Atm(n)%delz, Atm(n)%flagstruct%hydrostatic, & Atm(n)%pt, Atm(n)%delp, Atm(n)%q, Atm(n)%ps, & Atm(n)%pe, Atm(n)%pk, Atm(n)%peln, & Atm(n)%pkz, Atm(n)%phis, Atm(n)%q_con, & @@ -635,7 +642,7 @@ subroutine atmosphere_dynamics ( Time, Surf_diff ) Atm(n)%neststruct, Atm(n)%idiag, Atm(n)%bd, & Atm(n)%parent_grid, Atm(n)%domain, Atm(n)%inline_mp, & Atm(n)%diss_est) - call timing_off('fv_dynamics') + call timing_off('FV_DYNAMICS') if (ngrids > 1 .and. (psc < p_split .or. p_split < 0)) then call mpp_sync() @@ -645,7 +652,6 @@ subroutine atmosphere_dynamics ( Time, Surf_diff ) endif end do !p_split - call mpp_clock_end (id_dynam) Surf_diff%ddp_dyn(:,:,:) =(Atm(mygrid)%delp(isc:iec,jsc:jec,:)-Surf_diff%ddp_dyn(:,:,:))/dt_atmos Surf_diff%tdt_dyn(:,:,:) =(Atm(mygrid)%pt(isc:iec,jsc:jec,:) -Surf_diff%tdt_dyn(:,:,:))/dt_atmos @@ -696,11 +702,16 @@ subroutine atmosphere_dynamics ( Time, Surf_diff ) enddo + call mpp_clock_end (id_dynam) + call mpp_clock_begin (id_subgrid) + !----------------------------------------------------- !--- COMPUTE SUBGRID Z !----------------------------------------------------- !--- zero out tendencies - call mpp_clock_begin (id_subgridz) + + call timing_on('FV_SUBGRID_Z') + u_dt(:,:,:) = 0. ! These are updated by fv_subgrid_z v_dt(:,:,:) = 0. ! t_dt is used for two different purposes: @@ -772,7 +783,12 @@ subroutine atmosphere_dynamics ( Time, Surf_diff ) ! zero out t_dt for use as an accumulator t_dt = 0. 
- call mpp_clock_end (id_subgridz) + call timing_off('FV_SUBGRID_Z') + + call mpp_clock_end (id_subgrid) + call mpp_clock_end (id_dycore) + + call timing_off('ATMOS_DYNAMICS') end subroutine atmosphere_dynamics @@ -781,6 +797,8 @@ subroutine atmosphere_end (Time, Grid_box ) type (time_type), intent(in) :: Time type(grid_box_type), intent(inout) :: Grid_box + call timing_on('ATMOS_END') + !--- end nudging module --- #if defined (ATMOS_NUDGE) if ( Atm(mygrid)%flagstruct%nudge ) call atmos_nudge_end @@ -796,16 +814,20 @@ subroutine atmosphere_end (Time, Grid_box ) call gfdl_mp_end ( ) endif - call timing_on('FV_DIAG') + call timing_on('FV_DIAG') call atmos_global_diag_end call fv_cmip_diag_end call fv_end(Atm, mygrid) - call timing_off('FV_DIAG') + call timing_off('FV_DIAG') deallocate ( Atm ) deallocate ( u_dt, v_dt, t_dt, qv_dt, q_dt, pref, dum1d ) deallocate ( is_vmr ) + call timing_off('ATMOS_END') + call timing_off('ATMOS_TOTAL') + call timing_prt( mpp_pe() ) + end subroutine atmosphere_end @@ -1091,6 +1113,10 @@ subroutine atmosphere_state_update (Time, Physics_tendency, Physics, Atm_block) integer :: nb, ibs, ibe, jbs, jbe real :: rcp + call timing_on('ATMOS_UPDATE') + + call mpp_clock_begin (id_update) + Time_prev = Time Time_next = Time + Time_step_atmos @@ -1142,8 +1168,7 @@ subroutine atmosphere_state_update (Time, Physics_tendency, Physics, Atm_block) endif #endif - call mpp_clock_begin (id_dynam) - call timing_on('FV_UPDATE_PHYS') + call timing_on('FV_UPDATE_PHYS') call fv_update_phys( dt_atmos, isc, iec, jsc, jec, isd, ied, jsd, jed, Atm(n)%ng, nt_dyn, & Atm(n)%u, Atm(n)%v, Atm(n)%w, Atm(n)%delp, Atm(n)%pt, & Atm(n)%q, Atm(n)%qdiag, & @@ -1156,8 +1181,7 @@ subroutine atmosphere_state_update (Time, Physics_tendency, Physics, Atm_block) Atm(n)%npx, Atm(n)%npy, Atm(n)%npz, Atm(n)%flagstruct, & Atm(n)%neststruct, Atm(n)%bd, Atm(n)%domain, & Atm(n)%ptop, Atm(n)%phys_diag, Atm(n)%nudge_diag, q_dt) - call timing_off('FV_UPDATE_PHYS') - call mpp_clock_end (id_dynam) + call timing_off('FV_UPDATE_PHYS') !--- nesting update after updating atmospheric variables with !--- physics tendencies @@ -1173,24 +1197,30 @@ subroutine atmosphere_state_update (Time, Physics_tendency, Physics, Atm_block) if (query_cmip_diag_id(ID_tnhus)) & used = send_cmip_data_3d (ID_tnhus, (Atm(mygrid)%q(isc:iec,jsc:jec,:,sphum)-qtend(:,:,:,sphum))/dt_atmos, Time) + call mpp_clock_end (id_update) + + call mpp_clock_begin(id_fv_diag) + !---- diagnostics for FV dynamics ----- if (Atm(mygrid)%flagstruct%print_freq /= -99) then - call mpp_clock_begin(id_fv_diag) - call timing_on('FV_DIAG') fv_time = Time_next call get_time (fv_time, seconds, days) + call timing_on('FV_DIAG') call fv_diag(Atm(mygrid:mygrid), zvir, fv_time, Atm(mygrid)%flagstruct%print_freq) if (Atm(mygrid)%coarse_graining%write_coarse_diagnostics) then call fv_coarse_diag(Atm(mygrid:mygrid), fv_time, zvir) endif call fv_cmip_diag(Atm(mygrid:mygrid), zvir, fv_time) - call timing_off('FV_DIAG') - call mpp_clock_end(id_fv_diag) + endif + call mpp_clock_end(id_fv_diag) + + call timing_off('ATMOS_UPDATE') + end subroutine atmosphere_state_update @@ -1234,7 +1264,6 @@ subroutine adiabatic_init(zvir,nudge_dz) jsd = jsc - ngc jed = jec + ngc - call timing_on('adiabatic_init') do_adiabatic_init = .true. 
allocate ( u0(isc:iec, jsc:jec+1, npz) ) @@ -1285,8 +1314,8 @@ subroutine adiabatic_init(zvir,nudge_dz) call fv_dynamics(Atm(mygrid)%npx, Atm(mygrid)%npy, npz, nq, Atm(mygrid)%ng, dt_atmos, 0., & Atm(mygrid)%flagstruct%fill, Atm(mygrid)%flagstruct%reproduce_sum, kappa, cp_air, zvir, & Atm(mygrid)%ptop, Atm(mygrid)%ks, nq, Atm(mygrid)%flagstruct%n_split, & - Atm(mygrid)%flagstruct%q_split, Atm(mygrid)%u, Atm(mygrid)%v, Atm(mygrid)%w, & - Atm(mygrid)%delz, Atm(mygrid)%flagstruct%hydrostatic, & + Atm(mygrid)%flagstruct%q_split, Atm(mygrid)%u0, Atm(mygrid)%v0, Atm(mygrid)%u, & + Atm(mygrid)%v, Atm(mygrid)%w, Atm(mygrid)%delz, Atm(mygrid)%flagstruct%hydrostatic, & Atm(mygrid)%pt, Atm(mygrid)%delp, Atm(mygrid)%q, Atm(mygrid)%ps, & Atm(mygrid)%pe, Atm(mygrid)%pk, Atm(mygrid)%peln, Atm(mygrid)%pkz, Atm(mygrid)%phis, & Atm(mygrid)%q_con, Atm(mygrid)%omga, Atm(mygrid)%ua, Atm(mygrid)%va, Atm(mygrid)%uc, Atm(mygrid)%vc, & @@ -1299,8 +1328,8 @@ subroutine adiabatic_init(zvir,nudge_dz) call fv_dynamics(Atm(mygrid)%npx, Atm(mygrid)%npy, npz, nq, Atm(mygrid)%ng, -dt_atmos, 0., & Atm(mygrid)%flagstruct%fill, Atm(mygrid)%flagstruct%reproduce_sum, kappa, cp_air, zvir, & Atm(mygrid)%ptop, Atm(mygrid)%ks, nq, Atm(mygrid)%flagstruct%n_split, & - Atm(mygrid)%flagstruct%q_split, Atm(mygrid)%u, Atm(mygrid)%v, Atm(mygrid)%w, & - Atm(mygrid)%delz, Atm(mygrid)%flagstruct%hydrostatic, & + Atm(mygrid)%flagstruct%q_split, Atm(mygrid)%u0, Atm(mygrid)%v0, Atm(mygrid)%u, & + Atm(mygrid)%v, Atm(mygrid)%w, Atm(mygrid)%delz, Atm(mygrid)%flagstruct%hydrostatic, & Atm(mygrid)%pt, Atm(mygrid)%delp, Atm(mygrid)%q, Atm(mygrid)%ps, & Atm(mygrid)%pe, Atm(mygrid)%pk, Atm(mygrid)%peln, Atm(mygrid)%pkz, Atm(mygrid)%phis, & Atm(mygrid)%q_con, Atm(mygrid)%omga, Atm(mygrid)%ua, Atm(mygrid)%va, Atm(mygrid)%uc, Atm(mygrid)%vc, & @@ -1371,8 +1400,8 @@ subroutine adiabatic_init(zvir,nudge_dz) call fv_dynamics(Atm(mygrid)%npx, Atm(mygrid)%npy, npz, nq, Atm(mygrid)%ng, -dt_atmos, 0., & Atm(mygrid)%flagstruct%fill, Atm(mygrid)%flagstruct%reproduce_sum, kappa, cp_air, zvir, & Atm(mygrid)%ptop, Atm(mygrid)%ks, nq, Atm(mygrid)%flagstruct%n_split, & - Atm(mygrid)%flagstruct%q_split, Atm(mygrid)%u, Atm(mygrid)%v, Atm(mygrid)%w, & - Atm(mygrid)%delz, Atm(mygrid)%flagstruct%hydrostatic, & + Atm(mygrid)%flagstruct%q_split, Atm(mygrid)%u0, Atm(mygrid)%v0, Atm(mygrid)%u, & + Atm(mygrid)%v, Atm(mygrid)%w, Atm(mygrid)%delz, Atm(mygrid)%flagstruct%hydrostatic, & Atm(mygrid)%pt, Atm(mygrid)%delp, Atm(mygrid)%q, Atm(mygrid)%ps, & Atm(mygrid)%pe, Atm(mygrid)%pk, Atm(mygrid)%peln, Atm(mygrid)%pkz, Atm(mygrid)%phis, & Atm(mygrid)%q_con, Atm(mygrid)%omga, Atm(mygrid)%ua, Atm(mygrid)%va, Atm(mygrid)%uc, Atm(mygrid)%vc, & @@ -1385,8 +1414,8 @@ subroutine adiabatic_init(zvir,nudge_dz) call fv_dynamics(Atm(mygrid)%npx, Atm(mygrid)%npy, npz, nq, Atm(mygrid)%ng, dt_atmos, 0., & Atm(mygrid)%flagstruct%fill, Atm(mygrid)%flagstruct%reproduce_sum, kappa, cp_air, zvir, & Atm(mygrid)%ptop, Atm(mygrid)%ks, nq, Atm(mygrid)%flagstruct%n_split, & - Atm(mygrid)%flagstruct%q_split, Atm(mygrid)%u, Atm(mygrid)%v, Atm(mygrid)%w, & - Atm(mygrid)%delz, Atm(mygrid)%flagstruct%hydrostatic, & + Atm(mygrid)%flagstruct%q_split, Atm(mygrid)%u0, Atm(mygrid)%v0, Atm(mygrid)%u, & + Atm(mygrid)%v, Atm(mygrid)%w, Atm(mygrid)%delz, Atm(mygrid)%flagstruct%hydrostatic, & Atm(mygrid)%pt, Atm(mygrid)%delp, Atm(mygrid)%q, Atm(mygrid)%ps, & Atm(mygrid)%pe, Atm(mygrid)%pk, Atm(mygrid)%peln, Atm(mygrid)%pkz, Atm(mygrid)%phis, & Atm(mygrid)%q_con, Atm(mygrid)%omga, Atm(mygrid)%ua, Atm(mygrid)%va, 
Atm(mygrid)%uc, Atm(mygrid)%vc, & @@ -1436,7 +1465,6 @@ subroutine adiabatic_init(zvir,nudge_dz) if ( allocated(dz0) ) deallocate ( dz0 ) do_adiabatic_init = .false. - call timing_off('adiabatic_init') end subroutine adiabatic_init diff --git a/driver/SHiELD/atmosphere.F90 b/driver/SHiELD/atmosphere.F90 index e6efa824f..eb4c19474 100644 --- a/driver/SHiELD/atmosphere.F90 +++ b/driver/SHiELD/atmosphere.F90 @@ -70,7 +70,7 @@ module atmosphere_mod use fv_diagnostics_mod, only: fv_diag_init, fv_diag, fv_time, prt_maxmin, prt_height use fv_nggps_diags_mod, only: fv_nggps_diag_init, fv_nggps_diag use fv_restart_mod, only: fv_restart, fv_write_restart -use fv_timing_mod, only: timing_on, timing_off +use fv_timing_mod, only: timing_on, timing_off, timing_init, timing_prt use fv_mp_mod, only: is_master use fv_sg_mod, only: fv_subgrid_z use fv_update_phys_mod, only: fv_update_phys @@ -132,7 +132,7 @@ module atmosphere_mod integer :: isd, ied, jsd, jed integer :: nq ! transported tracers integer :: sec, seconds, days - integer :: id_dynam, id_fv_diag, id_subgridz + integer :: id_dycore, id_fv_diag, id_update, id_dynam, id_subgrid logical :: cold_start = .false. ! read in initial condition integer, dimension(:), allocatable :: id_tracerdt_dyn @@ -173,12 +173,15 @@ subroutine atmosphere_init (Time_init, Time, Time_step, Grid_box, area, IAU_Data integer :: nlunit = 9999 character (len = 64) :: fn_nml = 'input.nml' + call timing_init + call timing_on('ATMOS_TOTAL') + call timing_on('ATMOS_INIT') + !For regional a_step = 0 current_time_in_seconds = time_type_to_real( Time - Time_init ) if (mpp_pe() == 0) write(0,"('atmosphere_init: current_time_seconds = ',f9.1)")current_time_in_seconds - call timing_on('ATMOS_INIT') allocate(pelist(mpp_npes())) call mpp_get_current_pelist(pelist) @@ -285,8 +288,10 @@ subroutine atmosphere_init (Time_init, Time, Time_step, Grid_box, area, IAU_Data call gfdl_mp_init(input_nml_file, stdlog(), Atm(mygrid)%flagstruct%hydrostatic) + call timing_on('FV_RESTART') call fv_restart(Atm(mygrid)%domain, Atm, dt_atmos, seconds, days, cold_start, & Atm(mygrid)%gridstruct%grid_type, mygrid) + call timing_off('FV_RESTART') fv_time = Time @@ -323,11 +328,11 @@ subroutine atmosphere_init (Time_init, Time, Time_step, Grid_box, area, IAU_Data call get_eta_level ( npz, ps2, pref(1,2), dum1d, Atm(mygrid)%ak, Atm(mygrid)%bk ) ! --- initialize clocks for dynamics, physics_down and physics_up - id_dynam = mpp_clock_id ('FV dy-core', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) - id_subgridz = mpp_clock_id ('FV subgrid_z',flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) - id_fv_diag = mpp_clock_id ('FV Diag', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) - - call timing_off('ATMOS_INIT') + id_dycore = mpp_clock_id ('---FV Dycore',flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_update = mpp_clock_id ('---FV Update',flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_fv_diag = mpp_clock_id ('---FV Diag', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_dynam = mpp_clock_id ('----FV Dynamics',flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_subgrid = mpp_clock_id ('----FV Subgrid_z',flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) ! --- initiate the start for a restarted regional forecast if ( Atm(mygrid)%gridstruct%regional .and. 
Atm(mygrid)%flagstruct%warm_start ) then @@ -372,6 +377,8 @@ subroutine atmosphere_init (Time_init, Time, Time_step, Grid_box, area, IAU_Data call data_override_init(Atm_domain_in = Atm(mygrid)%domain) endif + call timing_off('ATMOS_INIT') + end subroutine atmosphere_init @@ -440,8 +447,12 @@ subroutine atmosphere_dynamics ( Time ) type(time_type) :: atmos_time integer :: atmos_time_step real :: rdt + real :: time_total !---- Call FV dynamics ----- + call timing_on('ATMOS_DYNAMICS') + + call mpp_clock_begin (id_dycore) call mpp_clock_begin (id_dynam) n = mygrid @@ -466,15 +477,16 @@ subroutine atmosphere_dynamics ( Time ) do psc=1,abs(p_split) p_step = psc - call timing_on('fv_dynamics') + + call timing_on('FV_DYNAMICS') !uc/vc only need be same on coarse grid? However BCs do need to be the same call fv_dynamics(npx, npy, npz, nq, Atm(n)%ng, dt_atmos/real(abs(p_split)),& Atm(n)%flagstruct%consv_te, Atm(n)%flagstruct%fill, & Atm(n)%flagstruct%reproduce_sum, kappa, cp_air, zvir,& Atm(n)%ptop, Atm(n)%ks, nq, & Atm(n)%flagstruct%n_split, Atm(n)%flagstruct%q_split,& - Atm(n)%u, Atm(n)%v, Atm(n)%w, Atm(n)%delz, & - Atm(n)%flagstruct%hydrostatic, & + Atm(n)%u0, Atm(n)%v0, Atm(n)%u, Atm(n)%v, Atm(n)%w, & + Atm(n)%delz, Atm(n)%flagstruct%hydrostatic, & Atm(n)%pt, Atm(n)%delp, Atm(n)%q, Atm(n)%ps, & Atm(n)%pe, Atm(n)%pk, Atm(n)%peln, & Atm(n)%pkz, Atm(n)%phis, Atm(n)%q_con, & @@ -485,8 +497,9 @@ subroutine atmosphere_dynamics ( Time ) Atm(n)%gridstruct, Atm(n)%flagstruct, & Atm(n)%neststruct, Atm(n)%idiag, Atm(n)%bd, & Atm(n)%parent_grid, Atm(n)%domain, Atm(n)%inline_mp, & - Atm(n)%diss_est) - call timing_off('fv_dynamics') + Atm(n)%diss_est,time_total=time_total) + + call timing_off('FV_DYNAMICS') if (ngrids > 1 .and. (psc < p_split .or. p_split < 0)) then call mpp_sync() @@ -511,16 +524,18 @@ subroutine atmosphere_dynamics ( Time ) Atm(n)%flagstruct%nf_omega, & Atm(n)%bd) endif - endif - + endif - call mpp_clock_end (id_dynam) + call mpp_clock_end (id_dynam) + call mpp_clock_begin (id_subgrid) !----------------------------------------------------- !--- COMPUTE SUBGRID Z !----------------------------------------------------- !--- zero out tendencies - call mpp_clock_begin (id_subgridz) + + call timing_on('FV_SUBGRID_Z') + u_dt(:,:,:) = 0. ! These are updated by fv_subgrid_z v_dt(:,:,:) = 0. ! t_dt is used for two different purposes: @@ -576,7 +591,12 @@ subroutine atmosphere_dynamics ( Time ) ! zero out t_dt for use as an accumulator t_dt = 0. - call mpp_clock_end (id_subgridz) + call timing_off('FV_SUBGRID_Z') + + call mpp_clock_end (id_subgrid) + call mpp_clock_end (id_dycore) + + call timing_off('ATMOS_DYNAMICS') end subroutine atmosphere_dynamics @@ -587,6 +607,8 @@ subroutine atmosphere_end (Time, Grid_box )!rab, Radiation, Physics) !rab type (radiation_type), intent(inout) :: Radiation !rab type (physics_type), intent(inout) :: Physics + call timing_on('ATMOS_END') + ! 
initialize domains for writing global physics data if ( Atm(mygrid)%flagstruct%nudge ) call fv_nwp_nudge_end @@ -612,6 +634,10 @@ subroutine atmosphere_end (Time, Grid_box )!rab, Radiation, Physics) deallocate( u_dt, v_dt, t_dt, qv_dt, ps_dt, pref, dum1d ) + call timing_off('ATMOS_END') + call timing_off('ATMOS_TOTAL') + call timing_prt( mpp_pe() ) + end subroutine atmosphere_end @@ -657,10 +683,12 @@ subroutine atmosphere_pref (p_ref) end subroutine atmosphere_pref - subroutine atmosphere_control_data (i1, i2, j1, j2, kt, p_hydro, hydro, tile_num) + subroutine atmosphere_control_data (i1, i2, j1, j2, kt, p_hydro, hydro, tile_num, & + do_inline_mp, do_cosp) integer, intent(out) :: i1, i2, j1, j2, kt logical, intent(out), optional :: p_hydro, hydro integer, intent(out), optional :: tile_num + logical, intent(out), optional :: do_inline_mp, do_cosp i1 = Atm(mygrid)%bd%isc i2 = Atm(mygrid)%bd%iec j1 = Atm(mygrid)%bd%jsc @@ -670,6 +698,8 @@ subroutine atmosphere_control_data (i1, i2, j1, j2, kt, p_hydro, hydro, tile_num if (present(p_hydro)) p_hydro = Atm(mygrid)%flagstruct%phys_hydrostatic if (present( hydro)) hydro = Atm(mygrid)%flagstruct%hydrostatic if (present(tile_num)) tile_num = Atm(mygrid)%global_tile + if (present(do_inline_mp)) do_inline_mp = Atm(mygrid)%flagstruct%do_inline_mp + if (present(do_cosp)) do_cosp = Atm(mygrid)%flagstruct%do_cosp end subroutine atmosphere_control_data @@ -1167,6 +1197,10 @@ subroutine atmosphere_state_update (Time, IPD_Data, IAU_Data, Atm_block) real :: tracer_clock, lat_thresh, fhr character(len=32) :: tracer_name + call timing_on('ATMOS_UPDATE') + + call mpp_clock_begin (id_update) + Time_prev = Time Time_next = Time + Time_step_atmos rdt = 1.d0 / dt_atmos @@ -1230,8 +1264,6 @@ subroutine atmosphere_state_update (Time, IPD_Data, IAU_Data, Atm_block) enddo endif - call timing_on('GFS_TENDENCIES') - call atmos_phys_qdt_diag(Atm(n)%q, Atm(n)%phys_diag, nt_dyn, dt_atmos, .true.) !--- put u/v tendencies into haloed arrays u_dt and v_dt @@ -1336,7 +1368,6 @@ subroutine atmosphere_state_update (Time, IPD_Data, IAU_Data, Atm_block) endif call atmos_phys_qdt_diag(Atm(n)%q, Atm(n)%phys_diag, nt_dyn, dt_atmos, .false.) - call timing_off('GFS_TENDENCIES') w_diff = get_tracer_index (MODEL_ATMOS, 'w_diff' ) if ( w_diff /= NO_TRACER ) then @@ -1364,8 +1395,7 @@ subroutine atmosphere_state_update (Time, IPD_Data, IAU_Data, Atm_block) endif #endif - call mpp_clock_begin (id_dynam) - call timing_on('FV_UPDATE_PHYS') + call timing_on('FV_UPDATE_PHYS') call fv_update_phys( dt_atmos, isc, iec, jsc, jec, isd, ied, jsd, jed, Atm(n)%ng, nt_dyn, & Atm(n)%u, Atm(n)%v, Atm(n)%w, Atm(n)%delp, Atm(n)%pt, & Atm(n)%q, Atm(n)%qdiag, & @@ -1378,8 +1408,7 @@ subroutine atmosphere_state_update (Time, IPD_Data, IAU_Data, Atm_block) Atm(n)%npx, Atm(n)%npy, Atm(n)%npz, Atm(n)%flagstruct, & Atm(n)%neststruct, Atm(n)%bd, Atm(n)%domain, & Atm(n)%ptop, Atm(n)%phys_diag, Atm(n)%nudge_diag) - call timing_off('FV_UPDATE_PHYS') - call mpp_clock_end (id_dynam) + call timing_off('FV_UPDATE_PHYS') !MT surface pressure tendency (hPa/3hr) ps_dt(:,:)=(Atm(n)%ps(:,:)-ps_dt(:,:))*rdt*108. 
@@ -1441,9 +1470,12 @@ subroutine atmosphere_state_update (Time, IPD_Data, IAU_Data, Atm_block) call timing_off('TWOWAY_UPDATE') endif + call mpp_clock_end (id_update) + + call mpp_clock_begin(id_fv_diag) + !---- diagnostics for FV dynamics ----- if (Atm(mygrid)%flagstruct%print_freq /= -99) then - call mpp_clock_begin(id_fv_diag) fv_time = Time_next call get_time (fv_time, seconds, days) @@ -1456,9 +1488,12 @@ subroutine atmosphere_state_update (Time, IPD_Data, IAU_Data, Atm_block) first_diag = .false. call timing_off('FV_DIAG') - call mpp_clock_end(id_fv_diag) endif + call mpp_clock_end(id_fv_diag) + + call timing_off('ATMOS_UPDATE') + end subroutine atmosphere_state_update @@ -1502,7 +1537,6 @@ subroutine adiabatic_init(zvir,nudge_dz) jsd = jsc - ngc jed = jec + ngc - call timing_on('adiabatic_init') do_adiabatic_init = .true. allocate ( u0(isc:iec, jsc:jec+1, npz) ) @@ -1553,8 +1587,8 @@ subroutine adiabatic_init(zvir,nudge_dz) call fv_dynamics(Atm(mygrid)%npx, Atm(mygrid)%npy, npz, nq, Atm(mygrid)%ng, dt_atmos, 0., & Atm(mygrid)%flagstruct%fill, Atm(mygrid)%flagstruct%reproduce_sum, kappa, cp_air, zvir, & Atm(mygrid)%ptop, Atm(mygrid)%ks, nq, Atm(mygrid)%flagstruct%n_split, & - Atm(mygrid)%flagstruct%q_split, Atm(mygrid)%u, Atm(mygrid)%v, Atm(mygrid)%w, & - Atm(mygrid)%delz, Atm(mygrid)%flagstruct%hydrostatic, & + Atm(mygrid)%flagstruct%q_split, Atm(mygrid)%u0, Atm(mygrid)%v0, Atm(mygrid)%u, & + Atm(mygrid)%v, Atm(mygrid)%w, Atm(mygrid)%delz, Atm(mygrid)%flagstruct%hydrostatic, & Atm(mygrid)%pt, Atm(mygrid)%delp, Atm(mygrid)%q, Atm(mygrid)%ps, & Atm(mygrid)%pe, Atm(mygrid)%pk, Atm(mygrid)%peln, Atm(mygrid)%pkz, Atm(mygrid)%phis, & Atm(mygrid)%q_con, Atm(mygrid)%omga, Atm(mygrid)%ua, Atm(mygrid)%va, Atm(mygrid)%uc, Atm(mygrid)%vc, & @@ -1567,8 +1601,8 @@ subroutine adiabatic_init(zvir,nudge_dz) call fv_dynamics(Atm(mygrid)%npx, Atm(mygrid)%npy, npz, nq, Atm(mygrid)%ng, -dt_atmos, 0., & Atm(mygrid)%flagstruct%fill, Atm(mygrid)%flagstruct%reproduce_sum, kappa, cp_air, zvir, & Atm(mygrid)%ptop, Atm(mygrid)%ks, nq, Atm(mygrid)%flagstruct%n_split, & - Atm(mygrid)%flagstruct%q_split, Atm(mygrid)%u, Atm(mygrid)%v, Atm(mygrid)%w, & - Atm(mygrid)%delz, Atm(mygrid)%flagstruct%hydrostatic, & + Atm(mygrid)%flagstruct%q_split, Atm(mygrid)%u0, Atm(mygrid)%v0, Atm(mygrid)%u, & + Atm(mygrid)%v, Atm(mygrid)%w, Atm(mygrid)%delz, Atm(mygrid)%flagstruct%hydrostatic, & Atm(mygrid)%pt, Atm(mygrid)%delp, Atm(mygrid)%q, Atm(mygrid)%ps, & Atm(mygrid)%pe, Atm(mygrid)%pk, Atm(mygrid)%peln, Atm(mygrid)%pkz, Atm(mygrid)%phis, & Atm(mygrid)%q_con, Atm(mygrid)%omga, Atm(mygrid)%ua, Atm(mygrid)%va, Atm(mygrid)%uc, Atm(mygrid)%vc, & @@ -1639,8 +1673,8 @@ subroutine adiabatic_init(zvir,nudge_dz) call fv_dynamics(Atm(mygrid)%npx, Atm(mygrid)%npy, npz, nq, Atm(mygrid)%ng, -dt_atmos, 0., & Atm(mygrid)%flagstruct%fill, Atm(mygrid)%flagstruct%reproduce_sum, kappa, cp_air, zvir, & Atm(mygrid)%ptop, Atm(mygrid)%ks, nq, Atm(mygrid)%flagstruct%n_split, & - Atm(mygrid)%flagstruct%q_split, Atm(mygrid)%u, Atm(mygrid)%v, Atm(mygrid)%w, & - Atm(mygrid)%delz, Atm(mygrid)%flagstruct%hydrostatic, & + Atm(mygrid)%flagstruct%q_split, Atm(mygrid)%u0, Atm(mygrid)%v0, Atm(mygrid)%u, & + Atm(mygrid)%v, Atm(mygrid)%w, Atm(mygrid)%delz, Atm(mygrid)%flagstruct%hydrostatic, & Atm(mygrid)%pt, Atm(mygrid)%delp, Atm(mygrid)%q, Atm(mygrid)%ps, & Atm(mygrid)%pe, Atm(mygrid)%pk, Atm(mygrid)%peln, Atm(mygrid)%pkz, Atm(mygrid)%phis, & Atm(mygrid)%q_con, Atm(mygrid)%omga, Atm(mygrid)%ua, Atm(mygrid)%va, Atm(mygrid)%uc, Atm(mygrid)%vc, & @@ -1653,8 
+1687,8 @@ subroutine adiabatic_init(zvir,nudge_dz) call fv_dynamics(Atm(mygrid)%npx, Atm(mygrid)%npy, npz, nq, Atm(mygrid)%ng, dt_atmos, 0., & Atm(mygrid)%flagstruct%fill, Atm(mygrid)%flagstruct%reproduce_sum, kappa, cp_air, zvir, & Atm(mygrid)%ptop, Atm(mygrid)%ks, nq, Atm(mygrid)%flagstruct%n_split, & - Atm(mygrid)%flagstruct%q_split, Atm(mygrid)%u, Atm(mygrid)%v, Atm(mygrid)%w, & - Atm(mygrid)%delz, Atm(mygrid)%flagstruct%hydrostatic, & + Atm(mygrid)%flagstruct%q_split, Atm(mygrid)%u0, Atm(mygrid)%v0, Atm(mygrid)%u, & + Atm(mygrid)%v, Atm(mygrid)%w, Atm(mygrid)%delz, Atm(mygrid)%flagstruct%hydrostatic, & Atm(mygrid)%pt, Atm(mygrid)%delp, Atm(mygrid)%q, Atm(mygrid)%ps, & Atm(mygrid)%pe, Atm(mygrid)%pk, Atm(mygrid)%peln, Atm(mygrid)%pkz, Atm(mygrid)%phis, & Atm(mygrid)%q_con, Atm(mygrid)%omga, Atm(mygrid)%ua, Atm(mygrid)%va, Atm(mygrid)%uc, Atm(mygrid)%vc, & @@ -1704,7 +1738,6 @@ subroutine adiabatic_init(zvir,nudge_dz) if ( allocated(dz0) ) deallocate ( dz0 ) do_adiabatic_init = .false. - call timing_off('adiabatic_init') end subroutine adiabatic_init @@ -1776,14 +1809,16 @@ subroutine atmos_phys_driver_statein (IPD_Data, Atm_block) IPD_Data(nb)%Statein%prei(ix) = _DBL_(_RL_(Atm(mygrid)%inline_mp%prei(i,j))) IPD_Data(nb)%Statein%pres(ix) = _DBL_(_RL_(Atm(mygrid)%inline_mp%pres(i,j))) IPD_Data(nb)%Statein%preg(ix) = _DBL_(_RL_(Atm(mygrid)%inline_mp%preg(i,j))) - do k = 1, npz - k1 = npz+1-k ! flipping the index - IPD_Data(nb)%Statein%prefluxw(ix,k) = _DBL_(_RL_(Atm(mygrid)%inline_mp%prefluxw(i,j,k1))) - IPD_Data(nb)%Statein%prefluxr(ix,k) = _DBL_(_RL_(Atm(mygrid)%inline_mp%prefluxr(i,j,k1))) - IPD_Data(nb)%Statein%prefluxi(ix,k) = _DBL_(_RL_(Atm(mygrid)%inline_mp%prefluxi(i,j,k1))) - IPD_Data(nb)%Statein%prefluxs(ix,k) = _DBL_(_RL_(Atm(mygrid)%inline_mp%prefluxs(i,j,k1))) - IPD_Data(nb)%Statein%prefluxg(ix,k) = _DBL_(_RL_(Atm(mygrid)%inline_mp%prefluxg(i,j,k1))) - enddo + if (Atm(mygrid)%flagstruct%do_cosp) then + do k = 1, npz + k1 = npz+1-k ! flipping the index + IPD_Data(nb)%Statein%prefluxw(ix,k) = _DBL_(_RL_(Atm(mygrid)%inline_mp%prefluxw(i,j,k1))) + IPD_Data(nb)%Statein%prefluxr(ix,k) = _DBL_(_RL_(Atm(mygrid)%inline_mp%prefluxr(i,j,k1))) + IPD_Data(nb)%Statein%prefluxi(ix,k) = _DBL_(_RL_(Atm(mygrid)%inline_mp%prefluxi(i,j,k1))) + IPD_Data(nb)%Statein%prefluxs(ix,k) = _DBL_(_RL_(Atm(mygrid)%inline_mp%prefluxs(i,j,k1))) + IPD_Data(nb)%Statein%prefluxg(ix,k) = _DBL_(_RL_(Atm(mygrid)%inline_mp%prefluxg(i,j,k1))) + enddo + endif enddo endif @@ -1898,7 +1933,6 @@ subroutine atmos_phys_driver_statein (IPD_Data, Atm_block) enddo enddo endif - IPD_Data(nb)%Statein%dycore_hydrostatic = Atm(mygrid)%flagstruct%hydrostatic IPD_Data(nb)%Statein%nwat = Atm(mygrid)%flagstruct%nwat enddo diff --git a/driver/solo/atmosphere.F90 b/driver/solo/atmosphere.F90 index 3d2cd7526..fcc828e27 100644 --- a/driver/solo/atmosphere.F90 +++ b/driver/solo/atmosphere.F90 @@ -38,7 +38,9 @@ module atmosphere_mod use mpp_mod, only: input_nml_file use time_manager_mod, only: time_type, get_time, set_time, operator(+) use mpp_domains_mod, only: domain2d -use mpp_mod, only: input_nml_file +use mpp_mod, only: input_nml_file, mpp_sync_self, mpp_sync, & + mpp_set_current_pelist, mpp_npes, & + mpp_get_current_pelist !------------------ ! 
FV specific codes: !------------------ @@ -46,7 +48,7 @@ module atmosphere_mod use fv_control_mod, only: fv_control_init, fv_end, ngrids use fv_phys_mod, only: fv_phys, fv_nudge, fv_phys_init use fv_diagnostics_mod, only: fv_diag_init, fv_diag, fv_time, eqv_pot -use fv_timing_mod, only: timing_on, timing_off +use fv_timing_mod, only: timing_on, timing_off, timing_init, timing_prt use fv_restart_mod, only: fv_restart use fv_dynamics_mod, only: fv_dynamics use fv_nesting_mod, only: twoway_nesting @@ -60,6 +62,7 @@ module atmosphere_mod private public atmosphere_init, atmosphere, atmosphere_end, atmosphere_domain +public mygrid !----------------------------------------------------------------------- !---- private data ---- @@ -77,7 +80,8 @@ module atmosphere_mod type(fv_atmos_type), allocatable, target :: Atm(:) logical, allocatable :: grids_on_this_pe(:) -integer :: this_grid !not used yet +integer :: mygrid = 1 !not used yet +integer, allocatable :: pelist(:) integer :: axes(4) integer:: isd, ied, jsd, jed, ngc !----------------------------------------------------------------------- @@ -104,7 +108,12 @@ subroutine atmosphere_init ( Time_init, Time, Time_step ) integer :: nlunit = 9999 character (len = 64) :: fn_nml = 'input.nml' - call timing_on('ATMOS_INIT') + call timing_init + call timing_on('ATMOS_TOTAL') + call timing_on('ATMOS_INIT') + + allocate(pelist(mpp_npes())) + call mpp_get_current_pelist(pelist) !----- write version and namelist to log file ----- call write_version_number ( 'SOLO/ATMOSPHERE_MOD', version ) @@ -118,80 +127,65 @@ subroutine atmosphere_init ( Time_init, Time, Time_step ) !----- initialize FV dynamical core ----- cold_start = (.not.file_exists('INPUT/fv_core.res.nc') .and. .not.file_exists('INPUT/fv_core.res.tile1.nc')) - call fv_control_init(Atm, dt_atmos, this_grid, grids_on_this_pe, p_split) ! allocates Atm components + call fv_control_init(Atm, dt_atmos, mygrid, grids_on_this_pe, p_split) ! allocates Atm components - do n=1,ngrids - if (grids_on_this_pe(n)) mytile = n - enddo + call mpp_set_current_pelist(Atm(mygrid)%pelist, no_sync=.TRUE.) - call timing_on('fv_restart') - call fv_restart(Atm(1)%domain, Atm, dt_atmos, seconds, days, cold_start, & - Atm(1)%flagstruct%grid_type, mytile) - call timing_off('fv_restart') + call timing_on('FV_RESTART') + call fv_restart(Atm(mygrid)%domain, Atm, dt_atmos, seconds, days, cold_start, & + Atm(mygrid)%flagstruct%grid_type, mygrid) + call timing_off('FV_RESTART') fv_time = time - do n=1,ngrids - - isc = Atm(n)%bd%isc - iec = Atm(n)%bd%iec - jsc = Atm(n)%bd%jsc - jec = Atm(n)%bd%jec - isd = Atm(n)%bd%isd - ied = Atm(n)%bd%ied - jsd = Atm(n)%bd%jsd - jed = Atm(n)%bd%jed - - - if ( grids_on_this_pe(n)) then - - Atm(N)%flagstruct%moist_phys = .false. ! need this for fv_diag calendar - call fv_diag_init(Atm(n:n), axes, Time, Atm(n)%npx, Atm(n)%npy, Atm(n)%npz, Atm(n)%flagstruct%p_ref) - + isc = Atm(mygrid)%bd%isc + iec = Atm(mygrid)%bd%iec + jsc = Atm(mygrid)%bd%jsc + jec = Atm(mygrid)%bd%jec + isd = Atm(mygrid)%bd%isd + ied = Atm(mygrid)%bd%ied + jsd = Atm(mygrid)%bd%jsd + jed = Atm(mygrid)%bd%jed + + Atm(mygrid)%flagstruct%moist_phys = .false. ! need this for fv_diag calendar + call fv_diag_init(Atm(mygrid:mygrid), axes, Time, Atm(mygrid)%npx, Atm(mygrid)%npy, Atm(mygrid)%npz, Atm(mygrid)%flagstruct%p_ref) + + ! if ( Atm(n)%flagstruct%adiabatic .or. Atm(n)%flagstruct%do_Held_Suarez ) then + zvir = 0. + if ( Atm(mygrid)%flagstruct%adiabatic ) then + Atm(mygrid)%flagstruct%moist_phys = .false. + else + zvir = rvgas/rdgas - 1. 
+ Atm(mygrid)%flagstruct%moist_phys = .true. + call fv_phys_init(isc,iec,jsc,jec,Atm(mygrid)%npz,Atm(mygrid)%flagstruct%nwat, Atm(mygrid)%ts, Atm(mygrid)%pt(isc:iec,jsc:jec,:), & + Time, axes, Atm(mygrid)%gridstruct%agrid(isc:iec,jsc:jec,2)) + endif + + if (.not. Atm(mygrid)%flagstruct%adiabatic) call gfdl_mp_init (input_nml_file, stdlog(), Atm(mygrid)%flagstruct%hydrostatic) + + + if ( Atm(mygrid)%flagstruct%nudge ) & + call fv_nwp_nudge_init( Time, axes, Atm(mygrid)%npz, zvir, Atm(mygrid)%ak, Atm(mygrid)%bk, Atm(mygrid)%ts, & + Atm(mygrid)%phis, Atm(mygrid)%gridstruct, Atm(mygrid)%ks, Atm(mygrid)%npx, Atm(mygrid)%neststruct, Atm(mygrid)%bd) + + if ( Atm(mygrid)%flagstruct%make_nh ) then + Atm(mygrid)%w(:,:,:) = 0. endif - ! if ( Atm(n)%flagstruct%adiabatic .or. Atm(n)%flagstruct%do_Held_Suarez ) then - zvir = 0. - if ( Atm(n)%flagstruct%adiabatic ) then - Atm(n)%flagstruct%moist_phys = .false. - else - zvir = rvgas/rdgas - 1. - Atm(n)%flagstruct%moist_phys = .true. - if ( grids_on_this_pe(n) ) then - call fv_phys_init(isc,iec,jsc,jec,Atm(n)%npz,Atm(n)%flagstruct%nwat, Atm(n)%ts, Atm(n)%pt(isc:iec,jsc:jec,:), & - Time, axes, Atm(n)%gridstruct%agrid(isc:iec,jsc:jec,2)) - endif + if ( Atm(mygrid)%flagstruct%na_init>0 ) then + call adiabatic_init(zvir,mygrid) endif - if (.not. Atm(n)%flagstruct%adiabatic) call gfdl_mp_init (input_nml_file, stdlog(), Atm(n)%flagstruct%hydrostatic) - - - - if ( grids_on_this_pe(n) ) then - - if ( Atm(n)%flagstruct%nudge ) & - call fv_nwp_nudge_init( Time, axes, Atm(n)%npz, zvir, Atm(n)%ak, Atm(n)%bk, Atm(n)%ts, & - Atm(n)%phis, Atm(n)%gridstruct, Atm(n)%ks, Atm(n)%npx, Atm(n)%neststruct, Atm(n)%bd) - - if ( Atm(n)%flagstruct%make_nh ) then - Atm(n)%w(:,:,:) = 0. - endif - - if ( Atm(n)%flagstruct%na_init>0 ) then - call adiabatic_init(zvir,n) - endif - - theta_d = get_tracer_index (MODEL_ATMOS, 'theta_d') - if ( theta_d > 0 ) then - call eqv_pot(Atm(n)%q(isc:iec,jsc:jec,:,theta_d), Atm(n)%pt, Atm(n)%delp, & - Atm(n)%delz, Atm(n)%peln, Atm(n)%pkz, Atm(n)%q(isd,jsd,1,1), isc, iec, jsc, jec, Atm(n)%ng, & - Atm(n)%npz, Atm(n)%flagstruct%hydrostatic, Atm(n)%flagstruct%moist_phys) - endif + theta_d = get_tracer_index (MODEL_ATMOS, 'theta_d') + if ( theta_d > 0 ) then + call eqv_pot(Atm(mygrid)%q(isc:iec,jsc:jec,:,theta_d), Atm(mygrid)%pt, Atm(mygrid)%delp, & + Atm(mygrid)%delz, Atm(mygrid)%peln, Atm(mygrid)%pkz, Atm(mygrid)%q(isd,jsd,1,1), isc, iec, jsc, jec, Atm(mygrid)%ng, & + Atm(mygrid)%npz, Atm(mygrid)%flagstruct%hydrostatic, Atm(mygrid)%flagstruct%moist_phys) endif - enddo - call timing_off('ATMOS_INIT') + call timing_off('ATMOS_INIT') + end subroutine atmosphere_init subroutine adiabatic_init(zvir, n) @@ -228,7 +222,6 @@ subroutine adiabatic_init(zvir, n) jsd = jsc - ngc jed = jec + ngc - call timing_on('adiabatic_init') do_adiabatic_init = .true. 
allocate ( u0(isc:iec, jsc:jec+1, npz) ) @@ -265,8 +258,8 @@ subroutine adiabatic_init(zvir, n) call fv_dynamics(Atm(n)%npx, Atm(n)%npy, npz, Atm(n)%ncnst, Atm(n)%ng, dt_atmos, 0., & Atm(n)%flagstruct%fill, Atm(n)%flagstruct%reproduce_sum, kappa, cp_air, zvir, & Atm(n)%ptop, Atm(n)%ks, Atm(n)%ncnst, Atm(n)%flagstruct%n_split, & - Atm(n)%flagstruct%q_split, Atm(n)%u, Atm(n)%v, Atm(n)%w, & - Atm(n)%delz, Atm(n)%flagstruct%hydrostatic, & + Atm(n)%flagstruct%q_split, Atm(n)%u0, Atm(n)%v0, Atm(n)%u, & + Atm(n)%v, Atm(n)%w, Atm(n)%delz, Atm(n)%flagstruct%hydrostatic, & Atm(n)%pt, Atm(n)%delp, Atm(n)%q, Atm(n)%ps, & Atm(n)%pe, Atm(n)%pk, Atm(n)%peln, Atm(n)%pkz, Atm(n)%phis, & Atm(n)%q_con, Atm(n)%omga, Atm(n)%ua, Atm(n)%va, Atm(n)%uc, Atm(n)%vc, & @@ -279,8 +272,8 @@ subroutine adiabatic_init(zvir, n) call fv_dynamics(Atm(n)%npx, Atm(n)%npy, npz, Atm(n)%ncnst, Atm(n)%ng, -dt_atmos, 0., & Atm(n)%flagstruct%fill, Atm(n)%flagstruct%reproduce_sum, kappa, cp_air, zvir, & Atm(n)%ptop, Atm(n)%ks, Atm(n)%ncnst, Atm(n)%flagstruct%n_split, & - Atm(n)%flagstruct%q_split, Atm(n)%u, Atm(n)%v, Atm(n)%w, & - Atm(n)%delz, Atm(n)%flagstruct%hydrostatic, & + Atm(n)%flagstruct%q_split, Atm(n)%u0, Atm(n)%v0, Atm(n)%u, & + Atm(n)%v, Atm(n)%w, Atm(n)%delz, Atm(n)%flagstruct%hydrostatic, & Atm(n)%pt, Atm(n)%delp, Atm(n)%q, Atm(n)%ps, & Atm(n)%pe, Atm(n)%pk, Atm(n)%peln, Atm(n)%pkz, Atm(n)%phis, & Atm(n)%q_con, Atm(n)%omga, Atm(n)%ua, Atm(n)%va, Atm(n)%uc, Atm(n)%vc, & @@ -326,8 +319,8 @@ subroutine adiabatic_init(zvir, n) call fv_dynamics(Atm(n)%npx, Atm(n)%npy, npz, Atm(n)%ncnst, Atm(n)%ng, -dt_atmos, 0., & Atm(n)%flagstruct%fill, Atm(n)%flagstruct%reproduce_sum, kappa, cp_air, zvir, & Atm(n)%ptop, Atm(n)%ks, Atm(n)%ncnst, Atm(n)%flagstruct%n_split, & - Atm(n)%flagstruct%q_split, Atm(n)%u, Atm(n)%v, Atm(n)%w, & - Atm(n)%delz, Atm(n)%flagstruct%hydrostatic, & + Atm(n)%flagstruct%q_split, Atm(n)%u0, Atm(n)%v0, Atm(n)%u, & + Atm(n)%v, Atm(n)%w, Atm(n)%delz, Atm(n)%flagstruct%hydrostatic, & Atm(n)%pt, Atm(n)%delp, Atm(n)%q, Atm(n)%ps, & Atm(n)%pe, Atm(n)%pk, Atm(n)%peln, Atm(n)%pkz, Atm(n)%phis, & Atm(n)%q_con, Atm(n)%omga, Atm(n)%ua, Atm(n)%va, Atm(n)%uc, Atm(n)%vc, & @@ -340,8 +333,8 @@ subroutine adiabatic_init(zvir, n) call fv_dynamics(Atm(n)%npx, Atm(n)%npy, npz, Atm(n)%ncnst, Atm(n)%ng, dt_atmos, 0., & Atm(n)%flagstruct%fill, Atm(n)%flagstruct%reproduce_sum, kappa, cp_air, zvir, & Atm(n)%ptop, Atm(n)%ks, Atm(n)%ncnst, Atm(n)%flagstruct%n_split, & - Atm(n)%flagstruct%q_split, Atm(n)%u, Atm(n)%v, Atm(n)%w, & - Atm(n)%delz, Atm(n)%flagstruct%hydrostatic, & + Atm(n)%flagstruct%q_split, Atm(n)%u0, Atm(n)%v0, Atm(n)%u, & + Atm(n)%v, Atm(n)%w, Atm(n)%delz, Atm(n)%flagstruct%hydrostatic, & Atm(n)%pt, Atm(n)%delp, Atm(n)%q, Atm(n)%ps, & Atm(n)%pe, Atm(n)%pk, Atm(n)%peln, Atm(n)%pkz, Atm(n)%phis, & Atm(n)%q_con, Atm(n)%omga, Atm(n)%ua, Atm(n)%va, Atm(n)%uc, Atm(n)%vc, & @@ -391,7 +384,6 @@ subroutine adiabatic_init(zvir, n) deallocate (dp0 ) do_adiabatic_init = .false. - call timing_off('adiabatic_init') end subroutine adiabatic_init @@ -405,8 +397,8 @@ subroutine atmosphere (Time) integer :: n, sphum, p, nc integer :: psc ! p_split counter + call timing_on('ATMOS_DYNAMICS') - call timing_on('ATMOSPHERE') fv_time = Time + Time_step_atmos call get_time (fv_time, seconds, days) @@ -414,105 +406,89 @@ subroutine atmosphere (Time) do psc=1,abs(p_split) - do n=1,ngrids + n=mygrid - if (.not. grids_on_this_pe(n)) then - cycle - endif + call mpp_set_current_pelist(Atm(n)%pelist, no_sync=.TRUE.) 
if ( Atm(n)%flagstruct%nudge_ic ) & call fv_nudge(Atm(n)%npz, Atm(n)%bd%isc, Atm(n)%bd%iec, Atm(n)%bd%jsc, Atm(n)%bd%jec, Atm(n)%ng, & Atm(n)%u, Atm(n)%v, Atm(n)%w, Atm(n)%delz, Atm(n)%delp, Atm(n)%pt, dt_atmos/real(abs(p_split)), Atm(n)%flagstruct%hydrostatic ) - !---- call fv dynamics ----- -! if ( Atm(n)%flagstruct%adiabatic .or. Atm(n)%flagstruct%do_Held_Suarez ) then - if ( Atm(n)%flagstruct%adiabatic ) then - zvir = 0. ! no virtual effect - else - zvir = rvgas/rdgas - 1. - endif + !---- call fv dynamics ----- + !if ( Atm(n)%flagstruct%adiabatic .or. Atm(n)%flagstruct%do_Held_Suarez ) then + if ( Atm(n)%flagstruct%adiabatic ) then + zvir = 0. ! no virtual effect + else + zvir = rvgas/rdgas - 1. + endif - call timing_on('fv_dynamics') - call fv_dynamics(Atm(n)%npx, Atm(n)%npy, Atm(n)%npz, Atm(n)%ncnst, Atm(n)%ng, & - dt_atmos/real(abs(p_split)), Atm(n)%flagstruct%consv_te, Atm(n)%flagstruct%fill, & - Atm(n)%flagstruct%reproduce_sum, kappa, & - cp_air, zvir, Atm(n)%ptop, Atm(n)%ks, Atm(n)%ncnst, & - Atm(n)%flagstruct%n_split, Atm(n)%flagstruct%q_split, & - Atm(n)%u, Atm(n)%v, Atm(n)%w, Atm(n)%delz, & - Atm(n)%flagstruct%hydrostatic, Atm(n)%pt, Atm(n)%delp, Atm(n)%q, Atm(n)%ps, & - Atm(n)%pe, Atm(n)%pk, Atm(n)%peln, Atm(n)%pkz, & - Atm(n)%phis, Atm(n)%q_con, Atm(n)%omga, Atm(n)%ua, Atm(n)%va, Atm(n)%uc, Atm(n)%vc, & - Atm(n)%ak, Atm(n)%bk, Atm(n)%mfx, Atm(n)%mfy, Atm(n)%cx, Atm(n)%cy, & - Atm(n)%ze0, Atm(n)%flagstruct%hybrid_z, Atm(n)%gridstruct, Atm(n)%flagstruct, & - Atm(n)%neststruct, Atm(n)%idiag, Atm(n)%bd, Atm(n)%parent_grid, Atm(n)%domain, & - Atm(n)%inline_mp, Atm(n)%diss_est, time_total=time_total) - call timing_off('fv_dynamics') - end do + call timing_on('FV_DYNAMICS') + call fv_dynamics(Atm(n)%npx, Atm(n)%npy, Atm(n)%npz, Atm(n)%ncnst, Atm(n)%ng, & + dt_atmos/real(abs(p_split)), Atm(n)%flagstruct%consv_te, Atm(n)%flagstruct%fill, & + Atm(n)%flagstruct%reproduce_sum, kappa, & + cp_air, zvir, Atm(n)%ptop, Atm(n)%ks, Atm(n)%ncnst, & + Atm(n)%flagstruct%n_split, Atm(n)%flagstruct%q_split, & + Atm(n)%u0, Atm(n)%v0, Atm(n)%u, Atm(n)%v, Atm(n)%w, Atm(n)%delz, & + Atm(n)%flagstruct%hydrostatic, Atm(n)%pt, Atm(n)%delp, Atm(n)%q, Atm(n)%ps, & + Atm(n)%pe, Atm(n)%pk, Atm(n)%peln, Atm(n)%pkz, & + Atm(n)%phis, Atm(n)%q_con, Atm(n)%omga, Atm(n)%ua, Atm(n)%va, Atm(n)%uc, Atm(n)%vc, & + Atm(n)%ak, Atm(n)%bk, Atm(n)%mfx, Atm(n)%mfy, Atm(n)%cx, Atm(n)%cy, & + Atm(n)%ze0, Atm(n)%flagstruct%hybrid_z, Atm(n)%gridstruct, Atm(n)%flagstruct, & + Atm(n)%neststruct, Atm(n)%idiag, Atm(n)%bd, Atm(n)%parent_grid, Atm(n)%domain, & + Atm(n)%inline_mp, Atm(n)%diss_est, time_total=time_total) + call timing_off('FV_DYNAMICS') if (ngrids > 1 .and. (psc < p_split .or. p_split < 0)) then call timing_on('TWOWAY_UPDATE') - call twoway_nesting(Atm, ngrids, grids_on_this_pe, zvir, fv_time, mytile) + call twoway_nesting(Atm, ngrids, grids_on_this_pe, zvir, fv_time, mygrid) call timing_off('TWOWAY_UPDATE') endif end do !p_split - do n=1,ngrids - - if (.not. grids_on_this_pe(n)) then - cycle - endif - - if(Atm(n)%npz /=1 .and. .not. 
Atm(n)%flagstruct%adiabatic)then - - call timing_on('FV_PHYS') - call fv_phys(Atm(n)%npx, Atm(n)%npy, Atm(n)%npz, Atm(n)%bd%isc, Atm(n)%bd%iec, & - Atm(n)%bd%jsc, Atm(n)%bd%jec, Atm(n)%ng, Atm(n)%ncnst, & - Atm(n)%u, Atm(n)%v, Atm(n)%w, Atm(n)%pt, Atm(n)%q, Atm(n)%pe, & - Atm(n)%delp, Atm(n)%peln, Atm(n)%pkz, dt_atmos, & - Atm(n)%ua, Atm(n)%va, Atm(n)%phis, Atm(n)%gridstruct%agrid, & - Atm(n)%ptop, Atm(n)%ak, Atm(n)%bk, Atm(n)%ks, Atm(n)%ps, Atm(n)%pk, & - Atm(n)%u_srf, Atm(n)%v_srf, Atm(n)%ts, Atm(n)%delz, & - Atm(n)%flagstruct%hydrostatic, Atm(n)%oro, .false., & - Atm(n)%flagstruct%p_ref, & - Atm(n)%flagstruct%fv_sg_adj, Atm(n)%flagstruct%do_Held_Suarez, & - Atm(n)%gridstruct, Atm(n)%flagstruct, Atm(n)%neststruct, & - Atm(n)%flagstruct%nwat, Atm(n)%bd, & - Atm(n)%domain, fv_time, Atm(n)%phys_diag, Atm(n)%nudge_diag, time_total) - call timing_off('FV_PHYS') + if(Atm(n)%npz /=1 .and. .not. Atm(n)%flagstruct%adiabatic)then + + call timing_on('FV_PHYS') + call fv_phys(Atm(n)%npx, Atm(n)%npy, Atm(n)%npz, Atm(n)%bd%isc, Atm(n)%bd%iec, & + Atm(n)%bd%jsc, Atm(n)%bd%jec, Atm(n)%ng, Atm(n)%ncnst, & + Atm(n)%u, Atm(n)%v, Atm(n)%w, Atm(n)%pt, Atm(n)%q, Atm(n)%pe, & + Atm(n)%delp, Atm(n)%peln, Atm(n)%pkz, dt_atmos, & + Atm(n)%ua, Atm(n)%va, Atm(n)%phis, Atm(n)%gridstruct%agrid, & + Atm(n)%ptop, Atm(n)%ak, Atm(n)%bk, Atm(n)%ks, Atm(n)%ps, Atm(n)%pk, & + Atm(n)%u_srf, Atm(n)%v_srf, Atm(n)%ts, Atm(n)%delz, & + Atm(n)%flagstruct%hydrostatic, Atm(n)%oro, .false., & + Atm(n)%flagstruct%p_ref, & + Atm(n)%flagstruct%fv_sg_adj, Atm(n)%flagstruct%do_Held_Suarez, & + Atm(n)%gridstruct, Atm(n)%flagstruct, Atm(n)%neststruct, & + Atm(n)%flagstruct%nwat, Atm(n)%bd, & + Atm(n)%domain, fv_time, Atm(n)%phys_diag, Atm(n)%nudge_diag, time_total) + call timing_off('FV_PHYS') endif - end do - if (ngrids > 1 .and. p_split > 0) then call timing_on('TWOWAY_UPDATE') - call twoway_nesting(Atm, ngrids, grids_on_this_pe, zvir, fv_time, mytile) + call twoway_nesting(Atm, ngrids, grids_on_this_pe, zvir, fv_time, mygrid) call timing_off('TWOWAY_UPDATE') endif !---- diagnostics for FV dynamics ----- - do n=1,ngrids - - if (.not. grids_on_this_pe(n)) then - cycle - endif - !For correct diagnostics (may need to be changed for moist Held-Suarez) - if ( Atm(n)%flagstruct%adiabatic .or. Atm(n)%flagstruct%do_Held_Suarez ) then - zvir = 0. ! no virtual effect - else - zvir = rvgas/rdgas - 1. - endif + !For correct diagnostics (may need to be changed for moist Held-Suarez) + if ( Atm(n)%flagstruct%adiabatic .or. Atm(n)%flagstruct%do_Held_Suarez ) then + zvir = 0. ! no virtual effect + else + zvir = rvgas/rdgas - 1. + endif - call timing_on('FV_DIAG') - call fv_diag(Atm(n:n), zvir, fv_time, Atm(n)%flagstruct%print_freq) + call timing_on('FV_DIAG') + call fv_diag(Atm(n:n), zvir, fv_time, Atm(n)%flagstruct%print_freq) + call timing_off('FV_DIAG') - call timing_off('FV_DIAG') - end do + call timing_off('ATMOS_DYNAMICS') + call mpp_set_current_pelist() - call timing_off('ATMOSPHERE') end subroutine atmosphere @@ -520,15 +496,20 @@ subroutine atmosphere_end integer n + call timing_on('ATMOS_END') + call get_time (fv_time, seconds, days) - do n=1,ngrids - if ( Atm(n)%flagstruct%moist_phys .and. Atm(n)%flagstruct%nwat==6 .and. grids_on_this_pe(N)) call gfdl_mp_end - enddo + if ( Atm(mygrid)%flagstruct%moist_phys .and. 
Atm(mygrid)%flagstruct%nwat==6 ) call gfdl_mp_end - call fv_end(Atm, mytile) + call fv_end(Atm, mygrid) deallocate(Atm) + call timing_off('ATMOS_END') + call timing_off('ATMOS_TOTAL') + call timing_prt( mpp_pe() ) + call mpp_set_current_pelist() + end subroutine atmosphere_end subroutine atmosphere_domain ( fv_domain ) @@ -537,7 +518,7 @@ subroutine atmosphere_domain ( fv_domain ) ! returns the domain2d variable associated with the coupling grid ! note: coupling is done using the mass/temperature grid with no halos - fv_domain = Atm(mytile)%domain + fv_domain = Atm(mygrid)%domain end subroutine atmosphere_domain diff --git a/driver/solo/fv_phys.F90 b/driver/solo/fv_phys.F90 index ea31d252b..7f1ab7242 100644 --- a/driver/solo/fv_phys.F90 +++ b/driver/solo/fv_phys.F90 @@ -101,11 +101,14 @@ module fv_phys_mod integer:: seconds, days logical :: print_diag integer :: istep = 0 + logical :: do_zurita_HS = .false. !use form from Zurita-Gotor and Held 2022 + real :: rd_zur = 1./25. !close to traditional H-S !GFDL Simplified Physics (mostly Frierson) logical:: diurnal_cycle = .false. logical:: mixed_layer = .false. logical:: gray_rad = .false. + logical:: cloudy_rad = .false. logical:: strat_rad = .false. logical:: do_abl = .false. logical:: do_mon_obkv = .true. @@ -132,6 +135,7 @@ module fv_phys_mod real:: abl_s_fac = 0.1 real:: ml_c0 = 6.285E7 ! Ocean heat capabicity 4190*depth*e3, depth = 15. real:: sw_abs = 0. ! fraction of the solar absorbed/reflected by the atm + real :: fixed_sfc_htg = 0. !surface heating rate, K/s !Kessler parameters @@ -160,12 +164,15 @@ module fv_phys_mod tau_temp, tau_press, sst_restore_timescale, & do_K_warm_rain, do_GFDL_sim_phys,& do_reed_sim_phys, do_LS_cond, do_surf_drag, & - tau_surf_drag, do_terminator + tau_surf_drag, do_terminator, do_zurita_HS, rd_zur -namelist /GFDL_sim_phys_nml/ diurnal_cycle, mixed_layer, gray_rad, strat_rad, do_abl, do_mon_obkv, & - heating_rate, cooling_rate, uniform_sst, sst0, sst_type, shift_n, do_t_strat, p_strat, t_strat, tau_strat, & - mo_t_fac, tau_difz, prog_low_cloud, low_cf0, zero_winds, tau_zero, do_mo_fixed_cd, mo_cd, mo_u_mean, & - abl_s_fac, ml_c0, sw_abs +namelist /GFDL_sim_phys_nml/ diurnal_cycle, mixed_layer, gray_rad, & + cloudy_rad, strat_rad, do_abl, do_mon_obkv, & + heating_rate, cooling_rate, uniform_sst, sst0, & + sst_type, shift_n, do_t_strat, p_strat, t_strat, tau_strat, & + mo_t_fac, tau_difz, prog_low_cloud, low_cf0, & + zero_winds, tau_zero, do_mo_fixed_cd, mo_cd, mo_u_mean, & + abl_s_fac, ml_c0, sw_abs, fixed_sfc_htg namelist /Kessler_sim_phys_nml/ K_sedi_transport, do_K_sedi_w, do_K_sedi_heat, K_cycle @@ -452,12 +459,12 @@ subroutine fv_phys(npx, npy, npz, is, ie, js, je, ng, nq, & if ( do_GFDL_sim_phys ) then moist_phys = .true. - call timing_on('GFDL_SIM_PHYS') + call timing_on('GFDL_SIM_PHYS') call GFDL_sim_phys(npx, npy, npz, is, ie, js, je, ng, nq, nwat, pk, pkz, & u_dt, v_dt, t_dt, q_dt, u, v, w, ua, va, pt, delz, q, & pe, delp, peln, ts, oro, hydrostatic, pdt, grid, ak, bk, & !ts --> sst p_ref, Time, time_total, flagstruct%grid_type, gridstruct) - call timing_off('GFDL_SIM_PHYS') + call timing_off('GFDL_SIM_PHYS') no_tendency = .false. endif @@ -541,7 +548,7 @@ subroutine fv_phys(npx, npy, npz, is, ie, js, je, ng, nq, & u, v, pt, q, pe, delp, peln, pkz, pdt, & ua, va, u_dt, v_dt, t_dt, q_dt, grid, & delz, phis, hydrostatic, ak, bk, ks, & - do_strat_HS_forcing, .false., master, Time, time_total) + do_strat_HS_forcing, do_zurita_HS, rd_zur, master, Time, time_total) no_tendency = .false. 
elseif ( do_surf_drag ) then ! Bottom friction: @@ -577,10 +584,10 @@ subroutine fv_phys(npx, npy, npz, is, ie, js, je, ng, nq, & if (id_dudt > 0) then - used=send_data(id_dudt, u_dt(is:ie,js:je,npz), time) + used=send_data(id_dudt, u_dt(is:ie,js:je,:), time) endif if (id_dvdt > 0) then - used=send_data(id_dvdt, v_dt(is:ie,js:je,npz), time) + used=send_data(id_dvdt, v_dt(is:ie,js:je,:), time) endif if (id_dtdt > 0) then used=send_data(id_dtdt, t_dt(:,:,:), time) @@ -591,7 +598,7 @@ subroutine fv_phys(npx, npy, npz, is, ie, js, je, ng, nq, & if ( .not. no_tendency ) then - call timing_on('UPDATE_PHYS') + call timing_on('FV_UPDATE_PHYS') call fv_update_phys (pdt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, & u, v, w, delp, pt, q, qdiag, ua, va, ps, pe, peln, pk, pkz, & ak, bk, phis, u_srf, v_srf, ts, & @@ -601,7 +608,7 @@ subroutine fv_phys(npx, npy, npz, is, ie, js, je, ng, nq, & npx, npy, npz, flagstruct, neststruct, bd, domain, ptop, & phys_diag, nudge_diag, q_dt=q_dt) - call timing_off('UPDATE_PHYS') + call timing_off('FV_UPDATE_PHYS') endif deallocate ( u_dt ) deallocate ( v_dt ) @@ -840,6 +847,36 @@ subroutine GFDL_sim_phys(npx, npy, npz, is, ie, js, je, ng, nq, nwat, pk, pkz, endif endif + elseif (cloudy_rad) then + + do j=js,je + do k=1, km + do i=is,ie + den(i,k) = -delp(i,j,k)/(grav*dz(i,j,k)) + enddo + enddo + call cloudy_radiation(is, ie, km, t3(is:ie,j,1:km), & + q(is:ie,j,1:km,sphum), q(is:ie,j,1:km,liq_wat), & + delp(is:ie,j,1:km), & + dz(is:ie,j,1:km), den, t_dt_rad, & + olr(is,j), lwu(is,j), lwd(is,j), sw_surf(is,j)) + do k=1, km + do i=is,ie + t_dt(i,j,k) = t_dt(i,j,k) + t_dt_rad(i,k) + enddo + enddo + enddo + if ( print_diag ) then + olrm = g0_sum(olr, is, ie, js, je, 0, gridstruct%area(is:ie,js:je), 1) + swab = g0_sum(sw_surf, is, ie, js, je, 0, gridstruct%area(is:ie,js:je), 1) + + + if( master ) then + write(*,*) 'Domain mean OLR', trim(gn), ' =', olrm + write(*,*) 'Domain mean SWA', trim(gn), ' =', swab + endif + endif + else ! Prescribed (non-interating) heating/cooling rate: @@ -910,12 +947,12 @@ subroutine GFDL_sim_phys(npx, npy, npz, is, ie, js, je, ng, nq, nwat, pk, pkz, u_star(:,:) = 1.E-3 endif - call timing_on('mon_obkv') + call timing_on('MON_OBKV') call mon_obkv(zvir, ps, t3(is:ie,js:je, km), zfull(is:ie,js:je,km), & rho, p3(is:ie,js:je,km), u3(is:ie,js:je,km), v3(is:ie,js:je,km), mo_u_mean, do_mo_fixed_cd, mo_cd, sst, & qs, q3(is:ie,js:je,km,sphum), drag_t, drag_q, flux_t, flux_q, flux_u, flux_v, u_star, & delm, pdt, mu, mo_t_fac, master) - call timing_off('mon_obkv') + call timing_off('MON_OBKV') !--------------------------------------------------- ! delp/grav = delm = kg/m**2 ! watts = J/s = N*m/s = kg * m**2 / s**3 @@ -930,9 +967,14 @@ subroutine GFDL_sim_phys(npx, npy, npz, is, ie, js, je, ng, nq, nwat, pk, pkz, rate_v = flux_v(i,j)/delm(i,j) v3(i,j,km) = v3(i,j,km) + pdt*rate_v v_dt(i,j,km) = v_dt(i,j,km) + rate_v + if (abs(fixed_sfc_htg) > 1.e-8) then + rate_t = fixed_sfc_htg + flux_t(i,j) = rate_t*cp_air*delm(i,j) + else rate_t = flux_t(i,j)/(cp_air*delm(i,j)) + endif t_dt(i,j,km) = t_dt(i,j,km) + rate_t - t3(i,j,km) = t3(i,j,km) + rate_t*pdt + t3(i,j,km) = t3(i,j,km) + rate_t*pdt rate_q = flux_q(i,j)/delm(i,j) q_dt(i,j,km,sphum) = q_dt(i,j,km,sphum) + rate_q q3(i,j,km,sphum) = q3(i,j,km,sphum) + rate_q*pdt @@ -1029,7 +1071,7 @@ subroutine GFDL_sim_phys(npx, npy, npz, is, ie, js, je, ng, nq, nwat, pk, pkz, enddo enddo - if ( gray_rad ) then + if ( gray_rad .or. 
cloudy_rad) then do j=js, je do i=is, ie sst(i,j) = sst(i,j)+pdt*(sw_surf(i,j) + lwd(i,j) - rflux(i,j) & @@ -1149,6 +1191,12 @@ subroutine pbl_diff(hydrostatic, dt, is, ie, js, je, ng, km, nq, ua, va, & enddo 125 continue +!!! DEBUG CODE + if (is_master()) then + print*, 'ABL: pblh = ', sum(pblh)/((ie-is+1)*(je-js+1)) + endif +!!! END DEBUG CODE + do k=km, 1, -1 do i=is, ie if ( gh(i,k)>6.E3 .or. (pblh(i,j) < -0.5*dz(i,j,km)) ) then @@ -1544,7 +1592,109 @@ subroutine gray_radiation(sec, is, ie, km, lon, lat, clouds, ts, temp, ps, phalf end subroutine gray_radiation + !From Stevens et al. 2005, MWR + subroutine cloudy_radiation(is, ie, km, pt, qv, ql, & + delp, delz, rho, t_dt, olr, lwu, lwd, sw_surf) + + integer, intent(in):: is, ie, km + real, intent(in), dimension(is:ie,km):: pt, delp, delz, rho, qv, ql + real, intent(out), dimension(is:ie,km):: t_dt + real, intent(out), dimension(is:ie):: olr, lwu, lwd, sw_surf + !local: + real, dimension(is:ie) :: lwp, Qup + real, dimension(is:ie,km+1) :: Frad + real :: qz, tmp, zi, zint, densT, densB, cube, df + integer :: i,j,k,ki + + real, parameter :: F_0 = 70. + real, parameter :: F_1 = 22. + real, parameter :: kappa = 85. + real, parameter :: az = 1. + real, parameter :: Divg = 3.75e-6 + real, parameter :: c13 = 1./3. + real, parameter :: c43 = 4.*c13 + + !Compute column cloud water + + do i=is,ie + lwp(i) = 0.0 + enddo + do k=1,km + do i=is,ie + lwp(i) = lwp(i) + delp(i,k)*ql(i,k) + enddo + enddo + + !Compute water above and flux terms + do i=is,ie + Qup(i) = 0.0 + Frad(i,1) = F_0 + F_1*exp(-kappa*lwp(i)) + enddo + do k=1,km + do i=is,ie + Qup(i) = Qup(i) + delp(i,k)*ql(i,k) + qz = lwp(i) - Qup(i) + tmp = F_0*exp(-kappa*Qup(i)) + Frad(i,k+1) = tmp + F_1*exp(-kappa*qz) + enddo + enddo + + do i=is,ie + sw_surf(i) = Frad(i,km+1) + lwu(i) = F_1 + enddo + + !Compute cloud top and free tropospheric cooling + do i=is,ie + ki = 1 + do while (ki <= km .and. qv(i,ki)+ql(i,ki) < 0.008 ) + ki = ki+1 + enddo + if (ki > km) continue + + zi = 0.0 + do k=km,ki+1,-1 + zi = zi - delz(i,k) + enddo + zi = zi - 0.5*delz(i,ki) + + zint = 0.0 + !densT = -delp(i,k)/(grav*delz(i,k))*rdgas*pt(i,k)*(1.+zvir*qv(i,k)) + densT=rho(i,k) + + cube = -exp(c13*log(abs(zi))) + tmp = 0.25*cube**4 + tmp = tmp + zi*cube + tmp = tmp*densT*cp_air*divg*az + lwd(i) = tmp + Frad(i,km+1) = Frad(i,km+1) + tmp + + do k=km,1,-1 + zint = zint - delz(i,k) + df = zint-zi + cube = sign(exp(c13*log(abs(df))), df) + tmp = 0.25*cube**4 + tmp = tmp + zi*cube + densB = densT + densT = rho(i,k) ! 
-delp(i,k)/(grav*delz(i,k))*rdgas*pt(i,k)*(1.+zvir*qv(i,k)) + tmp = tmp*0.5*(densB+densT)*cp_air*Divg*az + Frad(i,k) = Frad(i,k) + tmp + enddo + enddo + + !Compute outputs + + do i=is,ie + olr(i) = Frad(i,1) + enddo + do k=1,km + do i=is,ie + t_dt(i,k) = (Frad(i,k) - Frad(i,k+1))/(cp_air*rho(i,k)*delz(i,k)) + enddo + enddo + + end subroutine cloudy_radiation subroutine get_low_clouds( is,ie, js,je, km, ql, qi, qa, clouds ) integer, intent(in):: is,ie, js,je, km diff --git a/driver/solo/hswf.F90 b/driver/solo/hswf.F90 index 1eb213004..bae1d4ed7 100644 --- a/driver/solo/hswf.F90 +++ b/driver/solo/hswf.F90 @@ -28,7 +28,6 @@ module hswf_mod use mpp_domains_mod, only: mpp_update_domains, domain2d use time_manager_mod, only: time_type, get_date, get_time use diag_manager_mod, only: send_data - use fv_timing_mod, only: timing_on, timing_off implicit none !----------------------------------------------------------------------- @@ -43,7 +42,7 @@ subroutine Held_Suarez_Tend(npx, npy, npz, is, ie, js, je, ng, nq, & u, v, pt, q, pe, delp, peln, pkz, pdt, & ua, va, u_dt, v_dt, t_dt, q_dt, agrid, & delz, phis, hydrostatic, ak, bk, ks, & - strat, rayf, master, Time, time_total) + strat, zurita, rd_zur, master, Time, time_total) integer, INTENT(IN ) :: npx, npy, npz integer, INTENT(IN ) :: is, ie, js, je, ng, nq @@ -74,8 +73,8 @@ subroutine Held_Suarez_Tend(npx, npy, npz, is, ie, js, je, ng, nq, & real , INTENT(IN ) :: ak(npz+1), bk(npz+1) integer, INTENT(IN ) :: ks - real , INTENT(IN ) :: pdt - logical, INTENT(IN ) :: strat, rayf, master + real , INTENT(IN ) :: pdt, rd_zur + logical, INTENT(IN ) :: strat, zurita, master type(time_type), intent(in) :: Time real, INTENT(IN), optional:: time_total @@ -85,7 +84,7 @@ subroutine Held_Suarez_Tend(npx, npy, npz, is, ie, js, je, ng, nq, & real, dimension(is:ie):: u1, v1 integer i,j,k integer seconds, days - real ty, tz, akap + real ty, tz, akap, rakap real p0, t0, sday, rkv, rka, rks, rkt, sigb, rsgb real tmp, solar_ang, solar_rate real ap0k, algpk @@ -97,10 +96,12 @@ subroutine Held_Suarez_Tend(npx, npy, npz, is, ie, js, je, ng, nq, & real t_st, t_ms real rdt, f1 real rad_ratio, kf_day + real rd_zur_rad ty = 60.0 tz = 10.0 ! Original value from H-S was 10. akap = 2./7. + rakap = 1./akap p0 = 100000. t0 = 200. @@ -129,10 +130,13 @@ subroutine Held_Suarez_Tend(npx, npy, npz, is, ie, js, je, ng, nq, & ap0k = 1./p0**akap algpk = log(ap0k) + rd_zur_rad = rd_zur*pi/180. + ! Temperature forcing... !$OMP parallel do default(none) shared(is,ie,js,je,npz,delp,peln,ap0k,ty,agrid,tz,akap, & !$OMP strat,h0,t_dt,pt,rms,rmr,rdt,t_ms,tau,pdt,sday,pe, & -!$OMP sigb,rsgb,pkz,algpk,t0,rka,rks,rkv,u_dt,ua,v_dt,va) & +!$OMP sigb,rsgb,pkz,algpk,t0,rka,rks,rkv,u_dt,ua,v_dt,va,& +!$OMP zurita,rd_zur_rad,rakap) & !$OMP private(pl, teq, tey, tez, dz, relx, dt_tropic, sigl, f1, rkt,tmp,u1,v1) do j=js,je do k=1,npz @@ -142,8 +146,6 @@ subroutine Held_Suarez_Tend(npx, npy, npz, is, ie, js, je, ng, nq, & enddo do k=npz,1,-1 do i=is,ie - tey = ap0k*( 315.0 - ty*SIN(agrid(i,j,2))*SIN(agrid(i,j,2)) ) - tez = tz*( ap0k/akap )*COS(agrid(i,j,2))*COS(agrid(i,j,2)) if (strat .and. pl(i,k) <= 1.E2) then ! Mesosphere: defined as the region above 1 mb dz = h0 * log(pl(i,k+1)/pl(i,k)) @@ -165,11 +167,24 @@ subroutine Held_Suarez_Tend(npx, npy, npz, is, ie, js, je, ng, nq, & ! 
Troposphere: standard Held-Suarez sigl = pl(i,k)/pe(i,npz+1,j) f1 = max(0., (sigl-sigb) * rsgb ) - teq(i,k) = tey - tez*(log(pkz(i,j,k))+algpk) - teq(i,k) = max(t0, teq(i,k)*pkz(i,j,k)) - rkt = rka + (rks-rka)*f1*(COS(agrid(i,j,2))**4.0) - t_dt(i,j,k) = t_dt(i,j,k) + rkt*(teq(i,k)-pt(i,j,k))/(1.+rkt) * rdt - ! Bottom friction: + if (zurita) then + tmp = agrid(i,j,2)/rd_zur_rad + tey = 1.0 - 0.19*(1.-exp(-tmp*tmp)) + tmp = exp(akap*log(sigl)) + tez = 0.1*(1.-tmp)*rakap + tmp = tmp*315.0 + teq(i,k) = max(t0, tmp*(tey + tez)) + !t_dt(i,j,k) = t_dt(i,j,k) + rka*(teq(i,k)-pt(i,j,k)) * rdt + rkt = rka + else + tey = ap0k*( 315.0 - ty*SIN(agrid(i,j,2))*SIN(agrid(i,j,2)) ) + tez = tz*( ap0k/akap )*COS(agrid(i,j,2))*COS(agrid(i,j,2)) + tmp = tey - tez*(log(pkz(i,j,k))+algpk) + teq(i,k) = max(t0, tmp*pkz(i,j,k)) + rkt = rka + (rks-rka)*f1*(COS(agrid(i,j,2))**4.0) + endif + t_dt(i,j,k) = t_dt(i,j,k) + rkt*(teq(i,k)-pt(i,j,k))/(1.+rkt) * rdt + ! Bottom friction: sigl = pl(i,k) / pe(i,npz+1,j) sigl = (sigl-sigb)*rsgb * rkv if (sigl > 0.) then diff --git a/model/boundary.F90 b/model/boundary.F90 index 8c02048fa..668451805 100644 --- a/model/boundary.F90 +++ b/model/boundary.F90 @@ -636,10 +636,10 @@ subroutine nested_grid_BC_mpp_3d(var_nest, var_coarse, nest_domain, ind, wt, ist nbuffer = 0 - call timing_on ('COMM_TOTAL') + call timing_on('COMM_TOTAL') call mpp_update_nest_fine(var_coarse, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, & nest_level=nest_level, position=position) - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') if (process) then @@ -886,10 +886,10 @@ subroutine nested_grid_BC_mpp_3d_vector(u_nest, v_nest, u_coarse, v_coarse, nest call init_buffer(nest_domain, wbufferx, sbufferx, ebufferx, nbufferx, npz, nest_level, position_x) call init_buffer(nest_domain, wbuffery, sbuffery, ebuffery, nbuffery, npz, nest_level, position_x) - call timing_on ('COMM_TOTAL') + call timing_on('COMM_TOTAL') call mpp_update_nest_fine(u_coarse, v_coarse, nest_domain, wbufferx, wbuffery, sbufferx, sbuffery, & ebufferx, ebuffery, nbufferx, nbuffery, flags=flags, nest_level=nest_level, gridtype=gridtype) - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') if (process) then @@ -1102,9 +1102,9 @@ subroutine nested_grid_BC_mpp_send_3d(var_coarse, nest_domain, istag, jstag, nes allocate(nbuffer(1,1,1)) - call timing_on ('COMM_TOTAL') + call timing_on('COMM_TOTAL') call mpp_update_nest_fine(var_coarse, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level=nest_level, position=position) - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') deallocate(wbuffer, ebuffer, sbuffer, nbuffer) @@ -1148,9 +1148,9 @@ subroutine nested_grid_BC_mpp_send_2d(var_coarse, nest_domain, istag, jstag, nes allocate(nbuffer(1,1)) - call timing_on ('COMM_TOTAL') + call timing_on('COMM_TOTAL') call mpp_update_nest_fine(var_coarse, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level=nest_level, position=position) - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') deallocate(wbuffer, ebuffer, sbuffer, nbuffer) @@ -1255,9 +1255,9 @@ subroutine nested_grid_BC_2D_mpp(var_nest, var_coarse, nest_domain, ind, wt, ist endif nbuffer = 0 - call timing_on ('COMM_TOTAL') + call timing_on('COMM_TOTAL') call mpp_update_nest_fine(var_coarse, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level=nl, position=position) - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') if (process) then @@ -1668,9 +1668,9 @@ subroutine nested_grid_BC_send_scalar(var_coarse, nest_domain, istag, 
jstag, nes position = CENTER end if - call timing_on ('COMM_TOTAL') + call timing_on('COMM_TOTAL') call mpp_update_nest_fine(var_coarse, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level=nest_level, position=position) - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') end subroutine nested_grid_BC_send_scalar @@ -1704,10 +1704,10 @@ subroutine nested_grid_BC_recv_scalar(nest_domain, istag, jstag, npz, & call init_nest_bc_type(nest_domain, nest_BC_buffers, npz, nest_level, position) endif - call timing_on ('COMM_TOTAL') + call timing_on('COMM_TOTAL') call mpp_update_nest_fine(var_coarse_dummy, nest_domain, nest_BC_buffers%west_t1, nest_BC_buffers%south_t1, & nest_BC_buffers%east_t1, nest_BC_buffers%north_t1, nest_level=nest_level, position=position) - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') end subroutine nested_grid_BC_recv_scalar @@ -1724,10 +1724,10 @@ subroutine nested_grid_BC_send_vector(u_coarse, v_coarse, nest_domain, nest_leve integer :: nl = 1 - call timing_on ('COMM_TOTAL') + call timing_on('COMM_TOTAL') call mpp_update_nest_fine(u_coarse, v_coarse, nest_domain, wbufferx,wbuffery, sbufferx, sbuffery, & ebufferx, ebuffery, nbufferx, nbuffery, nest_level=nest_level, flags=flags, gridtype=gridtype) - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') end subroutine nested_grid_BC_send_vector @@ -1838,12 +1838,12 @@ subroutine nested_grid_BC_recv_vector(nest_domain, npz, bd, nest_BC_u_buffers, n call init_nest_bc_type(nest_domain, nest_BC_v_buffers, npz, nest_level, position_y) endif - call timing_on ('COMM_TOTAL') + call timing_on('COMM_TOTAL') call mpp_update_nest_fine(u_coarse_dummy, v_coarse_dummy, nest_domain, & nest_BC_u_buffers%west_t1, nest_BC_v_buffers%west_t1, nest_BC_u_buffers%south_t1, nest_BC_v_buffers%south_t1, & nest_BC_u_buffers%east_t1, nest_BC_v_buffers%east_t1, nest_BC_u_buffers%north_t1, nest_BC_v_buffers%north_t1, & nest_level, flags, gridtype) - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') end subroutine nested_grid_BC_recv_vector @@ -2289,7 +2289,7 @@ subroutine update_coarse_grid_mpp(var_coarse, var_nest, nest_domain, dx, dy, are npx, npy, npz, istag, jstag, r, nestupdate) endif - call timing_on('COMM_TOTAL') + call timing_on('COMM_TOTAL') call mpp_update_nest_coarse(field_in=coarse_dat_send, nest_domain=nest_domain, field_out=coarse_dat_recv, & nest_level=nest_level, position=position) @@ -2297,7 +2297,7 @@ subroutine update_coarse_grid_mpp(var_coarse, var_nest, nest_domain, dx, dy, are deallocate(coarse_dat_send) end if - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') s = r/2 !rounds down (since r > 0) qr = r*upoff + nsponge - s @@ -2568,14 +2568,14 @@ subroutine update_coarse_grid_mpp_vector(u_coarse, v_coarse, u_nest, v_nest, nes npx, npy, npz, istag_v, jstag_v, r, nestupdate) endif - call timing_on('COMM_TOTAL') + call timing_on('COMM_TOTAL') call mpp_update_nest_coarse(coarse_dat_send_u, coarse_dat_send_v, nest_domain, coarse_dat_recv_u, & coarse_dat_recv_v, nest_level, flags, gridtype) if (allocated(coarse_dat_send_u)) deallocate(coarse_dat_send_u) if (allocated(coarse_dat_send_v)) deallocate(coarse_dat_send_v) - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') s = r/2 !rounds down (since r > 0) qr = r*upoff + nsponge - s diff --git a/model/dyn_core.F90 b/model/dyn_core.F90 index 8ae8bbb70..525dfc2e1 100644 --- a/model/dyn_core.F90 +++ b/model/dyn_core.F90 @@ -32,10 +32,11 @@ module dyn_core_mod use fv_mp_mod, only: group_halo_update_type use sw_core_mod, 
only: c_sw, d_sw use a2b_edge_mod, only: a2b_ord2, a2b_ord4 - use nh_core_mod, only: Riem_Solver3, Riem_Solver_C, update_dz_c, update_dz_d, nh_bc + use nh_core_mod, only: Riem_Solver3, Riem_Solver_C, update_dz_c, update_dz_d + use nh_core_mod, only: nh_bc, edge_profile1 use tp_core_mod, only: copy_corners use fv_timing_mod, only: timing_on, timing_off - use fv_diagnostics_mod, only: prt_maxmin, fv_time, prt_mxm, is_ideal_case + use fv_diagnostics_mod, only: prt_maxmin, fv_time, prt_mxm use fv_diag_column_mod, only: do_diag_debug_dyn, debug_column_dyn #ifdef ROT3 use fv_update_phys_mod, only: update_dwinds_phys @@ -57,6 +58,8 @@ module dyn_core_mod #ifdef SW_DYNAMICS use test_cases_mod, only: test_case, case9_forcing1, case9_forcing2 #endif + use test_cases_mod, only: w_forcing + use w_forcing_mod, only: do_w_forcing use fv_regional_mod, only: dump_field, exch_uv, H_STAGGER, U_STAGGER, V_STAGGER use fv_regional_mod, only: a_step, p_step, k_step, n_step use fast_phys_mod, only: fast_phys @@ -329,20 +332,20 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_map, n_split, zvir, cp, endif if ( nq > 0 ) then - call timing_on('COMM_TOTAL') - call timing_on('COMM_TRACER') + call timing_on('COMM_TOTAL') + call timing_on('COMM_TRACER') if ( flagstruct%inline_q ) then call start_group_halo_update(i_pack(10), q, domain) endif - call timing_off('COMM_TRACER') - call timing_off('COMM_TOTAL') + call timing_off('COMM_TRACER') + call timing_off('COMM_TOTAL') endif #ifndef SW_DYNAMICS if ( .not. hydrostatic ) then - call timing_on('COMM_TOTAL') + call timing_on('COMM_TOTAL') call start_group_halo_update(i_pack(7), w, domain) - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') if ( it==1 ) then if (gridstruct%bounded_domain) then @@ -377,9 +380,9 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_map, n_split, zvir, cp, enddo enddo enddo - call timing_on('COMM_TOTAL') + call timing_on('COMM_TOTAL') call start_group_halo_update(i_pack(5), gz, domain) - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') endif endif @@ -392,9 +395,9 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_map, n_split, zvir, cp, if ( it==1 ) then - call timing_on('COMM_TOTAL') + call timing_on('COMM_TOTAL') call complete_group_halo_update(i_pack(1), domain) - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') beta_d = 0. else beta_d = beta @@ -420,13 +423,13 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_map, n_split, zvir, cp, last_step = .false. endif - call timing_on('COMM_TOTAL') + call timing_on('COMM_TOTAL') call complete_group_halo_update(i_pack(8), domain) if( .not. 
hydrostatic ) & call complete_group_halo_update(i_pack(7), domain) - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') - call timing_on('c_sw') + call timing_on('C_SW') !$OMP parallel do default(none) shared(npz,isd,jsd,delpc,delp,ptc,pt,u,v,w,uc,vc,ua,va, & !$OMP omga,ut,vt,divgd,flagstruct,dt2,hydrostatic,bd, & !$OMP gridstruct) @@ -439,11 +442,11 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_map, n_split, zvir, cp, flagstruct%nord, dt2, hydrostatic, .true., bd, & gridstruct, flagstruct) enddo - call timing_off('c_sw') + call timing_off('C_SW') if ( flagstruct%nord > 0 ) then - call timing_on('COMM_TOTAL') + call timing_on('COMM_TOTAL') call start_group_halo_update(i_pack(3), divgd, domain, position=CORNER) - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') endif if (gridstruct%nested) then @@ -478,9 +481,9 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_map, n_split, zvir, cp, #ifndef SW_DYNAMICS if ( it == 1 ) then - call timing_on('COMM_TOTAL') + call timing_on('COMM_TOTAL') call complete_group_halo_update(i_pack(5), domain) - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') !$OMP parallel do default(none) shared(isd,ied,jsd,jed,npz,zh,gz) do k=1,npz+1 @@ -515,19 +518,19 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_map, n_split, zvir, cp, enddo endif - call timing_on('UPDATE_DZ_C') + call timing_on('UPDATE_DZ_C') call update_dz_c(is, ie, js, je, npz, ng, dt2, dp_ref, zs, gridstruct%area, ut, vt, gz, ws3, & npx, npy, gridstruct%sw_corner, gridstruct%se_corner, & gridstruct%ne_corner, gridstruct%nw_corner, bd, gridstruct%grid_type) - call timing_off('UPDATE_DZ_C') + call timing_off('UPDATE_DZ_C') - call timing_on('Riem_Solver') + call timing_on('Riem_Solver') call Riem_Solver_C( ms, dt2, is, ie, js, je, npz, ng, & akap, cappa, cp, ptop, phis, omga, ptc, & q_con, delpc, gz, pkc, ws3, flagstruct%p_fac, & flagstruct%a_imp, flagstruct%scale_z, pfull, & flagstruct%fast_tau_w_sec, flagstruct%rf_cutoff ) - call timing_off('Riem_Solver') + call timing_off('Riem_Solver') if (gridstruct%nested) then call nh_bc(ptop, grav, akap, cp, delpc, neststruct%delz_BC, ptc, phis, & @@ -564,15 +567,15 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_map, n_split, zvir, cp, call p_grad_c(dt2, npz, delpc, pkc, gz, uc, vc, bd, gridstruct%rdxc, gridstruct%rdyc, hydrostatic) - call timing_on('COMM_TOTAL') + call timing_on('COMM_TOTAL') call start_group_halo_update(i_pack(9), uc, vc, domain, gridtype=CGRID_NE) - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') #ifdef SW_DYNAMICS if (test_case==9) call case9_forcing2(phis, isd, ied, jsd, jed) endif !test_case>1 #endif - call timing_on('COMM_TOTAL') + call timing_on('COMM_TOTAL') if (flagstruct%inline_q .and. nq>0) call complete_group_halo_update(i_pack(10), domain) #ifdef SW_DYNAMICS if (test_case > 1) then @@ -582,8 +585,8 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_map, n_split, zvir, cp, #ifdef SW_DYNAMICS endif #endif + call timing_off('COMM_TOTAL') - call timing_off('COMM_TOTAL') if (gridstruct%nested) then !On a nested grid we have to do SOMETHING with uc and vc in ! 
the boundary halo, particularly at the corners of the @@ -657,12 +660,12 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_map, n_split, zvir, cp, endif - call timing_on('d_sw') + call timing_on('D_SW') !$OMP parallel do default(none) shared(npz,flagstruct,nord_v,pfull,damp_vt,hydrostatic,last_step, & !$OMP is,ie,js,je,isd,ied,jsd,jed,omga,delp,gridstruct,npx,npy, & !$OMP ng,zh,vt,ptc,pt,u,v,w,uc,vc,ua,va,divgd,mfx,mfy,cx,cy, & !$OMP crx,cry,xfx,yfx,q_con,zvir,sphum,nq,q,dt,bd,rdt,iep1,jep1, & -!$OMP heat_source,is_ideal_case,diss_est,radius) & +!$OMP heat_source,diss_est,radius) & !$OMP private(nord_k, nord_w, nord_t, damp_w, damp_t, d2_divg, & !$OMP d_con_k,kgb, hord_m, hord_v, hord_t, hord_p, wk, heat_s, diss_e, z_rat) do k=1,npz @@ -702,7 +705,7 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_map, n_split, zvir, cp, if ( k==1 ) then ! Divergence damping: nord_k=0; - if (is_ideal_case) then + if (flagstruct%is_ideal_case) then d2_divg = max(flagstruct%d2_bg, flagstruct%d2_bg_k1) else d2_divg = max(0.01, flagstruct%d2_bg, flagstruct%d2_bg_k1) @@ -769,7 +772,8 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_map, n_split, zvir, cp, kgb, heat_s, diss_e, zvir, sphum, nq, q, k, npz, flagstruct%inline_q, dt, & flagstruct%hord_tr, hord_m, hord_v, hord_t, hord_p, & nord_k, nord_v(k), nord_w, nord_t, flagstruct%dddmp, d2_divg, flagstruct%d4_bg, & - damp_vt(k), damp_w, damp_t, d_con_k, hydrostatic, gridstruct, flagstruct, bd) + damp_vt(k), damp_w, damp_t, d_con_k, & + hydrostatic, gridstruct, flagstruct, bd) if((.not.flagstruct%use_old_omega) .and. last_step ) then ! Average horizontal "convergence" to cell center @@ -802,17 +806,17 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_map, n_split, zvir, cp, call mpp_update_domains(uc, vc, domain, gridtype=CGRID_NE) call mpp_update_domains(u , v , domain, gridtype=DGRID_NE) endif - call timing_off('d_sw') + call timing_off('D_SW') if( flagstruct%fill_dp ) call mix_dp(hydrostatic, w, delp, pt, npz, ak, bk, .false., flagstruct%fv_debug, bd, gridstruct) - call timing_on('COMM_TOTAL') + call timing_on('COMM_TOTAL') call start_group_halo_update(i_pack(1), delp, domain, complete=.false.) call start_group_halo_update(i_pack(1), pt, domain, complete=.true.) #ifdef USE_COND call start_group_halo_update(i_pack(11), q_con, domain) #endif - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') if ( flagstruct%d_ext > 0. ) then d2_divg = flagstruct%d_ext * gridstruct%da_min_c @@ -836,12 +840,12 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_map, n_split, zvir, cp, divg2(:,:) = 0. endif - call timing_on('COMM_TOTAL') + call timing_on('COMM_TOTAL') call complete_group_halo_update(i_pack(1), domain) #ifdef USE_COND call complete_group_halo_update(i_pack(11), domain) #endif - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') if ( flagstruct%fv_debug ) then if ( .not. 
flagstruct%hydrostatic ) & call prt_mxm('delz', delz, is, ie, js, je, 0, npz, 1., gridstruct%area_64, domain) @@ -897,10 +901,10 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_map, n_split, zvir, cp, gridstruct%bounded_domain, .true., npx, npy, flagstruct%a2b_ord, bd) else #ifndef SW_DYNAMICS - call timing_on('UPDATE_DZ') + call timing_on('UPDATE_DZ') call update_dz_d(nord_v, damp_vt, flagstruct%hord_tm, is, ie, js, je, npz, ng, npx, npy, gridstruct%area, & gridstruct%rarea, dp_ref, zs, zh, crx, cry, xfx, yfx, ws, rdt, gridstruct, bd, flagstruct%lim_fac) - call timing_off('UPDATE_DZ') + call timing_off('UPDATE_DZ') if ( flagstruct%fv_debug ) then if ( .not. flagstruct%hydrostatic ) then call prt_mxm('delz updated', delz, is, ie, js, je, 0, npz, 1., gridstruct%area_64, domain) @@ -917,17 +921,17 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_map, n_split, zvir, cp, - call timing_on('Riem_Solver') + call timing_on('Riem_Solver') call Riem_Solver3(flagstruct%m_split, dt, is, ie, js, je, npz, ng, & isd, ied, jsd, jed, & akap, cappa, cp, ptop, zs, q_con, w, delz, pt, delp, zh, & pe, pkc, pk3, pk, peln, ws, & flagstruct%scale_z, flagstruct%p_fac, flagstruct%a_imp, & - flagstruct%use_logp, remap_step, beta<-0.1, & - flagstruct%fast_tau_w_sec) - call timing_off('Riem_Solver') + flagstruct%use_logp, remap_step, beta<-0.1, flagstruct%d2bg_zq, & + flagstruct%fv_debug, flagstruct%fast_tau_w_sec) + call timing_off('Riem_Solver') - call timing_on('COMM_TOTAL') + call timing_on('COMM_TOTAL') if ( gridstruct%square_domain ) then call start_group_halo_update(i_pack(4), zh , domain) call start_group_halo_update(i_pack(5), pkc, domain, whalo=2, ehalo=2, shalo=2, nhalo=2) @@ -935,7 +939,7 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_map, n_split, zvir, cp, call start_group_halo_update(i_pack(4), zh , domain, complete=.false.) call start_group_halo_update(i_pack(4), pkc, domain, complete=.true.) endif - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') if ( remap_step ) & call pe_halo(is, ie, js, je, isd, ied, jsd, jed, npz, ptop, pe, delp) @@ -987,7 +991,7 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_map, n_split, zvir, cp, if ( gridstruct%square_domain ) then call timing_on('COMM_TOTAL') call complete_group_halo_update(i_pack(5), domain) - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') endif #endif SW_DYNAMICS endif ! end hydro check @@ -1010,7 +1014,7 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_map, n_split, zvir, cp, !---------------------------- ! Compute pressure gradient: !---------------------------- - call timing_on('PG_D') + call timing_on('PG_D') if ( hydrostatic ) then if ( beta > 0. ) then call grad1_p_update(divg2, u, v, pkc, gz, dt, ng, gridstruct, bd, npx, npy, npz, ptop, beta_d, flagstruct%a2b_ord) @@ -1049,7 +1053,7 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_map, n_split, zvir, cp, endif #endif endif - call timing_off('PG_D') + call timing_off('PG_D') ! *** Inline Rayleigh friction here? if( flagstruct%RF_fast .and. flagstruct%tau > 0. ) & @@ -1089,22 +1093,8 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_map, n_split, zvir, cp, if (flagstruct%do_fast_phys) then - call timing_on('COMM_TOTAL') - call start_group_halo_update(i_pack(1), delp, domain, complete=.false.) - call start_group_halo_update(i_pack(1), pt, domain, complete=.true.) 
- call start_group_halo_update(i_pack(7), w, domain) - call start_group_halo_update(i_pack(8), u, v, domain, gridtype=DGRID_NE) - call start_group_halo_update(i_pack(10), q, domain) - call start_group_halo_update(i_pack(11), q_con, domain) - call start_group_halo_update(i_pack(12), cappa, domain) - call complete_group_halo_update(i_pack(1), domain) - call complete_group_halo_update(i_pack(7), domain) - call complete_group_halo_update(i_pack(8), domain) - call complete_group_halo_update(i_pack(10), domain) - call complete_group_halo_update(i_pack(11), domain) - call complete_group_halo_update(i_pack(12), domain) - call timing_off('COMM_TOTAL') - + call timing_on('FAST_PHYS') + call fast_phys (is, ie, js, je, isd, ied, jsd, jed, npz, npx, npy, nq, & flagstruct%c2l_ord, dt, consv, akap, ptop, phis, te0_2d, u, v, w, pt, & delp, delz, q_con, cappa, q, pkz, zvir, flagstruct%te_err, flagstruct%tw_err, & @@ -1112,19 +1102,18 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_map, n_split, zvir, cp, flagstruct%consv_checker, flagstruct%adj_mass_vmr) call timing_on('COMM_TOTAL') - call start_group_halo_update(i_pack(1), delp, domain, complete=.false.) - call start_group_halo_update(i_pack(1), pt, domain, complete=.true.) - call start_group_halo_update(i_pack(7), w, domain) - call start_group_halo_update(i_pack(8), u, v, domain, gridtype=DGRID_NE) - call start_group_halo_update(i_pack(10), q, domain) - call start_group_halo_update(i_pack(11), q_con, domain) - call start_group_halo_update(i_pack(12), cappa, domain) - call complete_group_halo_update(i_pack(1), domain) - call complete_group_halo_update(i_pack(7), domain) - call complete_group_halo_update(i_pack(8), domain) - call complete_group_halo_update(i_pack(10), domain) - call complete_group_halo_update(i_pack(11), domain) - call complete_group_halo_update(i_pack(12), domain) + !some mpp domains updates are commented out at this moment -- Linjiong + !future visit is needed if the model is not reprodicible using fast physics + !--- the following performs a staggered vector update with appropriate rotations + !call mpp_update_domains (u, v, domain, gridtype=DGRID_NE) + !--- the following is a 4D update for all the tracers + !call mpp_update_domains(q, domain) + !--- the following will be buffered into a single update + call mpp_update_domains (delp, domain, complete=.false.) + call mpp_update_domains (pt, domain, complete=.false.) + !call mpp_update_domains (w, domain, complete=.false.) + call mpp_update_domains (q_con, domain, complete=.false.) + call mpp_update_domains (cappa, domain, complete=.true.) call timing_off('COMM_TOTAL') if (remap_step) then @@ -1143,13 +1132,15 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_map, n_split, zvir, cp, call pe_halo (is, ie, js, je, isd, ied, jsd, jed, npz, ptop, pe, delp) endif + call timing_off('FAST_PHYS') + endif !----------------------------------------------------------------------- ! <<< Fast Physics !----------------------------------------------------------------------- - call timing_on('COMM_TOTAL') + call timing_on('COMM_TOTAL') if( it==n_split .and. gridstruct%grid_type<4 .and. .not. gridstruct%bounded_domain) then ! Prevent accumulation of rounding errors at overlapped domain edges: call mpp_get_boundary(u, v, domain, ebuffery=ebuffer, & @@ -1170,8 +1161,7 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_map, n_split, zvir, cp, if ( .not. flagstruct%regional .and. 
it/=n_split) & call start_group_halo_update(i_pack(8), u, v, domain, gridtype=DGRID_NE) #endif - - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') #ifdef SW_DYNAMICS endif @@ -1351,6 +1341,10 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_map, n_split, zvir, cp, endif if (allocated(heat_source)) deallocate( heat_source ) !If ncon == 0 but d_con > 1.e-5, this would not be deallocated in earlier versions of the code + if ((.not. hydrostatic) .and. w_forcing .and. present(time_total)) then + call do_w_forcing(bd, npx, npy, npz, w, delz, phis, & + flagstruct%grid_type, gridstruct%agrid, domain, flagstruct, bdt, time_total) + endif if ( end_step ) then deallocate( gz ) @@ -2671,5 +2665,66 @@ subroutine gz_bc(gz,delzBC,bd,npx,npy,npz,step,split) end subroutine gz_bc + !routine to compute vertical gradients in winds + ! for 2D smag damping + ! Call AFTER updating gz + !TODO needs cubed-sphere support (don't compute in corners) + subroutine compute_dudz(bd, npz, u, v, dudz, dvdz, gz, dp_ref) + type(fv_grid_bounds_type), intent(IN) :: bd + integer, intent(IN) :: npz + real, intent(in) :: u(bd%isd:bd%ied, bd%jsd:bd%jed+1,npz) + real, intent(in) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed, npz) + real, intent(in) :: gz(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1) + real, intent(IN) :: dp_ref(npz) + real, intent(OUT) :: dudz(bd%isd:bd%ied,bd%jsd:bd%jed+1,npz) + real, intent(OUT) :: dvdz(bd%isd:bd%ied+1,bd%jsd:bd%jed,npz) + + real :: dz + real :: ue(bd%isd:bd%ied ,npz+1) + real :: ve(bd%isd:bd%ied+1,npz+1) + integer :: i,j,k + integer :: is, ie, js, je + integer :: isd, ied, jsd, jed + + is = bd%is + ie = bd%ie + js = bd%js + je = bd%je + isd = bd%isd + ied = bd%ied + jsd = bd%jsd + jed = bd%jed + + dudz = -1.e50 + dvdz = -1.e50 + + do j=jsd,jed + + !TODO: pass by reference and not copy + call edge_profile1(v(isd:ied+1,j,:), ve, isd, ied+1, npz, dp_ref, 0) + do k=1,npz + do i=isd+1,ied + dz = gz(i,j,k) + gz(i-1,j,k) + dz = dz - (gz(i,j,k+1) + gz(i-1,j,k+1)) + dz = 0.5*dz*rgrav + dvdz(i,j,k) = (ve(i,k)-ve(i,k+1))/dz + enddo + enddo + enddo + + do j=jsd+1,jed + call edge_profile1(u(isd:ied,j,:), ue, isd, ied, npz, dp_ref, 0) + do k=1,npz + do i=isd,ied + dz = gz(i,j,k) + gz(i,j-1,k) + dz = dz - (gz(i,j,k+1) + gz(i,j-1,k+1)) + dz = 0.5*dz*rgrav + dudz(i,j,k) = (ue(i,k)-ue(i,k+1))/dz + enddo + enddo + enddo + + + end subroutine compute_dudz end module dyn_core_mod diff --git a/model/fast_phys.F90 b/model/fast_phys.F90 index 0b0eb5ebf..721a47716 100644 --- a/model/fast_phys.F90 +++ b/model/fast_phys.F90 @@ -31,7 +31,6 @@ module fast_phys_mod use fv_grid_utils_mod, only: cubed_to_latlon, update_dwinds_phys use fv_arrays_mod, only: fv_grid_type, fv_grid_bounds_type use mpp_domains_mod, only: domain2d, mpp_update_domains - use fv_timing_mod, only: timing_on, timing_off use tracer_manager_mod, only: get_tracer_index, get_tracer_names use field_manager_mod, only: model_atmos use gfdl_mp_mod, only: mtetw diff --git a/model/fv_arrays.F90 b/model/fv_arrays.F90 index 856efa183..dd1038d67 100644 --- a/model/fv_arrays.F90 +++ b/model/fv_arrays.F90 @@ -282,6 +282,7 @@ module fv_arrays_mod real :: scale_z = 0. !< diff_z = scale_z**2 * 0.25 (only used for Riemann solver) real :: w_max = 75. !< Not used. real :: z_min = 0.05 !< Not used. + real :: d2bg_zq = 0.0 !< Implicit vertical diffusion for scalars (currently vertical velocity only) real :: lim_fac = 1.0 !< linear scheme limiting factor when using hord = 1. 
1: hord = 5, 3: hord = 6 integer :: nord=1 !< Order of divergence damping: 0 for second-order; 1 for fourth-order @@ -294,6 +295,7 @@ module fv_arrays_mod real :: dddmp = 0.0 !< Dimensionless coefficient for the second-order Smagorinsky-type !< divergence damping. The default is value is 0.0. 0.2 !< (the Smagorinsky constant) is recommended if ICs are noisy. + real :: smag2d = 0.0 !< Dimensionless coefficient for 2d smag damping. Experimental!! real :: d2_bg = 0.0 !< Coefficient for explicit second-order divergence damping. !< This option remains active even if nord is nonzero. The default !< value is 0.0. The proper range is 0 to 0.02, with 0 strongly recommended @@ -362,14 +364,19 @@ module fv_arrays_mod !< value as do_sat_adj in gfdl_mp_nml. Not compatible with other microphysics !< schemes. Also requires GFDL microphysics be installed within the physics driver. logical :: consv_checker = .false.!< turn on energy and water conservation checker - logical :: do_fast_phys = .false.!< Controls fast physics, in which the SA-TKE-EDMF and part of the GWD are - !< within the acoustic time step of FV3. If .true. disabling the SA-TKE-EDMF + logical :: do_fast_phys = .false.!< Controls fast physics, in which the SA-TKE-EDMF and part of the GWD are + !< within the acoustic time step of FV3. If .true. disabling the SA-TKE-EDMF + !< and part of the GWD in the intermediate physics. + logical :: do_intermediate_phys = .true.!< Controls intermediate physics, in which the GFDL MP, SA-SAS and part of the GWD are + !< within the remapping time step of FV3. If .false. disabling the GFDL MP, SA-SAS !< and part of the GWD in the intermediate physics. logical :: do_inline_mp = .false.!< Controls Inline GFDL cloud microphysics, in which the full microphysics is !< called entirely within FV3. If .true. disabling microphysics within the physics !< is very strongly recommended. .false. by default. logical :: do_aerosol = .false. !< Controls climatological aerosol data used in the GFDL cloud microphyiscs. !< .false. by default. + logical :: do_cosp = .false. !< Controls COSP + !< .false. by default. logical :: do_f3d = .false. ! logical :: no_dycore = .false. !< Disables execution of the dynamical core, only running !< the initialization, diagnostic, and I/O routines, and @@ -807,6 +814,8 @@ module fv_arrays_mod logical :: do_diss_est = .false. !< compute and save dissipation estimate logical :: ecmwf_ic = .false. !< If external_ic = .true., reads initial conditions from ECMWF analyses. !< The default is .false. + logical :: use_gfsO3 = .false. ! only work when "ecmwf_ic = .T.". + ! Need to be 'true', when the IFS IC does not include O3 data. logical :: gfs_phil = .false. !< if .T., compute geopotential inside of GFS physics (not used?) logical :: agrid_vel_rst = .false. !< Whether to write the unstaggered latitude-longitude winds !< (ua and va) to the restart files. This is useful for data @@ -826,6 +835,7 @@ module fv_arrays_mod !< from either the restart file (if restarting) or from the external initial !< condition file (if nggps_ic or ecwmf_ic are .true.). This overrides the !< hard-coded levels in fv_eta. The default is .false. + logical :: is_ideal_case = .false. !< if .T., this is an ideal test case logical :: read_increment = .false. !< read in analysis increment and add to restart ! 
Default restart files from the "Memphis" latlon FV core: character(len=128) :: res_latlon_dynamics = 'INPUT/fv_rst.res.nc' !< If external_ic =.true.gives the filename of the @@ -895,6 +905,8 @@ module fv_arrays_mod real(kind=R_GRID) :: deglat=15. !< Latitude (in degrees) used to compute the uniform f-plane !< Coriolis parameter for doubly-periodic simulations !< (grid_type = 4). The default value is 15. + real(kind=R_GRID) :: domain_deg = 0. + !The following deglat_*, deglon_* options are not used. real(kind=R_GRID) :: deglon_start = -30., deglon_stop = 30., & !< boundaries of latlon patch deglat_start = -30., deglat_stop = 30. @@ -1048,36 +1060,6 @@ module fv_arrays_mod real, _ALLOCATABLE :: prefluxi(:,:,:) _NULL real, _ALLOCATABLE :: prefluxs(:,:,:) _NULL real, _ALLOCATABLE :: prefluxg(:,:,:) _NULL - real, _ALLOCATABLE :: cond(:,:) _NULL - real, _ALLOCATABLE :: dep(:,:) _NULL - real, _ALLOCATABLE :: reevap(:,:) _NULL - real, _ALLOCATABLE :: sub(:,:) _NULL - - real, _ALLOCATABLE :: pcw(:,:,:) _NULL - real, _ALLOCATABLE :: edw(:,:,:) _NULL - real, _ALLOCATABLE :: oew(:,:,:) _NULL - real, _ALLOCATABLE :: rrw(:,:,:) _NULL - real, _ALLOCATABLE :: tvw(:,:,:) _NULL - real, _ALLOCATABLE :: pci(:,:,:) _NULL - real, _ALLOCATABLE :: edi(:,:,:) _NULL - real, _ALLOCATABLE :: oei(:,:,:) _NULL - real, _ALLOCATABLE :: rri(:,:,:) _NULL - real, _ALLOCATABLE :: tvi(:,:,:) _NULL - real, _ALLOCATABLE :: pcr(:,:,:) _NULL - real, _ALLOCATABLE :: edr(:,:,:) _NULL - real, _ALLOCATABLE :: oer(:,:,:) _NULL - real, _ALLOCATABLE :: rrr(:,:,:) _NULL - real, _ALLOCATABLE :: tvr(:,:,:) _NULL - real, _ALLOCATABLE :: pcs(:,:,:) _NULL - real, _ALLOCATABLE :: eds(:,:,:) _NULL - real, _ALLOCATABLE :: oes(:,:,:) _NULL - real, _ALLOCATABLE :: rrs(:,:,:) _NULL - real, _ALLOCATABLE :: tvs(:,:,:) _NULL - real, _ALLOCATABLE :: pcg(:,:,:) _NULL - real, _ALLOCATABLE :: edg(:,:,:) _NULL - real, _ALLOCATABLE :: oeg(:,:,:) _NULL - real, _ALLOCATABLE :: rrg(:,:,:) _NULL - real, _ALLOCATABLE :: tvg(:,:,:) _NULL real, _ALLOCATABLE :: qv_dt(:,:,:) real, _ALLOCATABLE :: ql_dt(:,:,:) @@ -1130,6 +1112,8 @@ module fv_arrays_mod type coarse_restart_type + real, _ALLOCATABLE :: u0(:,:,:) + real, _ALLOCATABLE :: v0(:,:,:) real, _ALLOCATABLE :: u(:,:,:) real, _ALLOCATABLE :: v(:,:,:) real, _ALLOCATABLE :: w(:,:,:) @@ -1251,6 +1235,8 @@ module fv_arrays_mod ! ! The C grid component is "diagnostic" in that it is predicted every time step ! from the D grid variables. 
+ real, _ALLOCATABLE :: u0(:,:,:) _NULL !< initial (t=0) D grid zonal wind (m/s) + real, _ALLOCATABLE :: v0(:,:,:) _NULL !< initial (t=0) D grid meridional wind (m/s) real, _ALLOCATABLE :: u(:,:,:) _NULL !< D grid zonal wind (m/s) real, _ALLOCATABLE :: v(:,:,:) _NULL !< D grid meridional wind (m/s) real, _ALLOCATABLE :: pt(:,:,:) _NULL !< temperature (K) @@ -1487,6 +1473,13 @@ subroutine allocate_fv_atmos_type(Atm, isd_in, ied_in, jsd_in, jed_in, is_in, ie Atm%flagstruct%ndims = ndims_in + if (Atm%flagstruct%is_ideal_case) then + allocate ( Atm%u0(isd:ied ,jsd:jed+1,npz) ) + allocate ( Atm%v0(isd:ied+1,jsd:jed ,npz) ) + else + allocate ( Atm%u0(isd:isd,jsd:jsd,1) ) + allocate ( Atm%v0(isd:isd,jsd:jsd,1) ) + endif allocate ( Atm%u(isd:ied ,jsd:jed+1,npz) ) allocate ( Atm%v(isd:ied+1,jsd:jed ,npz) ) @@ -1533,45 +1526,18 @@ subroutine allocate_fv_atmos_type(Atm, isd_in, ied_in, jsd_in, jed_in, is_in, ie allocate ( Atm%ak(npz_2d+1) ) allocate ( Atm%bk(npz_2d+1) ) - allocate ( Atm%inline_mp%prew(is:ie,js:je) ) - allocate ( Atm%inline_mp%prer(is:ie,js:je) ) - allocate ( Atm%inline_mp%prei(is:ie,js:je) ) - allocate ( Atm%inline_mp%pres(is:ie,js:je) ) - allocate ( Atm%inline_mp%preg(is:ie,js:je) ) - allocate ( Atm%inline_mp%prefluxw(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%prefluxr(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%prefluxi(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%prefluxs(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%prefluxg(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%cond(is:ie,js:je) ) - allocate ( Atm%inline_mp%dep(is:ie,js:je) ) - allocate ( Atm%inline_mp%reevap(is:ie,js:je) ) - allocate ( Atm%inline_mp%sub(is:ie,js:je) ) - allocate ( Atm%inline_mp%pcw(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%edw(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%oew(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%rrw(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%tvw(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%pci(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%edi(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%oei(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%rri(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%tvi(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%pcr(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%edr(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%oer(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%rrr(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%tvr(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%pcs(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%eds(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%oes(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%rrs(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%tvs(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%pcg(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%edg(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%oeg(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%rrg(is:ie,js:je,npz) ) - allocate ( Atm%inline_mp%tvg(is:ie,js:je,npz) ) + if (Atm%flagstruct%do_inline_mp) then + allocate ( Atm%inline_mp%prew(is:ie,js:je) ) + allocate ( Atm%inline_mp%prer(is:ie,js:je) ) + allocate ( Atm%inline_mp%prei(is:ie,js:je) ) + allocate ( Atm%inline_mp%pres(is:ie,js:je) ) + allocate ( Atm%inline_mp%preg(is:ie,js:je) ) + allocate ( Atm%inline_mp%prefluxw(is:ie,js:je,npz) ) + allocate ( Atm%inline_mp%prefluxr(is:ie,js:je,npz) ) + allocate ( Atm%inline_mp%prefluxi(is:ie,js:je,npz) ) + allocate ( Atm%inline_mp%prefluxs(is:ie,js:je,npz) ) + allocate ( Atm%inline_mp%prefluxg(is:ie,js:je,npz) ) + endif !-------------------------- ! 
Non-hydrostatic dynamics: @@ -1613,12 +1579,18 @@ subroutine allocate_fv_atmos_type(Atm, isd_in, ied_in, jsd_in, jed_in, is_in, ie enddo do j=jsd, jed+1 do i=isd, ied + if (Atm%flagstruct%is_ideal_case) then + Atm%u0(i,j,k) = 0. + endif Atm%u(i,j,k) = 0. Atm%vc(i,j,k) = real_big enddo enddo do j=jsd, jed do i=isd, ied+1 + if (Atm%flagstruct%is_ideal_case) then + Atm%v0(i,j,k) = 0. + endif Atm%v(i,j,k) = 0. Atm%uc(i,j,k) = real_big enddo @@ -1650,48 +1622,25 @@ subroutine allocate_fv_atmos_type(Atm, isd_in, ied_in, jsd_in, jed_in, is_in, ie enddo enddo enddo + if (Atm%flagstruct%do_inline_mp) then + do j=js, je + do i=is, ie + Atm%inline_mp%prew(i,j) = real_big + Atm%inline_mp%prer(i,j) = real_big + Atm%inline_mp%prei(i,j) = real_big + Atm%inline_mp%pres(i,j) = real_big + Atm%inline_mp%preg(i,j) = real_big + Atm%inline_mp%prefluxw(i,j,:) = real_big + Atm%inline_mp%prefluxr(i,j,:) = real_big + Atm%inline_mp%prefluxi(i,j,:) = real_big + Atm%inline_mp%prefluxs(i,j,:) = real_big + Atm%inline_mp%prefluxg(i,j,:) = real_big + enddo + enddo + endif + do j=js, je do i=is, ie - Atm%inline_mp%prew(i,j) = real_big - Atm%inline_mp%prer(i,j) = real_big - Atm%inline_mp%prei(i,j) = real_big - Atm%inline_mp%pres(i,j) = real_big - Atm%inline_mp%preg(i,j) = real_big - Atm%inline_mp%prefluxw(i,j,:) = real_big - Atm%inline_mp%prefluxr(i,j,:) = real_big - Atm%inline_mp%prefluxi(i,j,:) = real_big - Atm%inline_mp%prefluxs(i,j,:) = real_big - Atm%inline_mp%prefluxg(i,j,:) = real_big - Atm%inline_mp%cond(i,j) = real_big - Atm%inline_mp%dep(i,j) = real_big - Atm%inline_mp%reevap(i,j) = real_big - Atm%inline_mp%sub(i,j) = real_big - Atm%inline_mp%pcw(i,j,:) = real_big - Atm%inline_mp%edw(i,j,:) = real_big - Atm%inline_mp%oew(i,j,:) = real_big - Atm%inline_mp%rrw(i,j,:) = real_big - Atm%inline_mp%tvw(i,j,:) = real_big - Atm%inline_mp%pci(i,j,:) = real_big - Atm%inline_mp%edi(i,j,:) = real_big - Atm%inline_mp%oei(i,j,:) = real_big - Atm%inline_mp%rri(i,j,:) = real_big - Atm%inline_mp%tvi(i,j,:) = real_big - Atm%inline_mp%pcr(i,j,:) = real_big - Atm%inline_mp%edr(i,j,:) = real_big - Atm%inline_mp%oer(i,j,:) = real_big - Atm%inline_mp%rrr(i,j,:) = real_big - Atm%inline_mp%tvr(i,j,:) = real_big - Atm%inline_mp%pcs(i,j,:) = real_big - Atm%inline_mp%eds(i,j,:) = real_big - Atm%inline_mp%oes(i,j,:) = real_big - Atm%inline_mp%rrs(i,j,:) = real_big - Atm%inline_mp%tvs(i,j,:) = real_big - Atm%inline_mp%pcg(i,j,:) = real_big - Atm%inline_mp%edg(i,j,:) = real_big - Atm%inline_mp%oeg(i,j,:) = real_big - Atm%inline_mp%rrg(i,j,:) = real_big - Atm%inline_mp%tvg(i,j,:) = real_big - Atm%ts(i,j) = 300. Atm%phis(i,j) = real_big @@ -1899,7 +1848,7 @@ subroutine allocate_fv_atmos_type(Atm, isd_in, ied_in, jsd_in, jed_in, is_in, ie Atm%gridstruct%grid_type => Atm%flagstruct%grid_type Atm%flagstruct%grid_number => Atm%grid_number Atm%gridstruct%regional => Atm%flagstruct%regional - Atm%gridstruct%bounded_domain = Atm%flagstruct%regional .or. Atm%neststruct%nested + Atm%gridstruct%bounded_domain = Atm%flagstruct%regional .or. Atm%neststruct%nested .or. (Atm%flagstruct%grid_type == 4) if (Atm%neststruct%nested) Atm%neststruct%parent_grid => Atm%parent_grid Atm%allocated = .true. 
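! ---------------------------------------------------------------------------
! Editor's note (not part of the patch): the allocate_fv_atmos_type hunk above
! sizes Atm%u0/Atm%v0 at full dimension only when is_ideal_case is set, and
! otherwise allocates 1-point placeholders so the unconditional deallocates in
! deallocate_fv_atmos_type (next hunk) remain legal; the inline_mp fields get a
! similar guard on do_inline_mp.  Below is a minimal, self-contained sketch of
! that conditional-allocation pattern -- the program name, bounds, and flag
! value are illustrative only and are not FV3 code.
program conditional_alloc_sketch
  implicit none
  logical, parameter :: is_ideal_case = .false.      ! feature flag, as read from fv_core_nml
  integer, parameter :: isd = 1, ied = 48, jsd = 1, jed = 48, npz = 63
  real, allocatable  :: u0(:,:,:)                    ! initial (t=0) D-grid zonal wind

  if (is_ideal_case) then
     ! full-size field, written to the restart files for idealized runs
     allocate ( u0(isd:ied, jsd:jed+1, npz) )
  else
     ! 1-point placeholder: keeps u0 allocated so a later deallocate(u0) is always valid
     allocate ( u0(isd:isd, jsd:jsd, 1) )
  endif
  u0 = 0.

  write(*,*) 'u0 elements allocated: ', size(u0)
  deallocate ( u0 )
end program conditional_alloc_sketch
! ---------------------------------------------------------------------------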
@@ -1915,6 +1864,8 @@ subroutine deallocate_fv_atmos_type(Atm) integer :: n if (.not.Atm%allocated) return + deallocate ( Atm%u0 ) + deallocate ( Atm%v0 ) deallocate ( Atm%u ) deallocate ( Atm%v ) deallocate ( Atm%pt ) @@ -1941,45 +1892,18 @@ subroutine deallocate_fv_atmos_type(Atm) deallocate ( Atm%bk ) deallocate ( Atm%diss_est ) - deallocate ( Atm%inline_mp%prew ) - deallocate ( Atm%inline_mp%prer ) - deallocate ( Atm%inline_mp%prei ) - deallocate ( Atm%inline_mp%pres ) - deallocate ( Atm%inline_mp%preg ) - deallocate ( Atm%inline_mp%prefluxw ) - deallocate ( Atm%inline_mp%prefluxr ) - deallocate ( Atm%inline_mp%prefluxi ) - deallocate ( Atm%inline_mp%prefluxs ) - deallocate ( Atm%inline_mp%prefluxg ) - deallocate ( Atm%inline_mp%cond ) - deallocate ( Atm%inline_mp%dep ) - deallocate ( Atm%inline_mp%reevap ) - deallocate ( Atm%inline_mp%sub ) - deallocate ( Atm%inline_mp%pcw ) - deallocate ( Atm%inline_mp%edw ) - deallocate ( Atm%inline_mp%oew ) - deallocate ( Atm%inline_mp%rrw ) - deallocate ( Atm%inline_mp%tvw ) - deallocate ( Atm%inline_mp%pci ) - deallocate ( Atm%inline_mp%edi ) - deallocate ( Atm%inline_mp%oei ) - deallocate ( Atm%inline_mp%rri ) - deallocate ( Atm%inline_mp%tvi ) - deallocate ( Atm%inline_mp%pcr ) - deallocate ( Atm%inline_mp%edr ) - deallocate ( Atm%inline_mp%oer ) - deallocate ( Atm%inline_mp%rrr ) - deallocate ( Atm%inline_mp%tvr ) - deallocate ( Atm%inline_mp%pcs ) - deallocate ( Atm%inline_mp%eds ) - deallocate ( Atm%inline_mp%oes ) - deallocate ( Atm%inline_mp%rrs ) - deallocate ( Atm%inline_mp%tvs ) - deallocate ( Atm%inline_mp%pcg ) - deallocate ( Atm%inline_mp%edg ) - deallocate ( Atm%inline_mp%oeg ) - deallocate ( Atm%inline_mp%rrg ) - deallocate ( Atm%inline_mp%tvg ) + if (Atm%flagstruct%do_inline_mp) then + deallocate ( Atm%inline_mp%prew ) + deallocate ( Atm%inline_mp%prer ) + deallocate ( Atm%inline_mp%prei ) + deallocate ( Atm%inline_mp%pres ) + deallocate ( Atm%inline_mp%preg ) + deallocate ( Atm%inline_mp%prefluxw ) + deallocate ( Atm%inline_mp%prefluxr ) + deallocate ( Atm%inline_mp%prefluxi ) + deallocate ( Atm%inline_mp%prefluxs ) + deallocate ( Atm%inline_mp%prefluxg ) + endif deallocate ( Atm%u_srf ) deallocate ( Atm%v_srf ) @@ -2293,4 +2217,3 @@ end subroutine deallocate_fv_nest_BC_type_3d end module fv_arrays_mod - diff --git a/model/fv_control.F90 b/model/fv_control.F90 index f9d171dd9..ebace0a44 100644 --- a/model/fv_control.F90 +++ b/model/fv_control.F90 @@ -34,7 +34,7 @@ module fv_control_mod use mpp_mod, only: FATAL, mpp_error, mpp_pe, stdlog, & mpp_npes, mpp_get_current_pelist, & input_nml_file, get_unit, WARNING, & - read_ascii_file + read_ascii_file, NOTE use mpp_domains_mod, only: mpp_get_data_domain, mpp_get_compute_domain, mpp_get_tile_id use tracer_manager_mod, only: tm_get_number_tracers => get_number_tracers, & tm_get_tracer_index => get_tracer_index, & @@ -55,7 +55,6 @@ module fv_control_mod use fv_mp_mod, only: broadcast_domains, mp_barrier, is_master, setup_master, grids_master_procs, tile_fine use fv_mp_mod, only: MAX_NNEST, MAX_NTILE use test_cases_mod, only: read_namelist_test_case_nml - use fv_timing_mod, only: timing_on, timing_off, timing_init, timing_prt use mpp_domains_mod, only: domain2D use mpp_domains_mod, only: mpp_define_nest_domains, nest_domain_type, mpp_get_global_domain use mpp_domains_mod, only: mpp_get_C2F_index, mpp_get_F2C_index @@ -143,11 +142,13 @@ subroutine fv_control_init(Atm, dt_atmos, this_grid, grids_on_this_pe, p_split) real , pointer :: scale_z real , pointer :: w_max real , pointer :: z_min + 
real , pointer :: d2bg_zq real , pointer :: lim_fac integer , pointer :: nord integer , pointer :: nord_tr real , pointer :: dddmp + real , pointer :: smag2d real , pointer :: d2_bg real , pointer :: d4_bg real , pointer :: vtdm4 @@ -167,8 +168,10 @@ subroutine fv_control_init(Atm, dt_atmos, this_grid, grids_on_this_pe, p_split) logical , pointer :: do_sat_adj logical , pointer :: consv_checker logical , pointer :: do_fast_phys + logical , pointer :: do_intermediate_phys logical , pointer :: do_inline_mp logical , pointer :: do_aerosol + logical , pointer :: do_cosp logical , pointer :: do_f3d logical , pointer :: no_dycore logical , pointer :: convert_ke @@ -267,6 +270,7 @@ subroutine fv_control_init(Atm, dt_atmos, this_grid, grids_on_this_pe, p_split) logical , pointer :: nggps_ic logical , pointer :: hrrrv3_ic logical , pointer :: ecmwf_ic + logical , pointer :: use_gfsO3 logical , pointer :: gfs_phil logical , pointer :: agrid_vel_rst logical , pointer :: use_new_ncep @@ -274,6 +278,7 @@ subroutine fv_control_init(Atm, dt_atmos, this_grid, grids_on_this_pe, p_split) logical , pointer :: fv_diag_ic logical , pointer :: external_ic logical , pointer :: external_eta + logical , pointer :: is_ideal_case logical , pointer :: read_increment logical , pointer :: hydrostatic logical , pointer :: phys_hydrostatic @@ -296,6 +301,7 @@ subroutine fv_control_init(Atm, dt_atmos, this_grid, grids_on_this_pe, p_split) real(kind=R_GRID), pointer :: deglon_start, deglon_stop, & ! boundaries of latlon patch deglat_start, deglat_stop real(kind=R_GRID), pointer :: deglat + real(kind=R_GRID), pointer :: domain_deg logical, pointer :: nested, twowaynest logical, pointer :: regional, write_restart_with_bcs, regional_bcs_from_gsi @@ -444,10 +450,6 @@ subroutine fv_control_init(Atm, dt_atmos, this_grid, grids_on_this_pe, p_split) endif endif - ! 3pre. - call timing_init - call timing_on('TOTAL') - ! 3. Read namelists, do option processing and I/O call set_namelist_pointers(Atm(this_grid)) @@ -456,6 +458,7 @@ subroutine fv_control_init(Atm, dt_atmos, this_grid, grids_on_this_pe, p_split) call read_namelist_fv_grid_nml call read_namelist_fv_core_nml(Atm(this_grid)) ! do options processing here too? call read_namelist_test_case_nml + call read_namelist_integ_phys_nml call mpp_get_current_pelist(Atm(this_grid)%pelist, commID=commID) ! 
for commID call mp_start(commID,halo_update_type) @@ -680,10 +683,12 @@ subroutine set_namelist_pointers(Atm) scale_z => Atm%flagstruct%scale_z w_max => Atm%flagstruct%w_max z_min => Atm%flagstruct%z_min + d2bg_zq => Atm%flagstruct%d2bg_zq lim_fac => Atm%flagstruct%lim_fac nord => Atm%flagstruct%nord nord_tr => Atm%flagstruct%nord_tr dddmp => Atm%flagstruct%dddmp + smag2d => Atm%flagstruct%smag2d d2_bg => Atm%flagstruct%d2_bg d4_bg => Atm%flagstruct%d4_bg vtdm4 => Atm%flagstruct%vtdm4 @@ -702,8 +707,10 @@ subroutine set_namelist_pointers(Atm) do_sat_adj => Atm%flagstruct%do_sat_adj consv_checker => Atm%flagstruct%consv_checker do_fast_phys => Atm%flagstruct%do_fast_phys + do_intermediate_phys => Atm%flagstruct%do_intermediate_phys do_inline_mp => Atm%flagstruct%do_inline_mp do_aerosol => Atm%flagstruct%do_aerosol + do_cosp => Atm%flagstruct%do_cosp do_f3d => Atm%flagstruct%do_f3d no_dycore => Atm%flagstruct%no_dycore convert_ke => Atm%flagstruct%convert_ke @@ -798,6 +805,7 @@ subroutine set_namelist_pointers(Atm) nggps_ic => Atm%flagstruct%nggps_ic hrrrv3_ic => Atm%flagstruct%hrrrv3_ic ecmwf_ic => Atm%flagstruct%ecmwf_ic + use_gfsO3 => Atm%flagstruct%use_gfsO3 gfs_phil => Atm%flagstruct%gfs_phil agrid_vel_rst => Atm%flagstruct%agrid_vel_rst use_new_ncep => Atm%flagstruct%use_new_ncep @@ -805,6 +813,7 @@ subroutine set_namelist_pointers(Atm) fv_diag_ic => Atm%flagstruct%fv_diag_ic external_ic => Atm%flagstruct%external_ic external_eta => Atm%flagstruct%external_eta + is_ideal_case => Atm%flagstruct%is_ideal_case read_increment => Atm%flagstruct%read_increment hydrostatic => Atm%flagstruct%hydrostatic @@ -829,6 +838,7 @@ subroutine set_namelist_pointers(Atm) deglat_stop => Atm%flagstruct%deglat_stop deglat => Atm%flagstruct%deglat + domain_deg => Atm%flagstruct%domain_deg nested => Atm%neststruct%nested twowaynest => Atm%neststruct%twowaynest @@ -919,23 +929,25 @@ subroutine read_namelist_fv_core_nml(Atm) character(len=128) :: res_latlon_dynamics = '' character(len=128) :: res_latlon_tracers = '' + character(len=72) :: err_str + namelist /fv_core_nml/npx, npy, ntiles, npz, npz_type, fv_eta_file, npz_rst, layout, io_layout, ncnst, nwat, & use_logp, p_fac, a_imp, k_split, n_split, m_split, q_split, print_freq, write_3d_diags, & do_schmidt, do_cube_transform, & hord_mt, hord_vt, hord_tm, hord_dp, hord_tr, shift_fac, stretch_fac, target_lat, target_lon, & - kord_mt, kord_wz, kord_tm, kord_tr, remap_te, fv_debug, fv_land, consv_checker, & - do_am4_remap, nudge, do_sat_adj, do_fast_phys, do_inline_mp, do_aerosol, do_f3d, & - external_ic, read_increment, ncep_ic, nggps_ic, hrrrv3_ic, ecmwf_ic, use_new_ncep, use_ncep_phy, fv_diag_ic, & - external_eta, res_latlon_dynamics, res_latlon_tracers, scale_z, w_max, z_min, lim_fac, & - dddmp, d2_bg, d4_bg, vtdm4, trdm2, d_ext, delt_max, beta, non_ortho, n_sponge, & + kord_mt, kord_wz, kord_tm, kord_tr, remap_te, fv_debug, fv_land, & + do_am4_remap, nudge, do_f3d, external_ic, is_ideal_case, read_increment, & + ncep_ic, nggps_ic, hrrrv3_ic, ecmwf_ic, use_gfsO3, use_new_ncep, use_ncep_phy, fv_diag_ic, & + external_eta, res_latlon_dynamics, res_latlon_tracers, scale_z, w_max, z_min, d2bg_zq, lim_fac, & + dddmp, smag2d, d2_bg, d4_bg, vtdm4, trdm2, d_ext, delt_max, beta, non_ortho, n_sponge, & warm_start, adjust_dry_mass, mountain, d_con, ke_bg, nord, nord_tr, convert_ke, use_old_omega, & dry_mass, grid_type, do_Held_Suarez, & consv_te, fill, filter_phys, fill_dp, fill_wz, fill_gfs, consv_am, RF_fast, & range_warn, dwind_2d, inline_q, z_tracer, reproduce_sum, 
adiabatic, do_vort_damp, no_dycore, & - tau, fast_tau_w_sec, tau_h2o, rf_cutoff, te_err, tw_err, nf_omega, hydrostatic, fv_sg_adj, sg_cutoff, breed_vortex_inline, & + tau, fast_tau_w_sec, tau_h2o, rf_cutoff, nf_omega, hydrostatic, fv_sg_adj, sg_cutoff, breed_vortex_inline, & na_init, nudge_dz, hybrid_z, Make_NH, n_zs_filter, nord_zs_filter, full_zs_filter, reset_eta, & pnats, dnats, dnrts, a2b_ord, remap_t, p_ref, d2_bg_k1, d2_bg_k2, & - c2l_ord, dx_const, dy_const, umax, deglat, & + c2l_ord, dx_const, dy_const, umax, deglat, domain_deg, & deglon_start, deglon_stop, deglat_start, deglat_stop, & phys_hydrostatic, use_hydro_pressure, make_hybrid_z, old_divg_damp, add_noise, & nested, twowaynest, nudge_qv, & @@ -1061,8 +1073,52 @@ subroutine read_namelist_fv_core_nml(Atm) target_lon = target_lon * pi/180. target_lat = target_lat * pi/180. + !Checks for deprecated options + if (abs(kord_tr) <= 7) then + write(err_str,'(A, i2, A)') "**DEPRECATED KORD_TR = ", kord_tr, "**" + call mpp_error(NOTE, trim(err_str)) + call mpp_error(NOTE, " The old PPM remapping operators will be removed in a future release.") + endif + if (.not. hydrostatic .and. abs(kord_wz) <= 7) then + write(err_str,'(A, i2, A)') "**DEPRECATED KORD_WZ = ", kord_wz, "**" + call mpp_error(NOTE, trim(err_str)) + call mpp_error(NOTE, " The old PPM remapping operators will be removed in a future release.") + endif + if (abs(kord_tm) <= 7) then + write(err_str,'(A, i2, A)') "**DEPRECATED KORD_TM = ", kord_tm, "**" + call mpp_error(NOTE, trim(err_str)) + call mpp_error(NOTE, " The old PPM remapping operators will be removed in a future release.") + endif + if (abs(kord_mt) <= 7) then + write(err_str,'(A, i2, A)') "**DEPRECATED KORD_MT = ", kord_mt, "**" + call mpp_error(NOTE, trim(err_str)) + call mpp_error(NOTE, " The old PPM remapping operators will be removed in a future release.") + endif + + if (do_am4_remap) then + call mpp_error(NOTE, "** DEPRECATED DO_AM4_REMAP **") + call mpp_error(NOTE, " This switch is no longer necessary because the AM4 kord=10 has been") + call mpp_error(NOTE, " restored to normal operation.") + endif + + end subroutine read_namelist_fv_core_nml + subroutine read_namelist_integ_phys_nml + + integer :: ios, ierr + namelist /integ_phys_nml/ do_sat_adj, do_fast_phys, do_intermediate_phys, do_inline_mp, do_aerosol, do_cosp, consv_checker, te_err, tw_err + + read (input_nml_file,integ_phys_nml,iostat=ios) + ierr = check_nml_error(ios,'integ_phys_nml') + + call write_version_number ( 'FV_CONTROL_MOD', version ) + unit = stdlog() + write(unit, nml=integ_phys_nml) + + !Basic option processing + end subroutine read_namelist_integ_phys_nml + subroutine setup_update_regions integer :: isu, ieu, jsu, jeu ! 
update regions for centered variables @@ -1186,9 +1242,6 @@ subroutine fv_end(Atm, this_grid) integer :: n - call timing_off('TOTAL') - call timing_prt( mpp_pe() ) - call fv_restart_end(Atm(this_grid)) call fv_io_exit() diff --git a/model/fv_dynamics.F90 b/model/fv_dynamics.F90 index cfc257055..57f0623c9 100644 --- a/model/fv_dynamics.F90 +++ b/model/fv_dynamics.F90 @@ -32,7 +32,7 @@ module fv_dynamics_mod use fv_mp_mod, only: start_group_halo_update, complete_group_halo_update use fv_timing_mod, only: timing_on, timing_off use diag_manager_mod, only: send_data - use fv_diagnostics_mod, only: fv_time, prt_mxm, range_check, prt_minmax, is_ideal_case + use fv_diagnostics_mod, only: fv_time, prt_mxm, range_check, prt_minmax use mpp_domains_mod, only: DGRID_NE, CGRID_NE, mpp_update_domains, domain2D use mpp_mod, only: mpp_pe use field_manager_mod, only: MODEL_ATMOS @@ -73,7 +73,7 @@ module fv_dynamics_mod subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, & reproduce_sum, kappa, cp_air, zvir, ptop, ks, ncnst, n_split, & - q_split, u, v, w, delz, hydrostatic, pt, delp, q, & + q_split, u0, v0, u, v, w, delz, hydrostatic, pt, delp, q, & ps, pe, pk, peln, pkz, phis, q_con, omga, ua, va, uc, vc, & ak, bk, mfx, mfy, cx, cy, ze0, hybrid_z, & gridstruct, flagstruct, neststruct, idiag, bd, & @@ -100,6 +100,8 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, logical, intent(IN) :: hybrid_z ! Using hybrid_z for remapping type(fv_grid_bounds_type), intent(IN) :: bd + real, intent(inout), dimension(bd%isd:,bd%jsd:,1:) :: u0 ! initial (t=0) D grid zonal wind (m/s) + real, intent(inout), dimension(bd%isd:,bd%jsd:,1:) :: v0 ! initial (t=0) D grid meridional wind (m/s) real, intent(inout), dimension(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) :: u ! D grid zonal wind (m/s) real, intent(inout), dimension(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) :: v ! D grid meridional wind (m/s) real, intent(inout) :: w( bd%isd: ,bd%jsd: ,1:) ! W (m/s) @@ -159,7 +161,7 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, real:: pfull(npz) real, dimension(bd%is:bd%ie):: cvm real, allocatable :: dp1(:,:,:), cappa(:,:,:) - real:: akap, rdg, ph1, ph2, mdt, gam, amdt, u0 + real:: akap, rdg, ph1, ph2, mdt, gam, amdt, u00 real:: recip_k_split,reg_bc_update_time integer:: kord_tracer(ncnst) integer :: i,j,k, n, iq, n_map, nq, nr, nwat, k_split @@ -204,7 +206,8 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, !We call this BEFORE converting pt to virtual potential temperature, !since we interpolate on (regular) temperature rather than theta. if (gridstruct%nested .or. ANY(neststruct%child_grids)) then - call timing_on('NEST_BCs') + call timing_on('NEST_BCs') + call setup_nested_grid_BCs(npx, npy, npz, zvir, ncnst, & u, v, w, pt, delp, delz, q, uc, vc, & #ifdef USE_COND @@ -218,7 +221,7 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, neststruct%nest_timestep, neststruct%tracer_nest_timestep, & domain, parent_grid, bd, nwat, ak, bk) - call timing_off('NEST_BCs') + call timing_off('NEST_BCs') endif ! For the regional domain set values valid the beginning of the @@ -376,14 +379,14 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, endif if( .not.flagstruct%RF_fast .and. flagstruct%tau > 0. ) then - if ( gridstruct%grid_type<4 .or. gridstruct%bounded_domain .or. is_ideal_case ) then + if ( gridstruct%grid_type<4 .or. gridstruct%bounded_domain .or. flagstruct%is_ideal_case ) then ! if ( flagstruct%RF_fast ) then ! 
call Ray_fast(abs(dt), npx, npy, npz, pfull, flagstruct%tau, u, v, w, & ! dp_ref, ptop, hydrostatic, flagstruct%rf_cutoff, bd) ! else - call Rayleigh_Super(abs(bdt), npx, npy, npz, ks, pfull, phis, flagstruct%tau, u, v, w, pt, & + call Rayleigh_Super(abs(bdt), npx, npy, npz, ks, pfull, phis, flagstruct%tau, u0, v0, u, v, w, pt, & ua, va, delz, gridstruct%agrid, cp_air, rdgas, ptop, hydrostatic, & - .not. (gridstruct%bounded_domain .or. is_ideal_case), flagstruct%rf_cutoff, gridstruct, domain, bd) + .not. (gridstruct%bounded_domain .or. flagstruct%is_ideal_case), flagstruct%rf_cutoff, gridstruct, domain, bd, flagstruct%is_ideal_case) ! endif else call Rayleigh_Friction(abs(bdt), npx, npy, npz, ks, pfull, flagstruct%tau, u, v, w, pt, & @@ -421,10 +424,6 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, inline_mp%prefluxi = 0.0 inline_mp%prefluxs = 0.0 inline_mp%prefluxg = 0.0 - inline_mp%cond = 0.0 - inline_mp%dep = 0.0 - inline_mp%reevap = 0.0 - inline_mp%sub = 0.0 if (allocated(inline_mp%qv_dt)) inline_mp%qv_dt = 0.0 if (allocated(inline_mp%ql_dt)) inline_mp%ql_dt = 0.0 if (allocated(inline_mp%qi_dt)) inline_mp%qi_dt = 0.0 @@ -438,10 +437,11 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, if (allocated(inline_mp%v_dt)) inline_mp%v_dt = 0.0 endif - call timing_on('FV_DYN_LOOP') + call timing_on('FV_DYN_LOOP') + do n_map=1, k_split ! first level of time-split k_step = n_map - call timing_on('COMM_TOTAL') + call timing_on('COMM_TOTAL') #ifdef USE_COND call start_group_halo_update(i_pack(11), q_con, domain) #ifdef MOIST_CAPPA @@ -453,7 +453,7 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, #ifndef ROT3 call start_group_halo_update(i_pack(8), u, v, domain, gridtype=DGRID_NE) #endif - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') !$OMP parallel do default(none) shared(isd,ied,jsd,jed,npz,dp1,delp) do k=1,npz do j=jsd,jed @@ -462,30 +462,26 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, enddo enddo enddo - if ( flagstruct%trdm2 > 1.e-4 ) then - call start_group_halo_update(i_pack(13), dp1, domain) - endif if ( n_map==k_split ) last_step = .true. #ifdef USE_COND - call timing_on('COMM_TOTAL') + call timing_on('COMM_TOTAL') call complete_group_halo_update(i_pack(11), domain) #ifdef MOIST_CAPPA call complete_group_halo_update(i_pack(12), domain) #endif - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') #endif - call timing_on('DYN_CORE') + call timing_on('DYN_CORE') call dyn_core(npx, npy, npz, ng, sphum, nq, mdt, n_map, n_split, zvir, cp_air, akap, cappa, grav, hydrostatic, & u, v, w, delz, pt, q, delp, pe, pk, phis, ws, omga, ptop, pfull, ua, va, & uc, vc, mfx, mfy, cx, cy, pkz, peln, q_con, ak, bk, ks, & gridstruct, flagstruct, neststruct, idiag, bd, & domain, n_map==1, i_pack, last_step, diss_est, & consv_te, te_2d, time_total) - call timing_off('DYN_CORE') - + call timing_off('DYN_CORE') #ifdef SW_DYNAMICS !!$OMP parallel do default(none) shared(is,ie,js,je,ps,delp,agrav) @@ -499,7 +495,12 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, !-------------------------------------------------------- ! Perform large-time-step scalar transport using the accumulated CFL and ! mass fluxes - call timing_on('tracer_2d') + + if ( flagstruct%trdm2 > 1.e-4 ) then + call start_group_halo_update(i_pack(13), dp1, domain) + endif + + call timing_on('tracer_2d') !!! CLEANUP: merge these two calls? 
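For orientation: the fv_control.F90 hunks above split the fast/intermediate physics switches (and the te_err/tw_err tolerances) out of fv_core_nml into the new integ_phys_nml group read by read_namelist_integ_phys_nml. A minimal sketch of how that group might appear in a run directory's input.nml follows; the variable names are the ones declared in the namelist statement above, while the values are illustrative placeholders rather than release defaults.

    &integ_phys_nml
        do_sat_adj = .false.
        do_inline_mp = .true.
        do_fast_phys = .false.
        do_intermediate_phys = .true.
        do_aerosol = .false.
        do_cosp = .false.
        consv_checker = .false.
        te_err = 1.e-5    ! placeholder tolerance
        tw_err = 1.e-8    ! placeholder tolerance
    /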
if (gridstruct%bounded_domain) then call tracer_2d_nested(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, npx, npy, npz, nq, & @@ -517,11 +518,11 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, flagstruct%nord_tr, flagstruct%trdm2, flagstruct%lim_fac) endif endif - call timing_off('tracer_2d') + call timing_off('tracer_2d') #ifdef FILL2D if ( flagstruct%hord_tr<8 .and. flagstruct%moist_phys ) then - call timing_on('Fill2D') + call timing_on('Fill2D') if ( liq_wat > 0 ) & call fill2D(is, ie, js, je, ng, npz, q(isd,jsd,1,liq_wat), delp, gridstruct%area, domain, gridstruct%bounded_domain, npx, npy) if ( rainwat > 0 ) & @@ -532,7 +533,7 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, call fill2D(is, ie, js, je, ng, npz, q(isd,jsd,1,snowwat), delp, gridstruct%area, domain, gridstruct%bounded_domain, npx, npy) if ( graupel > 0 ) & call fill2D(is, ie, js, je, ng, npz, q(isd,jsd,1,graupel), delp, gridstruct%area, domain, gridstruct%bounded_domain, npx, npy) - call timing_off('Fill2D') + call timing_off('Fill2D') endif #endif @@ -555,7 +556,8 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, if ( iq==cld_amt ) kord_tracer(iq) = 9 ! monotonic enddo - call timing_on('Remapping') + call timing_on('Remapping') + if ( flagstruct%fv_debug ) then if (is_master()) write(*,'(A, I3, A1, I3)') 'before remap k_split ', n_map, '/', k_split call prt_mxm('T_ldyn', pt, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain) @@ -598,8 +600,8 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, hybrid_z, & flagstruct%adiabatic, do_adiabatic_init, flagstruct%do_inline_mp, & inline_mp, flagstruct%c2l_ord, bd, flagstruct%fv_debug, & - flagstruct%w_limiter, flagstruct%do_am4_remap, & - flagstruct%do_fast_phys, flagstruct%consv_checker, flagstruct%adj_mass_vmr) + flagstruct%w_limiter, flagstruct%do_fast_phys, flagstruct%do_intermediate_phys, & + flagstruct%consv_checker, flagstruct%adj_mass_vmr) if ( flagstruct%fv_debug ) then if (is_master()) write(*,'(A, I3, A1, I3)') 'finished k_split ', n_map, '/', k_split @@ -617,7 +619,9 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, if ( graupel > 0 ) & call prt_mxm('graupel_dyn', q(isd,jsd,1,graupel), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain) endif - call timing_off('Remapping') + + call timing_off('Remapping') + #ifdef MOIST_CAPPA if ( neststruct%nested .and. .not. last_step) then call nested_grid_BC_apply_intT(cappa, & @@ -645,6 +649,8 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, #endif !endif SW_DYNAMICS enddo ! n_map loop + call timing_off('FV_DYN_LOOP') + ! 
Initialize rain, ice, snow and graupel precipitaiton if (flagstruct%do_inline_mp) then inline_mp%prew = inline_mp%prew / k_split @@ -657,10 +663,6 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, inline_mp%prefluxi = inline_mp%prefluxi / k_split inline_mp%prefluxs = inline_mp%prefluxs / k_split inline_mp%prefluxg = inline_mp%prefluxg / k_split - inline_mp%cond = inline_mp%cond / k_split - inline_mp%dep = inline_mp%dep / k_split - inline_mp%reevap = inline_mp%reevap / k_split - inline_mp%sub = inline_mp%sub / k_split if (allocated(inline_mp%qv_dt)) inline_mp%qv_dt = inline_mp%qv_dt / bdt if (allocated(inline_mp%ql_dt)) inline_mp%ql_dt = inline_mp%ql_dt / bdt if (allocated(inline_mp%qi_dt)) inline_mp%qi_dt = inline_mp%qi_dt / bdt @@ -674,8 +676,6 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, if (allocated(inline_mp%v_dt)) inline_mp%v_dt = inline_mp%v_dt / bdt endif - call timing_off('FV_DYN_LOOP') - if( nwat==6 ) then if (cld_amt > 0) then call neg_adj3(is, ie, js, je, ng, npz, & @@ -728,29 +728,29 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, if ( flagstruct%consv_am .or. prt_minmax ) then amdt = g_sum( domain, te_2d, is, ie, js, je, ng, gridstruct%area_64, 0, reproduce=.true.) - u0 = -radius*amdt/g_sum( domain, m_fac, is, ie, js, je, ng, gridstruct%area_64, 0,reproduce=.true.) + u00 = -radius*amdt/g_sum( domain, m_fac, is, ie, js, je, ng, gridstruct%area_64, 0,reproduce=.true.) if(is_master() .and. prt_minmax) & - write(6,*) 'Dynamic AM tendency (Hadleys)=', amdt/(bdt*1.e18), 'del-u (per day)=', u0*86400./bdt + write(6,*) 'Dynamic AM tendency (Hadleys)=', amdt/(bdt*1.e18), 'del-u (per day)=', u00*86400./bdt endif if( flagstruct%consv_am ) then -!$OMP parallel do default(none) shared(is,ie,js,je,m_fac,u0,gridstruct) +!$OMP parallel do default(none) shared(is,ie,js,je,m_fac,u00,gridstruct) do j=js,je do i=is,ie - m_fac(i,j) = u0*cos(gridstruct%agrid(i,j,2)) + m_fac(i,j) = u00*cos(gridstruct%agrid(i,j,2)) enddo enddo !$OMP parallel do default(none) shared(is,ie,js,je,npz,hydrostatic,pt,m_fac,ua,cp_air, & -!$OMP u,u0,gridstruct,v ) +!$OMP u,u00,gridstruct,v ) do k=1,npz do j=js,je+1 do i=is,ie - u(i,j,k) = u(i,j,k) + u0*gridstruct%l2c_u(i,j) + u(i,j,k) = u(i,j,k) + u00*gridstruct%l2c_u(i,j) enddo enddo do j=js,je do i=is,ie+1 - v(i,j,k) = v(i,j,k) + u0*gridstruct%l2c_v(i,j) + v(i,j,k) = v(i,j,k) + u00*gridstruct%l2c_v(i,j) enddo enddo enddo @@ -906,9 +906,9 @@ end subroutine Rayleigh_fast - subroutine Rayleigh_Super(dt, npx, npy, npz, ks, pm, phis, tau, u, v, w, pt, & - ua, va, delz, agrid, cp, rg, ptop, hydrostatic, & - conserve, rf_cutoff, gridstruct, domain, bd) + subroutine Rayleigh_Super(dt, npx, npy, npz, ks, pm, phis, tau, u0, v0, u, v, & + w, pt, ua, va, delz, agrid, cp, rg, ptop, hydrostatic, & + conserve, rf_cutoff, gridstruct, domain, bd, is_ideal_case) real, intent(in):: dt real, intent(in):: tau ! time scale (days) real, intent(in):: cp, rg, ptop, rf_cutoff @@ -916,7 +916,10 @@ subroutine Rayleigh_Super(dt, npx, npy, npz, ks, pm, phis, tau, u, v, w, pt, & integer, intent(in):: npx, npy, npz, ks logical, intent(in):: hydrostatic logical, intent(in):: conserve + logical, intent(in):: is_ideal_case type(fv_grid_bounds_type), intent(IN) :: bd + real, intent(inout):: u0(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) ! initial (t=0) D grid zonal wind (m/s) + real, intent(inout):: v0(bd%isd:bd%ied+1,bd%jsd:bd%jed,npz) ! 
initial (t=0) D grid meridional wind (m/s) real, intent(inout):: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) ! D grid zonal wind (m/s) real, intent(inout):: v(bd%isd:bd%ied+1,bd%jsd:bd%jed,npz) ! D grid meridional wind (m/s) real, intent(inout):: w(bd%isd: ,bd%jsd: ,1: ) ! cell center vertical wind (m/s) @@ -930,7 +933,6 @@ subroutine Rayleigh_Super(dt, npx, npy, npz, ks, pm, phis, tau, u, v, w, pt, & type(domain2d), intent(INOUT) :: domain ! real, allocatable :: u2f(:,:,:) - real, parameter:: u0 = 60. ! scaling velocity real, parameter:: sday = 86400. real rcv, tau0 integer i, j, k @@ -953,16 +955,16 @@ subroutine Rayleigh_Super(dt, npx, npy, npz, ks, pm, phis, tau, u, v, w, pt, & if ( is_ideal_case )then allocate ( u00(is:ie, js:je+1,npz) ) allocate ( v00(is:ie+1,js:je ,npz) ) -!$OMP parallel do default(none) shared(is,ie,js,je,npz,u00,u,v00,v) +!$OMP parallel do default(none) shared(is,ie,js,je,npz,u00,u0,v00,v0) do k=1,npz do j=js,je+1 do i=is,ie - u00(i,j,k) = u(i,j,k) + u00(i,j,k) = u0(i,j,k) enddo enddo do j=js,je do i=is,ie+1 - v00(i,j,k) = v(i,j,k) + v00(i,j,k) = v0(i,j,k) enddo enddo enddo @@ -1004,9 +1006,9 @@ subroutine Rayleigh_Super(dt, npx, npy, npz, ks, pm, phis, tau, u, v, w, pt, & u2f(:,:,k) = 1. endif enddo - call timing_on('COMM_TOTAL') + call timing_on('COMM_TOTAL') call mpp_update_domains(u2f, domain) - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') !$OMP parallel do default(none) shared(is,ie,js,je,kmax,pm,rf_cutoff,w,rf,u,v, & !$OMP u00,v00,is_ideal_case, & @@ -1154,9 +1156,9 @@ subroutine Rayleigh_Friction(dt, npx, npy, npz, ks, pm, tau, u, v, w, pt, & endif enddo - call timing_on('COMM_TOTAL') + call timing_on('COMM_TOTAL') call mpp_update_domains(u2f, domain) - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') !$OMP parallel do default(none) shared(is,ie,js,je,kmax,conserve,hydrostatic,pt,u2f,cp,rg, & !$OMP ptop,pm,rf,delz,rcv,u,v,w) @@ -1259,4 +1261,3 @@ subroutine compute_aam(npz, is, ie, js, je, isd, ied, jsd, jed, gridstruct, bd, end subroutine compute_aam end module fv_dynamics_mod - diff --git a/model/fv_grid_utils.F90 b/model/fv_grid_utils.F90 index 18b19c901..99f8269e8 100644 --- a/model/fv_grid_utils.F90 +++ b/model/fv_grid_utils.F90 @@ -194,7 +194,7 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) if (.not. 
Atm%flagstruct%external_eta) then call set_eta(npz, Atm%ks, Atm%ptop, Atm%ak, Atm%bk, Atm%flagstruct%npz_type, Atm%flagstruct%fv_eta_file) if ( is_master() ) then - write(*,*) 'Grid_init', npz, Atm%ks, Atm%ptop + !write(*,*) 'Grid_init', npz, Atm%ks, Atm%ptop tmp1 = Atm%ak(Atm%ks+1) do k=Atm%ks+1,npz tmp1 = max(tmp1, (Atm%ak(k)-Atm%ak(k+1))/max(1.E-9, (Atm%bk(k+1)-Atm%bk(k))) ) @@ -230,7 +230,7 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) if ( sw_corner ) then tmp1 = great_circle_dist(grid(1,1,1:2), agrid(1,1,1:2)) tmp2 = great_circle_dist(grid(1,1,1:2), agrid(2,2,1:2)) - write(*,*) 'Corner interpolation coefficient=', tmp2/(tmp2-tmp1) + if (Atm%flagstruct%fv_debug) write(*,*) 'Corner interpolation coefficient=', tmp2/(tmp2-tmp1) endif if (grid_type < 3) then @@ -674,12 +674,15 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) call init_cubed_to_latlon( Atm%gridstruct, Atm%flagstruct%hydrostatic, agrid, grid_type, c2l_order, Atm%bd ) call global_mx(area, Atm%ng, Atm%gridstruct%da_min, Atm%gridstruct%da_max, Atm%bd) - if( is_master() ) write(*,*) 'da_max/da_min=', Atm%gridstruct%da_max/Atm%gridstruct%da_min + if( is_master() ) write(*,'(A, G20.8)') 'da_max/da_min=', Atm%gridstruct%da_max/Atm%gridstruct%da_min call global_mx_c(area_c(is:ie,js:je), is, ie, js, je, Atm%gridstruct%da_min_c, Atm%gridstruct%da_max_c) - if( is_master() ) write(*,*) 'da_max_c, da_min_c, da_max_c/da_min_c=', Atm%gridstruct%da_max_c, Atm%gridstruct%da_min_c, Atm%gridstruct%da_max_c/Atm%gridstruct%da_min_c - + if( is_master() ) then + write(*,'(A, G20.8)') 'da_max_c = ', Atm%gridstruct%da_max_c + write(*,'(A, G20.8)') 'da_min_c = ', Atm%gridstruct%da_min_c + write(*,'(A, G20.8)') 'da_max_c/da_min_c=', Atm%gridstruct%da_max_c/Atm%gridstruct%da_min_c + endif !------------------------------------------------ ! Initialization for interpolation at face edges !------------------------------------------------ @@ -2364,9 +2367,9 @@ subroutine c2l_ord4(u, v, ua, va, gridstruct, npx, npy, km, grid_type, domain, b je = bd%je if ( mode > 0 ) then - call timing_on('COMM_TOTAL') + call timing_on('COMM_TOTAL') call mpp_update_domains(u, v, domain, gridtype=DGRID_NE) - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') endif !$OMP parallel do default(none) shared(is,ie,js,je,km,npx,npy,grid_type,bounded_domain,c2,c1, & @@ -3525,9 +3528,9 @@ subroutine update2d_dwinds_phys(is, ie, js, je, isd, ied, jsd, jed, dt, u_dt, v_ enddo enddo ! (u_dt,v_dt) are now on local coordinate system - call timing_on('COMM_TOTAL') + call timing_on('COMM_TOTAL') call mpp_update_domains(u_dt, v_dt, domain, gridtype=AGRID_PARAM) - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') dt5 = 0.5 * dt diff --git a/model/fv_mapz.F90 b/model/fv_mapz.F90 index 9a707992b..bdaac733c 100644 --- a/model/fv_mapz.F90 +++ b/model/fv_mapz.F90 @@ -28,7 +28,7 @@ module fv_mapz_mod use constants_mod, only: pi=>pi_8, rvgas, rdgas, grav, hlv, hlf, cp_air, cp_vapor use fv_arrays_mod, only: radius ! 
scaled for small earth - use tracer_manager_mod,only: get_tracer_index + use tracer_manager_mod,only: get_tracer_index, adjust_mass use field_manager_mod, only: MODEL_ATMOS use fv_grid_utils_mod, only: g_sum, ptop_min, cubed_to_latlon use fv_fill_mod, only: fillz @@ -68,12 +68,13 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & ptop, ak, bk, pfull, gridstruct, domain, do_sat_adj, & hydrostatic, hybrid_z, adiabatic, do_adiabatic_init, & do_inline_mp, inline_mp, c2l_ord, bd, fv_debug, & - w_limiter, do_am4_remap, do_fast_phys, consv_checker, adj_mass_vmr) + w_limiter, do_fast_phys, do_intermediate_phys, consv_checker, adj_mass_vmr) + logical, intent(in):: last_step logical, intent(in):: fv_debug logical, intent(in):: w_limiter - logical, intent(in):: do_am4_remap logical, intent(in):: do_fast_phys + logical, intent(in):: do_intermediate_phys logical, intent(in):: consv_checker integer, intent(in):: adj_mass_vmr real, intent(in):: mdt ! remap time step @@ -159,7 +160,7 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & real rcp, rg, rrg, bkh, dtmp, k1k, dlnp, tpe integer:: i,j,k - integer:: nt, liq_wat, ice_wat, rainwat, snowwat, cld_amt, graupel, iq, n, kmp, kp, k_next + integer:: nt, liq_wat, ice_wat, rainwat, snowwat, cld_amt, graupel, w_diff, iq, n, kmp, kp, k_next integer:: ccn_cm3, cin_cm3, aerosol k1k = rdgas/cv_air ! akap / (1.-akap) = rg/Cv=0.4 @@ -173,6 +174,7 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & snowwat = get_tracer_index (MODEL_ATMOS, 'snowwat') graupel = get_tracer_index (MODEL_ATMOS, 'graupel') cld_amt = get_tracer_index (MODEL_ATMOS, 'cld_amt') + w_diff = get_tracer_index (MODEL_ATMOS, 'w_diff') ccn_cm3 = get_tracer_index (MODEL_ATMOS, 'ccn_cm3') cin_cm3 = get_tracer_index (MODEL_ATMOS, 'cin_cm3') aerosol = get_tracer_index (MODEL_ATMOS, 'aerosol') @@ -182,7 +184,7 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & !$OMP graupel,q_con,sphum,cappa,r_vir,k1k,delp, & !$OMP delz,akap,pkz,te,u,v,ps, gridstruct, last_step, & !$OMP ak,bk,nq,isd,ied,jsd,jed,kord_tr,fill, adiabatic, & -!$OMP hs,w,ws,kord_wz,omga,rrg,kord_mt,pe4,w_limiter,cp,remap_te,do_am4_remap) & +!$OMP hs,w,ws,kord_wz,omga,rrg,kord_mt,pe4,w_limiter,cp,remap_te) & !$OMP private(gz,cvm,kp,k_next,bkh,dp2,dlnp,tpe, & !$OMP pe0,pe1,pe2,pe3,pk1,pk2,pn2,phis,q2,w2) @@ -366,7 +368,7 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & else call map_scalar(km, peln(is,1,j), te, gz, & km, pn2, te, & - is, ie, j, isd, ied, jsd, jed, 1, abs(kord_tm), cp_air*t_min, do_am4_remap) + is, ie, j, isd, ied, jsd, jed, 1, abs(kord_tm), cp_air*t_min) endif else @@ -375,13 +377,13 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & call map_scalar(km, peln(is,1,j), pt, gz, & km, pn2, pt, & is, ie, j, isd, ied, jsd, jed, & - 1, abs(kord_tm), t_min, do_am4_remap) + 1, abs(kord_tm), t_min) else ! Map pt using pe call map1_ppm (km, pe1, pt, gz, & km, pe2, pt, & is, ie, j, isd, ied, jsd, jed, & - 1, abs(kord_tm), do_am4_remap) + 1, abs(kord_tm)) endif endif @@ -389,14 +391,14 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & if( nq > 5 ) then call mapn_tracer(nq, km, pe1, pe2, q, dp2, kord_tr, j, & - is, ie, isd, ied, jsd, jed, 0., fill, do_am4_remap) + is, ie, isd, ied, jsd, jed, 0., fill) elseif ( nq > 0 ) then ! 
Remap one tracer at a time do iq=1,nq call map1_q2(km, pe1, q(isd,jsd,1,iq), & km, pe2, q2, dp2, & is, ie, 0, kord_tr(iq), j, & - isd, ied, jsd, jed, 0., do_am4_remap) + isd, ied, jsd, jed, 0.) if (fill) call fillz(ie-is+1, km, 1, q2, dp2) do k=1,km do i=is,ie @@ -413,18 +415,18 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & call map1_ppm (km, pe1, w, ws(is,j), & km, pe2, w, & is, ie, j, isd, ied, jsd, jed, & - -3, abs(kord_wz), do_am4_remap) + -3, abs(kord_wz)) else call map1_ppm (km, pe1, w, ws(is,j), & km, pe2, w, & is, ie, j, isd, ied, jsd, jed, & - -2, abs(kord_wz), do_am4_remap) + -2, abs(kord_wz)) endif ! Remap delz for hybrid sigma-p coordinate call map1_ppm (km, pe1, delz, gz, & ! works km, pe2, delz, & is, ie, j, is, ie, js, je, & - 1, abs(kord_tm), do_am4_remap) + 1, abs(kord_tm)) do k=1,km do i=is,ie delz(i,j,k) = -delz(i,j,k)*dp2(i,k) @@ -614,7 +616,7 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & call map1_ppm( km, pe0(is:ie,:), u, gz, & km, pe3(is:ie,:), u, & is, ie, j, isd, ied, jsd, jed+1, & - -1, kord_mt, do_am4_remap) + -1, kord_mt) if (j < je+1) then @@ -634,7 +636,7 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & call map1_ppm (km, pe0, v, gz, & km, pe3, v, is, ie+1, & - j, isd, ied+1, jsd, jed, -1, kord_mt, do_am4_remap) + j, isd, ied+1, jsd, jed, -1, kord_mt) ! 4a) update Tv and pkz from total energy (if remapping total energy) if ( remap_te ) then @@ -836,13 +838,17 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & !----------------------------------------------------------------------- ! Intermediate Physics >>> +! Note: if intermediate physics is disabled, cloud fraction will be zero at the first time step. !----------------------------------------------------------------------- - - call intermediate_phys (is, ie, js, je, isd, ied, jsd, jed, km, npx, npy, nq, nwat, & - c2l_ord, mdt, consv, akap, ptop, pfull, hs, te0_2d, u, & - v, w, pt, delp, delz, q_con, cappa, q, pkz, r_vir, te_err, tw_err, & - inline_mp, gridstruct, domain, bd, hydrostatic, do_adiabatic_init, & - do_inline_mp, do_sat_adj, last_step, do_fast_phys, consv_checker, adj_mass_vmr) + if (do_intermediate_phys) then + call timing_on('INTERMEDIATE_PHYS') + call intermediate_phys (is, ie, js, je, isd, ied, jsd, jed, km, npx, npy, nq, nwat, & + c2l_ord, mdt, consv, akap, ptop, pfull, hs, te0_2d, u, & + v, w, pt, delp, delz, q_con, cappa, q, pkz, r_vir, te_err, tw_err, & + inline_mp, gridstruct, domain, bd, hydrostatic, do_adiabatic_init, & + do_inline_mp, do_sat_adj, last_step, do_fast_phys, consv_checker, adj_mass_vmr) + call timing_off('INTERMEDIATE_PHYS') + endif !----------------------------------------------------------------------- ! <<< Intermediate Physics @@ -1088,7 +1094,7 @@ end subroutine pkez subroutine map_scalar( km, pe1, q1, qs, & kn, pe2, q2, i1, i2, & j, ibeg, iend, jbeg, jend, & - iv, kord, q_min, do_am4_remap) + iv, kord, q_min) ! iv=1 integer, intent(in) :: i1 ! Starting longitude integer, intent(in) :: i2 ! Finishing longitude @@ -1108,7 +1114,6 @@ subroutine map_scalar( km, pe1, q1, qs, & ! (from model top to bottom surface) ! in the new vertical coordinate real, intent(in) :: q1(ibeg:iend,jbeg:jend,km) ! Field input - logical, intent(in) :: do_am4_remap ! !INPUT/OUTPUT PARAMETERS: real, intent(inout):: q2(ibeg:iend,jbeg:jend,kn) ! Field output real, intent(in):: q_min @@ -1134,11 +1139,7 @@ subroutine map_scalar( km, pe1, q1, qs, & !
Compute vertical subgrid distribution if ( kord >7 ) then - if (do_am4_remap) then - call scalar_profile_am4( qs, q4, dp1, km, i1, i2, iv, kord, q_min ) - else - call scalar_profile( qs, q4, dp1, km, i1, i2, iv, kord, q_min ) - endif + call scalar_profile( qs, q4, dp1, km, i1, i2, iv, kord, q_min ) else call ppm_profile( q4, dp1, km, i1, i2, iv, kord ) endif @@ -1190,7 +1191,7 @@ end subroutine map_scalar subroutine map1_ppm( km, pe1, q1, qs, & kn, pe2, q2, i1, i2, & j, ibeg, iend, jbeg, jend, & - iv, kord, do_am4_remap) + iv, kord) integer, intent(in) :: i1 ! Starting longitude integer, intent(in) :: i2 ! Finishing longitude integer, intent(in) :: iv ! Mode: 0 == constituents 1 == ??? @@ -1209,7 +1210,6 @@ subroutine map1_ppm( km, pe1, q1, qs, & ! (from model top to bottom surface) ! in the new vertical coordinate real, intent(in) :: q1(ibeg:iend,jbeg:jend,km) ! Field input - logical, intent(in) :: do_am4_remap ! !INPUT/OUTPUT PARAMETERS: real, intent(inout):: q2(ibeg:iend,jbeg:jend,kn) ! Field output @@ -1234,11 +1234,7 @@ subroutine map1_ppm( km, pe1, q1, qs, & ! Compute vertical subgrid distribution if ( kord >7 ) then - if (do_am4_remap) then - call cs_profile_am4( qs, q4, dp1, km, i1, i2, iv, kord ) - else - call cs_profile( qs, q4, dp1, km, i1, i2, iv, kord ) - endif + call cs_profile( qs, q4, dp1, km, i1, i2, iv, kord ) else call ppm_profile( q4, dp1, km, i1, i2, iv, kord ) endif @@ -1291,7 +1287,7 @@ end subroutine map1_ppm !ONLY supports cubic-spline remapping subroutine mapn_tracer(nq, km, pe1, pe2, q1, dp2, kord, j, & i1, i2, isd, ied, jsd, jed, & - q_min, fill, do_am4_remap) + q_min, fill) ! !INPUT PARAMETERS: integer, intent(in):: km ! vertical dimension integer, intent(in):: j, nq, i1, i2 @@ -1307,7 +1303,6 @@ subroutine mapn_tracer(nq, km, pe1, pe2, q1, dp2, kord, j, & real, intent(in):: q_min logical, intent(in):: fill real, intent(inout):: q1(isd:ied,jsd:jed,km,nq) ! Field input - logical, intent(in) :: do_am4_remap ! !LOCAL VARIABLES: real:: q4(4,i1:i2,km,nq) real:: q2(i1:i2,km,nq) ! Field output @@ -1329,11 +1324,7 @@ subroutine mapn_tracer(nq, km, pe1, pe2, q1, dp2, kord, j, & q4(1,i,k,iq) = q1(i,j,k,iq) enddo enddo - if (do_am4_remap) then - call scalar_profile_am4( qs, q4(1,i1,1,iq), dp1, km, i1, i2, 0, kord(iq), q_min ) - else - call scalar_profile( qs, q4(1,i1,1,iq), dp1, km, i1, i2, 0, kord(iq), q_min ) - endif + call scalar_profile( qs, q4(1,i1,1,iq), dp1, km, i1, i2, 0, kord(iq), q_min ) enddo ! Mapping @@ -1416,7 +1407,7 @@ subroutine map1_q2(km, pe1, q1, & kn, pe2, q2, dp2, & i1, i2, iv, kord, j, & ibeg, iend, jbeg, jend, & - q_min, do_am4_remap ) + q_min ) ! !INPUT PARAMETERS: @@ -1437,7 +1428,6 @@ subroutine map1_q2(km, pe1, q1, & real, intent(in) :: q1(ibeg:iend,jbeg:jend,km) ! Field input real, intent(in) :: dp2(i1:i2,kn) real, intent(in) :: q_min - logical, intent(in) :: do_am4_remap ! !INPUT/OUTPUT PARAMETERS: real, intent(inout):: q2(i1:i2,kn) ! Field output ! !LOCAL VARIABLES: @@ -1457,11 +1447,7 @@ subroutine map1_q2(km, pe1, q1, & ! Compute vertical subgrid distribution if ( kord >7 ) then - if (do_am4_remap) then - call scalar_profile_am4( qs, q4, dp1, km, i1, i2, iv, kord, q_min ) - else - call scalar_profile( qs, q4, dp1, km, i1, i2, iv, kord, q_min ) - endif + call scalar_profile( qs, q4, dp1, km, i1, i2, iv, kord, q_min ) else call ppm_profile( q4, dp1, km, i1, i2, iv, kord ) endif @@ -1512,7 +1498,7 @@ end subroutine map1_q2 !Currently this routine is only called with kord = 4, - ! so do_am4_remap is unnecessary --- lmh 9 june 21 + ! 
--- lmh 9 june 21 subroutine remap_2d(km, pe1, q1, & kn, pe2, q2, & i1, i2, & @@ -1608,9 +1594,9 @@ subroutine remap_2d(km, pe1, q1, & end subroutine remap_2d - !Scalar profile and cs_profile differ ONLY in that scalar_profile + !scalar_profile and cs_profile differ ONLY in that scalar_profile ! accepts a qmin argument. (Unfortunately I was not able to make - ! qmin an optional argument in scalar_profile.) + ! qmin an optional argument in scalar_profile.) --- lmh summer 2020 subroutine scalar_profile(qs, a4, delp, km, i1, i2, iv, kord, qmin) ! Optimized vertical profile reconstruction: ! Latest: Apr 2008 S.-J. Lin, NOAA/GFDL @@ -1692,7 +1678,7 @@ subroutine scalar_profile(qs, a4, delp, km, i1, i2, iv, kord, qmin) endif !Perfectly linear scheme - if ( abs(kord) > 16 ) then + if ( abs(kord) == 17 ) then do k=1,km do i=i1,i2 a4(2,i,k) = q(i,k ) @@ -1722,7 +1708,7 @@ subroutine scalar_profile(qs, a4, delp, km, i1, i2, iv, kord, qmin) ! Interior: do k=3,km-1 do i=i1,i2 - if ( gam(i,k-1)*gam(i,k+1)>0. ) then + if ( abs(kord) >= 14 .or. gam(i,k-1)*gam(i,k+1)>0. ) then ! Apply large-scale constraint to ALL fields if not local max/min ! first guess interface values cannot exceeed values ! of adjacent cells @@ -1783,21 +1769,23 @@ subroutine scalar_profile(qs, a4, delp, km, i1, i2, iv, kord, qmin) ! f(s) = AL + s*[(AR-AL) + A6*(1-s)] ( 0 <= s <= 1 ) ! Top 2 and bottom 2 layers always use monotonic mapping - if ( iv==0 ) then + select case (iv) + + case (0) do i=i1,i2 a4(2,i,1) = max(0., a4(2,i,1)) enddo - elseif ( iv==-1 ) then + case (-1) do i=i1,i2 if ( a4(2,i,1)*a4(1,i,1) <= 0. ) a4(2,i,1) = 0. enddo - elseif ( iv==2 ) then + case (2) do i=i1,i2 a4(2,i,1) = a4(1,i,1) a4(3,i,1) = a4(1,i,1) a4(4,i,1) = 0. enddo - endif + end select !iv if ( iv/=2 ) then do i=i1,i2 @@ -1815,8 +1803,10 @@ subroutine scalar_profile(qs, a4, delp, km, i1, i2, iv, kord, qmin) !------------------------------------- ! Huynh's 2nd constraint for interior: !------------------------------------- - do k=3,km-2 - if ( abs(kord)<9 ) then + do k=3,km-2 + select case (abs(kord)) + + case (0:8) do i=i1,i2 ! Left edges pmp_1 = a4(1,i,k) - 2.*gam(i,k+1) @@ -1832,7 +1822,7 @@ subroutine scalar_profile(qs, a4, delp, km, i1, i2, iv, kord, qmin) a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) enddo - elseif ( abs(kord)==9 ) then + case (9) do i=i1,i2 if ( extm(i,k) .and. extm(i,k-1) ) then ! grid-scale 2-delta-z wave detected @@ -1865,44 +1855,18 @@ subroutine scalar_profile(qs, a4, delp, km, i1, i2, iv, kord, qmin) endif endif enddo - elseif ( abs(kord)==10 ) then + case(10) !restored AM4 case 10 do i=i1,i2 - if( ext5(i,k) ) then - if( ext5(i,k-1) .or. ext5(i,k+1) ) then + if( extm(i,k) ) then + if( a4(1,i,k) ehance vertical mixing a4(2,i,k) = a4(1,i,k) a4(3,i,k) = a4(1,i,k) - elseif ( ext6(i,k-1) .or. ext6(i,k+1) ) then - pmp_1 = a4(1,i,k) - 2.*gam(i,k+1) - lac_1 = pmp_1 + 1.5*gam(i,k+2) - a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & - max(a4(1,i,k), pmp_1, lac_1) ) - pmp_2 = a4(1,i,k) + 2.*gam(i,k) - lac_2 = pmp_2 - 1.5*gam(i,k-1) - a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & - max(a4(1,i,k), pmp_2, lac_2) ) - endif - elseif( ext6(i,k) ) then - if( ext5(i,k-1) .or. 
ext5(i,k+1) ) then - pmp_1 = a4(1,i,k) - 2.*gam(i,k+1) - lac_1 = pmp_1 + 1.5*gam(i,k+2) - a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & - max(a4(1,i,k), pmp_1, lac_1) ) - pmp_2 = a4(1,i,k) + 2.*gam(i,k) - lac_2 = pmp_2 - 1.5*gam(i,k-1) - a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & - max(a4(1,i,k), pmp_2, lac_2) ) + a4(4,i,k) = 0. + else +! True local extremum + a4(4,i,k) = 6.*a4(1,i,k) - 3.*(a4(2,i,k)+a4(3,i,k)) endif - endif - enddo - do i=i1,i2 - a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) - enddo - elseif ( abs(kord)==12 ) then - do i=i1,i2 - if( extm(i,k) ) then - a4(2,i,k) = a4(1,i,k) - a4(3,i,k) = a4(1,i,k) - a4(4,i,k) = 0. else ! not a local extremum a4(4,i,k) = 6.*a4(1,i,k) - 3.*(a4(2,i,k)+a4(3,i,k)) ! Check within the smooth region if subgrid profile is non-monotonic @@ -1919,103 +1883,81 @@ subroutine scalar_profile(qs, a4, delp, km, i1, i2, iv, kord, qmin) endif endif enddo - elseif ( abs(kord)==13 ) then - do i=i1,i2 - if( ext6(i,k) ) then - if ( ext6(i,k-1) .and. ext6(i,k+1) ) then -! grid-scale 2-delta-z wave detected - a4(2,i,k) = a4(1,i,k) - a4(3,i,k) = a4(1,i,k) - endif - endif - enddo - do i=i1,i2 - a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) - enddo - elseif ( abs(kord)==14 ) then - + case(11) do i=i1,i2 - a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) + if ( ext5(i,k) .and. (ext5(i,k-1).or.ext5(i,k+1).or.a4(1,i,k) 16 ) then + if ( abs(kord) == 17 ) then do k=1,km do i=i1,i2 a4(2,i,k) = q(i,k ) @@ -2160,8 +2102,9 @@ subroutine cs_profile(qs, a4, delp, km, i1, i2, iv, kord) ! Interior: do k=3,km-1 do i=i1,i2 - if ( gam(i,k-1)*gam(i,k+1)>0. ) then + if ( abs(kord) >= 14 .or. gam(i,k-1)*gam(i,k+1)>0. ) then ! Apply large-scale constraint to ALL fields if not local max/min +! OR for the strictly monotone schemes q(i,k) = min( q(i,k), max(a4(1,i,k-1),a4(1,i,k)) ) q(i,k) = max( q(i,k), min(a4(1,i,k-1),a4(1,i,k)) ) else @@ -2217,21 +2160,22 @@ subroutine cs_profile(qs, a4, delp, km, i1, i2, iv, kord) ! f(s) = AL + s*[(AR-AL) + A6*(1-s)] ( 0 <= s <= 1 ) ! Top 2 and bottom 2 layers always use monotonic mapping - if ( iv==0 ) then + select case (iv) + case (0) do i=i1,i2 a4(2,i,1) = max(0., a4(2,i,1)) enddo - elseif ( iv==-1 ) then + case(-1) do i=i1,i2 if ( a4(2,i,1)*a4(1,i,1) <= 0. ) a4(2,i,1) = 0. enddo - elseif ( iv==2 ) then + case(2) do i=i1,i2 a4(2,i,1) = a4(1,i,1) a4(3,i,1) = a4(1,i,1) a4(4,i,1) = 0. enddo - endif + end select !iv if ( iv/=2 ) then do i=i1,i2 @@ -2250,7 +2194,8 @@ subroutine cs_profile(qs, a4, delp, km, i1, i2, iv, kord) ! Huynh's 2nd constraint for interior: !------------------------------------- do k=3,km-2 - if ( abs(kord)<9 ) then + select case (abs(kord)) + case (0:8) do i=i1,i2 ! Left edges pmp_1 = a4(1,i,k) - 2.*gam(i,k+1) @@ -2266,7 +2211,7 @@ subroutine cs_profile(qs, a4, delp, km, i1, i2, iv, kord) a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) enddo - elseif ( abs(kord)==9 ) then + case (9) do i=i1,i2 if ( extm(i,k) .and. extm(i,k-1) ) then ! c90_mp122 ! grid-scale 2-delta-z wave detected @@ -2294,45 +2239,18 @@ subroutine cs_profile(qs, a4, delp, km, i1, i2, iv, kord) endif endif enddo - elseif ( abs(kord)==10 ) then + case(10) !restored AM4 case 10 do i=i1,i2 - if( ext5(i,k) ) then - if( ext5(i,k-1) .or. ext5(i,k+1) ) then + if( extm(i,k) ) then + if( extm(i,k-1) .or. extm(i,k+1) ) then +! grid-scale 2-delta-z wave detected a4(2,i,k) = a4(1,i,k) a4(3,i,k) = a4(1,i,k) - elseif ( ext6(i,k-1) .or. 
ext6(i,k+1) ) then - pmp_1 = a4(1,i,k) - 2.*gam(i,k+1) - lac_1 = pmp_1 + 1.5*gam(i,k+2) - a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & - max(a4(1,i,k), pmp_1, lac_1) ) - pmp_2 = a4(1,i,k) + 2.*gam(i,k) - lac_2 = pmp_2 - 1.5*gam(i,k-1) - a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & - max(a4(1,i,k), pmp_2, lac_2) ) - endif - elseif( ext6(i,k) ) then - if( ext5(i,k-1) .or. ext5(i,k+1) ) then - pmp_1 = a4(1,i,k) - 2.*gam(i,k+1) - lac_1 = pmp_1 + 1.5*gam(i,k+2) - a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & - max(a4(1,i,k), pmp_1, lac_1) ) - pmp_2 = a4(1,i,k) + 2.*gam(i,k) - lac_2 = pmp_2 - 1.5*gam(i,k-1) - a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & - max(a4(1,i,k), pmp_2, lac_2) ) + a4(4,i,k) = 0. + else +! True local extremum + a4(4,i,k) = 6.*a4(1,i,k) - 3.*(a4(2,i,k)+a4(3,i,k)) endif - endif - enddo - do i=i1,i2 - a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) - enddo - elseif ( abs(kord)==12 ) then - do i=i1,i2 - if( extm(i,k) ) then -! grid-scale 2-delta-z wave detected - a4(2,i,k) = a4(1,i,k) - a4(3,i,k) = a4(1,i,k) - a4(4,i,k) = 0. else ! not a local extremum a4(4,i,k) = 6.*a4(1,i,k) - 3.*(a4(2,i,k)+a4(3,i,k)) ! Check within the smooth region if subgrid profile is non-monotonic @@ -2349,101 +2267,81 @@ subroutine cs_profile(qs, a4, delp, km, i1, i2, iv, kord) endif endif enddo - elseif ( abs(kord)==13 ) then - do i=i1,i2 - if( ext6(i,k) ) then - if ( ext6(i,k-1) .and. ext6(i,k+1) ) then -! grid-scale 2-delta-z wave detected - a4(2,i,k) = a4(1,i,k) - a4(3,i,k) = a4(1,i,k) - endif - endif - enddo + case (11) do i=i1,i2 - a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) - enddo - elseif ( abs(kord)==14 ) then - - do i=i1,i2 - a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) + if ( ext5(i,k) .and. (ext5(i,k-1) .or. ext5(i,k+1)) ) then +! Noisy region: + a4(2,i,k) = a4(1,i,k) + a4(3,i,k) = a4(1,i,k) + a4(4,i,k) = 0. + else + a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) + endif enddo - - elseif ( abs(kord)==15 ) then ! revised kord=9 scehem + case (12) !post-AM4 case 10 do i=i1,i2 - if ( ext5(i,k) ) then ! c90_mp122 - if ( ext5(i,k-1) .or. ext5(i,k+1) ) then ! c90_mp122 -! grid-scale 2-delta-z wave detected - a4(2,i,k) = a4(1,i,k) - a4(3,i,k) = a4(1,i,k) - endif + if( ext5(i,k) ) then + if( ext5(i,k-1) .or. ext5(i,k+1) ) then + a4(2,i,k) = a4(1,i,k) + a4(3,i,k) = a4(1,i,k) + elseif ( ext6(i,k-1) .or. ext6(i,k+1) ) then + pmp_1 = a4(1,i,k) - 2.*gam(i,k+1) + lac_1 = pmp_1 + 1.5*gam(i,k+2) + a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & + max(a4(1,i,k), pmp_1, lac_1) ) + pmp_2 = a4(1,i,k) + 2.*gam(i,k) + lac_2 = pmp_2 - 1.5*gam(i,k-1) + a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & + max(a4(1,i,k), pmp_2, lac_2) ) + endif elseif( ext6(i,k) ) then -! Check within the smooth region if subgrid profile is non-monotonic + if( ext5(i,k-1) .or. 
ext5(i,k+1) ) then pmp_1 = a4(1,i,k) - 2.*gam(i,k+1) lac_1 = pmp_1 + 1.5*gam(i,k+2) - a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & - max(a4(1,i,k), pmp_1, lac_1) ) + a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & + max(a4(1,i,k), pmp_1, lac_1) ) pmp_2 = a4(1,i,k) + 2.*gam(i,k) lac_2 = pmp_2 - 1.5*gam(i,k-1) - a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & - max(a4(1,i,k), pmp_2, lac_2) ) + a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & + max(a4(1,i,k), pmp_2, lac_2) ) + endif endif enddo do i=i1,i2 a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) enddo - elseif ( abs(kord)==16 ) then - do i=i1,i2 - if( ext5(i,k) ) then - if ( ext5(i,k-1) .or. ext5(i,k+1) ) then - a4(2,i,k) = a4(1,i,k) - a4(3,i,k) = a4(1,i,k) - elseif ( ext6(i,k-1) .or. ext6(i,k+1) ) then - ! Left edges - pmp_1 = a4(1,i,k) - 2.*gam(i,k+1) - lac_1 = pmp_1 + 1.5*gam(i,k+2) - a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & - max(a4(1,i,k), pmp_1, lac_1) ) - ! Right edges - pmp_2 = a4(1,i,k) + 2.*gam(i,k) - lac_2 = pmp_2 - 1.5*gam(i,k-1) - a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & - max(a4(1,i,k), pmp_2, lac_2) ) - endif - endif - enddo + case (13) !former 14: no subgrid limiter + do i=i1,i2 a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) enddo - else ! kord = 11 - do i=i1,i2 - if ( ext5(i,k) .and. (ext5(i,k-1) .or. ext5(i,k+1)) ) then -! Noisy region: - a4(2,i,k) = a4(1,i,k) - a4(3,i,k) = a4(1,i,k) - a4(4,i,k) = 0. - else - a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) - endif - enddo - endif + + case (14) !strict monotonicity constraint + call cs_limiters(im, extm(i1,k), a4(1,i1,k), 2) + case (15) + call cs_limiters(im, extm(i1,k), a4(1,i1,k), 1) + case default + call mpp_error(FATAL, 'kord not implemented') + end select ! Additional constraint to ensure positivity - if ( iv==0 ) call cs_limiters(im, extm(i1,k), a4(1,i1,k), 0) + if ( iv==0 .and. abs(kord) <= 13 ) call cs_limiters(im, extm(i1,k), a4(1,i1,k), 0) enddo ! k-loop !---------------------------------- ! Bottom layer subgrid constraints: !---------------------------------- - if ( iv==0 ) then + select case (iv) + case (0) do i=i1,i2 a4(3,i,km) = max(0., a4(3,i,km)) enddo - elseif ( iv .eq. -1 ) then + case (-1) do i=i1,i2 if ( a4(3,i,km)*a4(1,i,km) <= 0. ) a4(3,i,km) = 0. enddo - endif + end select do k=km-1,km do i=i1,i2 @@ -2534,748 +2432,10 @@ subroutine cs_limiters(im, extm, a4, iv) end subroutine cs_limiters - subroutine scalar_profile_am4(qs, a4, delp, km, i1, i2, iv, kord, qmin) -! Optimized vertical profile reconstruction: -! Latest: Apr 2008 S.-J. Lin, NOAA/GFDL - integer, intent(in):: i1, i2 - integer, intent(in):: km ! vertical dimension - integer, intent(in):: iv ! iv =-1: winds - ! iv = 0: positive definite scalars - ! iv = 1: others - integer, intent(in):: kord - real, intent(in) :: qs(i1:i2) - real, intent(in) :: delp(i1:i2,km) ! layer pressure thickness - real, intent(inout):: a4(4,i1:i2,km) ! Interpolated values - real, intent(in):: qmin -!----------------------------------------------------------------------- - logical, dimension(i1:i2,km):: extm, ext6 - real gam(i1:i2,km) - real q(i1:i2,km+1) - real d4(i1:i2) - real bet, a_bot, grat - real pmp_1, lac_1, pmp_2, lac_2 - integer i, k, im - - if ( iv .eq. -2 ) then - do i=i1,i2 - gam(i,2) = 0.5 - q(i,1) = 1.5*a4(1,i,1) - enddo - do k=2,km-1 - do i=i1, i2 - grat = delp(i,k-1) / delp(i,k) - bet = 2. 
+ grat + grat - gam(i,k) - q(i,k) = (3.*(a4(1,i,k-1)+a4(1,i,k)) - q(i,k-1))/bet - gam(i,k+1) = grat / bet - enddo - enddo - do i=i1,i2 - grat = delp(i,km-1) / delp(i,km) - q(i,km) = (3.*(a4(1,i,km-1)+a4(1,i,km)) - grat*qs(i) - q(i,km-1)) / & - (2. + grat + grat - gam(i,km)) - q(i,km+1) = qs(i) - enddo - do k=km-1,1,-1 - do i=i1,i2 - q(i,k) = q(i,k) - gam(i,k+1)*q(i,k+1) - enddo - enddo - else - do i=i1,i2 - grat = delp(i,2) / delp(i,1) ! grid ratio - bet = grat*(grat+0.5) - q(i,1) = ( (grat+grat)*(grat+1.)*a4(1,i,1) + a4(1,i,2) ) / bet - gam(i,1) = ( 1. + grat*(grat+1.5) ) / bet - enddo - - do k=2,km - do i=i1,i2 - d4(i) = delp(i,k-1) / delp(i,k) - bet = 2. + d4(i) + d4(i) - gam(i,k-1) - q(i,k) = ( 3.*(a4(1,i,k-1)+d4(i)*a4(1,i,k)) - q(i,k-1) )/bet - gam(i,k) = d4(i) / bet - enddo - enddo - - do i=i1,i2 - a_bot = 1. + d4(i)*(d4(i)+1.5) - q(i,km+1) = (2.*d4(i)*(d4(i)+1.)*a4(1,i,km)+a4(1,i,km-1)-a_bot*q(i,km)) & - / ( d4(i)*(d4(i)+0.5) - a_bot*gam(i,km) ) - enddo - - do k=km,1,-1 - do i=i1,i2 - q(i,k) = q(i,k) - gam(i,k)*q(i,k+1) - enddo - enddo - endif - -!----- Perfectly linear scheme -------------------------------- - if ( abs(kord) > 16 ) then - do k=1,km - do i=i1,i2 - a4(2,i,k) = q(i,k ) - a4(3,i,k) = q(i,k+1) - a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) - enddo - enddo - return - endif -!----- Perfectly linear scheme -------------------------------- -!------------------ -! Apply constraints -!------------------ - im = i2 - i1 + 1 - -! Apply *large-scale* constraints - do i=i1,i2 - q(i,2) = min( q(i,2), max(a4(1,i,1), a4(1,i,2)) ) - q(i,2) = max( q(i,2), min(a4(1,i,1), a4(1,i,2)) ) - enddo - - do k=2,km - do i=i1,i2 - gam(i,k) = a4(1,i,k) - a4(1,i,k-1) - enddo - enddo - -! Interior: - do k=3,km-1 - do i=i1,i2 - if ( gam(i,k-1)*gam(i,k+1)>0. ) then -! Apply large-scale constraint to ALL fields if not local max/min - q(i,k) = min( q(i,k), max(a4(1,i,k-1),a4(1,i,k)) ) - q(i,k) = max( q(i,k), min(a4(1,i,k-1),a4(1,i,k)) ) - else - if ( gam(i,k-1) > 0. ) then -! There exists a local max - q(i,k) = max(q(i,k), min(a4(1,i,k-1),a4(1,i,k))) - else -! There exists a local min - q(i,k) = min(q(i,k), max(a4(1,i,k-1),a4(1,i,k))) - if ( iv==0 ) q(i,k) = max(0., q(i,k)) - endif - endif - enddo - enddo - -! Bottom: - do i=i1,i2 - q(i,km) = min( q(i,km), max(a4(1,i,km-1), a4(1,i,km)) ) - q(i,km) = max( q(i,km), min(a4(1,i,km-1), a4(1,i,km)) ) - enddo - - do k=1,km - do i=i1,i2 - a4(2,i,k) = q(i,k ) - a4(3,i,k) = q(i,k+1) - enddo - enddo - - do k=1,km - if ( k==1 .or. k==km ) then - do i=i1,i2 - extm(i,k) = (a4(2,i,k)-a4(1,i,k)) * (a4(3,i,k)-a4(1,i,k)) > 0. - enddo - else - do i=i1,i2 - extm(i,k) = gam(i,k)*gam(i,k+1) < 0. - enddo - endif - if ( abs(kord)==16 ) then - do i=i1,i2 - a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) - ext6(i,k) = abs(a4(4,i,k)) > abs(a4(2,i,k)-a4(3,i,k)) - enddo - endif - enddo - -!--------------------------- -! Apply subgrid constraints: -!--------------------------- -! f(s) = AL + s*[(AR-AL) + A6*(1-s)] ( 0 <= s <= 1 ) -! Top 2 and bottom 2 layers always use monotonic mapping - - if ( iv==0 ) then - do i=i1,i2 - a4(2,i,1) = max(0., a4(2,i,1)) - enddo - elseif ( iv==-1 ) then - do i=i1,i2 - if ( a4(2,i,1)*a4(1,i,1) <= 0. ) a4(2,i,1) = 0. - enddo - elseif ( iv==2 ) then - do i=i1,i2 - a4(2,i,1) = a4(1,i,1) - a4(3,i,1) = a4(1,i,1) - a4(4,i,1) = 0. - enddo - endif - - if ( iv/=2 ) then - do i=i1,i2 - a4(4,i,1) = 3.*(2.*a4(1,i,1) - (a4(2,i,1)+a4(3,i,1))) - enddo - call cs_limiters(im, extm(i1,1), a4(1,i1,1), 1) - endif - -! 
k=2 - do i=i1,i2 - a4(4,i,2) = 3.*(2.*a4(1,i,2) - (a4(2,i,2)+a4(3,i,2))) - enddo - call cs_limiters(im, extm(i1,2), a4(1,i1,2), 2) - -!------------------------------------- -! Huynh's 2nd constraint for interior: -!------------------------------------- - do k=3,km-2 - if ( abs(kord)<9 ) then - do i=i1,i2 -! Left edges - pmp_1 = a4(1,i,k) - 2.*gam(i,k+1) - lac_1 = pmp_1 + 1.5*gam(i,k+2) - a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & - max(a4(1,i,k), pmp_1, lac_1) ) -! Right edges - pmp_2 = a4(1,i,k) + 2.*gam(i,k) - lac_2 = pmp_2 - 1.5*gam(i,k-1) - a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & - max(a4(1,i,k), pmp_2, lac_2) ) - - a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) - enddo - - elseif ( abs(kord)==9 ) then - do i=i1,i2 - if ( extm(i,k) .and. extm(i,k-1) ) then -! grid-scale 2-delta-z wave detected - a4(2,i,k) = a4(1,i,k) - a4(3,i,k) = a4(1,i,k) - a4(4,i,k) = 0. - else if ( extm(i,k) .and. extm(i,k+1) ) then -! grid-scale 2-delta-z wave detected - a4(2,i,k) = a4(1,i,k) - a4(3,i,k) = a4(1,i,k) - a4(4,i,k) = 0. - else if ( extm(i,k) .and. a4(1,i,k) abs(a4(2,i,k)-a4(3,i,k)) ) then - pmp_1 = a4(1,i,k) - 2.*gam(i,k+1) - lac_1 = pmp_1 + 1.5*gam(i,k+2) - a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & - max(a4(1,i,k), pmp_1, lac_1) ) - pmp_2 = a4(1,i,k) + 2.*gam(i,k) - lac_2 = pmp_2 - 1.5*gam(i,k-1) - a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & - max(a4(1,i,k), pmp_2, lac_2) ) - a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) - endif - endif - enddo - elseif ( abs(kord)==10 ) then - do i=i1,i2 - if( extm(i,k) ) then - if( a4(1,i,k) ehance vertical mixing - a4(2,i,k) = a4(1,i,k) - a4(3,i,k) = a4(1,i,k) - a4(4,i,k) = 0. - else -! True local extremum - a4(4,i,k) = 6.*a4(1,i,k) - 3.*(a4(2,i,k)+a4(3,i,k)) - endif - else ! not a local extremum - a4(4,i,k) = 6.*a4(1,i,k) - 3.*(a4(2,i,k)+a4(3,i,k)) -! Check within the smooth region if subgrid profile is non-monotonic - if( abs(a4(4,i,k)) > abs(a4(2,i,k)-a4(3,i,k)) ) then - pmp_1 = a4(1,i,k) - 2.*gam(i,k+1) - lac_1 = pmp_1 + 1.5*gam(i,k+2) - a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & - max(a4(1,i,k), pmp_1, lac_1) ) - pmp_2 = a4(1,i,k) + 2.*gam(i,k) - lac_2 = pmp_2 - 1.5*gam(i,k-1) - a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & - max(a4(1,i,k), pmp_2, lac_2) ) - a4(4,i,k) = 6.*a4(1,i,k) - 3.*(a4(2,i,k)+a4(3,i,k)) - endif - endif - enddo - elseif ( abs(kord)==12 ) then - do i=i1,i2 - if( extm(i,k) ) then - a4(2,i,k) = a4(1,i,k) - a4(3,i,k) = a4(1,i,k) - a4(4,i,k) = 0. - else ! not a local extremum - a4(4,i,k) = 6.*a4(1,i,k) - 3.*(a4(2,i,k)+a4(3,i,k)) -! Check within the smooth region if subgrid profile is non-monotonic - if( abs(a4(4,i,k)) > abs(a4(2,i,k)-a4(3,i,k)) ) then - pmp_1 = a4(1,i,k) - 2.*gam(i,k+1) - lac_1 = pmp_1 + 1.5*gam(i,k+2) - a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & - max(a4(1,i,k), pmp_1, lac_1) ) - pmp_2 = a4(1,i,k) + 2.*gam(i,k) - lac_2 = pmp_2 - 1.5*gam(i,k-1) - a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & - max(a4(1,i,k), pmp_2, lac_2) ) - a4(4,i,k) = 6.*a4(1,i,k) - 3.*(a4(2,i,k)+a4(3,i,k)) - endif - endif - enddo - elseif ( abs(kord)==13 ) then - do i=i1,i2 - if( extm(i,k) ) then - if ( extm(i,k-1) .and. extm(i,k+1) ) then -! grid-scale 2-delta-z wave detected - a4(2,i,k) = a4(1,i,k) - a4(3,i,k) = a4(1,i,k) - a4(4,i,k) = 0. - else - ! 
Left edges - pmp_1 = a4(1,i,k) - 2.*gam(i,k+1) - lac_1 = pmp_1 + 1.5*gam(i,k+2) - a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & - max(a4(1,i,k), pmp_1, lac_1) ) - ! Right edges - pmp_2 = a4(1,i,k) + 2.*gam(i,k) - lac_2 = pmp_2 - 1.5*gam(i,k-1) - a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & - max(a4(1,i,k), pmp_2, lac_2) ) - a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) - endif - else - a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) - endif - enddo - elseif ( abs(kord)==14 ) then - - do i=i1,i2 - a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) - enddo - elseif ( abs(kord)==16 ) then - do i=i1,i2 - if( ext6(i,k) ) then - if ( extm(i,k-1) .or. extm(i,k+1) ) then - ! Left edges - pmp_1 = a4(1,i,k) - 2.*gam(i,k+1) - lac_1 = pmp_1 + 1.5*gam(i,k+2) - a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & - max(a4(1,i,k), pmp_1, lac_1) ) - ! Right edges - pmp_2 = a4(1,i,k) + 2.*gam(i,k) - lac_2 = pmp_2 - 1.5*gam(i,k-1) - a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & - max(a4(1,i,k), pmp_2, lac_2) ) - a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) - endif - endif - enddo - else ! kord = 11, 13 - do i=i1,i2 - if ( extm(i,k) .and. (extm(i,k-1).or.extm(i,k+1).or.a4(1,i,k) 16 ) then - do k=1,km - do i=i1,i2 - a4(2,i,k) = q(i,k ) - a4(3,i,k) = q(i,k+1) - a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) - enddo - enddo - return - endif -!----- Perfectly linear scheme -------------------------------- - -!------------------ -! Apply constraints -!------------------ - im = i2 - i1 + 1 - -! Apply *large-scale* constraints - do i=i1,i2 - q(i,2) = min( q(i,2), max(a4(1,i,1), a4(1,i,2)) ) - q(i,2) = max( q(i,2), min(a4(1,i,1), a4(1,i,2)) ) - enddo - - do k=2,km - do i=i1,i2 - gam(i,k) = a4(1,i,k) - a4(1,i,k-1) - enddo - enddo - -! Interior: - do k=3,km-1 - do i=i1,i2 - if ( gam(i,k-1)*gam(i,k+1)>0. ) then -! Apply large-scale constraint to ALL fields if not local max/min - q(i,k) = min( q(i,k), max(a4(1,i,k-1),a4(1,i,k)) ) - q(i,k) = max( q(i,k), min(a4(1,i,k-1),a4(1,i,k)) ) - else - if ( gam(i,k-1) > 0. ) then -! There exists a local max - q(i,k) = max(q(i,k), min(a4(1,i,k-1),a4(1,i,k))) - else -! There exists a local min - q(i,k) = min(q(i,k), max(a4(1,i,k-1),a4(1,i,k))) - if ( iv==0 ) q(i,k) = max(0., q(i,k)) - endif - endif - enddo - enddo - -! Bottom: - do i=i1,i2 - q(i,km) = min( q(i,km), max(a4(1,i,km-1), a4(1,i,km)) ) - q(i,km) = max( q(i,km), min(a4(1,i,km-1), a4(1,i,km)) ) - enddo - - do k=1,km - do i=i1,i2 - a4(2,i,k) = q(i,k ) - a4(3,i,k) = q(i,k+1) - enddo - enddo - - do k=1,km - if ( k==1 .or. k==km ) then - do i=i1,i2 - extm(i,k) = (a4(2,i,k)-a4(1,i,k)) * (a4(3,i,k)-a4(1,i,k)) > 0. - enddo - else - do i=i1,i2 - extm(i,k) = gam(i,k)*gam(i,k+1) < 0. - enddo - endif - enddo - -!--------------------------- -! Apply subgrid constraints: -!--------------------------- -! f(s) = AL + s*[(AR-AL) + A6*(1-s)] ( 0 <= s <= 1 ) -! Top 2 and bottom 2 layers always use monotonic mapping - - if ( iv==0 ) then - do i=i1,i2 - a4(2,i,1) = max(0., a4(2,i,1)) - enddo - elseif ( iv==-1 ) then - do i=i1,i2 - if ( a4(2,i,1)*a4(1,i,1) <= 0. ) a4(2,i,1) = 0. - enddo - elseif ( iv==2 ) then - do i=i1,i2 - a4(2,i,1) = a4(1,i,1) - a4(3,i,1) = a4(1,i,1) - a4(4,i,1) = 0. - enddo - endif - - if ( iv/=2 ) then - do i=i1,i2 - a4(4,i,1) = 3.*(2.*a4(1,i,1) - (a4(2,i,1)+a4(3,i,1))) - enddo - call cs_limiters(im, extm(i1,1), a4(1,i1,1), 1) - endif - -! 
k=2 - do i=i1,i2 - a4(4,i,2) = 3.*(2.*a4(1,i,2) - (a4(2,i,2)+a4(3,i,2))) - enddo - call cs_limiters(im, extm(i1,2), a4(1,i1,2), 2) - -!------------------------------------- -! Huynh's 2nd constraint for interior: -!------------------------------------- - do k=3,km-2 - if ( abs(kord)<9 ) then - do i=i1,i2 -! Left edges - pmp_1 = a4(1,i,k) - 2.*gam(i,k+1) - lac_1 = pmp_1 + 1.5*gam(i,k+2) - a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & - max(a4(1,i,k), pmp_1, lac_1) ) -! Right edges - pmp_2 = a4(1,i,k) + 2.*gam(i,k) - lac_2 = pmp_2 - 1.5*gam(i,k-1) - a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & - max(a4(1,i,k), pmp_2, lac_2) ) - - a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) - enddo - - elseif ( abs(kord)==9 ) then - do i=i1,i2 - if ( extm(i,k) .and. extm(i,k-1) ) then ! c90_mp122 -! grid-scale 2-delta-z wave detected - a4(2,i,k) = a4(1,i,k) - a4(3,i,k) = a4(1,i,k) - a4(4,i,k) = 0. - else if ( extm(i,k) .and. extm(i,k+1) ) then ! c90_mp122 -! grid-scale 2-delta-z wave detected - a4(2,i,k) = a4(1,i,k) - a4(3,i,k) = a4(1,i,k) - a4(4,i,k) = 0. - else - a4(4,i,k) = 6.*a4(1,i,k) - 3.*(a4(2,i,k)+a4(3,i,k)) -! Check within the smooth region if subgrid profile is non-monotonic - if( abs(a4(4,i,k)) > abs(a4(2,i,k)-a4(3,i,k)) ) then - pmp_1 = a4(1,i,k) - 2.*gam(i,k+1) - lac_1 = pmp_1 + 1.5*gam(i,k+2) - a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & - max(a4(1,i,k), pmp_1, lac_1) ) - pmp_2 = a4(1,i,k) + 2.*gam(i,k) - lac_2 = pmp_2 - 1.5*gam(i,k-1) - a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & - max(a4(1,i,k), pmp_2, lac_2) ) - a4(4,i,k) = 6.*a4(1,i,k) - 3.*(a4(2,i,k)+a4(3,i,k)) - endif - endif - enddo - elseif ( abs(kord)==10 ) then - do i=i1,i2 - if( extm(i,k) ) then - if( extm(i,k-1) .or. extm(i,k+1) ) then -! grid-scale 2-delta-z wave detected - a4(2,i,k) = a4(1,i,k) - a4(3,i,k) = a4(1,i,k) - a4(4,i,k) = 0. - else -! True local extremum - a4(4,i,k) = 6.*a4(1,i,k) - 3.*(a4(2,i,k)+a4(3,i,k)) - endif - else ! not a local extremum - a4(4,i,k) = 6.*a4(1,i,k) - 3.*(a4(2,i,k)+a4(3,i,k)) -! Check within the smooth region if subgrid profile is non-monotonic - if( abs(a4(4,i,k)) > abs(a4(2,i,k)-a4(3,i,k)) ) then - pmp_1 = a4(1,i,k) - 2.*gam(i,k+1) - lac_1 = pmp_1 + 1.5*gam(i,k+2) - a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & - max(a4(1,i,k), pmp_1, lac_1) ) - pmp_2 = a4(1,i,k) + 2.*gam(i,k) - lac_2 = pmp_2 - 1.5*gam(i,k-1) - a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & - max(a4(1,i,k), pmp_2, lac_2) ) - a4(4,i,k) = 6.*a4(1,i,k) - 3.*(a4(2,i,k)+a4(3,i,k)) - endif - endif - enddo - elseif ( abs(kord)==12 ) then - do i=i1,i2 - if( extm(i,k) ) then -! grid-scale 2-delta-z wave detected - a4(2,i,k) = a4(1,i,k) - a4(3,i,k) = a4(1,i,k) - a4(4,i,k) = 0. - else ! not a local extremum - a4(4,i,k) = 6.*a4(1,i,k) - 3.*(a4(2,i,k)+a4(3,i,k)) -! Check within the smooth region if subgrid profile is non-monotonic - if( abs(a4(4,i,k)) > abs(a4(2,i,k)-a4(3,i,k)) ) then - pmp_1 = a4(1,i,k) - 2.*gam(i,k+1) - lac_1 = pmp_1 + 1.5*gam(i,k+2) - a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & - max(a4(1,i,k), pmp_1, lac_1) ) - pmp_2 = a4(1,i,k) + 2.*gam(i,k) - lac_2 = pmp_2 - 1.5*gam(i,k-1) - a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & - max(a4(1,i,k), pmp_2, lac_2) ) - a4(4,i,k) = 6.*a4(1,i,k) - 3.*(a4(2,i,k)+a4(3,i,k)) - endif - endif - enddo - elseif ( abs(kord)==13 ) then - do i=i1,i2 - if( extm(i,k) ) then - if ( extm(i,k-1) .and. extm(i,k+1) ) then -! 
grid-scale 2-delta-z wave detected - a4(2,i,k) = a4(1,i,k) - a4(3,i,k) = a4(1,i,k) - a4(4,i,k) = 0. - else - ! Left edges - pmp_1 = a4(1,i,k) - 2.*gam(i,k+1) - lac_1 = pmp_1 + 1.5*gam(i,k+2) - a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & - max(a4(1,i,k), pmp_1, lac_1) ) - ! Right edges - pmp_2 = a4(1,i,k) + 2.*gam(i,k) - lac_2 = pmp_2 - 1.5*gam(i,k-1) - a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & - max(a4(1,i,k), pmp_2, lac_2) ) - a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) - endif - else - a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) - endif - enddo - elseif ( abs(kord)==14 ) then - do i=i1,i2 - a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) - enddo - else ! kord = 11 - do i=i1,i2 - if ( extm(i,k) .and. (extm(i,k-1) .or. extm(i,k+1)) ) then -! Noisy region: - a4(2,i,k) = a4(1,i,k) - a4(3,i,k) = a4(1,i,k) - a4(4,i,k) = 0. - else - a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) - endif - enddo - endif - -! Additional constraint to ensure positivity - if ( iv==0 ) call cs_limiters(im, extm(i1,k), a4(1,i1,k), 0) - - enddo ! k-loop - -!---------------------------------- -! Bottom layer subgrid constraints: -!---------------------------------- - if ( iv==0 ) then - do i=i1,i2 - a4(3,i,km) = max(0., a4(3,i,km)) - enddo - elseif ( iv .eq. -1 ) then - do i=i1,i2 - if ( a4(3,i,km)*a4(1,i,km) <= 0. ) a4(3,i,km) = 0. - enddo - endif - - do k=km-1,km - do i=i1,i2 - a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) - enddo - if(k==(km-1)) call cs_limiters(im, extm(i1,k), a4(1,i1,k), 2) - if(k== km ) call cs_limiters(im, extm(i1,k), a4(1,i1,k), 1) - enddo - - end subroutine cs_profile_am4 - - - - subroutine ppm_profile(a4, delp, km, i1, i2, iv, kord) - -! !INPUT PARAMETERS: + + subroutine ppm_profile(a4, delp, km, i1, i2, iv, kord) + +! !INPUT PARAMETERS: integer, intent(in):: iv ! iv =-1: winds ! iv = 0: positive definite scalars ! iv = 1: others @@ -3674,10 +2834,10 @@ end subroutine steepz !This routine should be moved to fv_io.F90. subroutine rst_remap(km, kn, is,ie,js,je, isd,ied,jsd,jed, nq, ntp, & - delp_r, u_r, v_r, w_r, delz_r, pt_r, q_r, qdiag_r, & - delp, u, v, w, delz, pt, q, qdiag, & + delp_r, u0_r, v0_r, u_r, v_r, w_r, delz_r, pt_r, q_r, qdiag_r, & + delp, u0, v0, u, v, w, delz, pt, q, qdiag, & ak_r, bk_r, ptop, ak, bk, hydrostatic, make_nh, & - domain, square_domain) + domain, square_domain, is_ideal_case) !------------------------------------ ! Assuming hybrid sigma-P coordinate: !------------------------------------ @@ -3687,13 +2847,15 @@ subroutine rst_remap(km, kn, is,ie,js,je, isd,ied,jsd,jed, nq, ntp, & integer, intent(in):: nq, ntp ! number of tracers (including h2o) integer, intent(in):: is,ie,isd,ied ! starting & ending X-Dir index integer, intent(in):: js,je,jsd,jed ! starting & ending Y-Dir index - logical, intent(in):: hydrostatic, make_nh, square_domain + logical, intent(in):: hydrostatic, make_nh, square_domain, is_ideal_case real, intent(IN) :: ptop real, intent(in) :: ak_r(km+1) real, intent(in) :: bk_r(km+1) real, intent(in) :: ak(kn+1) real, intent(in) :: bk(kn+1) real, intent(in):: delp_r(is:ie,js:je,km) ! pressure thickness + real, intent(in):: u0_r(is:ie, js:je+1,km) ! initial (t=0) u-wind (m/s) + real, intent(in):: v0_r(is:ie+1,js:je ,km) ! initial (t=0) v-wind (m/s) real, intent(in):: u_r(is:ie, js:je+1,km) ! u-wind (m/s) real, intent(in):: v_r(is:ie+1,js:je ,km) ! 
v-wind (m/s) real, intent(inout):: pt_r(is:ie,js:je,km) @@ -3704,6 +2866,8 @@ subroutine rst_remap(km, kn, is,ie,js,je, isd,ied,jsd,jed, nq, ntp, & type(domain2d), intent(INOUT) :: domain ! Output: real, intent(out):: delp(isd:ied,jsd:jed,kn) ! pressure thickness + real, intent(out):: u0(isd:,jsd:,1:) ! initial (t=0) u-wind (m/s) + real, intent(out):: v0(isd:,jsd:,1:) ! initial (t=0) v-wind (m/s) real, intent(out):: u(isd:ied ,jsd:jed+1,kn) ! u-wind (m/s) real, intent(out):: v(isd:ied+1,jsd:jed ,kn) ! v-wind (m/s) real, intent(out):: w(isd: ,jsd: ,1:) ! vertical velocity (m/s) @@ -3720,7 +2884,8 @@ subroutine rst_remap(km, kn, is,ie,js,je, isd,ied,jsd,jed, nq, ntp, & real pv2(is:ie+1,kn+1) integer i,j,k , iq - integer, parameter:: kord=4 + !CS operator replaces original mono PPM 4 --- lmh 19apr23 + integer, parameter:: kord=4 ! 13 #ifdef HYDRO_DELZ_REMAP if (is_master() .and. .not. hydrostatic) then @@ -3783,9 +2948,9 @@ subroutine rst_remap(km, kn, is,ie,js,je, isd,ied,jsd,jed, nq, ntp, & enddo enddo -!$OMP parallel do default(none) shared(is,ie,js,je,km,ak_r,bk_r,ps,kn,ak,bk,u_r,u,delp, & +!$OMP parallel do default(none) shared(is,ie,js,je,km,ak_r,bk_r,ps,kn,ak,bk,u0_r,u_r,u0,u,delp, & !$OMP ntp,nq,hydrostatic,make_nh,w_r,w,delz_r,delp_r,delz, & -!$OMP pt_r,pt,v_r,v,q,q_r,qdiag,qdiag_r) & +!$OMP pt_r,pt,v0_r,v_r,v0,v,q,q_r,qdiag,qdiag_r,is_ideal_case) & !$OMP private(pe1, pe2, pv1, pv2) do 1000 j=js,je+1 !------ @@ -3803,6 +2968,12 @@ subroutine rst_remap(km, kn, is,ie,js,je, isd,ied,jsd,jed, nq, ntp, & enddo enddo + if (is_ideal_case) then + call remap_2d(km, pe1, u0_r(is:ie,j:j,1:km), & + kn, pe2, u0(is:ie,j:j,1:kn), & + is, ie, -1, kord) + endif + call remap_2d(km, pe1, u_r(is:ie,j:j,1:km), & kn, pe2, u(is:ie,j:j,1:kn), & is, ie, -1, kord) @@ -3931,6 +3102,12 @@ subroutine rst_remap(km, kn, is,ie,js,je, isd,ied,jsd,jed, nq, ntp, & enddo enddo + if (is_ideal_case) then + call remap_2d(km, pv1, v0_r(is:ie+1,j:j,1:km), & + kn, pv2, v0(is:ie+1,j:j,1:kn), & + is, ie+1, -1, kord) + endif + call remap_2d(km, pv1, v_r(is:ie+1,j:j,1:km), & kn, pv2, v(is:ie+1,j:j,1:kn), & is, ie+1, -1, kord) diff --git a/model/fv_tracer2d.F90 b/model/fv_tracer2d.F90 index ea102da32..4dcaf80f8 100644 --- a/model/fv_tracer2d.F90 +++ b/model/fv_tracer2d.F90 @@ -149,13 +149,13 @@ subroutine tracer_2d_1L(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, npx, n enddo ! k-loop if (trdm>1.e-4) then - call timing_on('COMM_TOTAL') - call timing_on('COMM_TRACER') + call timing_on('COMM_TOTAL') + call timing_on('COMM_TRACER') call complete_group_halo_update(dp1_pack, domain) - call timing_off('COMM_TRACER') - call timing_off('COMM_TOTAL') + call timing_off('COMM_TRACER') + call timing_off('COMM_TOTAL') + endif - endif call mp_reduce_max(cmax,npz) !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,cx,xfx, & @@ -191,11 +191,11 @@ subroutine tracer_2d_1L(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, npx, n endif enddo - call timing_on('COMM_TOTAL') - call timing_on('COMM_TRACER') + call timing_on('COMM_TOTAL') + call timing_on('COMM_TRACER') call complete_group_halo_update(q_pack, domain) - call timing_off('COMM_TRACER') - call timing_off('COMM_TOTAL') + call timing_off('COMM_TRACER') + call timing_off('COMM_TOTAL') ! Begin k-independent tracer transport; can not be OpenMPed because the mpp_update call. 
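! Aside on the dp1/halo-exchange reordering in the fv_tracer2d.F90 hunks here and in
! fv_dynamics.F90 above (the tracer-diffusion MPI fix noted in the release summary):
! the group halo update for dp1 appears to be posted once dp1 is filled and completed
! before the time-split tracer loop rather than inside it. A schematic of the intended
! pairing, assuming only the group-update API already used in this diff (not the
! actual routine):
!     call start_group_halo_update(dp1_pack, dp1, domain)      ! post non-blocking exchange
!     ! ... work that does not read dp1 halo points ...
!     call complete_group_halo_update(dp1_pack, domain)        ! halos of dp1 now valid
!     ! ... tracer transport/diffusion that reads dp1 halo points ...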
do k=1,npz @@ -268,11 +268,11 @@ subroutine tracer_2d_1L(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, npx, n dp1(i,j,k) = dp2(i,j) enddo enddo - call timing_on('COMM_TOTAL') - call timing_on('COMM_TRACER') + call timing_on('COMM_TOTAL') + call timing_on('COMM_TRACER') call mpp_update_domains(qn2, domain) - call timing_off('COMM_TRACER') - call timing_off('COMM_TOTAL') + call timing_off('COMM_TRACER') + call timing_off('COMM_TOTAL') endif enddo ! time-split loop enddo ! k-loop @@ -445,19 +445,19 @@ subroutine tracer_2d(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, npx, npy, endif if (trdm>1.e-4) then - call timing_on('COMM_TOTAL') - call timing_on('COMM_TRACER') + call timing_on('COMM_TOTAL') + call timing_on('COMM_TRACER') call complete_group_halo_update(dp1_pack, domain) - call timing_off('COMM_TRACER') - call timing_off('COMM_TOTAL') + call timing_off('COMM_TRACER') + call timing_off('COMM_TOTAL') + endif - endif do it=1,nsplt - call timing_on('COMM_TOTAL') - call timing_on('COMM_TRACER') + call timing_on('COMM_TOTAL') + call timing_on('COMM_TRACER') call complete_group_halo_update(q_pack, domain) - call timing_off('COMM_TRACER') - call timing_off('COMM_TOTAL') + call timing_off('COMM_TRACER') + call timing_off('COMM_TOTAL') !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,dp1,mfx,mfy,rarea,nq,ksplt,& !$OMP area,xfx,yfx,q,cx,cy,npx,npy,hord,gridstruct,bd,it,nsplt,nord_tr,trdm,lim_fac) & @@ -515,11 +515,11 @@ subroutine tracer_2d(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, npx, npy, enddo ! npz if ( it /= nsplt ) then - call timing_on('COMM_TOTAL') - call timing_on('COMM_TRACER') + call timing_on('COMM_TOTAL') + call timing_on('COMM_TRACER') call start_group_halo_update(q_pack, q, domain) - call timing_off('COMM_TRACER') - call timing_off('COMM_TOTAL') + call timing_off('COMM_TRACER') + call timing_off('COMM_TOTAL') endif enddo ! nsplt @@ -693,16 +693,23 @@ subroutine tracer_2d_nested(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, np enddo endif + if (trdm>1.e-4) then + call timing_on('COMM_TOTAL') + call timing_on('COMM_TRACER') + call complete_group_halo_update(dp1_pack, domain) + call timing_off('COMM_TRACER') + call timing_off('COMM_TOTAL') + endif do it=1,nsplt if ( gridstruct%nested ) then neststruct%tracer_nest_timestep = neststruct%tracer_nest_timestep + 1 end if - call timing_on('COMM_TOTAL') - call timing_on('COMM_TRACER') + call timing_on('COMM_TOTAL') + call timing_on('COMM_TRACER') call complete_group_halo_update(q_pack, domain) - call timing_off('COMM_TRACER') - call timing_off('COMM_TOTAL') + call timing_off('COMM_TRACER') + call timing_off('COMM_TOTAL') if (gridstruct%nested) then do iq=1,nq @@ -727,16 +734,6 @@ subroutine tracer_2d_nested(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, np enddo endif - if (trdm>1.e-4) then - call timing_on('COMM_TOTAL') - call timing_on('COMM_TRACER') - call complete_group_halo_update(dp1_pack, domain) - call timing_off('COMM_TRACER') - call timing_off('COMM_TOTAL') - - endif - - !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,dp1,mfx,mfy,rarea,nq, & !$OMP area,xfx,yfx,q,cx,cy,npx,npy,hord,gridstruct,bd,it,nsplt,nord_tr,trdm,lim_fac) & !$OMP private(dp2, ra_x, ra_y, fx, fy) @@ -780,11 +777,11 @@ subroutine tracer_2d_nested(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, np enddo ! 
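The reordering above completes the dp1_pack group halo update once, before the nsplt time-split loop in tracer_2d_nested, instead of inside it. A minimal sketch of the intended call ordering, with hypothetical stub routines standing in for the FMS group halo updates and the transport step:

    program tracer_halo_order_sketch
      implicit none
      integer :: it, nsplt
      real :: trdm
      nsplt = 3
      trdm  = 1.e-3
      ! complete the dp1 halo exchange once, ahead of the sub-cycling loop,
      ! so the haloed layer thickness is available to every sub-step
      if (trdm > 1.e-4) call complete_dp1_halo()
      do it = 1, nsplt
         call complete_q_halo()     ! tracer halo exchange each sub-step
         call advect_tracers()      ! flux-form transport using the haloed fields
      end do
    contains
      subroutine complete_dp1_halo()
        print *, 'dp1 halo completed (once, before sub-cycling)'
      end subroutine complete_dp1_halo
      subroutine complete_q_halo()
        print *, 'q halo completed'
      end subroutine complete_q_halo
      subroutine advect_tracers()
        print *, 'tracers advected'
      end subroutine advect_tracers
    end program tracer_halo_order_sketch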
npz if ( it /= nsplt ) then - call timing_on('COMM_TOTAL') - call timing_on('COMM_TRACER') + call timing_on('COMM_TOTAL') + call timing_on('COMM_TRACER') call start_group_halo_update(q_pack, q, domain) - call timing_off('COMM_TRACER') - call timing_off('COMM_TOTAL') + call timing_off('COMM_TRACER') + call timing_off('COMM_TOTAL') endif enddo ! nsplt diff --git a/model/fv_update_phys.F90 b/model/fv_update_phys.F90 index 60e642283..6fadf122f 100644 --- a/model/fv_update_phys.F90 +++ b/model/fv_update_phys.F90 @@ -646,7 +646,7 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, if ( .not.flagstruct%dwind_2d ) then - call timing_on('COMM_TOTAL') + call timing_on('COMM_TOTAL') if ( gridstruct%square_domain ) then call start_group_halo_update(i_pack(1), u_dt, domain, whalo=1, ehalo=1, shalo=1, nhalo=1, complete=.false.) call start_group_halo_update(i_pack(1), v_dt, domain, whalo=1, ehalo=1, shalo=1, nhalo=1, complete=.true.) @@ -654,7 +654,7 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, call start_group_halo_update(i_pack(1), u_dt, domain, complete=.false.) call start_group_halo_update(i_pack(1), v_dt, domain, complete=.true.) endif - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') endif !---------------------------------------- @@ -691,7 +691,6 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, endif enddo ! j-loop - call timing_on(' Update_dwinds') if ( flagstruct%dwind_2d ) then call update2d_dwinds_phys(is, ie, js, je, isd, ied, jsd, jed, dt, u_dt, v_dt, u, v, gridstruct, & npx,npy,npz,domain) @@ -700,9 +699,7 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, !I have not seen dwind_2d be used for anything; so we will only handle nesting assuming dwind_2d == .false. call timing_on('COMM_TOTAL') - call complete_group_halo_update(i_pack(1), domain) - call timing_off('COMM_TOTAL') ! ! for regional grid need to set values for u_dt and v_dt at the edges. @@ -766,7 +763,7 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, ! call update_dwinds_phys(is, ie, js, je, isd, ied, jsd, jed, dt, u_dt, v_dt, u, v, gridstruct, npx, npy, npz, domain) endif - call timing_off(' Update_dwinds') + #ifdef GFS_PHYS call cubed_to_latlon(u, v, ua, va, gridstruct, & npx, npy, npz, 1, gridstruct%grid_type, domain, gridstruct%bounded_domain, flagstruct%c2l_ord, bd) @@ -856,9 +853,9 @@ subroutine del2_phys(qdt, delp, gridstruct, cd, npx, npy, km, is, ie, js, je, & enddo enddo enddo - call timing_on('COMM_TOTAL') + call timing_on('COMM_TOTAL') call mpp_update_domains(q, domain, complete=.true.) - call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') !$OMP parallel do default(none) shared(is,ie,js,je,km,mask,dy,sina_u,q,rdxc,gridstruct, & !$OMP sin_sg,npx,dx,npy,rdyc,sina_v,qdt,rarea,delp) & diff --git a/model/gfdl_mp.F90 b/model/gfdl_mp.F90 index eb925fdb9..aaf102c91 100644 --- a/model/gfdl_mp.F90 +++ b/model/gfdl_mp.F90 @@ -33,48 +33,50 @@ ! ======================================================================= module gfdl_mp_mod - + + use fms_mod, only: check_nml_error + implicit none - + private - + ! ----------------------------------------------------------------------- ! interface functions ! 
----------------------------------------------------------------------- - + interface wqs procedure wes_t procedure wqs_trho procedure wqs_ptqv end interface wqs - + interface mqs procedure mes_t procedure mqs_trho procedure mqs_ptqv end interface mqs - + interface iqs procedure ies_t procedure iqs_trho procedure iqs_ptqv end interface iqs - + interface mhc procedure mhc3 procedure mhc4 procedure mhc6 end interface mhc - + interface wet_bulb procedure wet_bulb_dry procedure wet_bulb_moist end interface wet_bulb - + ! ----------------------------------------------------------------------- ! public subroutines, functions, and variables ! ----------------------------------------------------------------------- - + public :: gfdl_mp_init public :: gfdl_mp_driver public :: gfdl_mp_end @@ -83,27 +85,27 @@ module gfdl_mp_mod public :: c_liq, c_ice, rhow, wet_bulb public :: cv_air, cv_vap, mtetw public :: hlv, hlf, tice - + ! ----------------------------------------------------------------------- ! precision definition ! ----------------------------------------------------------------------- - + integer, parameter :: r8 = 8 ! double precision - + ! ----------------------------------------------------------------------- ! initialization conditions ! ----------------------------------------------------------------------- - + logical :: tables_are_initialized = .false. ! initialize satuation tables - + ! ----------------------------------------------------------------------- ! physics constants ! ----------------------------------------------------------------------- - + real, parameter :: grav = 9.80665 ! acceleration due to gravity (m/s^2), ref: IFS - + real, parameter :: rgrav = 1.0 / grav ! inversion of gravity acceleration (s^2/m) - + real, parameter :: pi = 4.0 * atan (1.0) ! ratio of circle circumference to diameter real, parameter :: boltzmann = 1.38064852e-23 ! boltzmann constant (J/K) @@ -111,7 +113,7 @@ module gfdl_mp_mod real, parameter :: runiver = avogadro * boltzmann ! 8.314459727525675, universal gas constant (J/K/mol) real, parameter :: mmd = 2.89644e-2 ! dry air molar mass (kg/mol), ref: IFS real, parameter :: mmv = 1.80153e-2 ! water vapor molar mass (kg/mol), ref: IFS - + real, parameter :: rdgas = 287.05 ! gas constant for dry air (J/kg/K): ref: GFDL, GFS real, parameter :: rvgas = 461.50 ! gas constant for water vapor (J/kg/K): ref: GFDL, GFS !real, parameter :: rdgas = runiver / mmd ! 287.0578961596192, gas constant for dry air (J/kg/K) @@ -120,29 +122,29 @@ module gfdl_mp_mod real, parameter :: zvir = rvgas / rdgas - 1. ! 0.6077667316114637 real, parameter :: eps = rdgas / rvgas ! 0.6219934994582882 real, parameter :: epsm1 = rdgas / rvgas - 1. ! -0.3780065005417118 - + real, parameter :: tice = 273.15 ! freezing temperature (K): ref: GFDL, GFS !real, parameter :: tice = 273.16 ! freezing temperature (K), ref: IFS - + real, parameter :: cp_air = 1004.6 ! heat capacity of dry air at constant pressure (J/kg/K): ref: GFDL, GFS real, parameter :: cv_air = cp_air - rdgas ! 717.55, heat capacity of dry air at constant volume (J/kg/K): ref: GFDL, GFS !real, parameter :: cp_air = 7. / 2. * rdgas ! 1004.7026365586671, heat capacity of dry air at constant pressure (J/kg/K) !real, parameter :: cv_air = 5. / 2. * rdgas ! 717.644740399048, heat capacity of dry air at constant volume (J/kg/K) real, parameter :: cp_vap = 4.0 * rvgas ! 1846.0885419672554, heat capacity of water vapor at constnat pressure (J/kg/K) real, parameter :: cv_vap = 3.0 * rvgas ! 
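The derived values quoted in the inline comments follow directly from the primary constants; a small check program, using only the rdgas, rvgas, and cp_air values declared above, reproduces them:

    program gas_constant_check
      implicit none
      real, parameter :: rdgas  = 287.05     ! dry-air gas constant (J/kg/K)
      real, parameter :: rvgas  = 461.50     ! water-vapor gas constant (J/kg/K)
      real, parameter :: cp_air = 1004.6     ! dry-air cp (J/kg/K)
      print *, 'zvir   =', rvgas / rdgas - 1.   ! ~0.6078
      print *, 'eps    =', rdgas / rvgas        ! ~0.6220
      print *, 'cv_air =', cp_air - rdgas       ! 717.55 J/kg/K
      print *, 'cp_vap =', 4.0 * rvgas          ! 1846.0 J/kg/K
      print *, 'cv_vap =', 3.0 * rvgas          ! 1384.5 J/kg/K
    end program gas_constant_check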
1384.5664064754415, heat capacity of water vapor at constant volume (J/kg/K) - + real, parameter :: c_ice = 2.106e3 ! heat capacity of ice at 0 deg C (J/kg/K), ref: IFS real, parameter :: c_liq = 4.218e3 ! heat capacity of water at 0 deg C (J/kg/K), ref: IFS - + real, parameter :: dc_vap = cp_vap - c_liq ! - 2371.9114580327446, isobaric heating / cooling (J/kg/K) real, parameter :: dc_ice = c_liq - c_ice ! 2112.0, isobaric heating / colling (J/kg/K) real, parameter :: d2_ice = cp_vap - c_ice ! - 259.9114580327446, isobaric heating / cooling (J/kg/K) - + real, parameter :: hlv = 2.5e6 ! latent heat of evaporation at 0 deg C (J/kg): ref: GFDL, GFS real, parameter :: hlf = 3.3358e5 ! latent heat of fusion at 0 deg C (J/kg): ref: GFDL, GFS !real, parameter :: hlv = 2.5008e6 ! latent heat of evaporation at 0 deg C (J/kg), ref: IFS !real, parameter :: hlf = 3.345e5 ! latent heat of fusion at 0 deg C (J/kg), ref: IFS - + real, parameter :: visd = 1.717e-5 ! dynamics viscosity of air at 0 deg C and 1000 hPa (Mason, 1971) (kg/m/s) real, parameter :: visk = 1.35e-5 ! kinematic viscosity of air at 0 deg C and 1000 hPa (Mason, 1971) (m^2/s) real, parameter :: vdifu = 2.25e-5 ! diffusivity of water vapor in air at 0 deg C and 1000 hPa (Mason, 1971) (m^2/s) @@ -151,80 +153,82 @@ module gfdl_mp_mod real, parameter :: rho0 = 1.0 ! reference air density (kg/m^3), ref: IFS real, parameter :: cdg = 3.15121 ! drag coefficient of graupel (Locatelli and Hobbs, 1974) real, parameter :: cdh = 0.5 ! drag coefficient of hail (Heymsfield and Wright, 2014) - + real (kind = r8), parameter :: lv0 = hlv - dc_vap * tice ! 3148711.3338762247, evaporation latent heat coeff. at 0 deg K (J/kg) real (kind = r8), parameter :: li0 = hlf - dc_ice * tice ! - 242413.92000000004, fussion latent heat coeff. at 0 deg K (J/kg) real (kind = r8), parameter :: li2 = lv0 + li0 ! 2906297.413876225, sublimation latent heat coeff. at 0 deg K (J/kg) - + real (kind = r8), parameter :: e00 = 611.21 ! saturation vapor pressure at 0 deg C (Pa), ref: IFS - + ! ----------------------------------------------------------------------- ! predefined parameters ! ----------------------------------------------------------------------- - + integer, parameter :: length = 2621 ! length of the saturation table - + real, parameter :: qcmin = 1.0e-15 ! min value for cloud condensates (kg/kg) real, parameter :: qfmin = 1.0e-8 ! min value for sedimentation (kg/kg) - + real, parameter :: dz_min = 1.0e-2 ! used for correcting flipped height (m) - + real, parameter :: rhow = 1.0e3 ! density of cloud water (kg/m^3) real, parameter :: rhoi = 9.17e2 ! density of cloud ice (kg/m^3) real, parameter :: rhor = 1.0e3 ! density of rain (Lin et al. 1983) (kg/m^3) real, parameter :: rhos = 1.0e2 ! density of snow (Lin et al. 1983) (kg/m^3) real, parameter :: rhog = 4.0e2 ! density of graupel (Rutledge and Hobbs 1984) (kg/m^3) real, parameter :: rhoh = 9.17e2 ! density of hail (Lin et al. 1983) (kg/m^3) - + real, parameter :: dt_fr = 8.0 ! t_wfr - dt_fr: minimum temperature water can exist (Moore and Molinero 2011) - + real (kind = r8), parameter :: one_r8 = 1.0 ! constant 1 - + ! ----------------------------------------------------------------------- ! namelist parameters ! ----------------------------------------------------------------------- - + integer :: ntimes = 1 ! cloud microphysics sub cycles - + + integer :: nconds = 1 ! condensation sub cycles + integer :: cfflag = 1 ! cloud fraction scheme ! 1: GFDL cloud scheme ! 2: Xu and Randall (1996) ! 3: Park et al. (2016) ! 
4: Gultepe and Isaac (2007) - + integer :: icloud_f = 0 ! GFDL cloud scheme ! 0: subgrid variability based scheme ! 1: same as 0, but for old fvgfs implementation ! 2: binary cloud scheme ! 3: extension of 0 - + integer :: irain_f = 0 ! cloud water to rain auto conversion scheme ! 0: subgrid variability based scheme ! 1: no subgrid varaibility - + integer :: inflag = 1 ! ice nucleation scheme ! 1: Hong et al. (2004) ! 2: Meyers et al. (1992) ! 3: Meyers et al. (1992) ! 4: Cooper (1986) ! 5: Fletcher (1962) - + integer :: igflag = 3 ! ice generation scheme ! 1: WSM6 ! 2: WSM6 with 0 at 0 C ! 3: WSM6 with 0 at 0 C and fixed value at - 10 C ! 4: combination of 1 and 3 - + integer :: ifflag = 1 ! ice fall scheme ! 1: Deng and Mace (2008) ! 2: Heymsfield and Donner (1990) - + integer :: rewflag = 1 ! cloud water effective radius scheme ! 1: Martin et al. (1994) ! 2: Martin et al. (1994), GFDL revision ! 3: Kiehl et al. (1994) ! 4: effective radius - + integer :: reiflag = 5 ! cloud ice effective radius scheme ! 1: Heymsfield and Mcfarquhar (1996) ! 2: Donner et al. (1997) @@ -233,26 +237,26 @@ module gfdl_mp_mod ! 5: Wyser (1998) ! 6: Sun and Rikus (1999), Sun (2001) ! 7: effective radius - + integer :: rerflag = 1 ! rain effective radius scheme ! 1: effective radius - + integer :: resflag = 1 ! snow effective radius scheme ! 1: effective radius - + integer :: regflag = 1 ! graupel effective radius scheme ! 1: effective radius - + integer :: radr_flag = 1 ! radar reflectivity for rain ! 1: Mark Stoelinga (2005) ! 2: Smith et al. (1975), Tong and Xue (2005) ! 3: Marshall-Palmer formula (https://en.wikipedia.org/wiki/DBZ_(meteorology)) - + integer :: rads_flag = 1 ! radar reflectivity for snow ! 1: Mark Stoelinga (2005) ! 2: Smith et al. (1975), Tong and Xue (2005) ! 3: Marshall-Palmer formula (https://en.wikipedia.org/wiki/DBZ_(meteorology)) - + integer :: radg_flag = 1 ! radar reflectivity for graupel ! 1: Mark Stoelinga (2005) ! 2: Smith et al. (1975), Tong and Xue (2005) @@ -263,50 +267,51 @@ module gfdl_mp_mod ! 2: explicit scheme ! 3: lagrangian scheme ! 4: combined implicit and lagrangian scheme - + integer :: vdiffflag = 1 ! wind difference scheme in accretion ! 1: Wisner et al. (1972) ! 2: Mizuno (1990) ! 3: Murakami (1990) - + logical :: do_sedi_uv = .true. ! transport of horizontal momentum in sedimentation logical :: do_sedi_w = .true. ! transport of vertical momentum in sedimentation logical :: do_sedi_heat = .true. ! transport of heat in sedimentation logical :: do_sedi_melt = .true. ! melt cloud ice, snow, and graupel during sedimentation - + logical :: do_qa = .true. ! do inline cloud fraction logical :: rad_snow = .true. ! include snow in cloud fraciton calculation logical :: rad_graupel = .true. ! include graupel in cloud fraction calculation logical :: rad_rain = .true. ! include rain in cloud fraction calculation logical :: do_cld_adj = .false. ! do cloud fraction adjustment - + logical :: z_slope_liq = .true. ! use linear mono slope for autocconversions logical :: z_slope_ice = .true. ! use linear mono slope for autocconversions - + logical :: use_rhc_cevap = .false. ! cap of rh for cloud water evaporation logical :: use_rhc_revap = .false. ! cap of rh for rain evaporation - + logical :: const_vw = .false. ! if .ture., the constants are specified by v * _fac logical :: const_vi = .false. ! if .ture., the constants are specified by v * _fac logical :: const_vs = .false. ! if .ture., the constants are specified by v * _fac logical :: const_vg = .false. ! 
if .ture., the constants are specified by v * _fac logical :: const_vr = .false. ! if .ture., the constants are specified by v * _fac - + logical :: liq_ice_combine = .false. ! combine all liquid water, combine all solid water logical :: snow_grauple_combine = .true. ! combine snow and graupel - + logical :: prog_ccn = .false. ! do prognostic ccn (Yi Ming's method) - + logical :: fix_negative = .true. ! fix negative water species - + + logical :: do_evap_timescale = .true. ! whether to apply a timescale to evaporation logical :: do_cond_timescale = .false. ! whether to apply a timescale to condensation - + logical :: do_hail = .false. ! use hail parameters instead of graupel - + logical :: consv_checker = .false. ! turn on energy and water conservation checker - + logical :: do_warm_rain_mp = .false. ! do warm rain cloud microphysics only - + logical :: do_wbf = .false. ! do Wegener Bergeron Findeisen process logical :: do_psd_water_fall = .false. ! calculate cloud water terminal velocity based on PSD @@ -317,11 +322,15 @@ module gfdl_mp_mod logical :: do_new_acc_water = .false. ! perform the new accretion for cloud water logical :: do_new_acc_ice = .false. ! perform the new accretion for cloud ice - + logical :: cp_heating = .false. ! update temperature based on constant pressure - + + logical :: delay_cond_evap = .false. ! do condensation evaporation only at the last time step + + logical :: do_subgrid_proc = .true. ! do temperature sentive high vertical resolution processes + real :: mp_time = 150.0 ! maximum microphysics time step (s) - + real :: n0w_sig = 1.1 ! intercept parameter (significand) of cloud water (Lin et al. 1983) (1/m^4) (Martin et al. 1994) !real :: n0w_sig = 1.4 ! intercept parameter (significand) of cloud water (Lin et al. 1983) (1/m^4) (Martin et al. 1994) real :: n0i_sig = 1.3 ! intercept parameter (significand) of cloud ice (Lin et al. 1983) (1/m^4) (McFarquhar et al. 2015) @@ -330,7 +339,7 @@ module gfdl_mp_mod real :: n0s_sig = 3.0 ! intercept parameter (significand) of snow (Lin et al. 1983) (1/m^4) (Gunn and Marshall 1958) real :: n0g_sig = 4.0 ! intercept parameter (significand) of graupel (Rutledge and Hobbs 1984) (1/m^4) (Houze et al. 1979) real :: n0h_sig = 4.0 ! intercept parameter (significand) of hail (Lin et al. 1983) (1/m^4) (Federer and Waldvogel 1975) - + real :: n0w_exp = 41 ! intercept parameter (exponent) of cloud water (Lin et al. 1983) (1/m^4) (Martin et al. 1994) !real :: n0w_exp = 91 ! intercept parameter (exponent) of cloud water (Lin et al. 1983) (1/m^4) (Martin et al. 1994) real :: n0i_exp = 18 ! intercept parameter (exponent) of cloud ice (Lin et al. 1983) (1/m^4) (McFarquhar et al. 2015) @@ -339,7 +348,7 @@ module gfdl_mp_mod real :: n0s_exp = 6 ! intercept parameter (exponent) of snow (Lin et al. 1983) (1/m^4) (Gunn and Marshall 1958) real :: n0g_exp = 6 ! intercept parameter (exponent) of graupel (Rutledge and Hobbs 1984) (1/m^4) (Houze et al. 1979) real :: n0h_exp = 4 ! intercept parameter (exponent) of hail (Lin et al. 1983) (1/m^4) (Federer and Waldvogel 1975) - + real :: muw = 6.0 ! shape parameter of cloud water in Gamma distribution (Martin et al. 1994) !real :: muw = 16.0 ! shape parameter of cloud water in Gamma distribution (Martin et al. 1994) real :: mui = 3.35 ! shape parameter of cloud ice in Gamma distribution (McFarquhar et al. 2015) @@ -348,7 +357,7 @@ module gfdl_mp_mod real :: mus = 1.0 ! shape parameter of snow in Gamma distribution (Gunn and Marshall 1958) real :: mug = 1.0 ! 
shape parameter of graupel in Gamma distribution (Houze et al. 1979) real :: muh = 1.0 ! shape parameter of hail in Gamma distribution (Federer and Waldvogel 1975) - + real :: alinw = 3.e7 ! "a" in Lin et al. (1983) for cloud water (Ikawa and Saito 1990) real :: alini = 7.e2 ! "a" in Lin et al. (1983) for cloud ice (Ikawa and Saita 1990) real :: alinr = 842.0 ! "a" in Lin et al. (1983) for rain (Liu and Orville 1969) @@ -362,16 +371,16 @@ module gfdl_mp_mod real :: blins = 0.25 ! "b" in Lin et al. (1983) for snow (straka 2009) real :: bling = 0.5 ! "b" in Lin et al. (1983), similar to b, but for graupel (Pruppacher and Klett 2010) real :: blinh = 0.5 ! "b" in Lin et al. (1983), similar to b, but for hail (Pruppacher and Klett 2010) - + real :: tice_mlt = 273.16 ! can set ice melting temperature to 268 based on observation (Kay et al. 2016) (K) - + real :: t_min = 178.0 ! minimum temperature to freeze - dry all water vapor (K) real :: t_sub = 184.0 ! minimum temperature for sublimation of cloud ice (K) - + real :: rh_inc = 0.25 ! rh increment for complete evaporation of cloud water and cloud ice real :: rh_inr = 0.25 ! rh increment for minimum evaporation of rain real :: rh_ins = 0.25 ! rh increment for sublimation of snow - + real :: tau_r2g = 900.0 ! rain freezing to graupel time scale (s) real :: tau_i2s = 1000.0 ! cloud ice to snow autoconversion time scale (s) real :: tau_l2r = 900.0 ! cloud water to rain autoconversion time scale (s) @@ -382,30 +391,30 @@ module gfdl_mp_mod real :: tau_smlt = 900.0 ! snow melting time scale (s) real :: tau_gmlt = 600.0 ! graupel melting time scale (s) real :: tau_wbf = 300.0 ! graupel melting time scale (s) - + real :: dw_land = 0.20 ! base value for subgrid deviation / variability over land real :: dw_ocean = 0.10 ! base value for subgrid deviation / variability over ocean - + real :: ccn_o = 90.0 ! ccn over ocean (1/cm^3) real :: ccn_l = 270.0 ! ccn over land (1/cm^3) - + real :: rthresh = 10.0e-6 ! critical cloud drop radius (micron) for autoconversion - + real :: cld_min = 0.05 ! minimum cloud fraction - + real :: qi_lim = 1.0 ! cloud ice limiter (0: no, 1: full, >1: extra) to prevent large ice build up - + real :: ql_mlt = 2.0e-3 ! maximum cloud water allowed from melted cloud ice (kg/kg) real :: qs_mlt = 1.0e-6 ! maximum cloud water allowed from melted snow (kg/kg) - + real :: ql_gen = 1.0e-3 ! maximum cloud water generation during remapping step (kg/kg) - + real :: ql0_max = 2.0e-3 ! maximum cloud water value (autoconverted to rain) (kg/kg) real :: qi0_max = 1.0e-4 ! maximum cloud ice value (autoconverted to snow) (kg/m^3) - + real :: qi0_crt = 1.0e-4 ! cloud ice to snow autoconversion threshold (kg/m^3) real :: qs0_crt = 1.0e-3 ! snow to graupel autoconversion threshold (0.6e-3 in Purdue Lin scheme) (kg/m^3) - + real :: c_paut = 0.55 ! cloud water to rain autoconversion efficiency real :: c_psacw = 1.0 ! cloud water to snow accretion efficiency real :: c_psaci = 0.05 ! cloud ice to snow accretion efficiency (was 0.1 in ZETAC) @@ -422,42 +431,43 @@ module gfdl_mp_mod real :: ss_fac = 0.2 ! snow sublimation temperature factor real :: gs_fac = 0.2 ! graupel sublimation temperature factor - real :: rh_fac = 10.0 ! cloud water condensation / evaporation relative humidity factor + real :: rh_fac_evap = 10.0 ! cloud water evaporation relative humidity factor + real :: rh_fac_cond = 10.0 ! cloud water condensation relative humidity factor real :: sed_fac = 1.0 ! 
coefficient for sedimentation fall, scale from 1.0 (implicit) to 0.0 (lagrangian) - + real :: vw_fac = 1.0 real :: vi_fac = 1.0 ! IFS: if const_vi: 1 / 3 real :: vs_fac = 1.0 ! IFS: if const_vs: 1. real :: vg_fac = 1.0 ! IFS: if const_vg: 2. real :: vr_fac = 1.0 ! IFS: if const_vr: 4. - + real :: vw_max = 0.01 ! maximum fall speed for cloud water (m/s) real :: vi_max = 0.5 ! maximum fall speed for cloud ice (m/s) real :: vs_max = 5.0 ! maximum fall speed for snow (m/s) real :: vg_max = 8.0 ! maximum fall speed for graupel (m/s) real :: vr_max = 12.0 ! maximum fall speed for rain (m/s) - + real :: xr_a = 0.25 ! p value in Xu and Randall (1996) real :: xr_b = 100.0 ! alpha_0 value in Xu and Randall (1996) real :: xr_c = 0.49 ! gamma value in Xu and Randall (1996) - + real :: te_err = 1.e-5 ! 64bit: 1.e-14, 32bit: 1.e-7; turn off to save computer time real :: tw_err = 1.e-8 ! 64bit: 1.e-14, 32bit: 1.e-7; turn off to save computer time - + real :: rh_thres = 0.75 ! minimum relative humidity for cloud fraction real :: rhc_cevap = 0.85 ! maximum relative humidity for cloud water evaporation real :: rhc_revap = 0.85 ! maximum relative humidity for rain evaporation - + real :: f_dq_p = 1.0 ! cloud fraction adjustment for supersaturation real :: f_dq_m = 1.0 ! cloud fraction adjustment for undersaturation - + real :: fi2s_fac = 1.0 ! maximum sink of cloud ice to form snow: 0-1 real :: fi2g_fac = 1.0 ! maximum sink of cloud ice to form graupel: 0-1 real :: fs2g_fac = 1.0 ! maximum sink of snow to form graupel: 0-1 - + real :: beta = 1.22 ! defined in Heymsfield and Mcfarquhar (1996) - + real :: rewmin = 5.0, rewmax = 15.0 ! minimum and maximum effective radius for cloud water (micron) real :: reimin = 10.0, reimax = 150.0 ! minimum and maximum effective radius for cloud ice (micron) real :: rermin = 15.0, rermax = 10000.0 ! minimum and maximum effective radius for rain (micron) @@ -473,17 +483,17 @@ module gfdl_mp_mod ! GFDL MP's PSD and cloud ice radiative property's PSD assumption. ! after the cloud ice radiative property's PSD is rebuilt, ! this parameter should be 1.0. - + ! ----------------------------------------------------------------------- ! local shared variables ! ----------------------------------------------------------------------- - + real :: acco (3, 10), acc (20) real :: cracs, csacr, cgacr, cgacs, csacw, craci, csaci, cgacw, cgaci, cracw real :: cssub (5), cgsub (5), crevp (5), cgfr (2), csmlt (4), cgmlt (4) - + real :: t_wfr, fac_rc, c_air, c_vap, d0_vap - + real (kind = r8) :: lv00, li00, li20, cpaut real (kind = r8) :: d1_vap, d1_ice, c1_vap, c1_liq, c1_ice real (kind = r8) :: normw, normr, normi, norms, normg, normh @@ -498,14 +508,14 @@ module gfdl_mp_mod real (kind = r8) :: rrbw, rrbr, rrbi, rrbs, rrbg, rrbh real (kind = r8) :: tvaw, tvar, tvai, tvas, tvag, tvah real (kind = r8) :: tvbw, tvbr, tvbi, tvbs, tvbg, tvbh - + real, allocatable :: table0 (:), table1 (:), table2 (:), table3 (:), table4 (:) real, allocatable :: des0 (:), des1 (:), des2 (:), des3 (:), des4 (:) - + ! ----------------------------------------------------------------------- ! namelist ! 
----------------------------------------------------------------------- - + namelist / gfdl_mp_nml / & t_min, t_sub, tau_r2g, tau_smlt, tau_gmlt, dw_land, dw_ocean, vw_fac, vi_fac, & vr_fac, vs_fac, vg_fac, ql_mlt, do_qa, fix_negative, vw_max, vi_max, vs_max, & @@ -526,10 +536,10 @@ module gfdl_mp_mod n0w_sig, n0i_sig, n0r_sig, n0s_sig, n0g_sig, n0h_sig, n0w_exp, n0i_exp, & n0r_exp, n0s_exp, n0g_exp, n0h_exp, muw, mui, mur, mus, mug, muh, & alinw, alini, alinr, alins, aling, alinh, blinw, blini, blinr, blins, bling, blinh, & - do_new_acc_water, do_new_acc_ice, is_fac, ss_fac, gs_fac, rh_fac, & + do_new_acc_water, do_new_acc_ice, is_fac, ss_fac, gs_fac, rh_fac_evap, rh_fac_cond, & snow_grauple_combine, do_psd_water_num, do_psd_ice_num, vdiffflag, rewfac, reifac, & - cp_heating - + cp_heating, nconds, do_evap_timescale, delay_cond_evap, do_subgrid_proc + contains ! ======================================================================= @@ -537,53 +547,54 @@ module gfdl_mp_mod ! ======================================================================= subroutine gfdl_mp_init (input_nml_file, logunit, hydrostatic) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + logical, intent (in) :: hydrostatic - + integer, intent (in) :: logunit - + character (len = *), intent (in) :: input_nml_file (:) - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - - logical :: exists - + + integer :: ios, ierr + ! ----------------------------------------------------------------------- ! read namelist ! ----------------------------------------------------------------------- - - read (input_nml_file, nml = gfdl_mp_nml) - + + read (input_nml_file, nml = gfdl_mp_nml, iostat = ios) + ierr = check_nml_error (ios, 'gfdl_mp_nml') + ! ----------------------------------------------------------------------- ! write namelist to log file ! ----------------------------------------------------------------------- - + write (logunit, *) " ================================================================== " write (logunit, *) "gfdl_mp_mod" write (logunit, nml = gfdl_mp_nml) - + ! ----------------------------------------------------------------------- ! initialize microphysics variables ! ----------------------------------------------------------------------- - + if (.not. tables_are_initialized) call qs_init - + call setup_mp - + ! ----------------------------------------------------------------------- ! define various heat capacities and latent heat coefficients at 0 deg K ! ----------------------------------------------------------------------- - + call setup_mhc_lhc (hydrostatic) - + end subroutine gfdl_mp_init ! 
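With the namelist extended as above (rh_fac split into rh_fac_evap and rh_fac_cond, plus nconds, do_evap_timescale, delay_cond_evap, and do_subgrid_proc), a hypothetical gfdl_mp_nml fragment that sets the new entries to their declared defaults would read:

    &gfdl_mp_nml
       nconds            = 1        ! condensation/evaporation sub-cycles
       do_evap_timescale = .true.   ! apply a timescale to evaporation
       delay_cond_evap   = .false.  ! condensation/evaporation only at the last step
       do_subgrid_proc   = .true.   ! temperature-sensitive high-vertical-resolution processes
       rh_fac_evap       = 10.0     ! RH factor for cloud water evaporation
       rh_fac_cond       = 10.0     ! RH factor for cloud water condensation
    /

Since the read now goes through iostat and check_nml_error, the intent appears to be that an input.nml without a gfdl_mp_nml group falls back to these defaults rather than aborting.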
======================================================================= @@ -593,58 +604,45 @@ end subroutine gfdl_mp_init subroutine gfdl_mp_driver (qv, ql, qr, qi, qs, qg, qa, qnl, qni, pt, wa, & ua, va, delz, delp, gsize, dtm, hs, water, rain, ice, snow, graupel, & hydrostatic, is, ie, ks, ke, q_con, cappa, consv_te, adj_vmr, te, dte, & - pcw, edw, oew, rrw, tvw, pci, edi, oei, rri, tvi, pcr, edr, oer, rrr, tvr, & - pcs, eds, oes, rrs, tvs, pcg, edg, oeg, rrg, tvg, & - prefluxw, prefluxr, prefluxi, prefluxs, prefluxg, condensation, & - deposition, evaporation, sublimation, last_step, do_inline_mp) - + prefluxw, prefluxr, prefluxi, prefluxs, prefluxg, last_step, do_inline_mp) + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: is, ie, ks, ke - + logical, intent (in) :: hydrostatic, last_step, consv_te, do_inline_mp - + real, intent (in) :: dtm - + real, intent (in), dimension (is:ie) :: hs, gsize - + real, intent (in), dimension (is:ie, ks:ke) :: qnl, qni - + real, intent (inout), dimension (is:ie, ks:ke) :: delp, delz, pt, ua, va, wa, te real, intent (inout), dimension (is:ie, ks:ke) :: qv, ql, qr, qi, qs, qg, qa real, intent (inout), dimension (is:ie, ks:ke) :: prefluxw, prefluxr, prefluxi, prefluxs, prefluxg - + real, intent (inout), dimension (is:, ks:) :: q_con, cappa - + real, intent (inout), dimension (is:ie) :: water, rain, ice, snow, graupel - real, intent (inout), dimension (is:ie) :: condensation, deposition - real, intent (inout), dimension (is:ie) :: evaporation, sublimation - + real, intent (out), dimension (is:ie, ks:ke) :: adj_vmr - real, intent (out), dimension (is:ie, ks:ke) :: pcw, edw, oew, rrw, tvw - real, intent (out), dimension (is:ie, ks:ke) :: pci, edi, oei, rri, tvi - real, intent (out), dimension (is:ie, ks:ke) :: pcr, edr, oer, rrr, tvr - real, intent (out), dimension (is:ie, ks:ke) :: pcs, eds, oes, rrs, tvs - real, intent (out), dimension (is:ie, ks:ke) :: pcg, edg, oeg, rrg, tvg real (kind = r8), intent (out), dimension (is:ie) :: dte ! ----------------------------------------------------------------------- ! major cloud microphysics driver ! ----------------------------------------------------------------------- - + call mpdrv (hydrostatic, ua, va, wa, delp, pt, qv, ql, qr, qi, qs, qg, qa, & qnl, qni, delz, is, ie, ks, ke, dtm, water, rain, ice, snow, graupel, & - gsize, hs, q_con, cappa, consv_te, adj_vmr, te, dte, pcw, edw, oew, rrw, tvw, & - pci, edi, oei, rri, tvi, pcr, edr, oer, rrr, tvr, pcs, eds, oes, rrs, tvs, & - pcg, edg, oeg, rrg, tvg, prefluxw, prefluxr, prefluxi, & - prefluxs, prefluxg, condensation, deposition, evaporation, sublimation, & - last_step, do_inline_mp, .false., .true.) - + gsize, hs, q_con, cappa, consv_te, adj_vmr, te, dte, prefluxw, prefluxr, & + prefluxi, prefluxs, prefluxg, last_step, do_inline_mp, .false., .true.) + end subroutine gfdl_mp_driver ! ======================================================================= @@ -652,13 +650,13 @@ end subroutine gfdl_mp_driver ! ======================================================================= subroutine gfdl_mp_end - + implicit none - + ! ----------------------------------------------------------------------- ! free up memory ! 
----------------------------------------------------------------------- - + deallocate (table0) deallocate (table1) deallocate (table2) @@ -669,9 +667,9 @@ subroutine gfdl_mp_end deallocate (des2) deallocate (des3) deallocate (des4) - + tables_are_initialized = .false. - + end subroutine gfdl_mp_end ! ======================================================================= @@ -679,43 +677,43 @@ end subroutine gfdl_mp_end ! ======================================================================= subroutine setup_mp - + implicit none - + integer :: i, k - + real :: gcon, hcon, scm3, pisq, act (20), ace (20), occ (3), aone - + ! ----------------------------------------------------------------------- ! complete freezing temperature ! ----------------------------------------------------------------------- - + if (do_warm_rain_mp) then t_wfr = t_min else t_wfr = tice - 40.0 endif - + ! ----------------------------------------------------------------------- ! cloud water autoconversion, Hong et al. (2004) ! ----------------------------------------------------------------------- - + fac_rc = (4. / 3.) * pi * rhow * rthresh ** 3 - + aone = 2. / 9. * (3. / 4.) ** (4. / 3.) / pi ** (1. / 3.) cpaut = c_paut * aone * grav / visd - + ! ----------------------------------------------------------------------- ! terminal velocities parameters, Lin et al. (1983) ! ----------------------------------------------------------------------- - + gcon = (4. * grav * rhog / (3. * cdg * rho0)) ** 0.5 hcon = (4. * grav * rhoh / (3. * cdh * rho0)) ** 0.5 - + ! ----------------------------------------------------------------------- ! part of the slope parameters ! ----------------------------------------------------------------------- - + normw = pi * rhow * n0w_sig * gamma (muw + 3) normi = pi * rhoi * n0i_sig * gamma (mui + 3) normr = pi * rhor * n0r_sig * gamma (mur + 3) @@ -735,7 +733,7 @@ subroutine setup_mp ! optical extinction (oe), radar reflectivity factor (rr), and ! mass-weighted terminal velocity (tv) ! ----------------------------------------------------------------------- - + pcaw = exp (3 / (muw + 3) * log (n0w_sig)) * gamma (muw) * exp (3 * n0w_exp / (muw + 3) * log (10.)) pcai = exp (3 / (mui + 3) * log (n0i_sig)) * gamma (mui) * exp (3 * n0i_exp / (mui + 3) * log (10.)) pcar = exp (3 / (mur + 3) * log (n0r_sig)) * gamma (mur) * exp (3 * n0r_exp / (mur + 3) * log (10.)) @@ -749,7 +747,7 @@ subroutine setup_mp pcbs = exp (mus / (mus + 3) * log (pi * rhos * gamma (mus + 3))) pcbg = exp (mug / (mug + 3) * log (pi * rhog * gamma (mug + 3))) pcbh = exp (muh / (muh + 3) * log (pi * rhoh * gamma (muh + 3))) - + edaw = exp (- 1. / (muw + 3) * log (n0w_sig)) * (muw + 2) * exp (- n0w_exp / (muw + 3) * log (10.)) edai = exp (- 1. / (mui + 3) * log (n0i_sig)) * (mui + 2) * exp (- n0i_exp / (mui + 3) * log (10.)) edar = exp (- 1. / (mur + 3) * log (n0r_sig)) * (mur + 2) * exp (- n0r_exp / (mur + 3) * log (10.)) @@ -763,7 +761,7 @@ subroutine setup_mp edbs = exp (1. / (mus + 3) * log (pi * rhos * gamma (mus + 3))) edbg = exp (1. / (mug + 3) * log (pi * rhog * gamma (mug + 3))) edbh = exp (1. / (muh + 3) * log (pi * rhoh * gamma (muh + 3))) - + oeaw = exp (1. / (muw + 3) * log (n0w_sig)) * pi * gamma (muw + 2) * & exp (n0w_exp / (muw + 3) * log (10.)) oeai = exp (1. 
/ (mui + 3) * log (n0i_sig)) * pi * gamma (mui + 2) * & @@ -783,7 +781,7 @@ subroutine setup_mp oebs = 2 * exp ((mus + 2) / (mus + 3) * log (pi * rhos * gamma (mus + 3))) oebg = 2 * exp ((mug + 2) / (mug + 3) * log (pi * rhog * gamma (mug + 3))) oebh = 2 * exp ((muh + 2) / (muh + 3) * log (pi * rhoh * gamma (muh + 3))) - + rraw = exp (- 3 / (muw + 3) * log (n0w_sig)) * gamma (muw + 6) * & exp (- 3 * n0w_exp / (muw + 3) * log (10.)) rrai = exp (- 3 / (mui + 3) * log (n0i_sig)) * gamma (mui + 6) * & @@ -803,7 +801,7 @@ subroutine setup_mp rrbs = exp ((mus + 6) / (mus + 3) * log (pi * rhos * gamma (mus + 3))) rrbg = exp ((mug + 6) / (mug + 3) * log (pi * rhog * gamma (mug + 3))) rrbh = exp ((muh + 6) / (muh + 3) * log (pi * rhoh * gamma (muh + 3))) - + tvaw = exp (- blinw / (muw + 3) * log (n0w_sig)) * alinw * gamma (muw + blinw + 3) * & exp (- blinw * n0w_exp / (muw + 3) * log (10.)) tvai = exp (- blini / (mui + 3) * log (n0i_sig)) * alini * gamma (mui + blini + 3) * & @@ -823,15 +821,15 @@ subroutine setup_mp tvbs = exp (blins / (mus + 3) * log (pi * rhos * gamma (mus + 3))) * gamma (mus + 3) tvbg = exp (bling / (mug + 3) * log (pi * rhog * gamma (mug + 3))) * gamma (mug + 3) tvbh = exp (blinh / (muh + 3) * log (pi * rhoh * gamma (muh + 3))) * gamma (muh + 3) - + ! ----------------------------------------------------------------------- ! Schmidt number, Sc ** (1 / 3) in Lin et al. (1983) ! ----------------------------------------------------------------------- - + scm3 = exp (1. / 3. * log (visk / vdifu)) - + pisq = pi * pi - + ! ----------------------------------------------------------------------- ! accretion between cloud water, cloud ice, rain, snow, and graupel or hail, Lin et al. (1983) ! ----------------------------------------------------------------------- @@ -865,7 +863,7 @@ subroutine setup_mp endif if (do_new_acc_water) then - + cracw = pisq * n0r_sig * n0w_sig * rhow / 24. csacw = pisq * n0s_sig * n0w_sig * rhow / 24. if (do_hail) then @@ -877,7 +875,7 @@ subroutine setup_mp endif if (do_new_acc_ice) then - + craci = pisq * n0r_sig * n0i_sig * rhoi / 24. csaci = pisq * n0s_sig * n0i_sig * rhoi / 24. if (do_hail) then @@ -898,7 +896,7 @@ subroutine setup_mp ! ----------------------------------------------------------------------- ! accretion between cloud water, cloud ice, rain, snow, and graupel or hail, Lin et al. (1983) ! ----------------------------------------------------------------------- - + cracs = pisq * n0r_sig * n0s_sig * rhos / 24. csacr = pisq * n0s_sig * n0r_sig * rhor / 24. if (do_hail) then @@ -908,12 +906,12 @@ subroutine setup_mp cgacr = pisq * n0g_sig * n0r_sig * rhor / 24. cgacs = pisq * n0g_sig * n0s_sig * rhos / 24. endif - + cracs = cracs * c_pracs csacr = csacr * c_psacr cgacr = cgacr * c_pgacr cgacs = cgacs * c_pgacs - + ! act / ace / acc: ! 1 - 2: racs (s - r) ! 3 - 4: sacr (r - s) @@ -925,7 +923,7 @@ subroutine setup_mp ! 15 - 16: saci (i - s) ! 17 - 18: sacw (w - g) ! 19 - 20: saci (i - g) - + act (1) = norms act (2) = normr act (3) = act (2) @@ -950,7 +948,7 @@ subroutine setup_mp act (18) = act (6) act (19) = act (11) act (20) = act (6) - + ace (1) = expos ace (2) = expor ace (3) = ace (2) @@ -975,7 +973,7 @@ subroutine setup_mp ace (18) = ace (6) ace (19) = ace (11) ace (20) = ace (6) - + acc (1) = mus acc (2) = mur acc (3) = acc (2) @@ -1000,11 +998,11 @@ subroutine setup_mp acc (18) = acc (6) acc (19) = acc (11) acc (20) = acc (6) - + occ (1) = 1. occ (2) = 2. occ (3) = 1. 
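For a feel for the autoconversion threshold set earlier in this routine: fac_rc = (4/3)*pi*rhow*rthresh**3 is the mass of a single droplet at the critical radius, so with the default rthresh = 10 microns it comes out near 4.2e-12 kg. A minimal sketch, with the constants copied from the declarations above:

    program autoconv_threshold_sketch
      implicit none
      real, parameter :: pi      = 4.0 * atan (1.0)
      real, parameter :: rhow    = 1.0e3     ! cloud water density (kg/m^3)
      real, parameter :: rthresh = 10.0e-6   ! critical cloud-drop radius (m)
      real :: fac_rc
      ! mass of one droplet at the critical radius, as used for the
      ! cloud-water-to-rain autoconversion threshold
      fac_rc = (4. / 3.) * pi * rhow * rthresh ** 3
      print *, 'fac_rc =', fac_rc, 'kg   (~4.19e-12 kg)'
    end program autoconv_threshold_sketch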
- + do i = 1, 3 do k = 1, 10 acco (i, k) = occ (i) * gamma (6 + acc (2 * k - 1) - i) * gamma (acc (2 * k) + i - 1) / & @@ -1013,11 +1011,11 @@ subroutine setup_mp exp ((i - 3) * log (ace (2 * k - 1))) * exp ((4 - i) * log (ace (2 * k))) enddo enddo - + ! ----------------------------------------------------------------------- ! rain evaporation, snow sublimation, and graupel or hail sublimation, Lin et al. (1983) ! ----------------------------------------------------------------------- - + crevp (1) = 2. * pi * vdifu * tcond * rvgas * n0r_sig * gamma (1 + mur) / & exp ((1 + mur) / (mur + 3) * log (normr)) * exp (2.0 * log (expor)) crevp (2) = 0.78 @@ -1027,7 +1025,7 @@ subroutine setup_mp exp ((- 1 - blinr) / 2. * log (expor)) crevp (4) = tcond * rvgas crevp (5) = vdifu - + cssub (1) = 2. * pi * vdifu * tcond * rvgas * n0s_sig * gamma (1 + mus) / & exp ((1 + mus) / (mus + 3) * log (norms)) * exp (2.0 * log (expos)) cssub (2) = 0.78 @@ -1037,7 +1035,7 @@ subroutine setup_mp exp ((- 1 - blins) / 2. * log (expos)) cssub (4) = tcond * rvgas cssub (5) = vdifu - + if (do_hail) then cgsub (1) = 2. * pi * vdifu * tcond * rvgas * n0h_sig * gamma (1 + muh) / & exp ((1 + muh) / (muh + 3) * log (normh)) * exp (2.0 * log (expoh)) @@ -1057,22 +1055,22 @@ subroutine setup_mp endif cgsub (4) = tcond * rvgas cgsub (5) = vdifu - + ! ----------------------------------------------------------------------- ! snow melting, Lin et al. (1983) ! ----------------------------------------------------------------------- - + csmlt (1) = 2. * pi * tcond * n0s_sig * gamma (1 + mus) / & exp ((1 + mus) / (mus + 3) * log (norms)) * exp (2.0 * log (expos)) csmlt (2) = 2. * pi * vdifu * n0s_sig * gamma (1 + mus) / & exp ((1 + mus) / (mus + 3) * log (norms)) * exp (2.0 * log (expos)) csmlt (3) = cssub (2) csmlt (4) = cssub (3) - + ! ----------------------------------------------------------------------- ! graupel or hail melting, Lin et al. (1983) ! ----------------------------------------------------------------------- - + if (do_hail) then cgmlt (1) = 2. * pi * tcond * n0h_sig * gamma (1 + muh) / & exp ((1 + muh) / (muh + 3) * log (normh)) * exp (2.0 * log (expoh)) @@ -1086,15 +1084,15 @@ subroutine setup_mp endif cgmlt (3) = cgsub (2) cgmlt (4) = cgsub (3) - + ! ----------------------------------------------------------------------- ! rain freezing, Lin et al. (1983) ! ----------------------------------------------------------------------- - + cgfr (1) = 1.e2 / 36 * pisq * n0r_sig * rhor * gamma (6 + mur) / & exp ((6 + mur) / (mur + 3) * log (normr)) * exp (- 3.0 * log (expor)) cgfr (2) = 0.66 - + end subroutine setup_mp ! ======================================================================= @@ -1102,15 +1100,15 @@ end subroutine setup_mp ! ======================================================================= subroutine setup_mhc_lhc (hydrostatic) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + logical, intent (in) :: hydrostatic - + if (hydrostatic) then c_air = cp_air c_vap = cp_vap @@ -1120,20 +1118,20 @@ subroutine setup_mhc_lhc (hydrostatic) c_vap = cv_vap endif d0_vap = c_vap - c_liq - + ! 
scaled constants (to reduce float point errors for 32-bit) - + d1_vap = d0_vap / c_air d1_ice = dc_ice / c_air - + lv00 = (hlv - d0_vap * tice) / c_air li00 = (hlf - dc_ice * tice) / c_air li20 = lv00 + li00 - + c1_vap = c_vap / c_air c1_liq = c_liq / c_air c1_ice = c_ice / c_air - + end subroutine setup_mhc_lhc ! ======================================================================= @@ -1142,99 +1140,103 @@ end subroutine setup_mhc_lhc subroutine mpdrv (hydrostatic, ua, va, wa, delp, pt, qv, ql, qr, qi, qs, qg, & qa, qnl, qni, delz, is, ie, ks, ke, dtm, water, rain, ice, snow, graupel, & - gsize, hs, q_con, cappa, consv_te, adj_vmr, te, dte, pcw, edw, oew, rrw, tvw, & - pci, edi, oei, rri, tvi, pcr, edr, oer, rrr, tvr, pcs, eds, oes, rrs, tvs, & - pcg, edg, oeg, rrg, tvg, prefluxw, prefluxr, prefluxi, prefluxs, prefluxg, & - condensation, deposition, evaporation, sublimation, last_step, do_inline_mp, & - do_mp_fast, do_mp_full) - + gsize, hs, q_con, cappa, consv_te, adj_vmr, te, dte, prefluxw, prefluxr, & + prefluxi, prefluxs, prefluxg, last_step, do_inline_mp, do_mp_fast, do_mp_full) + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: is, ie, ks, ke - + logical, intent (in) :: hydrostatic, last_step, consv_te, do_inline_mp logical, intent (in) :: do_mp_fast, do_mp_full - + real, intent (in) :: dtm - + real, intent (in), dimension (is:ie) :: gsize, hs - + real, intent (in), dimension (is:ie, ks:ke) :: qnl, qni - + real, intent (inout), dimension (is:ie, ks:ke) :: delp, delz, pt, ua, va, wa real, intent (inout), dimension (is:ie, ks:ke) :: qv, ql, qr, qi, qs, qg, qa real, intent (inout), dimension (is:ie, ks:ke) :: prefluxw, prefluxr, prefluxi, prefluxs, prefluxg - + real, intent (inout), dimension (is:, ks:) :: q_con, cappa - + real, intent (inout), dimension (is:ie) :: water, rain, ice, snow, graupel - real, intent (inout), dimension (is:ie) :: condensation, deposition - real, intent (inout), dimension (is:ie) :: evaporation, sublimation - + real, intent (out), dimension (is:ie, ks:ke) :: te, adj_vmr - real, intent (out), dimension (is:ie, ks:ke) :: pcw, edw, oew, rrw, tvw - real, intent (out), dimension (is:ie, ks:ke) :: pci, edi, oei, rri, tvi - real, intent (out), dimension (is:ie, ks:ke) :: pcr, edr, oer, rrr, tvr - real, intent (out), dimension (is:ie, ks:ke) :: pcs, eds, oes, rrs, tvs - real, intent (out), dimension (is:ie, ks:ke) :: pcg, edg, oeg, rrg, tvg - + real (kind = r8), intent (out), dimension (is:ie) :: dte ! ----------------------------------------------------------------------- ! local variables ! 
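The sub-cycling setup above caps each microphysics step at mp_time: ntimes is the number of equal sub-steps, dts their length, and convt converts the per-step precipitation increments to mm/day. A worked example with the default mp_time = 150 s, the default ntimes = 1 folded into max(1, ...), and an illustrative dtm = 450 s:

    program mp_subcycle_sketch
      implicit none
      real, parameter :: grav    = 9.80665
      real, parameter :: rgrav   = 1.0 / grav
      real, parameter :: mp_time = 150.0       ! default maximum MP time step (s)
      real :: dtm, dts, convt
      integer :: ntimes
      dtm    = 450.0                           ! physics time step (illustrative)
      ntimes = max (1, int (dtm / min (dtm, mp_time)))
      dts    = dtm / real (ntimes)
      convt  = 86400. * rgrav / dts            ! conversion of per-step increments to mm/day
      print *, 'ntimes =', ntimes, ' dts =', dts, ' convt =', convt
      ! expected: ntimes = 3, dts = 150 s
    end program mp_subcycle_sketch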
----------------------------------------------------------------------- - - integer :: i, k, n - + + integer :: i, k + real :: rh_adj, rh_rain, ccn0, cin0, cond, q1, q2 real :: convt, dts, q_cond, t_lnd, t_ocn, h_var, tmp, nl, ni - + real, dimension (ks:ke) :: q_liq, q_sol, dp, dz, dp0 real, dimension (ks:ke) :: qvz, qlz, qrz, qiz, qsz, qgz, qaz real, dimension (ks:ke) :: den, pz, denfac, ccn, cin real, dimension (ks:ke) :: u, v, w - + + real, dimension (is:ie, ks:ke) :: pcw, edw, oew, rrw, tvw + real, dimension (is:ie, ks:ke) :: pci, edi, oei, rri, tvi + real, dimension (is:ie, ks:ke) :: pcr, edr, oer, rrr, tvr + real, dimension (is:ie, ks:ke) :: pcs, eds, oes, rrs, tvs + real, dimension (is:ie, ks:ke) :: pcg, edg, oeg, rrg, tvg + + real, dimension (is:ie) :: condensation, deposition + real, dimension (is:ie) :: evaporation, sublimation + real (kind = r8) :: con_r8, c8, cp8 - + real (kind = r8), dimension (is:ie, ks:ke) :: te_beg_d, te_end_d, tw_beg_d, tw_end_d real (kind = r8), dimension (is:ie, ks:ke) :: te_beg_m, te_end_m, tw_beg_m, tw_end_m - + real (kind = r8), dimension (is:ie) :: te_b_beg_d, te_b_end_d, tw_b_beg_d, tw_b_end_d, te_loss real (kind = r8), dimension (is:ie) :: te_b_beg_m, te_b_end_m, tw_b_beg_m, tw_b_end_m - + real (kind = r8), dimension (ks:ke) :: tz, tzuv, tzw - + ! ----------------------------------------------------------------------- ! time steps ! ----------------------------------------------------------------------- - + ntimes = max (ntimes, int (dtm / min (dtm, mp_time))) dts = dtm / real (ntimes) - + ! ----------------------------------------------------------------------- ! initialization of total energy difference and condensation diag ! ----------------------------------------------------------------------- - + dte = 0.0 cond = 0.0 adj_vmr = 1.0 - + + condensation = 0.0 + deposition = 0.0 + evaporation = 0.0 + sublimation = 0.0 + ! ----------------------------------------------------------------------- ! unit convert to mm/day ! ----------------------------------------------------------------------- - + convt = 86400. * rgrav / dts - + do i = is, ie - + ! ----------------------------------------------------------------------- ! conversion of temperature ! ----------------------------------------------------------------------- - + if (do_inline_mp) then do k = ks, ke q_cond = ql (i, k) + qr (i, k) + qi (i, k) + qs (i, k) + qg (i, k) @@ -1245,11 +1247,11 @@ subroutine mpdrv (hydrostatic, ua, va, wa, delp, pt, qv, ql, qr, qi, qs, qg, & tz (k) = pt (i, k) enddo endif - + ! ----------------------------------------------------------------------- ! calculate base total energy ! ----------------------------------------------------------------------- - + if (consv_te) then if (hydrostatic) then do k = ks, ke @@ -1262,25 +1264,25 @@ subroutine mpdrv (hydrostatic, ua, va, wa, delp, pt, qv, ql, qr, qi, qs, qg, & enddo endif endif - + ! ----------------------------------------------------------------------- ! total energy checker ! 
----------------------------------------------------------------------- - + if (consv_checker) then call mtetw (ks, ke, qv (i, :), ql (i, :), qr (i, :), qi (i, :), & qs (i, :), qg (i, :), tz, ua (i, :), va (i, :), wa (i, :), & - delp (i, :), gsize (i), dte (i), 0.0, water (i), rain (i), & - ice (i), snow (i), graupel (i), 0.0, 0.0, dtm, te_beg_m (i, :), & + delp (i, :), dte (i), 0.0, water (i), rain (i), ice (i), & + snow (i), graupel (i), 0.0, 0.0, dtm, te_beg_m (i, :), & tw_beg_m (i, :), te_b_beg_m (i), tw_b_beg_m (i), .true., hydrostatic) endif - + do k = ks, ke - + ! ----------------------------------------------------------------------- ! convert specific ratios to mass mixing ratios ! ----------------------------------------------------------------------- - + qvz (k) = qv (i, k) qlz (k) = ql (i, k) qrz (k) = qr (i, k) @@ -1288,13 +1290,13 @@ subroutine mpdrv (hydrostatic, ua, va, wa, delp, pt, qv, ql, qr, qi, qs, qg, & qsz (k) = qs (i, k) qgz (k) = qg (i, k) qaz (k) = qa (i, k) - + if (do_inline_mp) then q_cond = qlz (k) + qrz (k) + qiz (k) + qsz (k) + qgz (k) con_r8 = one_r8 - (qvz (k) + q_cond) else con_r8 = one_r8 - qvz (k) - endif + endif dp0 (k) = delp (i, k) dp (k) = delp (i, k) * con_r8 @@ -1305,46 +1307,46 @@ subroutine mpdrv (hydrostatic, ua, va, wa, delp, pt, qv, ql, qr, qi, qs, qg, & qiz (k) = qiz (k) * con_r8 qsz (k) = qsz (k) * con_r8 qgz (k) = qgz (k) * con_r8 - + ! ----------------------------------------------------------------------- ! dry air density and layer-mean pressure thickness ! ----------------------------------------------------------------------- - + dz (k) = delz (i, k) den (k) = - dp (k) / (grav * dz (k)) pz (k) = den (k) * rdgas * tz (k) - + ! ----------------------------------------------------------------------- ! for sedi_momentum transport ! ----------------------------------------------------------------------- - + u (k) = ua (i, k) v (k) = va (i, k) if (.not. hydrostatic) then w (k) = wa (i, k) endif - + enddo - + do k = ks, ke denfac (k) = sqrt (den (ke) / den (k)) enddo - + ! ----------------------------------------------------------------------- ! total energy checker ! ----------------------------------------------------------------------- - + if (consv_checker) then call mtetw (ks, ke, qvz, qlz, qrz, qiz, qsz, qgz, tz, u, v, w, & - dp, gsize (i), dte (i), 0.0, water (i), rain (i), ice (i), & - snow (i), graupel (i), 0.0, 0.0, dtm, te_beg_d (i, :), tw_beg_d (i, :), & + dp, dte (i), 0.0, water (i), rain (i), ice (i), snow (i), & + graupel (i), 0.0, 0.0, dtm, te_beg_d (i, :), tw_beg_d (i, :), & te_b_beg_d (i), tw_b_beg_d (i), .false., hydrostatic) endif - + ! ----------------------------------------------------------------------- ! cloud condensation nuclei (CCN), cloud ice nuclei (CIN) ! ----------------------------------------------------------------------- - + if (prog_ccn) then do k = ks, ke ! boucher and lohmann (1995) @@ -1367,69 +1369,70 @@ subroutine mpdrv (hydrostatic, ua, va, wa, delp, pt, qv, ql, qr, qi, qs, qg, & cin (k) = cin0 / den (k) enddo endif - + ! ----------------------------------------------------------------------- ! subgrid deviation in horizontal direction ! default area dependent form: use dx ~ 100 km as the base ! ----------------------------------------------------------------------- - + t_lnd = dw_land * sqrt (gsize (i) / 1.e5) t_ocn = dw_ocean * sqrt (gsize (i) / 1.e5) tmp = min (1., abs (hs (i)) / (10. * grav)) h_var = t_lnd * tmp + t_ocn * (1. - tmp) h_var = min (0.20, max (0.01, h_var)) - + ! 
----------------------------------------------------------------------- ! relative humidity thresholds ! ----------------------------------------------------------------------- - + rh_adj = 1. - h_var - rh_inc rh_rain = max (0.35, rh_adj - rh_inr) - + ! ----------------------------------------------------------------------- ! fix negative water species from outside ! ----------------------------------------------------------------------- - + if (fix_negative) & call neg_adj (ks, ke, tz, dp, qvz, qlz, qrz, qiz, qsz, qgz, cond) - + condensation (i) = condensation (i) + cond * convt * ntimes - + ! ----------------------------------------------------------------------- ! fast microphysics loop ! ----------------------------------------------------------------------- - + if (do_mp_fast) then - + call mp_fast (ks, ke, tz, qvz, qlz, qrz, qiz, qsz, qgz, dtm, dp, den, & ccn, cin, condensation (i), deposition (i), evaporation (i), & - sublimation (i), convt) - + sublimation (i), denfac, convt, last_step) + endif - + ! ----------------------------------------------------------------------- ! full microphysics loop ! ----------------------------------------------------------------------- - + if (do_mp_full) then - + call mp_full (ks, ke, ntimes, tz, qvz, qlz, qrz, qiz, qsz, qgz, dp, dz, & u, v, w, den, denfac, ccn, cin, dts, rh_adj, rh_rain, h_var, dte (i), & water (i), rain (i), ice (i), snow (i), graupel (i), prefluxw (i, :), & prefluxr (i, :), prefluxi (i, :), prefluxs (i, :), prefluxg (i, :), & - condensation (i), deposition (i), evaporation (i), sublimation (i), convt) - + condensation (i), deposition (i), evaporation (i), sublimation (i), & + convt, last_step) + endif - + ! ----------------------------------------------------------------------- ! cloud fraction diagnostic ! ----------------------------------------------------------------------- - + if (do_qa .and. last_step) then call cloud_fraction (ks, ke, pz, den, qvz, qlz, qrz, qiz, qsz, qgz, qaz, & tz, h_var, gsize (i)) endif - + ! ======================================================================= ! calculation of particle concentration (pc), effective diameter (ed), ! optical extinction (oe), radar reflectivity factor (rr), and @@ -1461,7 +1464,7 @@ subroutine mpdrv (hydrostatic, ua, va, wa, delp, pt, qv, ql, qr, qi, qs, qg, & oeg (i, :) = 0.0 rrg (i, :) = 0.0 tvg (i, :) = 0.0 - + do k = ks, ke if (qlz (k) .gt. qcmin) then call cal_pc_ed_oe_rr_tv (qlz (k), den (k), blinw, muw, pcaw, pcbw, pcw (i, k), & @@ -1502,7 +1505,7 @@ subroutine mpdrv (hydrostatic, ua, va, wa, delp, pt, qv, ql, qr, qi, qs, qg, & ! momentum transportation during sedimentation ! update temperature before delp and q update ! ----------------------------------------------------------------------- - + if (do_sedi_uv) then do k = ks, ke c8 = mhc (qvz (k), qlz (k), qrz (k), qiz (k), qsz (k), qgz (k)) * c_air @@ -1510,7 +1513,7 @@ subroutine mpdrv (hydrostatic, ua, va, wa, delp, pt, qv, ql, qr, qi, qs, qg, & tz (k) = tz (k) + tzuv (k) enddo endif - + if (do_sedi_w) then do k = ks, ke c8 = mhc (qvz (k), qlz (k), qrz (k), qiz (k), qsz (k), qgz (k)) * c_air @@ -1518,31 +1521,31 @@ subroutine mpdrv (hydrostatic, ua, va, wa, delp, pt, qv, ql, qr, qi, qs, qg, & tz (k) = tz (k) + tzw (k) enddo endif - + ! ----------------------------------------------------------------------- ! total energy checker ! 
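A worked example of the subgrid-variability and relative-humidity thresholds above, using the default dw_land/dw_ocean and rh_inc/rh_inr values and assuming hs is the surface geopotential in m^2/s^2 (as the abs(hs)/(10*grav) weighting suggests); the 25 km grid size and 500 m terrain height are illustrative:

    program subgrid_hvar_sketch
      implicit none
      real, parameter :: grav    = 9.80665
      real, parameter :: dw_land = 0.20, dw_ocean = 0.10   ! defaults above
      real, parameter :: rh_inc  = 0.25, rh_inr   = 0.25   ! defaults above
      real :: gsize, hs, t_lnd, t_ocn, tmp, h_var, rh_adj, rh_rain
      gsize = 25.e3                  ! 25 km grid spacing (illustrative)
      hs    = 500. * grav            ! surface geopotential for ~500 m terrain (illustrative)
      t_lnd = dw_land  * sqrt (gsize / 1.e5)
      t_ocn = dw_ocean * sqrt (gsize / 1.e5)
      tmp   = min (1., abs (hs) / (10. * grav))
      h_var = min (0.20, max (0.01, t_lnd * tmp + t_ocn * (1. - tmp)))
      rh_adj  = 1. - h_var - rh_inc
      rh_rain = max (0.35, rh_adj - rh_inr)
      print *, 'h_var =', h_var, ' rh_adj =', rh_adj, ' rh_rain =', rh_rain
      ! expected: h_var = 0.10, rh_adj = 0.65, rh_rain = 0.40
    end program subgrid_hvar_sketch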
----------------------------------------------------------------------- - + if (consv_checker) then call mtetw (ks, ke, qvz, qlz, qrz, qiz, qsz, qgz, tz, u, v, w, & - dp, gsize (i), dte (i), 0.0, water (i), rain (i), ice (i), & - snow (i), graupel (i), 0.0, 0.0, dtm, te_end_d (i, :), tw_end_d (i, :), & + dp, dte (i), 0.0, water (i), rain (i), ice (i), snow (i), & + graupel (i), 0.0, 0.0, dtm, te_end_d (i, :), tw_end_d (i, :), & te_b_end_d (i), tw_b_end_d (i), .false., hydrostatic, te_loss (i)) endif - + do k = ks, ke - + ! ----------------------------------------------------------------------- ! convert mass mixing ratios back to specific ratios ! ----------------------------------------------------------------------- - + if (do_inline_mp) then q_cond = qlz (k) + qrz (k) + qiz (k) + qsz (k) + qgz (k) con_r8 = one_r8 + qvz (k) + q_cond else con_r8 = one_r8 + qvz (k) endif - + delp (i, k) = dp (k) * con_r8 con_r8 = one_r8 / con_r8 qvz (k) = qvz (k) * con_r8 @@ -1551,7 +1554,7 @@ subroutine mpdrv (hydrostatic, ua, va, wa, delp, pt, qv, ql, qr, qi, qs, qg, & qiz (k) = qiz (k) * con_r8 qsz (k) = qsz (k) * con_r8 qgz (k) = qgz (k) * con_r8 - + q1 = qv (i, k) + ql (i, k) + qr (i, k) + qi (i, k) + qs (i, k) + qg (i, k) q2 = qvz (k) + qlz (k) + qrz (k) + qiz (k) + qsz (k) + qgz (k) adj_vmr (i, k) = ((one_r8 - q1) / (one_r8 - q2)) / (one_r8 + q2 - q1) @@ -1563,17 +1566,17 @@ subroutine mpdrv (hydrostatic, ua, va, wa, delp, pt, qv, ql, qr, qi, qs, qg, & qs (i, k) = qsz (k) qg (i, k) = qgz (k) qa (i, k) = qaz (k) - + ! ----------------------------------------------------------------------- ! calculate some more variables needed outside ! ----------------------------------------------------------------------- - + q_liq (k) = qlz (k) + qrz (k) q_sol (k) = qiz (k) + qsz (k) + qgz (k) q_cond = q_liq (k) + q_sol (k) con_r8 = one_r8 - (qvz (k) + q_cond) c8 = mhc (con_r8, qvz (k), q_liq (k), q_sol (k)) * c_air - + #ifdef USE_COND q_con (i, k) = q_cond #endif @@ -1581,14 +1584,14 @@ subroutine mpdrv (hydrostatic, ua, va, wa, delp, pt, qv, ql, qr, qi, qs, qg, & tmp = rdgas * (1. + zvir * qvz (k)) cappa (i, k) = tmp / (tmp + c8) #endif - + enddo - + ! ----------------------------------------------------------------------- ! momentum transportation during sedimentation ! update temperature after delp and q update ! ----------------------------------------------------------------------- - + if (do_sedi_uv) then do k = ks, ke tz (k) = tz (k) - tzuv (k) @@ -1606,7 +1609,7 @@ subroutine mpdrv (hydrostatic, ua, va, wa, delp, pt, qv, ql, qr, qi, qs, qg, & va (i, k) = v (k) enddo endif - + if (do_sedi_w) then do k = ks, ke tz (k) = tz (k) - tzw (k) @@ -1623,23 +1626,23 @@ subroutine mpdrv (hydrostatic, ua, va, wa, delp, pt, qv, ql, qr, qi, qs, qg, & wa (i, k) = w (k) enddo endif - + ! ----------------------------------------------------------------------- ! total energy checker ! ----------------------------------------------------------------------- - + if (consv_checker) then call mtetw (ks, ke, qv (i, :), ql (i, :), qr (i, :), qi (i, :), & qs (i, :), qg (i, :), tz, ua (i, :), va (i, :), wa (i, :), & - delp (i, :), gsize (i), dte (i), 0.0, water (i), rain (i), & - ice (i), snow (i), graupel (i), 0.0, 0.0, dtm, te_end_m (i, :), & + delp (i, :), dte (i), 0.0, water (i), rain (i), ice (i), & + snow (i), graupel (i), 0.0, 0.0, dtm, te_end_m (i, :), & tw_end_m (i, :), te_b_end_m (i), tw_b_end_m (i), .true., hydrostatic) endif - + ! ----------------------------------------------------------------------- ! 
calculate total energy loss or gain ! ----------------------------------------------------------------------- - + if (consv_te) then if (hydrostatic) then do k = ks, ke @@ -1652,11 +1655,11 @@ subroutine mpdrv (hydrostatic, ua, va, wa, delp, pt, qv, ql, qr, qi, qs, qg, & enddo endif endif - + ! ----------------------------------------------------------------------- ! conversion of temperature ! ----------------------------------------------------------------------- - + if (do_inline_mp) then do k = ks, ke q_cond = qlz (k) + qrz (k) + qiz (k) + qsz (k) + qgz (k) @@ -1681,25 +1684,25 @@ subroutine mpdrv (hydrostatic, ua, va, wa, delp, pt, qv, ql, qr, qi, qs, qg, & pt (i, k) = pt (i, k) + (tz (k) - pt (i, k)) * c8 / cp_air enddo endif - + ! ----------------------------------------------------------------------- ! total energy checker ! ----------------------------------------------------------------------- - + if (consv_checker) then if (abs (sum (te_end_d (i, :)) + te_b_end_d (i) - sum (te_beg_d (i, :)) - te_b_beg_d (i)) / & (sum (te_beg_d (i, :)) + te_b_beg_d (i)) .gt. te_err) then print*, "GFDL-MP-DRY TE: ", & - !(sum (te_beg_d (i, :)) + te_b_beg_d (i)) / (gsize (i) ** 2), & - !(sum (te_end_d (i, :)) + te_b_end_d (i)) / (gsize (i) ** 2), & + !(sum (te_beg_d (i, :)) + te_b_beg_d (i)), & + !(sum (te_end_d (i, :)) + te_b_end_d (i)), & (sum (te_end_d (i, :)) + te_b_end_d (i) - sum (te_beg_d (i, :)) - te_b_beg_d (i)) / & (sum (te_beg_d (i, :)) + te_b_beg_d (i)) endif if (abs (sum (tw_end_d (i, :)) + tw_b_end_d (i) - sum (tw_beg_d (i, :)) - tw_b_beg_d (i)) / & (sum (tw_beg_d (i, :)) + tw_b_beg_d (i)) .gt. tw_err) then print*, "GFDL-MP-DRY TW: ", & - !(sum (tw_beg_d (i, :)) + tw_b_beg_d (i)) / (gsize (i) ** 2), & - !(sum (tw_end_d (i, :)) + tw_b_end_d (i)) / (gsize (i) ** 2), & + !(sum (tw_beg_d (i, :)) + tw_b_beg_d (i)), & + !(sum (tw_end_d (i, :)) + tw_b_end_d (i)), & (sum (tw_end_d (i, :)) + tw_b_end_d (i) - sum (tw_beg_d (i, :)) - tw_b_beg_d (i)) / & (sum (tw_beg_d (i, :)) + tw_b_beg_d (i)) endif @@ -1707,24 +1710,24 @@ subroutine mpdrv (hydrostatic, ua, va, wa, delp, pt, qv, ql, qr, qi, qs, qg, & if (abs (sum (te_end_m (i, :)) + te_b_end_m (i) - sum (te_beg_m (i, :)) - te_b_beg_m (i)) / & (sum (te_beg_m (i, :)) + te_b_beg_m (i)) .gt. te_err) then print*, "GFDL-MP-WET TE: ", & - !(sum (te_beg_m (i, :)) + te_b_beg_m (i)) / (gsize (i) ** 2), & - !(sum (te_end_m (i, :)) + te_b_end_m (i)) / (gsize (i) ** 2), & + !(sum (te_beg_m (i, :)) + te_b_beg_m (i)), & + !(sum (te_end_m (i, :)) + te_b_end_m (i)), & (sum (te_end_m (i, :)) + te_b_end_m (i) - sum (te_beg_m (i, :)) - te_b_beg_m (i)) / & (sum (te_beg_m (i, :)) + te_b_beg_m (i)) endif if (abs (sum (tw_end_m (i, :)) + tw_b_end_m (i) - sum (tw_beg_m (i, :)) - tw_b_beg_m (i)) / & (sum (tw_beg_m (i, :)) + tw_b_beg_m (i)) .gt. tw_err) then print*, "GFDL-MP-WET TW: ", & - !(sum (tw_beg_m (i, :)) + tw_b_beg_m (i)) / (gsize (i) ** 2), & - !(sum (tw_end_m (i, :)) + tw_b_end_m (i)) / (gsize (i) ** 2), & + !(sum (tw_beg_m (i, :)) + tw_b_beg_m (i)), & + !(sum (tw_end_m (i, :)) + tw_b_end_m (i)), & (sum (tw_end_m (i, :)) + tw_b_end_m (i) - sum (tw_beg_m (i, :)) - tw_b_beg_m (i)) / & (sum (tw_beg_m (i, :)) + tw_b_beg_m (i)) endif !print*, "GFDL MP TE WET LOSS (%) : ", te_loss_0 (i) / (sum (te_beg_m (i, :)) + te_b_beg_m (i)) * 100.0 endif - + enddo ! i loop - + end subroutine mpdrv ! ======================================================================= @@ -1732,68 +1735,68 @@ end subroutine mpdrv ! 
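! -----------------------------------------------------------------------
! Editor's note: the block below is an illustrative sketch, not part of the
! patch. It mirrors the consv_checker test in mpdrv above: column-integrated
! total energy before and after the microphysics, plus the boundary term,
! compared as a relative error against a threshold (te_err / tw_err in the
! real code). All values and the threshold here are made up.
! -----------------------------------------------------------------------
program consv_check_sketch
  implicit none
  integer, parameter :: r8 = selected_real_kind (15)
  real (kind = r8), dimension (4) :: te_beg, te_end
  real (kind = r8) :: te_b_beg, te_b_end, rel_err
  real, parameter :: te_err = 1.e-5   ! assumed threshold; namelist-controlled in the real code
  te_beg = (/ 1.0e9_r8, 2.0e9_r8, 3.0e9_r8, 4.0e9_r8 /)
  te_end = te_beg
  te_end (2) = te_end (2) + 5.0e5_r8  ! inject an imbalance large enough to trip the check
  te_b_beg = 1.0e7_r8
  te_b_end = 1.0e7_r8
  ! relative loss or gain of column total energy over the step
  rel_err = (sum (te_end) + te_b_end - sum (te_beg) - te_b_beg) / &
            (sum (te_beg) + te_b_beg)
  if (abs (rel_err) .gt. te_err) then
    print *, "GFDL-MP TE imbalance (sketch): ", rel_err
  endif
end program consv_check_sketch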
======================================================================= subroutine neg_adj (ks, ke, tz, dp, qv, ql, qr, qi, qs, qg, cond) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in), dimension (ks:ke) :: dp - + real (kind = r8), intent (inout), dimension (ks:ke) :: tz - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg - + real, intent (out) :: cond - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: dq, sink - + real, dimension (ks:ke) :: q_liq, q_sol, lcpk, icpk, tcpk, tcp3 - + real (kind = r8), dimension (ks:ke) :: cvm, te8 - + ! ----------------------------------------------------------------------- ! initialization ! ----------------------------------------------------------------------- - + cond = 0 - + ! ----------------------------------------------------------------------- ! calculate moist heat capacity and latent heat coefficients ! ----------------------------------------------------------------------- - + call cal_mhc_lhc (ks, ke, qv, ql, qr, qi, qs, qg, q_liq, q_sol, cvm, te8, tz, & lcpk, icpk, tcpk, tcp3) - + do k = ks, ke - + ! ----------------------------------------------------------------------- ! fix negative solid-phase hydrometeors ! ----------------------------------------------------------------------- - + ! if cloud ice < 0, borrow from snow if (qi (k) .lt. 0.) then sink = min (- qi (k), max (0., qs (k))) call update_qq (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & 0., 0., 0., sink, - sink, 0.) endif - + ! if snow < 0, borrow from graupel if (qs (k) .lt. 0.) then sink = min (- qs (k), max (0., qg (k))) call update_qq (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & 0., 0., 0., 0., sink, - sink) endif - + ! if graupel < 0, borrow from rain if (qg (k) .lt. 0.) then sink = min (- qg (k), max (0., qr (k))) @@ -1801,18 +1804,18 @@ subroutine neg_adj (ks, ke, tz, dp, qv, ql, qr, qi, qs, qg, cond) 0., 0., - sink, 0., 0., sink, te8 (k), cvm (k), tz (k), & lcpk (k), icpk (k), tcpk (k), tcp3 (k)) endif - + ! ----------------------------------------------------------------------- ! fix negative liquid-phase hydrometeors ! ----------------------------------------------------------------------- - + ! if rain < 0, borrow from cloud water if (qr (k) .lt. 0.) then sink = min (- qr (k), max (0., ql (k))) call update_qq (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & 0., - sink, sink, 0., 0., 0.) endif - + ! if cloud water < 0, borrow from water vapor if (ql (k) .lt. 0.) then sink = min (- ql (k), max (0., qv (k))) @@ -1821,13 +1824,13 @@ subroutine neg_adj (ks, ke, tz, dp, qv, ql, qr, qi, qs, qg, cond) - sink, sink, 0., 0., 0., 0., te8 (k), cvm (k), tz (k), & lcpk (k), icpk (k), tcpk (k), tcp3 (k)) endif - + enddo - + ! ----------------------------------------------------------------------- ! fix negative water vapor ! ----------------------------------------------------------------------- - + ! if water vapor < 0, borrow water vapor from below do k = ks, ke - 1 if (qv (k) .lt. 0.) then @@ -1835,14 +1838,14 @@ subroutine neg_adj (ks, ke, tz, dp, qv, ql, qr, qi, qs, qg, cond) qv (k) = 0. endif enddo - + ! if water vapor < 0, borrow water vapor from above if (qv (ke) .lt. 0. .and. qv (ke - 1) .gt. 0.) 
then dq = min (- qv (ke) * dp (ke), qv (ke - 1) * dp (ke - 1)) qv (ke - 1) = qv (ke - 1) - dq / dp (ke - 1) qv (ke) = qv (ke) + dq / dp (ke) endif - + end subroutine neg_adj ! ======================================================================= @@ -1852,93 +1855,99 @@ end subroutine neg_adj subroutine mp_full (ks, ke, ntimes, tz, qv, ql, qr, qi, qs, qg, dp, dz, u, v, w, & den, denfac, ccn, cin, dts, rh_adj, rh_rain, h_var, dte, water, rain, ice, & snow, graupel, prefluxw, prefluxr, prefluxi, prefluxs, prefluxg, & - condensation, deposition, evaporation, sublimation, convt) - + condensation, deposition, evaporation, sublimation, convt, last_step) + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + + logical, intent (in) :: last_step + integer, intent (in) :: ks, ke, ntimes - + real, intent (in) :: dts, rh_adj, rh_rain, h_var, convt - + real, intent (in), dimension (ks:ke) :: dp, dz, den, denfac - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg, u, v, w, ccn, cin real, intent (inout), dimension (ks:ke) :: prefluxw, prefluxr, prefluxi, prefluxs, prefluxg - + real (kind = r8), intent (inout), dimension (ks:ke) :: tz - + real, intent (inout) :: water, rain, ice, snow, graupel real, intent (inout) :: condensation, deposition real, intent (inout) :: evaporation, sublimation - + real (kind = r8), intent (inout) :: dte - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: n - + real :: w1, r1, i1, s1, g1, cond, dep, reevap, sub - + real, dimension (ks:ke) :: vtw, vtr, vti, vts, vtg, pfw, pfr, pfi, pfs, pfg - + do n = 1, ntimes - + ! ----------------------------------------------------------------------- ! sedimentation of cloud ice, snow, graupel or hail, and rain ! ----------------------------------------------------------------------- - + call sedimentation (dts, ks, ke, tz, qv, ql, qr, qi, qs, qg, & dz, dp, vtw, vtr, vti, vts, vtg, w1, r1, i1, s1, g1, pfw, pfr, pfi, pfs, pfg, & u, v, w, den, denfac, dte) - + water = water + w1 * convt rain = rain + r1 * convt ice = ice + i1 * convt snow = snow + s1 * convt graupel = graupel + g1 * convt - + prefluxw = prefluxw + pfw * convt prefluxr = prefluxr + pfr * convt prefluxi = prefluxi + pfi * convt prefluxs = prefluxs + pfs * convt prefluxg = prefluxg + pfg * convt - + ! ----------------------------------------------------------------------- ! warm rain cloud microphysics ! ----------------------------------------------------------------------- - + call warm_rain (dts, ks, ke, dp, dz, tz, qv, ql, qr, qi, qs, qg, & den, denfac, vtw, vtr, ccn, rh_rain, h_var, reevap) - + evaporation = evaporation + reevap * convt - + ! ----------------------------------------------------------------------- ! ice cloud microphysics ! ----------------------------------------------------------------------- - + call ice_cloud (ks, ke, tz, qv, ql, qr, qi, qs, qg, den, & denfac, vtw, vtr, vti, vts, vtg, dts, h_var) - - ! ----------------------------------------------------------------------- - ! temperature sentive high vertical resolution processes - ! 
----------------------------------------------------------------------- - - call subgrid_z_proc (ks, ke, den, denfac, dts, rh_adj, tz, qv, ql, & - qr, qi, qs, qg, dp, ccn, cin, cond, dep, reevap, sub) - - condensation = condensation + cond * convt - deposition = deposition + dep * convt - evaporation = evaporation + reevap * convt - sublimation = sublimation + sub * convt - + + if (do_subgrid_proc) then + + ! ----------------------------------------------------------------------- + ! temperature sentive high vertical resolution processes + ! ----------------------------------------------------------------------- + + call subgrid_z_proc (ks, ke, den, denfac, dts, rh_adj, tz, qv, ql, & + qr, qi, qs, qg, dp, ccn, cin, cond, dep, reevap, sub, last_step) + + condensation = condensation + cond * convt + deposition = deposition + dep * convt + evaporation = evaporation + reevap * convt + sublimation = sublimation + sub * convt + + endif + enddo - + end subroutine mp_full ! ======================================================================= @@ -1946,146 +1955,177 @@ end subroutine mp_full ! ======================================================================= subroutine mp_fast (ks, ke, tz, qv, ql, qr, qi, qs, qg, dtm, dp, den, & - ccn, cin, condensation, deposition, evaporation, sublimation, convt) - + ccn, cin, condensation, deposition, evaporation, sublimation, & + denfac, convt, last_step) + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + + logical, intent (in) :: last_step + integer, intent (in) :: ks, ke - + real, intent (in) :: dtm, convt - - real, intent (in), dimension (ks:ke) :: dp, den - + + real, intent (in), dimension (ks:ke) :: dp, den, denfac + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg, ccn, cin - + real (kind = r8), intent (inout), dimension (ks:ke) :: tz - + real, intent (inout) :: condensation, deposition real, intent (inout) :: evaporation, sublimation - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + + logical :: cond_evap + + integer :: n + real :: cond, dep, reevap, sub - + real, dimension (ks:ke) :: q_liq, q_sol, lcpk, icpk, tcpk, tcp3 - + real (kind = r8), dimension (ks:ke) :: cvm, te8 - + ! ----------------------------------------------------------------------- ! initialization ! ----------------------------------------------------------------------- - + cond = 0 dep = 0 reevap = 0 sub = 0 - + ! ----------------------------------------------------------------------- ! calculate heat capacities and latent heat coefficients ! ----------------------------------------------------------------------- - + call cal_mhc_lhc (ks, ke, qv, ql, qr, qi, qs, qg, q_liq, q_sol, cvm, te8, tz, & lcpk, icpk, tcpk, tcp3) - + if (.not. do_warm_rain_mp) then - + ! ----------------------------------------------------------------------- ! cloud ice melting to form cloud water and rain ! ----------------------------------------------------------------------- - + call pimlt (ks, ke, dtm, qv, ql, qr, qi, qs, qg, tz, cvm, te8, & lcpk, icpk, tcpk, tcp3) - + ! ----------------------------------------------------------------------- ! enforce complete freezing below t_wfr ! 
----------------------------------------------------------------------- - + call pcomp (ks, ke, qv, ql, qr, qi, qs, qg, tz, cvm, te8, & lcpk, icpk, tcpk, tcp3) - + endif - + ! ----------------------------------------------------------------------- ! cloud water condensation and evaporation ! ----------------------------------------------------------------------- - - call pcond_pevap (ks, ke, dtm, qv, ql, qr, qi, qs, qg, tz, dp, cvm, te8, den, & - lcpk, icpk, tcpk, tcp3, cond, reevap) - + + if (delay_cond_evap) then + cond_evap = last_step + else + cond_evap = .true. + endif + + if (cond_evap) then + do n = 1, nconds + call pcond_pevap (ks, ke, dtm, qv, ql, qr, qi, qs, qg, tz, dp, cvm, te8, den, & + lcpk, icpk, tcpk, tcp3, cond, reevap) + enddo + endif + condensation = condensation + cond * convt evaporation = evaporation + reevap * convt - + if (.not. do_warm_rain_mp) then - + ! ----------------------------------------------------------------------- ! cloud water freezing to form cloud ice and snow ! ----------------------------------------------------------------------- - + call pifr (ks, ke, qv, ql, qr, qi, qs, qg, tz, cvm, te8, den, & lcpk, icpk, tcpk, tcp3) - + ! ----------------------------------------------------------------------- ! Wegener Bergeron Findeisen process ! ----------------------------------------------------------------------- - + call pwbf (ks, ke, dtm, qv, ql, qr, qi, qs, qg, tz, cvm, te8, den, & lcpk, icpk, tcpk, tcp3) - + ! ----------------------------------------------------------------------- ! Bigg freezing mechanism ! ----------------------------------------------------------------------- - + call pbigg (ks, ke, dtm, qv, ql, qr, qi, qs, qg, tz, cvm, te8, den, ccn, & lcpk, icpk, tcpk, tcp3) - + ! ----------------------------------------------------------------------- ! rain freezing to form graupel ! ----------------------------------------------------------------------- - + call pgfr_simp (ks, ke, dtm, qv, ql, qr, qi, qs, qg, tz, cvm, te8, & lcpk, icpk, tcpk, tcp3) - + ! ----------------------------------------------------------------------- ! snow melting to form cloud water and rain ! ----------------------------------------------------------------------- - + call psmlt_simp (ks, ke, dtm, qv, ql, qr, qi, qs, qg, tz, cvm, te8, & lcpk, icpk, tcpk, tcp3) - + endif - + ! ----------------------------------------------------------------------- ! cloud water to rain autoconversion ! ----------------------------------------------------------------------- - + call praut_simp (ks, ke, dtm, tz, qv, ql, qr, qi, qs, qg) - + if (.not. do_warm_rain_mp) then - + ! ----------------------------------------------------------------------- ! cloud ice deposition and sublimation ! ----------------------------------------------------------------------- - + call pidep_pisub (ks, ke, dtm, qv, ql, qr, qi, qs, qg, tz, dp, cvm, te8, den, & lcpk, icpk, tcpk, tcp3, cin, dep, sub) - + deposition = deposition + dep * convt sublimation = sublimation + sub * convt - + ! ----------------------------------------------------------------------- ! cloud ice to snow autoconversion ! ----------------------------------------------------------------------- - + call psaut_simp (ks, ke, dtm, qv, ql, qr, qi, qs, qg, tz, den) - + + ! ----------------------------------------------------------------------- + ! snow deposition and sublimation + ! 
----------------------------------------------------------------------- + + call psdep_pssub (ks, ke, dtm, qv, ql, qr, qi, qs, qg, tz, dp, cvm, te8, den, & + denfac, lcpk, icpk, tcpk, tcp3, dep, sub) + + ! ----------------------------------------------------------------------- + ! graupel deposition and sublimation + ! ----------------------------------------------------------------------- + + call pgdep_pgsub (ks, ke, dtm, qv, ql, qr, qi, qs, qg, tz, dp, cvm, te8, den, & + denfac, lcpk, icpk, tcpk, tcp3, dep, sub) + endif - + end subroutine mp_fast ! ======================================================================= @@ -2095,51 +2135,51 @@ end subroutine mp_fast subroutine sedimentation (dts, ks, ke, tz, qv, ql, qr, qi, qs, qg, dz, dp, & vtw, vtr, vti, vts, vtg, w1, r1, i1, s1, g1, pfw, pfr, pfi, pfs, pfg, & u, v, w, den, denfac, dte) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts - + real, intent (in), dimension (ks:ke) :: dp, dz, den, denfac - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg, u, v, w - + real, intent (out) :: w1, r1, i1, s1, g1 - + real, intent (out), dimension (ks:ke) :: vtw, vtr, vti, vts, vtg, pfw, pfr, pfi, pfs, pfg - + real (kind = r8), intent (inout) :: dte - + real (kind = r8), intent (inout), dimension (ks:ke) :: tz - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real, dimension (ks:ke) :: q_liq, q_sol, lcpk, icpk, tcpk, tcp3 - + real (kind = r8), dimension (ks:ke) :: te8, cvm - + w1 = 0. r1 = 0. i1 = 0. s1 = 0. g1 = 0. - + vtw = 0. vtr = 0. vti = 0. vts = 0. vtg = 0. - + pfw = 0. pfr = 0. pfi = 0. @@ -2149,28 +2189,28 @@ subroutine sedimentation (dts, ks, ke, tz, qv, ql, qr, qi, qs, qg, dz, dp, & ! ----------------------------------------------------------------------- ! calculate heat capacities and latent heat coefficients ! ----------------------------------------------------------------------- - + call cal_mhc_lhc (ks, ke, qv, ql, qr, qi, qs, qg, q_liq, q_sol, cvm, te8, tz, & lcpk, icpk, tcpk, tcp3) - + ! ----------------------------------------------------------------------- ! terminal fall and melting of falling cloud ice into rain ! ----------------------------------------------------------------------- - + if (do_psd_ice_fall) then call term_rsg (ks, ke, qi, den, denfac, vi_fac, blini, mui, tvai, tvbi, vi_max, const_vi, vti) else call term_ice (ks, ke, tz, qi, den, vi_fac, vi_max, const_vi, vti) endif - + if (do_sedi_melt) then call sedi_melt (dts, ks, ke, tz, qv, ql, qr, qi, qs, qg, dz, dp, & vti, r1, tau_imlt, icpk, "qi") endif - + call terminal_fall (dts, ks, ke, tz, qv, ql, qr, qi, qs, qg, dz, dp, & vti, i1, pfi, u, v, w, dte, "qi") - + pfi (ks) = max (0.0, pfi (ks)) do k = ke, ks + 1, -1 pfi (k) = max (0.0, pfi (k) - pfi (k - 1)) @@ -2179,17 +2219,17 @@ subroutine sedimentation (dts, ks, ke, tz, qv, ql, qr, qi, qs, qg, dz, dp, & ! ----------------------------------------------------------------------- ! terminal fall and melting of falling snow into rain ! 
----------------------------------------------------------------------- - + call term_rsg (ks, ke, qs, den, denfac, vs_fac, blins, mus, tvas, tvbs, vs_max, const_vs, vts) - + if (do_sedi_melt) then call sedi_melt (dts, ks, ke, tz, qv, ql, qr, qi, qs, qg, dz, dp, & vts, r1, tau_smlt, icpk, "qs") endif - + call terminal_fall (dts, ks, ke, tz, qv, ql, qr, qi, qs, qg, dz, dp, & vts, s1, pfs, u, v, w, dte, "qs") - + pfs (ks) = max (0.0, pfs (ks)) do k = ke, ks + 1, -1 pfs (k) = max (0.0, pfs (k) - pfs (k - 1)) @@ -2198,34 +2238,34 @@ subroutine sedimentation (dts, ks, ke, tz, qv, ql, qr, qi, qs, qg, dz, dp, & ! ----------------------------------------------------------------------- ! terminal fall and melting of falling graupel into rain ! ----------------------------------------------------------------------- - + if (do_hail) then call term_rsg (ks, ke, qg, den, denfac, vg_fac, blinh, muh, tvah, tvbh, vg_max, const_vg, vtg) else call term_rsg (ks, ke, qg, den, denfac, vg_fac, bling, mug, tvag, tvbg, vg_max, const_vg, vtg) endif - + if (do_sedi_melt) then call sedi_melt (dts, ks, ke, tz, qv, ql, qr, qi, qs, qg, dz, dp, & vtg, r1, tau_gmlt, icpk, "qg") endif - + call terminal_fall (dts, ks, ke, tz, qv, ql, qr, qi, qs, qg, dz, dp, & vtg, g1, pfg, u, v, w, dte, "qg") - + pfg (ks) = max (0.0, pfg (ks)) do k = ke, ks + 1, -1 pfg (k) = max (0.0, pfg (k) - pfg (k - 1)) enddo - + ! ----------------------------------------------------------------------- ! terminal fall of cloud water ! ----------------------------------------------------------------------- - + if (do_psd_water_fall) then call term_rsg (ks, ke, ql, den, denfac, vw_fac, blinw, muw, tvaw, tvbw, vw_max, const_vw, vtw) - + call terminal_fall (dts, ks, ke, tz, qv, ql, qr, qi, qs, qg, dz, dp, & vtw, w1, pfw, u, v, w, dte, "ql") @@ -2235,16 +2275,16 @@ subroutine sedimentation (dts, ks, ke, tz, qv, ql, qr, qi, qs, qg, dz, dp, & enddo endif - + ! ----------------------------------------------------------------------- ! terminal fall of rain ! ----------------------------------------------------------------------- - + call term_rsg (ks, ke, qr, den, denfac, vr_fac, blinr, mur, tvar, tvbr, vr_max, const_vr, vtr) - + call terminal_fall (dts, ks, ke, tz, qv, ql, qr, qi, qs, qg, dz, dp, & vtr, r1, pfr, u, v, w, dte, "qr") - + pfr (ks) = max (0.0, pfr (ks)) do k = ke, ks + 1, -1 pfr (k) = max (0.0, pfr (k) - pfr (k - 1)) @@ -2257,41 +2297,41 @@ end subroutine sedimentation ! ======================================================================= subroutine term_ice (ks, ke, tz, q, den, v_fac, v_max, const_v, vt) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + logical, intent (in) :: const_v - + real, intent (in) :: v_fac, v_max - + real, intent (in), dimension (ks:ke) :: q, den - + real (kind = r8), intent (in), dimension (ks:ke) :: tz - + real, intent (out), dimension (ks:ke) :: vt - + ! ----------------------------------------------------------------------- ! local variables ! 
----------------------------------------------------------------------- - + integer :: k - + real :: qden - + real, parameter :: aa = - 4.14122e-5 real, parameter :: bb = - 0.00538922 real, parameter :: cc = - 0.0516344 real, parameter :: dd = 0.00216078 real, parameter :: ee = 1.9714 - + real, dimension (ks:ke) :: tc - + if (const_v) then vt (:) = v_fac else @@ -2312,7 +2352,7 @@ subroutine term_ice (ks, ke, tz, q, den, v_fac, v_max, const_v, vt) endif enddo endif - + end subroutine term_ice ! ======================================================================= @@ -2320,31 +2360,31 @@ end subroutine term_ice ! ======================================================================= subroutine term_rsg (ks, ke, q, den, denfac, v_fac, blin, mu, tva, tvb, v_max, const_v, vt) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + logical, intent (in) :: const_v - + real, intent (in) :: v_fac, blin, v_max, mu - + real (kind = r8), intent (in) :: tva, tvb - + real, intent (in), dimension (ks:ke) :: q, den, denfac - + real, intent (out), dimension (ks:ke) :: vt - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + if (const_v) then vt (:) = v_fac else @@ -2359,7 +2399,7 @@ subroutine term_rsg (ks, ke, q, den, denfac, v_fac, blin, mu, tva, tvb, v_max, c endif enddo endif - + end subroutine term_rsg ! ======================================================================= @@ -2368,43 +2408,43 @@ end subroutine term_rsg subroutine sedi_melt (dts, ks, ke, tz, qv, ql, qr, qi, qs, qg, dz, dp, & vt, r1, tau_mlt, icpk, qflag) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts, tau_mlt - + real, intent (in), dimension (ks:ke) :: vt, dp, dz, icpk - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg - + real, intent (inout) :: r1 - + real (kind = r8), intent (inout), dimension (ks:ke) :: tz - + character (len = 2), intent (in) :: qflag - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k, m - + real :: dtime, sink, zs - + real, dimension (ks:ke) :: q - + real, dimension (ks:ke + 1) :: ze, zt - + real (kind = r8), dimension (ks:ke) :: cvm - + call zezt (ks, ke, dts, zs, dz, vt, ze, zt) - + select case (qflag) case ("qi") q = qi @@ -2415,11 +2455,11 @@ subroutine sedi_melt (dts, ks, ke, tz, qv, ql, qr, qi, qs, qg, dz, dp, & case default print *, "gfdl_mp: qflag error!" end select - + ! ----------------------------------------------------------------------- ! melting to rain ! ----------------------------------------------------------------------- - + do k = ke - 1, ks, - 1 if (vt (k) .lt. 1.e-10) cycle if (q (k) .gt. qcmin) then @@ -2456,7 +2496,7 @@ subroutine sedi_melt (dts, ks, ke, tz, qv, ql, qr, qi, qs, qg, dz, dp, & enddo endif enddo - + end subroutine sedi_melt ! 
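! -----------------------------------------------------------------------
! Editor's note: illustrative sketch only, not part of the patch. It shows
! the melting-sink capping pattern that recurs in sedi_melt, pimlt, and
! psmlt above: the frozen condensate melted in one sub-step is limited by
! (1) what is available, (2) the relaxation rate times the sub-step, and
! (3) the heat available before the layer would cool back to freezing,
! tc / icpk. The values of icpk, tau_mlt, etc. below are stand-ins for the
! quantities computed by cal_mhc_lhc and the namelist in the real code.
! -----------------------------------------------------------------------
program melt_cap_sketch
  implicit none
  real :: qs_k, tc, icpk_k, dts, tau_mlt, fac_mlt, sink
  qs_k    = 1.0e-3     ! snow mixing ratio (kg/kg), made up
  tc      = 1.5        ! degrees above freezing, made up
  icpk_k  = 350.0      ! latent heat of fusion over moist heat capacity, made up
  dts     = 150.0      ! microphysics sub-time step (s)
  tau_mlt = 600.0      ! assumed melting time scale (s)
  fac_mlt = 1. - exp (- dts / tau_mlt)
  ! melt no more than is present, than the time scale allows, or than the
  ! available heat permits without dropping the layer below freezing
  sink = min (qs_k, fac_mlt * qs_k, tc / icpk_k)
  print *, "melted this sub-step (kg/kg, sketch): ", sink
end program melt_cap_sketch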
======================================================================= @@ -2465,51 +2505,51 @@ end subroutine sedi_melt subroutine terminal_fall (dts, ks, ke, tz, qv, ql, qr, qi, qs, qg, dz, dp, & vt, x1, m1, u, v, w, dte, qflag) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts - + real, intent (in), dimension (ks:ke) :: vt, dp, dz - + character (len = 2), intent (in) :: qflag - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg, u, v, w - + real, intent (inout) :: x1 - + real (kind = r8), intent (inout) :: dte - + real (kind = r8), intent (inout), dimension (ks:ke) :: tz - + real, intent (out), dimension (ks:ke) :: m1 - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + logical :: no_fall - + real :: zs - + real, dimension (ks:ke) :: dm, q - + real, dimension (ks:ke + 1) :: ze, zt - + real (kind = r8), dimension (ks:ke) :: te1, te2 m1 = 0.0 - + call zezt (ks, ke, dts, zs, dz, vt, ze, zt) - + select case (qflag) case ("ql") q = ql @@ -2524,33 +2564,33 @@ subroutine terminal_fall (dts, ks, ke, tz, qv, ql, qr, qi, qs, qg, dz, dp, & case default print *, "gfdl_mp: qflag error!" end select - + call check_column (ks, ke, q, no_fall) - + if (no_fall) return - + ! ----------------------------------------------------------------------- ! momentum transportation during sedimentation ! ----------------------------------------------------------------------- - + if (do_sedi_w) then do k = ks, ke dm (k) = dp (k) * (1. + qv (k) + ql (k) + qr (k) + qi (k) + qs (k) + qg (k)) enddo endif - + ! ----------------------------------------------------------------------- ! energy change during sedimentation ! ----------------------------------------------------------------------- - + do k = ks, ke te1 (k) = mte (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), tz (k), dp (k), .false.) enddo - + ! ----------------------------------------------------------------------- ! sedimentation ! ----------------------------------------------------------------------- - + select case (qflag) case ("ql") q = ql @@ -2575,7 +2615,7 @@ subroutine terminal_fall (dts, ks, ke, tz, qv, ql, qr, qi, qs, qg, dz, dp, & if (sedflag .eq. 4) & call implicit_lagrangian_fall (dts, ks, ke, zs, ze, zt, vt, dp, q, & x1, m1, sed_fac) - + select case (qflag) case ("ql") ql = q @@ -2590,53 +2630,53 @@ subroutine terminal_fall (dts, ks, ke, tz, qv, ql, qr, qi, qs, qg, dz, dp, & case default print *, "gfdl_mp: qflag error!" end select - + ! ----------------------------------------------------------------------- ! energy change during sedimentation ! ----------------------------------------------------------------------- - + do k = ks, ke te2 (k) = mte (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), tz (k), dp (k), .false.) enddo dte = dte + sum (te1) - sum (te2) - + ! ----------------------------------------------------------------------- ! momentum transportation during sedimentation ! ----------------------------------------------------------------------- - + if (do_sedi_uv) then call sedi_uv (ks, ke, m1, dp, u, v) endif - + if (do_sedi_w) then call sedi_w (ks, ke, m1, w, vt, dm) endif - + ! ----------------------------------------------------------------------- ! 
energy change during sedimentation heating ! ----------------------------------------------------------------------- - + do k = ks, ke te1 (k) = mte (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), tz (k), dp (k), .false.) enddo - + ! ----------------------------------------------------------------------- ! heat exchanges during sedimentation ! ----------------------------------------------------------------------- - + if (do_sedi_heat) then call sedi_heat (ks, ke, dp, m1, dz, tz, qv, ql, qr, qi, qs, qg, c_ice) endif - + ! ----------------------------------------------------------------------- ! energy change during sedimentation heating ! ----------------------------------------------------------------------- - + do k = ks, ke te2 (k) = mte (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), tz (k), dp (k), .false.) enddo dte = dte + sum (te1) - sum (te2) - + end subroutine terminal_fall ! ======================================================================= @@ -2644,31 +2684,31 @@ end subroutine terminal_fall ! ======================================================================= subroutine zezt (ks, ke, dts, zs, dz, vt, ze, zt) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts - + real, intent (in), dimension (ks:ke) :: dz, vt - + real, intent (out) :: zs - + real, intent (out), dimension (ks:ke + 1) :: ze, zt - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: dt5 - + dt5 = 0.5 * dts zs = 0.0 ze (ke + 1) = zs @@ -2683,7 +2723,7 @@ subroutine zezt (ks, ke, dts, zs, dz, vt, ze, zt) do k = ks, ke if (zt (k + 1) .ge. zt (k)) zt (k + 1) = zt (k) - dz_min enddo - + end subroutine zezt ! ======================================================================= @@ -2691,34 +2731,34 @@ end subroutine zezt ! ======================================================================= subroutine check_column (ks, ke, q, no_fall) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: q (ks:ke) - + logical, intent (out) :: no_fall - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + no_fall = .true. - + do k = ks, ke if (q (k) .gt. qfmin) then no_fall = .false. exit endif enddo - + end subroutine check_column ! ======================================================================= @@ -2727,49 +2767,49 @@ end subroutine check_column subroutine warm_rain (dts, ks, ke, dp, dz, tz, qv, ql, qr, qi, qs, qg, & den, denfac, vtw, vtr, ccn, rh_rain, h_var, reevap) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! 
----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts, rh_rain, h_var - + real, intent (in), dimension (ks:ke) :: dp, dz, den, denfac, vtw, vtr - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg, ccn - + real (kind = r8), intent (inout), dimension (ks:ke) :: tz - + real, intent (out) :: reevap - + ! ----------------------------------------------------------------------- ! initialization ! ----------------------------------------------------------------------- - + reevap = 0 - + ! ----------------------------------------------------------------------- ! rain evaporation to form water vapor ! ----------------------------------------------------------------------- - + call prevp (ks, ke, dts, tz, qv, ql, qr, qi, qs, qg, den, denfac, rh_rain, h_var, dp, reevap) - + ! ----------------------------------------------------------------------- ! rain accretion with cloud water ! ----------------------------------------------------------------------- - + call pracw (ks, ke, dts, tz, qv, ql, qr, qi, qs, qg, den, denfac, vtw, vtr) - + ! ----------------------------------------------------------------------- ! cloud water to rain autoconversion ! ----------------------------------------------------------------------- - + call praut (ks, ke, dts, tz, qv, ql, qr, qi, qs, qg, den, ccn, h_var) - + end subroutine warm_rain ! ======================================================================= @@ -2777,83 +2817,83 @@ end subroutine warm_rain ! ======================================================================= subroutine prevp (ks, ke, dts, tz, qv, ql, qr, qi, qs, qg, den, denfac, rh_rain, h_var, dp, reevap) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts, rh_rain, h_var - + real, intent (in), dimension (ks:ke) :: den, denfac, dp - + real (kind = r8), intent (inout), dimension (ks:ke) :: tz - + real, intent (inout), dimension (ks:ke) :: qv, qr, ql, qi, qs, qg - + real, intent (out) :: reevap - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: dqv, qsat, dqdt, tmp, t2, qden, q_plus, q_minus, sink real :: qpz, dq, dqh, tin, fac_revp, rh_tem - + real, dimension (ks:ke) :: q_liq, q_sol, lcpk, icpk, tcpk, tcp3 - + real (kind = r8), dimension (ks:ke) :: cvm, te8 - + ! ----------------------------------------------------------------------- ! initialization ! ----------------------------------------------------------------------- - + reevap = 0 - + ! ----------------------------------------------------------------------- ! time-scale factor ! ----------------------------------------------------------------------- - + fac_revp = 1. if (tau_revp .gt. 1.e-6) then fac_revp = 1. - exp (- dts / tau_revp) endif - + ! ----------------------------------------------------------------------- ! calculate heat capacities and latent heat coefficients ! ----------------------------------------------------------------------- - + call cal_mhc_lhc (ks, ke, qv, ql, qr, qi, qs, qg, q_liq, q_sol, cvm, te8, tz, & lcpk, icpk, tcpk, tcp3) - + do k = ks, ke - + tin = (tz (k) * cvm (k) - lv00 * ql (k)) / mhc (qv (k) + ql (k), qr (k), q_sol (k)) - + ! 
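! Editor's note (not part of the patch): the statement above computes tin,
! effectively the temperature the layer would reach if all cloud water ql
! were evaporated: the latent heat term lv00 * ql is removed from the
! internal energy tz * cvm, and the moist heat capacity is re-evaluated
! with that water counted as vapor (qv + ql). tin is then used below to
! form the saturation deficit that drives rain evaporation.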
----------------------------------------------------------------------- ! calculate supersaturation and subgrid variability of water ! ----------------------------------------------------------------------- - + qpz = qv (k) + ql (k) qsat = wqs (tin, den (k), dqdt) dqv = qsat - qv (k) - + dqh = max (ql (k), h_var * max (qpz, qcmin)) dqh = min (dqh, 0.2 * qpz) q_minus = qpz - dqh q_plus = qpz + dqh - + ! ----------------------------------------------------------------------- ! rain evaporation ! ----------------------------------------------------------------------- - + rh_tem = qpz / qsat - + if (tz (k) .gt. t_wfr .and. qr (k) .gt. qcmin .and. dqv .gt. 0.0 .and. qsat .gt. q_minus) then if (qsat .gt. q_plus) then @@ -2868,23 +2908,23 @@ subroutine prevp (ks, ke, dts, tz, qv, ql, qr, qi, qs, qg, den, denfac, rh_rain, if (use_rhc_revap .and. rh_tem .ge. rhc_revap) then sink = 0.0 endif - + ! ----------------------------------------------------------------------- ! alternative minimum evaporation in dry environmental air ! ----------------------------------------------------------------------- ! tmp = min (qr (k), dim (rh_rain * qsat, qv (k)) / (1. + lcpk (k) * dqdt)) ! sink = max (sink, tmp) - + reevap = reevap + sink * dp (k) - + call update_qt (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & sink, 0., - sink, 0., 0., 0., te8 (k), cvm (k), tz (k), & lcpk (k), icpk (k), tcpk (k), tcp3 (k)) - + endif - + enddo ! k loop - + end subroutine prevp ! ======================================================================= @@ -2892,35 +2932,35 @@ end subroutine prevp ! ======================================================================= subroutine pracw (ks, ke, dts, tz, qv, ql, qr, qi, qs, qg, den, denfac, vtw, vtr) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts - + real, intent (in), dimension (ks:ke) :: den, denfac, vtw, vtr - + real (kind = r8), intent (inout), dimension (ks:ke) :: tz - + real, intent (inout), dimension (ks:ke) :: qv, qr, ql, qi, qs, qg - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: qden, sink - + do k = ks, ke - + if (tz (k) .gt. t_wfr .and. qr (k) .gt. qcmin .and. ql (k) .gt. qcmin) then - + qden = qr (k) * den (k) if (do_new_acc_water) then sink = dts * acr3d (vtr (k), vtw (k), ql (k), qr (k), cracw, acco (:, 5), & @@ -2929,14 +2969,14 @@ subroutine pracw (ks, ke, dts, tz, qv, ql, qr, qi, qs, qg, den, denfac, vtw, vtr sink = dts * acr2d (qden, cracw, denfac (k), blinr, mur) sink = sink / (1. + sink) * ql (k) endif - + call update_qq (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & 0., - sink, sink, 0., 0., 0.) - + endif - + enddo - + end subroutine pracw ! ======================================================================= @@ -2944,44 +2984,44 @@ end subroutine pracw ! ======================================================================= subroutine praut (ks, ke, dts, tz, qv, ql, qr, qi, qs, qg, den, ccn, h_var) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! 
----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts, h_var - + real, intent (in), dimension (ks:ke) :: den - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg, ccn - + real (kind = r8), intent (inout), dimension (ks:ke) :: tz - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + real, parameter :: so3 = 7.0 / 3.0 real, parameter :: so1 = - 1.0 / 3.0 - + integer :: k - + real :: sink, dq, qc - + real, dimension (ks:ke) :: dl, c_praut if (irain_f .eq. 0) then - + call linear_prof (ke - ks + 1, ql (ks), dl (ks), z_slope_liq, h_var) - + do k = ks, ke - + if (tz (k) .gt. t_wfr .and. ql (k) .gt. qcmin) then - + if (do_psd_water_num) then call cal_pc_ed_oe_rr_tv (ql (k), den (k), blinw, muw, & pca = pcaw, pcb = pcbw, pc = ccn (k)) @@ -2991,31 +3031,31 @@ subroutine praut (ks, ke, dts, tz, qv, ql, qr, qi, qs, qg, den, ccn, h_var) qc = fac_rc * ccn (k) dl (k) = min (max (qcmin, dl (k)), 0.5 * ql (k)) dq = 0.5 * (ql (k) + dl (k) - qc) - + if (dq .gt. 0.) then - + c_praut (k) = cpaut * exp (so1 * log (ccn (k) * rhow)) sink = min (1., dq / dl (k)) * dts * c_praut (k) * den (k) * & exp (so3 * log (ql (k))) sink = min (ql (k), sink) - + call update_qq (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & 0., - sink, sink, 0., 0., 0.) - + endif - + endif - + enddo - + endif - + if (irain_f .eq. 1) then - + do k = ks, ke - + if (tz (k) .gt. t_wfr .and. ql (k) .gt. qcmin) then - + if (do_psd_water_num) then call cal_pc_ed_oe_rr_tv (ql (k), den (k), blinw, muw, & pca = pcaw, pcb = pcbw, pc = ccn (k)) @@ -3024,144 +3064,144 @@ subroutine praut (ks, ke, dts, tz, qv, ql, qr, qi, qs, qg, den, ccn, h_var) qc = fac_rc * ccn (k) dq = ql (k) - qc - + if (dq .gt. 0.) then - + c_praut (k) = cpaut * exp (so1 * log (ccn (k) * rhow)) sink = min (dq, dts * c_praut (k) * den (k) * exp (so3 * log (ql (k)))) sink = min (ql (k), sink) - + call update_qq (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & 0., - sink, sink, 0., 0., 0.) - + endif - + endif - + enddo - + endif end subroutine praut - + ! ======================================================================= ! ice cloud microphysics ! ======================================================================= subroutine ice_cloud (ks, ke, tz, qv, ql, qr, qi, qs, qg, den, & denfac, vtw, vtr, vti, vts, vtg, dts, h_var) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts, h_var - + real, intent (in), dimension (ks:ke) :: den, denfac, vtw, vtr, vti, vts, vtg - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg - + real (kind = r8), intent (inout), dimension (ks:ke) :: tz - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + real, dimension (ks:ke) :: di, q_liq, q_sol, lcpk, icpk, tcpk, tcp3 - + real (kind = r8), dimension (ks:ke) :: cvm, te8 - + ! ----------------------------------------------------------------------- ! calculate heat capacities and latent heat coefficients ! 
----------------------------------------------------------------------- - + call cal_mhc_lhc (ks, ke, qv, ql, qr, qi, qs, qg, q_liq, q_sol, cvm, te8, tz, & lcpk, icpk, tcpk, tcp3) - + if (.not. do_warm_rain_mp) then - + ! ----------------------------------------------------------------------- ! cloud ice melting to form cloud water and rain ! ----------------------------------------------------------------------- - + call pimlt (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, cvm, te8, lcpk, icpk, tcpk, tcp3) - + ! ----------------------------------------------------------------------- ! cloud water freezing to form cloud ice and snow ! ----------------------------------------------------------------------- - + call pifr (ks, ke, qv, ql, qr, qi, qs, qg, tz, cvm, te8, den, lcpk, icpk, tcpk, tcp3) - + ! ----------------------------------------------------------------------- ! vertical subgrid variability ! ----------------------------------------------------------------------- - + call linear_prof (ke - ks + 1, qi, di, z_slope_ice, h_var) - + ! ----------------------------------------------------------------------- ! snow melting (includes snow accretion with cloud water and rain) to form cloud water and rain ! ----------------------------------------------------------------------- - + call psmlt (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, cvm, te8, den, denfac, & vtw, vtr, vts, lcpk, icpk, tcpk, tcp3) - + ! ----------------------------------------------------------------------- ! graupel melting (includes graupel accretion with cloud water and rain) to form rain ! ----------------------------------------------------------------------- - + call pgmlt (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, cvm, te8, den, denfac, & vtw, vtr, vtg, lcpk, icpk, tcpk, tcp3) - + ! ----------------------------------------------------------------------- ! snow accretion with cloud ice ! ----------------------------------------------------------------------- - + call psaci (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, den, denfac, vti, vts) - + ! ----------------------------------------------------------------------- ! cloud ice to snow autoconversion ! ----------------------------------------------------------------------- - + call psaut (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, den, di) - + ! ----------------------------------------------------------------------- ! graupel accretion with cloud ice ! ----------------------------------------------------------------------- - + call pgaci (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, den, denfac, vti, vtg) - + ! ----------------------------------------------------------------------- ! snow accretion with rain and rain freezing to form graupel ! ----------------------------------------------------------------------- - + call psacr_pgfr (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, cvm, te8, den, denfac, & vtr, vts, lcpk, icpk, tcpk, tcp3) - + ! ----------------------------------------------------------------------- ! graupel accretion with snow ! ----------------------------------------------------------------------- - + call pgacs (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, den, vts, vtg) - + ! ----------------------------------------------------------------------- ! snow to graupel autoconversion ! ----------------------------------------------------------------------- - + call pgaut (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, den) - + ! ----------------------------------------------------------------------- ! graupel accretion with cloud water and rain ! 
----------------------------------------------------------------------- - + call pgacw_pgacr (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, cvm, te8, den, denfac, & vtr, vtg, lcpk, icpk, tcpk, tcp3) - + endif ! do_warm_rain_mp - + end subroutine ice_cloud ! ======================================================================= @@ -3169,52 +3209,52 @@ end subroutine ice_cloud ! ======================================================================= subroutine pimlt (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, cvm, te8, lcpk, icpk, tcpk, tcp3) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts - + real (kind = r8), intent (in), dimension (ks:ke) :: te8 - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg real, intent (inout), dimension (ks:ke) :: lcpk, icpk, tcpk, tcp3 - + real (kind = r8), intent (inout), dimension (ks:ke) :: cvm, tz - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: tc, tmp, sink, fac_imlt - + fac_imlt = 1. - exp (- dts / tau_imlt) - + do k = ks, ke - + tc = tz (k) - tice_mlt - + if (tc .gt. 0 .and. qi (k) .gt. qcmin) then - + sink = fac_imlt * tc / icpk (k) sink = min (qi (k), sink) tmp = min (sink, dim (ql_mlt, ql (k))) - + call update_qt (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & 0., tmp, sink - tmp, - sink, 0., 0., te8 (k), cvm (k), tz (k), & lcpk (k), icpk (k), tcpk (k), tcp3 (k)) - + endif - + enddo - + end subroutine pimlt ! ======================================================================= @@ -3222,51 +3262,51 @@ end subroutine pimlt ! ======================================================================= subroutine pifr (ks, ke, qv, ql, qr, qi, qs, qg, tz, cvm, te8, den, lcpk, icpk, tcpk, tcp3) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in), dimension (ks:ke) :: den - + real (kind = r8), intent (in), dimension (ks:ke) :: te8 - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg real, intent (inout), dimension (ks:ke) :: lcpk, icpk, tcpk, tcp3 - + real (kind = r8), intent (inout), dimension (ks:ke) :: cvm, tz - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: tc, tmp, sink, qim - + do k = ks, ke - + tc = t_wfr - tz (k) - + if (tc .gt. 0. .and. ql (k) .gt. qcmin) then - + sink = ql (k) * tc / dt_fr sink = min (ql (k), sink, tc / icpk (k)) qim = qi0_crt / den (k) tmp = min (sink, dim (qim, qi (k))) - + call update_qt (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & 0., - sink, 0., tmp, sink - tmp, 0., te8 (k), cvm (k), tz (k), & lcpk (k), icpk (k), tcpk (k), tcp3 (k)) - + endif - + enddo - + end subroutine pifr ! ======================================================================= @@ -3276,41 +3316,41 @@ end subroutine pifr subroutine psmlt (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, cvm, te8, den, denfac, & vtw, vtr, vts, lcpk, icpk, tcpk, tcp3) - + implicit none - + ! 
----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts - + real, intent (in), dimension (ks:ke) :: den, denfac, vtw, vtr, vts - + real (kind = r8), intent (in), dimension (ks:ke) :: te8 - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg real, intent (inout), dimension (ks:ke) :: lcpk, icpk, tcpk, tcp3 - + real (kind = r8), intent (inout), dimension (ks:ke) :: cvm, tz - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: tc, factor, tmp, sink, qden, dqdt, tin, dq, qsi real :: psacw, psacr, pracs - + do k = ks, ke - + tc = tz (k) - tice - + if (tc .ge. 0. .and. qs (k) .gt. qcmin) then - + psacw = 0. qden = qs (k) * den (k) if (ql (k) .gt. qcmin) then @@ -3322,7 +3362,7 @@ subroutine psmlt (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, cvm, te8, den, denfac psacw = factor / (1. + dts * factor) * ql (k) endif endif - + psacr = 0. pracs = 0. if (qr (k) .gt. qcmin) then @@ -3331,24 +3371,24 @@ subroutine psmlt (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, cvm, te8, den, denfac pracs = acr3d (vtr (k), vts (k), qs (k), qr (k), cracs, acco (:, 1), & acc (1), acc (2), den (k)) endif - + tin = tz (k) qsi = iqs (tin, den (k), dqdt) dq = qsi - qv (k) sink = max (0., pmlt (tc, dq, qden, psacw, psacr, csmlt, den (k), denfac (k), blins, mus, & lcpk (k), icpk (k), cvm (k))) - + sink = min (qs (k), (sink + pracs) * dts, tc / icpk (k)) tmp = min (sink, dim (qs_mlt, ql (k))) - + call update_qt (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & 0., tmp, sink - tmp, 0., - sink, 0., te8 (k), cvm (k), tz (k), & lcpk (k), icpk (k), tcpk (k), tcp3 (k)) - + endif - + enddo - + end subroutine psmlt ! ======================================================================= @@ -3358,41 +3398,41 @@ end subroutine psmlt subroutine pgmlt (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, cvm, te8, den, denfac, & vtw, vtr, vtg, lcpk, icpk, tcpk, tcp3) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts - + real, intent (in), dimension (ks:ke) :: den, denfac, vtw, vtr, vtg - + real (kind = r8), intent (in), dimension (ks:ke) :: te8 - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg real, intent (inout), dimension (ks:ke) :: lcpk, icpk, tcpk, tcp3 - + real (kind = r8), intent (inout), dimension (ks:ke) :: cvm, tz - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: tc, factor, sink, qden, dqdt, tin, dq, qsi real :: pgacw, pgacr - + do k = ks, ke - + tc = tz (k) - tice - + if (tc .ge. 0. .and. qg (k) .gt. qcmin) then - + pgacw = 0. qden = qg (k) * den (k) if (ql (k) .gt. qcmin) then @@ -3408,13 +3448,13 @@ subroutine pgmlt (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, cvm, te8, den, denfac pgacw = factor / (1. + dts * factor) * ql (k) endif endif - + pgacr = 0. if (qr (k) .gt. 
qcmin) then pgacr = min (acr3d (vtg (k), vtr (k), qr (k), qg (k), cgacr, acco (:, 3), & acc (5), acc (6), den (k)), qr (k) / dts) endif - + tin = tz (k) qsi = iqs (tin, den (k), dqdt) dq = qsi - qv (k) @@ -3425,17 +3465,17 @@ subroutine pgmlt (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, cvm, te8, den, denfac sink = max (0., pmlt (tc, dq, qden, pgacw, pgacr, cgmlt, den (k), denfac (k), & bling, mug, lcpk (k), icpk (k), cvm (k))) endif - + sink = min (qg (k), sink * dts, tc / icpk (k)) - + call update_qt (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & 0., 0., sink, 0., 0., - sink, te8 (k), cvm (k), tz (k), & lcpk (k), icpk (k), tcpk (k), tcp3 (k)) - + endif - + enddo - + end subroutine pgmlt ! ======================================================================= @@ -3443,37 +3483,37 @@ end subroutine pgmlt ! ======================================================================= subroutine psaci (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, den, denfac, vti, vts) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts - + real, intent (in), dimension (ks:ke) :: den, denfac, vti, vts - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg - + real (kind = r8), intent (inout), dimension (ks:ke) :: tz - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: tc, factor, sink, qden - + do k = ks, ke - + tc = tz (k) - tice - + if (tc .lt. 0. .and. qi (k) .gt. qcmin) then - + sink = 0. qden = qs (k) * den (k) if (qs (k) .gt. qcmin) then @@ -3485,16 +3525,16 @@ subroutine psaci (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, den, denfac, vti, vts sink = factor / (1. + factor) * qi (k) endif endif - + sink = min (fi2s_fac * qi (k), sink) - + call update_qq (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & 0., 0., 0., - sink, sink, 0.) - + endif - + enddo - + end subroutine psaci ! ======================================================================= @@ -3502,39 +3542,39 @@ end subroutine psaci ! ======================================================================= subroutine psaut (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, den, di) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts - + real, intent (in), dimension (ks:ke) :: den - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg, di - + real (kind = r8), intent (inout), dimension (ks:ke) :: tz - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: tc, sink, fac_i2s, q_plus, qim, dq, tmp - + fac_i2s = 1. - exp (- dts / tau_i2s) - + do k = ks, ke - + tc = tz (k) - tice - + if (tc .lt. 0. .and. qi (k) .gt. qcmin) then - + sink = 0. 
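! Editor's note (not part of the patch): the lines that follow convert cloud
! ice in excess of a threshold into snow. fac_i2s = 1. - exp (- dts / tau_i2s)
! relaxes the conversion over the time scale tau_i2s, and the factor
! exp (0.025 * tc) (tc < 0 here) reduces the conversion rate as the layer
! gets colder. dq estimates the portion of cloud ice above the threshold
! using the subgrid profile di, and the resulting sink is further capped
! by fi2s_fac * qi.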
tmp = fac_i2s * exp (0.025 * tc) di (k) = max (di (k), qcmin) @@ -3548,16 +3588,16 @@ subroutine psaut (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, den, di) endif sink = tmp * dq endif - + sink = min (fi2s_fac * qi (k), sink) - + call update_qq (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & 0., 0., 0., - sink, sink, 0.) - + endif - + enddo - + end subroutine psaut ! ======================================================================= @@ -3565,37 +3605,37 @@ end subroutine psaut ! ======================================================================= subroutine pgaci (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, den, denfac, vti, vtg) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts - + real, intent (in), dimension (ks:ke) :: den, denfac, vti, vtg - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg - + real (kind = r8), intent (inout), dimension (ks:ke) :: tz - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: tc, factor, sink, qden - + do k = ks, ke - + tc = tz (k) - tice - + if (tc .lt. 0. .and. qi (k) .gt. qcmin) then - + sink = 0. qden = qg (k) * den (k) if (qg (k) .gt. qcmin) then @@ -3611,16 +3651,16 @@ subroutine pgaci (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, den, denfac, vti, vtg sink = factor / (1. + factor) * qi (k) endif endif - + sink = min (fi2g_fac * qi (k), sink) - + call update_qq (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & 0., 0., 0., - sink, 0., sink) - + endif - + enddo - + end subroutine pgaci ! ======================================================================= @@ -3629,65 +3669,65 @@ end subroutine pgaci subroutine psacr_pgfr (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, cvm, te8, den, denfac, & vtr, vts, lcpk, icpk, tcpk, tcp3) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts - + real, intent (in), dimension (ks:ke) :: den, denfac, vtr, vts - + real (kind = r8), intent (in), dimension (ks:ke) :: te8 - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg real, intent (inout), dimension (ks:ke) :: lcpk, icpk, tcpk, tcp3 - + real (kind = r8), intent (inout), dimension (ks:ke) :: cvm, tz - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: tc, factor, sink real :: psacr, pgfr - + do k = ks, ke - + tc = tz (k) - tice - + if (tc .lt. 0. .and. qr (k) .gt. qcmin) then - + psacr = 0. if (qs (k) .gt. qcmin) then psacr = dts * acr3d (vts (k), vtr (k), qr (k), qs (k), csacr, acco (:, 2), & acc (3), acc (4), den (k)) endif - + pgfr = dts * cgfr (1) / den (k) * (exp (- cgfr (2) * tc) - 1.) 
* & exp ((6 + mur) / (mur + 3) * log (6 * qr (k) * den (k))) - + sink = psacr + pgfr factor = min (sink, qr (k), - tc / icpk (k)) / max (sink, qcmin) psacr = factor * psacr pgfr = factor * pgfr - + sink = min (qr (k), psacr + pgfr) - + call update_qt (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & 0., 0., - sink, 0., psacr, pgfr, te8 (k), cvm (k), tz (k), & lcpk (k), icpk (k), tcpk (k), tcp3 (k)) - + endif - + enddo - + end subroutine psacr_pgfr ! ======================================================================= @@ -3695,46 +3735,46 @@ end subroutine psacr_pgfr ! ======================================================================= subroutine pgacs (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, den, vts, vtg) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts - + real, intent (in), dimension (ks:ke) :: den, vts, vtg - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg - + real (kind = r8), intent (inout), dimension (ks:ke) :: tz - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: sink - + do k = ks, ke - + if (tz (k) .lt. tice .and. qs (k) .gt. qcmin .and. qg (k) .gt. qcmin) then - + sink = dts * acr3d (vtg (k), vts (k), qs (k), qg (k), cgacs, acco (:, 4), & acc (7), acc (8), den (k)) sink = min (fs2g_fac * qs (k), sink) - + call update_qq (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & 0., 0., 0., 0., - sink, sink) - + endif - + enddo - + end subroutine pgacs ! ======================================================================= @@ -3742,53 +3782,53 @@ end subroutine pgacs ! ======================================================================= subroutine pgaut (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, den) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts - + real, intent (in), dimension (ks:ke) :: den - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg - + real (kind = r8), intent (inout), dimension (ks:ke) :: tz - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: tc, factor, sink, qsm - + do k = ks, ke - + tc = tz (k) - tice - + if (tc .lt. 0. .and. qs (k) .gt. qcmin) then - + sink = 0 qsm = qs0_crt / den (k) if (qs (k) .gt. qsm) then factor = dts * 1.e-3 * exp (0.09 * (tz (k) - tice)) sink = factor / (1. + factor) * (qs (k) - qsm) endif - + sink = min (fs2g_fac * qs (k), sink) - + call update_qq (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & 0., 0., 0., 0., - sink, sink) - + endif - + enddo - + end subroutine pgaut ! ======================================================================= @@ -3797,41 +3837,41 @@ end subroutine pgaut subroutine pgacw_pgacr (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, cvm, te8, den, denfac, & vtr, vtg, lcpk, icpk, tcpk, tcp3) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! 
----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts - + real, intent (in), dimension (ks:ke) :: den, denfac, vtr, vtg - + real (kind = r8), intent (in), dimension (ks:ke) :: te8 - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg real, intent (inout), dimension (ks:ke) :: lcpk, icpk, tcpk, tcp3 - + real (kind = r8), intent (inout), dimension (ks:ke) :: cvm, tz - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: tc, factor, sink, qden real :: pgacw, pgacr - + do k = ks, ke - + tc = tz (k) - tice - + if (tc .lt. 0. .and. qg (k) .gt. qcmin) then - + pgacw = 0. if (ql (k) .gt. qcmin) then qden = qg (k) * den (k) @@ -3842,28 +3882,28 @@ subroutine pgacw_pgacr (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, cvm, te8, den, endif pgacw = factor / (1. + factor) * ql (k) endif - + pgacr = 0. if (qr (k) .gt. qcmin) then pgacr = min (dts * acr3d (vtg (k), vtr (k), qr (k), qg (k), cgacr, acco (:, 3), & acc (5), acc (6), den (k)), qr (k)) endif - + sink = pgacr + pgacw factor = min (sink, dim (tice, tz (k)) / icpk (k)) / max (sink, qcmin) pgacr = factor * pgacr pgacw = factor * pgacw - + sink = pgacr + pgacw - + call update_qt (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & 0., - pgacw, - pgacr, 0., 0., sink, te8 (k), cvm (k), tz (k), & lcpk (k), icpk (k), tcpk (k), tcp3 (k)) - + endif - + enddo - + end subroutine pgacw_pgacr ! ======================================================================= @@ -3871,111 +3911,127 @@ end subroutine pgacw_pgacr ! ======================================================================= subroutine subgrid_z_proc (ks, ke, den, denfac, dts, rh_adj, tz, qv, ql, qr, & - qi, qs, qg, dp, ccn, cin, cond, dep, reevap, sub) - + qi, qs, qg, dp, ccn, cin, cond, dep, reevap, sub, last_step) + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + + logical, intent (in) :: last_step + integer, intent (in) :: ks, ke - + real, intent (in) :: dts, rh_adj - + real, intent (in), dimension (ks:ke) :: den, denfac, dp - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg, ccn, cin - + real, intent (out) :: cond, dep, reevap, sub - + real (kind = r8), intent (inout), dimension (ks:ke) :: tz - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + + logical :: cond_evap + + integer :: n + real, dimension (ks:ke) :: q_liq, q_sol, q_cond, lcpk, icpk, tcpk, tcp3 - + real (kind = r8), dimension (ks:ke) :: cvm, te8 - + ! ----------------------------------------------------------------------- ! initialization ! ----------------------------------------------------------------------- - + cond = 0 dep = 0 reevap = 0 sub = 0 - + ! ----------------------------------------------------------------------- ! calculate heat capacities and latent heat coefficients ! ----------------------------------------------------------------------- - + call cal_mhc_lhc (ks, ke, qv, ql, qr, qi, qs, qg, q_liq, q_sol, cvm, te8, tz, & lcpk, icpk, tcpk, tcp3) - + ! ----------------------------------------------------------------------- ! instant processes (include deposition, evaporation, and sublimation) ! 
-----------------------------------------------------------------------
-
+
     if (.not. do_warm_rain_mp) then
-
+
         call pinst (ks, ke, qv, ql, qr, qi, qs, qg, tz, dp, cvm, te8, den, &
             lcpk, icpk, tcpk, tcp3, rh_adj, dep, sub, reevap)
-
+
     endif
-
+
 ! -----------------------------------------------------------------------
 ! cloud water condensation and evaporation
 ! -----------------------------------------------------------------------
-
-    call pcond_pevap (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, dp, cvm, te8, den, &
-        lcpk, icpk, tcpk, tcp3, cond, reevap)
-
+
+    if (delay_cond_evap) then
+        cond_evap = last_step
+    else
+        cond_evap = .true.
+    endif
+
+    if (cond_evap) then
+        do n = 1, nconds
+            call pcond_pevap (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, dp, cvm, te8, den, &
+                lcpk, icpk, tcpk, tcp3, cond, reevap)
+        enddo
+    endif
+
     if (.not. do_warm_rain_mp) then
-
+
 ! -----------------------------------------------------------------------
 ! enforce complete freezing below t_wfr
 ! -----------------------------------------------------------------------
-
+
         call pcomp (ks, ke, qv, ql, qr, qi, qs, qg, tz, cvm, te8, lcpk, icpk, tcpk, tcp3)
-
+
 ! -----------------------------------------------------------------------
 ! Wegener Bergeron Findeisen process
 ! -----------------------------------------------------------------------
-
+
         call pwbf (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, cvm, te8, den, lcpk, icpk, tcpk, tcp3)
-
+
 ! -----------------------------------------------------------------------
 ! Bigg freezing mechanism
 ! -----------------------------------------------------------------------
-
+
         call pbigg (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, cvm, te8, den, ccn, lcpk, icpk, tcpk, tcp3)
-
+
 ! -----------------------------------------------------------------------
 ! cloud ice deposition and sublimation
 ! -----------------------------------------------------------------------
-
+
         call pidep_pisub (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, dp, cvm, te8, den, &
             lcpk, icpk, tcpk, tcp3, cin, dep, sub)
-
+
 ! -----------------------------------------------------------------------
 ! snow deposition and sublimation
 ! -----------------------------------------------------------------------
-
+
         call psdep_pssub (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, dp, cvm, te8, den, &
             denfac, lcpk, icpk, tcpk, tcp3, dep, sub)
-
+
 ! -----------------------------------------------------------------------
 ! graupel deposition and sublimation
 ! -----------------------------------------------------------------------
-
+
         call pgdep_pgsub (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, dp, cvm, te8, den, &
             denfac, lcpk, icpk, tcpk, tcp3, dep, sub)
-
+
     endif
-
+
 end subroutine subgrid_z_proc
 ! =======================================================================
@@ -3984,83 +4040,83 @@ end subroutine subgrid_z_proc
 subroutine pinst (ks, ke, qv, ql, qr, qi, qs, qg, tz, dp, cvm, te8, den, &
     lcpk, icpk, tcpk, tcp3, rh_adj, dep, sub, reevap)
-
+
     implicit none
-
+
 ! -----------------------------------------------------------------------
 ! input / output arguments
 !
----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: rh_adj - + real, intent (in), dimension (ks:ke) :: den, dp - + real (kind = r8), intent (in), dimension (ks:ke) :: te8 - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg real, intent (inout), dimension (ks:ke) :: lcpk, icpk, tcpk, tcp3 - + real (kind = r8), intent (inout), dimension (ks:ke) :: cvm, tz - + real, intent (out) :: dep, reevap, sub - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: sink, tin, qpz, rh, dqdt, tmp, qsi - + do k = ks, ke - + ! ----------------------------------------------------------------------- ! instant deposit all water vapor to cloud ice when temperature is super low ! ----------------------------------------------------------------------- - + if (tz (k) .lt. t_min) then - + sink = dim (qv (k), qcmin) dep = dep + sink * dp (k) - + call update_qt (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & - sink, 0., 0., sink, 0., 0., te8 (k), cvm (k), tz (k), & lcpk (k), icpk (k), tcpk (k), tcp3 (k)) - + endif - + ! ----------------------------------------------------------------------- ! instant evaporation / sublimation of all clouds when rh < rh_adj ! ----------------------------------------------------------------------- - + qpz = qv (k) + ql (k) + qi (k) tin = (te8 (k) - lv00 * qpz + li00 * (qs (k) + qg (k))) / & mhc (qpz, qr (k), qs (k) + qg (k)) - + if (tin .gt. t_sub + 6.) then - + qsi = iqs (tin, den (k), dqdt) rh = qpz / qsi if (rh .lt. rh_adj) then - + sink = ql (k) tmp = qi (k) - + reevap = reevap + sink * dp (k) sub = sub + tmp * dp (k) - + call update_qt (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & sink + tmp, - sink, 0., - tmp, 0., 0., te8 (k), cvm (k), tz (k), & lcpk (k), icpk (k), tcpk (k), tcp3 (k)) - + endif - + endif - + enddo - + end subroutine pinst ! ======================================================================= @@ -4069,68 +4125,73 @@ end subroutine pinst subroutine pcond_pevap (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, dp, cvm, te8, den, & lcpk, icpk, tcpk, tcp3, cond, reevap) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts - + real, intent (in), dimension (ks:ke) :: den, dp - + real (kind = r8), intent (in), dimension (ks:ke) :: te8 - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg real, intent (inout), dimension (ks:ke) :: lcpk, icpk, tcpk, tcp3 - + real (kind = r8), intent (inout), dimension (ks:ke) :: cvm, tz - + real, intent (out) :: cond, reevap - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: sink, tin, qpz, dqdt, qsw, rh_tem, dq, factor, fac_l2v, fac_v2l - + fac_l2v = 1. - exp (- dts / tau_l2v) fac_v2l = 1. - exp (- dts / tau_v2l) - + do k = ks, ke - + tin = tz (k) qsw = wqs (tin, den (k), dqdt) qpz = qv (k) + ql (k) + qi (k) rh_tem = qpz / qsw dq = qsw - qv (k) if (dq .gt. 0.) then - factor = min (1., fac_l2v * (rh_fac * dq / qsw)) + if (do_evap_timescale) then + factor = min (1., fac_l2v * (rh_fac_evap * dq / qsw)) + else + factor = 1. 
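        ! editorial note (illustration only): on this evaporation branch the
        ! deficit dq = qsw - qv is removed at a rate limited by
        !     fac_l2v = 1. - exp (- dts / tau_l2v)
        ! scaled by rh_fac_evap * dq / qsw, so evaporation slows as the air
        ! approaches water saturation; with do_evap_timescale = .false. the
        ! factor is 1 and the full deficit is taken in one step.  either way
        ! the sink is divided by (1. + tcp3 (k) * dqdt), a single newton-type
        ! correction for the latent-heat warming that changes qsw while the
        ! cloud water evaporates, and is capped by the available ql (k).
        ! rh_fac_evap and rh_fac_cond take over the role of the single rh_fac
        ! used on the removed lines of this hunk.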
+ endif sink = min (ql (k), factor * dq / (1. + tcp3 (k) * dqdt)) if (use_rhc_cevap .and. rh_tem .ge. rhc_cevap) then sink = 0. endif reevap = reevap + sink * dp (k) - elseif (do_cond_timescale) then - factor = min (1., fac_v2l * (rh_fac * (- dq) / qsw)) - sink = - min (qv (k), factor * (- dq) / (1. + tcp3 (k) * dqdt)) - cond = cond - sink * dp (k) else - sink = - min (qv (k), - dq / (1. + tcp3 (k) * dqdt)) + if (do_cond_timescale) then + factor = min (1., fac_v2l * (rh_fac_cond * (- dq) / qsw)) + else + factor = 1. + endif + sink = - min (qv (k), factor * (- dq) / (1. + tcp3 (k) * dqdt)) cond = cond - sink * dp (k) endif - + call update_qt (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & sink, - sink, 0., 0., 0., 0., te8 (k), cvm (k), tz (k), & lcpk (k), icpk (k), tcpk (k), tcp3 (k)) - + enddo - + end subroutine pcond_pevap ! ======================================================================= @@ -4138,47 +4199,47 @@ end subroutine pcond_pevap ! ======================================================================= subroutine pcomp (ks, ke, qv, ql, qr, qi, qs, qg, tz, cvm, te8, lcpk, icpk, tcpk, tcp3) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real (kind = r8), intent (in), dimension (ks:ke) :: te8 - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg real, intent (inout), dimension (ks:ke) :: lcpk, icpk, tcpk, tcp3 - + real (kind = r8), intent (inout), dimension (ks:ke) :: cvm, tz - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: tc, sink - + do k = ks, ke - + tc = t_wfr - tz (k) - + if (tc .gt. 0. .and. ql (k) .gt. qcmin) then - + sink = ql (k) * tc / dt_fr sink = min (ql (k), sink, tc / icpk (k)) - + call update_qt (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & 0., - sink, 0., sink, 0., 0., te8 (k), cvm (k), tz (k), & lcpk (k), icpk (k), tcpk (k), tcp3 (k)) - + endif - + enddo - + end subroutine pcomp ! ======================================================================= @@ -4186,42 +4247,42 @@ end subroutine pcomp ! ======================================================================= subroutine pwbf (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, cvm, te8, den, lcpk, icpk, tcpk, tcp3) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts - + real, intent (in), dimension (ks:ke) :: den - + real (kind = r8), intent (in), dimension (ks:ke) :: te8 - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg real, intent (inout), dimension (ks:ke) :: lcpk, icpk, tcpk, tcp3 - + real (kind = r8), intent (inout), dimension (ks:ke) :: cvm, tz - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: tc, tin, sink, dqdt, qsw, qsi, qim, tmp, fac_wbf if (.not. do_wbf) return - + fac_wbf = 1. 
- exp (- dts / tau_wbf) - + do k = ks, ke - + tc = tice - tz (k) - + tin = tz (k) qsw = wqs (tin, den (k), dqdt) qsi = iqs (tin, den (k), dqdt) @@ -4232,15 +4293,15 @@ subroutine pwbf (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, cvm, te8, den, lcpk, i sink = min (fac_wbf * ql (k), tc / icpk (k)) qim = qi0_crt / den (k) tmp = min (sink, dim (qim, qi (k))) - + call update_qt (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & 0., - sink, 0., tmp, sink - tmp, 0., te8 (k), cvm (k), tz (k), & lcpk (k), icpk (k), tcpk (k), tcp3 (k)) - + endif - + enddo - + end subroutine pwbf ! ======================================================================= @@ -4248,40 +4309,40 @@ end subroutine pwbf ! ======================================================================= subroutine pbigg (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, cvm, te8, den, ccn, lcpk, icpk, tcpk, tcp3) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts - + real, intent (in), dimension (ks:ke) :: den - + real (kind = r8), intent (in), dimension (ks:ke) :: te8 - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg, ccn real, intent (inout), dimension (ks:ke) :: lcpk, icpk, tcpk, tcp3 - + real (kind = r8), intent (inout), dimension (ks:ke) :: cvm, tz - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: sink, tc - + do k = ks, ke - + tc = tice - tz (k) - + if (tc .gt. 0 .and. ql (k) .gt. qcmin) then - + if (do_psd_water_num) then call cal_pc_ed_oe_rr_tv (ql (k), den (k), blinw, muw, & pca = pcaw, pcb = pcbw, pc = ccn (k)) @@ -4290,63 +4351,63 @@ subroutine pbigg (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, cvm, te8, den, ccn, l sink = 100. / (rhow * ccn (k)) * dts * (exp (0.66 * tc) - 1.) * ql (k) ** 2 sink = min (ql (k), sink, tc / icpk (k)) - + call update_qt (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & 0., - sink, 0., sink, 0., 0., te8 (k), cvm (k), tz (k), & lcpk (k), icpk (k), tcpk (k), tcp3 (k)) - + endif - + enddo end subroutine pbigg - + ! ======================================================================= ! cloud ice deposition and sublimation, Hong et al. (2004) ! ======================================================================= subroutine pidep_pisub (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, dp, cvm, te8, den, & lcpk, icpk, tcpk, tcp3, cin, dep, sub) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts - + real, intent (in), dimension (ks:ke) :: den, dp - + real (kind = r8), intent (in), dimension (ks:ke) :: te8 - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg, cin real, intent (inout), dimension (ks:ke) :: lcpk, icpk, tcpk, tcp3 - + real (kind = r8), intent (inout), dimension (ks:ke) :: cvm, tz - + real, intent (out) :: dep, sub - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: sink, tin, dqdt, qsi, dq, pidep, tmp, tc, qi_gen, qi_crt - + do k = ks, ke - + if (tz (k) .lt. 
tice) then - + pidep = 0. tin = tz (k) qsi = iqs (tin, den (k), dqdt) dq = qv (k) - qsi tmp = dq / (1. + tcpk (k) * dqdt) - + if (qi (k) .gt. qcmin) then if (.not. prog_ccn) then if (inflag .eq. 1) & @@ -4369,7 +4430,7 @@ subroutine pidep_pisub (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, dp, cvm, te8, d (qsi * den (k) * (tcpk (k) * cvm (k)) ** 2 / (tcond * rvgas * tz (k) ** 2) + & 1. / vdifu) endif - + if (dq .gt. 0.) then tc = tice - tz (k) qi_gen = 4.92e-11 * exp (1.33 * log (1.e3 * exp (0.1 * tc))) @@ -4388,15 +4449,15 @@ subroutine pidep_pisub (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, dp, cvm, te8, d sink = max (pidep, tmp, - qi (k)) sub = sub - sink * dp (k) endif - + call update_qt (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & - sink, 0., 0., sink, 0., 0., te8 (k), cvm (k), tz (k), & lcpk (k), icpk (k), tcpk (k), tcp3 (k)) - + endif - + enddo - + end subroutine pidep_pisub ! ======================================================================= @@ -4405,40 +4466,40 @@ end subroutine pidep_pisub subroutine psdep_pssub (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, dp, cvm, te8, den, & denfac, lcpk, icpk, tcpk, tcp3, dep, sub) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts - + real, intent (in), dimension (ks:ke) :: den, dp, denfac - + real (kind = r8), intent (in), dimension (ks:ke) :: te8 - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg real, intent (inout), dimension (ks:ke) :: lcpk, icpk, tcpk, tcp3 - + real (kind = r8), intent (inout), dimension (ks:ke) :: cvm, tz - + real, intent (out) :: dep, sub - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: sink, tin, dqdt, qsi, qden, t2, dq, pssub - + do k = ks, ke - + if (qs (k) .gt. qcmin) then - + tin = tz (k) qsi = iqs (tin, den (k), dqdt) qden = qs (k) * den (k) @@ -4457,15 +4518,15 @@ subroutine psdep_pssub (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, dp, cvm, te8, d endif dep = dep - sink * dp (k) endif - + call update_qt (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & sink, 0., 0., 0., - sink, 0., te8 (k), cvm (k), tz (k), & lcpk (k), icpk (k), tcpk (k), tcp3 (k)) - + endif - + enddo - + end subroutine psdep_pssub ! ======================================================================= @@ -4474,40 +4535,40 @@ end subroutine psdep_pssub subroutine pgdep_pgsub (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, dp, cvm, te8, den, & denfac, lcpk, icpk, tcpk, tcp3, dep, sub) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts - + real, intent (in), dimension (ks:ke) :: den, dp, denfac - + real (kind = r8), intent (in), dimension (ks:ke) :: te8 - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg real, intent (inout), dimension (ks:ke) :: lcpk, icpk, tcpk, tcp3 - + real (kind = r8), intent (inout), dimension (ks:ke) :: cvm, tz - + real, intent (out) :: dep, sub - + ! ----------------------------------------------------------------------- ! local variables ! 
----------------------------------------------------------------------- - + integer :: k - + real :: sink, tin, dqdt, qsi, qden, t2, dq, pgsub - + do k = ks, ke - + if (qg (k) .gt. qcmin) then - + tin = tz (k) qsi = iqs (tin, den (k), dqdt) qden = qg (k) * den (k) @@ -4532,15 +4593,15 @@ subroutine pgdep_pgsub (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, dp, cvm, te8, d endif dep = dep - sink * dp (k) endif - + call update_qt (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & sink, 0., 0., 0., 0., - sink, te8 (k), cvm (k), tz (k), & lcpk (k), icpk (k), tcpk (k), tcp3 (k)) - + endif - + enddo - + end subroutine pgdep_pgsub ! ======================================================================= @@ -4548,49 +4609,49 @@ end subroutine pgdep_pgsub ! ======================================================================= subroutine cloud_fraction (ks, ke, pz, den, qv, ql, qr, qi, qs, qg, qa, tz, h_var, gsize) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: h_var, gsize - + real, intent (in), dimension (ks:ke) :: pz, den - + real (kind = r8), intent (in), dimension (ks:ke) :: tz - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg, qa - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: q_plus, q_minus real :: rh, rqi, tin, qsw, qsi, qpz, qstar, sigma, gam real :: dqdt, dq, liq, ice real :: qa10, qa100 - + real, dimension (ks:ke) :: q_liq, q_sol, q_cond, lcpk, icpk, tcpk, tcp3 - + real (kind = r8), dimension (ks:ke) :: cvm, te8 - + ! ----------------------------------------------------------------------- ! calculate heat capacities and latent heat coefficients ! ----------------------------------------------------------------------- - + call cal_mhc_lhc (ks, ke, qv, ql, qr, qi, qs, qg, q_liq, q_sol, cvm, te8, tz, & lcpk, icpk, tcpk, tcp3) - + do k = ks, ke - + ! combine water species - + ice = q_sol (k) q_sol (k) = qi (k) if (rad_snow) then @@ -4599,24 +4660,24 @@ subroutine cloud_fraction (ks, ke, pz, den, qv, ql, qr, qi, qs, qg, qa, tz, h_va q_sol (k) = qi (k) + qs (k) + qg (k) endif endif - + liq = q_liq (k) q_liq (k) = ql (k) if (rad_rain) then q_liq (k) = ql (k) + qr (k) endif - + q_cond (k) = q_liq (k) + q_sol (k) qpz = qv (k) + q_cond (k) - + ! use the "liquid - frozen water temperature" (tin) to compute saturated specific humidity - + ice = ice - q_sol (k) liq = liq - q_liq (k) tin = (te8 (k) - lv00 * qpz + li00 * ice) / mhc (qpz, liq, ice) - + ! calculate saturated specific humidity - + if (tin .le. t_wfr) then qstar = iqs (tin, den (k), dqdt) elseif (tin .ge. tice) then @@ -4631,14 +4692,14 @@ subroutine cloud_fraction (ks, ke, pz, den, qv, ql, qr, qi, qs, qg, qa, tz, h_va endif qstar = rqi * qsi + (1. - rqi) * qsw endif - + ! cloud schemes - + rh = qpz / qstar - + if (cfflag .eq. 1) then if (rh .gt. rh_thres .and. qpz .gt. qcmin) then - + dq = h_var * qpz if (do_cld_adj) then q_plus = qpz + dq * f_dq_p * min (1.0, max (0.0, (pz (k) - 200.e2) / & @@ -4647,7 +4708,7 @@ subroutine cloud_fraction (ks, ke, pz, den, qv, ql, qr, qi, qs, qg, qa, tz, h_va q_plus = qpz + dq * f_dq_p endif q_minus = qpz - dq * f_dq_m - + if (icloud_f .eq. 2) then if (qstar .lt. qpz) then qa (k) = 1. 
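The cfflag = 1 branch above spreads the total water qpz uniformly between q_minus and q_plus and takes the cloud fraction as the part of that spread lying above the saturation value qstar. The helper below is a minimal editorial sketch of that idea only: it takes q_minus and q_plus as given rather than building them from h_var, f_dq_p and f_dq_m, it omits the pressure-dependent width adjustment (do_cld_adj) and the icloud_f special cases, and the name uniform_pdf_cloud_fraction is invented for this illustration.

      ! illustration only: cloud fraction from a uniform total-water spread
      function uniform_pdf_cloud_fraction (qstar, q_minus, q_plus) result (qa)
          implicit none
          real, intent (in) :: qstar           ! saturation specific humidity
          real, intent (in) :: q_minus, q_plus ! lower / upper bound of the assumed spread
          real :: qa                           ! cloud (area) fraction
          if (qstar .le. q_minus) then
              ! even the driest part of the cell is saturated
              qa = 1.
          elseif (qstar .ge. q_plus) then
              ! even the moistest part of the cell is subsaturated
              qa = 0.
          else
              ! fraction of the uniform spread lying above saturation
              qa = (q_plus - qstar) / (q_plus - q_minus)
          endif
      end function uniform_pdf_cloud_fraction

With the illustrative values q_minus = 0.008, q_plus = 0.012 and qstar = 0.011 kg/kg, this sketch gives qa = 0.25.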
@@ -4692,7 +4753,7 @@ subroutine cloud_fraction (ks, ke, pz, den, qv, ql, qr, qi, qs, qg, qa, tz, h_va qa (k) = 0. endif endif - + if (cfflag .eq. 2) then if (rh .ge. 1.0) then qa (k) = 1.0 @@ -4704,7 +4765,7 @@ subroutine cloud_fraction (ks, ke, pz, den, qv, ql, qr, qi, qs, qg, qa, tz, h_va qa (k) = 0.0 endif endif - + if (cfflag .eq. 3) then if (q_cond (k) .gt. qcmin) then qa (k) = 1. / 50. * (5.77 * (100. - gsize / 1000.) * & @@ -4718,7 +4779,7 @@ subroutine cloud_fraction (ks, ke, pz, den, qv, ql, qr, qi, qs, qg, qa, tz, h_va qa (k) = 0.0 endif endif - + if (cfflag .eq. 4) then sigma = 0.28 + exp (0.49 * log (max (qcmin * 1000., q_cond (k) * 1000.))) gam = max (0.0, q_cond (k) * 1000.) / sigma @@ -4741,9 +4802,9 @@ subroutine cloud_fraction (ks, ke, pz, den, qv, ql, qr, qi, qs, qg, qa, tz, h_va qa (k) = qa10 + (log10 (gsize / 1000.) - 1) * (qa100 - qa10) qa (k) = max (0.0, min (1., qa (k))) endif - + enddo - + end subroutine cloud_fraction ! ======================================================================= @@ -4752,56 +4813,56 @@ end subroutine cloud_fraction ! ======================================================================= subroutine lagrangian_fall (ks, ke, zs, ze, zt, dp, q, precip, m1) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: zs - + real, intent (in), dimension (ks:ke + 1) :: ze, zt - + real, intent (in), dimension (ks:ke) :: dp - + real, intent (inout), dimension (ks:ke) :: q - + real, intent (inout) :: precip - + real, intent (out), dimension (ks:ke) :: m1 - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k, k0, n, m - + real :: a4 (4, ks:ke), pl, pr, delz, esl - + real, parameter :: r3 = 1. / 3., r23 = 2. / 3. - + real, dimension (ks:ke) :: qm, dz - + ! ----------------------------------------------------------------------- ! density: ! ----------------------------------------------------------------------- - + do k = ks, ke dz (k) = zt (k) - zt (k + 1) q (k) = q (k) * dp (k) a4 (1, k) = q (k) / dz (k) qm (k) = 0. enddo - + ! ----------------------------------------------------------------------- ! construct vertical profile with zt as coordinate ! ----------------------------------------------------------------------- - + call cs_profile (a4 (1, ks), dz (ks), ke - ks + 1) - + k0 = ks do k = ks, ke do n = k0, ke @@ -4839,22 +4900,22 @@ subroutine lagrangian_fall (ks, ke, zs, ze, zt, dp, q, precip, m1) enddo 555 continue enddo - + m1 (ks) = q (ks) - qm (ks) do k = ks + 1, ke m1 (k) = m1 (k - 1) + q (k) - qm (k) enddo precip = precip + m1 (ke) - + ! ----------------------------------------------------------------------- ! convert back to * dry * mixing ratio: ! dp must be dry air_mass (because moist air mass will be changed due to terminal fall) . ! ----------------------------------------------------------------------- - + do k = ks, ke q (k) = qm (k) / dp (k) enddo - + end subroutine lagrangian_fall ! ======================================================================= @@ -4863,70 +4924,70 @@ end subroutine lagrangian_fall ! ======================================================================= subroutine cs_profile (a4, del, km) - + implicit none - + ! 
----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: km - + real, intent (in) :: del (km) - + real, intent (inout) :: a4 (4, km) - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + logical :: extm (km) - + real :: gam (km), q (km + 1), d4, bet, a_bot, grat, pmp, lac real :: pmp_1, lac_1, pmp_2, lac_2, da1, da2, a6da - + grat = del (2) / del (1) ! grid ratio bet = grat * (grat + 0.5) q (1) = (2. * grat * (grat + 1.) * a4 (1, 1) + a4 (1, 2)) / bet gam (1) = (1. + grat * (grat + 1.5)) / bet - + do k = 2, km d4 = del (k - 1) / del (k) bet = 2. + 2. * d4 - gam (k - 1) q (k) = (3. * (a4 (1, k - 1) + d4 * a4 (1, k)) - q (k - 1)) / bet gam (k) = d4 / bet enddo - + a_bot = 1. + d4 * (d4 + 1.5) q (km + 1) = (2. * d4 * (d4 + 1.) * a4 (1, km) + a4 (1, km - 1) - a_bot * q (km)) & / (d4 * (d4 + 0.5) - a_bot * gam (km)) - + do k = km, 1, - 1 q (k) = q (k) - gam (k) * q (k + 1) enddo - + ! ----------------------------------------------------------------------- ! apply constraints ! ----------------------------------------------------------------------- - + do k = 2, km gam (k) = a4 (1, k) - a4 (1, k - 1) enddo - + ! ----------------------------------------------------------------------- ! top: ! ----------------------------------------------------------------------- - + q (1) = max (q (1), 0.) q (2) = min (q (2), max (a4 (1, 1), a4 (1, 2))) q (2) = max (q (2), min (a4 (1, 1), a4 (1, 2)), 0.) - + ! ----------------------------------------------------------------------- ! interior: ! ----------------------------------------------------------------------- - + do k = 3, km - 1 if (gam (k - 1) * gam (k + 1) .gt. 0.) then ! apply large - scale constraints to all fields if not local max / min @@ -4944,20 +5005,20 @@ subroutine cs_profile (a4, del, km) endif endif enddo - + ! ----------------------------------------------------------------------- ! bottom: ! ----------------------------------------------------------------------- - + q (km) = min (q (km), max (a4 (1, km - 1), a4 (1, km))) q (km) = max (q (km), min (a4 (1, km - 1), a4 (1, km)), 0.) q (km + 1) = max (q (km + 1), 0.) - + do k = 1, km a4 (2, k) = q (k) a4 (3, k) = q (k + 1) enddo - + do k = 1, km if (k .eq. 1 .or. k .eq. km) then extm (k) = (a4 (2, k) - a4 (1, k)) * (a4 (3, k) - a4 (1, k)) .gt. 0. @@ -4977,7 +5038,7 @@ subroutine cs_profile (a4, del, km) ! ----------------------------------------------------------------------- a4 (2, 1) = max (0., a4 (2, 1)) - + ! ----------------------------------------------------------------------- ! Huynh's 2nd constraint for interior: ! ----------------------------------------------------------------------- @@ -5004,11 +5065,11 @@ subroutine cs_profile (a4, del, km) endif endif enddo - + do k = 1, km - 1 a4 (4, k) = 6. * a4 (1, k) - 3. * (a4 (2, k) + a4 (3, k)) enddo - + k = km - 1 if (extm (k)) then a4 (2, k) = a4 (1, k) @@ -5026,46 +5087,46 @@ subroutine cs_profile (a4, del, km) a4 (2, k) = a4 (3, k) - a4 (4, k) endif endif - + call cs_limiters (km - 1, a4) - + ! ----------------------------------------------------------------------- ! bottom: ! ----------------------------------------------------------------------- - + a4 (2, km) = a4 (1, km) a4 (3, km) = a4 (1, km) a4 (4, km) = 0. - + end subroutine cs_profile ! 
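! editorial note (illustration only): in outline, cs_profile above turns the
! layer-mean densities a4 (1, :) into a piecewise-parabolic profile.  the
! gam / bet recurrence followed by the backward sweep is a thomas
! (tridiagonal) solve for the interface values q (1:km + 1); the top, interior
! and bottom blocks then clip those interface values so the reconstruction
! stays bounded by the neighbouring means and non-negative; finally the edges
! are stored in a4 (2, :) and a4 (3, :) with the curvature
! a4 (4, k) = 6. * a4 (1, k) - 3. * (a4 (2, k) + a4 (3, k)).  lagrangian_fall
! integrates this profile over the displaced layer edges to remap condensate
! after terminal fall.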
======================================================================= ! cubic spline (cs) limiters or boundary conditions -! a positive-definite constraint (iv = 0) is applied to tracers in every layer, +! a positive-definite constraint (iv = 0) is applied to tracers in every layer, ! adjusting the top-most and bottom-most interface values to enforce positive. ! this subroutine is the same as cs_limiters in fv_mapz_mod where iv = 0. ! ======================================================================= subroutine cs_limiters (km, a4) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: km - + real, intent (inout) :: a4 (4, km) ! ppm array - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real, parameter :: r12 = 1. / 12. - + do k = 1, km if (a4 (1, k) .le. 0.) then a4 (2, k) = a4 (1, k) @@ -5091,7 +5152,7 @@ subroutine cs_limiters (km, a4) endif endif enddo - + end subroutine cs_limiters ! ======================================================================= @@ -5099,60 +5160,60 @@ end subroutine cs_limiters ! ======================================================================= subroutine implicit_fall (dts, ks, ke, ze, vt, dp, q, precip, m1) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts - + real, intent (in), dimension (ks:ke + 1) :: ze - + real, intent (in), dimension (ks:ke) :: vt, dp - + real, intent (inout), dimension (ks:ke) :: q - + real, intent (inout) :: precip - + real, intent (out), dimension (ks:ke) :: m1 - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real, dimension (ks:ke) :: dz, qm, dd - + do k = ks, ke dz (k) = ze (k) - ze (k + 1) dd (k) = dts * vt (k) q (k) = q (k) * dp (k) enddo - + qm (ks) = q (ks) / (dz (ks) + dd (ks)) do k = ks + 1, ke qm (k) = (q (k) + qm (k - 1) * dd (k - 1)) / (dz (k) + dd (k)) enddo - + do k = ks, ke qm (k) = qm (k) * dz (k) enddo - + m1 (ks) = q (ks) - qm (ks) do k = ks + 1, ke m1 (k) = m1 (k - 1) + q (k) - qm (k) enddo precip = precip + m1 (ke) - + do k = ks, ke q (k) = qm (k) / dp (k) enddo - + end subroutine implicit_fall ! ======================================================================= @@ -5160,47 +5221,47 @@ end subroutine implicit_fall ! ======================================================================= subroutine explicit_fall (dts, ks, ke, ze, vt, dp, q, precip, m1) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts - + real, intent (in), dimension (ks:ke + 1) :: ze - + real, intent (in), dimension (ks:ke) :: vt, dp - + real, intent (inout), dimension (ks:ke) :: q - + real, intent (inout) :: precip - + real, intent (out), dimension (ks:ke) :: m1 - + ! ----------------------------------------------------------------------- ! local variables ! 
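! editorial note (illustration only): implicit_fall above advances
! sedimentation with a backward-euler upwind sweep.  with the layer mass
! q (k) * dp (k), the fall distance dd (k) = dts * vt (k) and the layer depth
! dz (k), the top-down recurrence
!     qm (ks) = q (ks) / (dz (ks) + dd (ks))
!     qm (k)  = (q (k) + qm (k - 1) * dd (k - 1)) / (dz (k) + dd (k))
! solves the implicit flux balance, so it stays stable even when condensate
! falls through more than one layer per step (dd > dz).  this subroutine
! (explicit_fall) instead sub-steps the same flux with
!     nstep = 1 + int (maxval (dd / dz))
! so that no sub-step exceeds the cfl limit.  both schemes accumulate the
! inter-layer flux in m1 and add m1 (ke) to the surface precipitation.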
----------------------------------------------------------------------- - + integer :: n, k, nstep - + real, dimension (ks:ke) :: dz, qm, q0, dd - + do k = ks, ke dz (k) = ze (k) - ze (k + 1) dd (k) = dts * vt (k) q0 (k) = q (k) * dp (k) enddo - + nstep = 1 + int (maxval (dd / dz)) do k = ks, ke dd (k) = dd (k) / nstep q (k) = q0 (k) enddo - + do n = 1, nstep qm (ks) = q (ks) - q (ks) * dd (ks) / dz (ks) do k = ks + 1, ke @@ -5208,17 +5269,17 @@ subroutine explicit_fall (dts, ks, ke, ze, vt, dp, q, precip, m1) enddo q = qm enddo - + m1 (ks) = q0 (ks) - qm (ks) do k = ks + 1, ke m1 (k) = m1 (k - 1) + q0 (k) - qm (k) enddo precip = precip + m1 (ke) - + do k = ks, ke q (k) = qm (k) / dp (k) enddo - + end subroutine explicit_fall ! ======================================================================= @@ -5227,38 +5288,38 @@ end subroutine explicit_fall subroutine implicit_lagrangian_fall (dts, ks, ke, zs, ze, zt, vt, dp, q, & precip, flux, sed_fac) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: zs, dts, sed_fac - + real, intent (in), dimension (ks:ke + 1) :: ze, zt - + real, intent (in), dimension (ks:ke) :: vt, dp - + real, intent (inout), dimension (ks:ke) :: q - + real, intent (inout) :: precip - + real, intent (out), dimension (ks:ke) :: flux ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + real :: pre0, pre1 - + real, dimension (ks:ke) :: q0, q1, m0, m1 q0 = q pre0 = precip - + call implicit_fall (dts, ks, ke, ze, vt, dp, q0, pre0, m0) q1 = q @@ -5269,38 +5330,38 @@ subroutine implicit_lagrangian_fall (dts, ks, ke, zs, ze, zt, vt, dp, q, & q = q0 * sed_fac + q1 * (1.0 - sed_fac) flux = m0 * sed_fac + m1 * (1.0 - sed_fac) precip = pre0 * sed_fac + pre1 * (1.0 - sed_fac) - + end subroutine implicit_lagrangian_fall - + ! ======================================================================= ! vertical subgrid variability used for cloud ice and cloud water autoconversion ! edges: qe == qbar + / - dm ! ======================================================================= subroutine linear_prof (km, q, dm, z_var, h_var) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: km - + logical, intent (in) :: z_var - + real, intent (in) :: q (km), h_var - + real, intent (out) :: dm (km) - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: dq (km) - + if (z_var) then do k = 2, km dq (k) = 0.5 * (q (k) - q (k - 1)) @@ -5331,7 +5392,7 @@ subroutine linear_prof (km, q, dm, z_var, h_var) dm (k) = max (0.0, h_var * q (k)) enddo endif - + end subroutine linear_prof ! ======================================================================= @@ -5339,19 +5400,19 @@ end subroutine linear_prof ! ======================================================================= function acr2d (qden, c, denfac, blin, mu) - + implicit none - + real :: acr2d - + ! ----------------------------------------------------------------------- ! input / output arguments ! 
----------------------------------------------------------------------- - + real, intent (in) :: qden, c, denfac, blin, mu - + acr2d = denfac * c * exp ((2 + mu + blin) / (mu + 3) * log (6 * qden)) - + end function acr2d ! ======================================================================= @@ -5359,41 +5420,41 @@ end function acr2d ! ======================================================================= function acr3d (v1, v2, q1, q2, c, acco, acc1, acc2, den) - + implicit none - + real :: acr3d - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + real, intent (in) :: v1, v2, c, den, q1, q2, acco (3), acc1, acc2 - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: i - + real :: t1, t2, tmp, vdiff - + t1 = exp (1. / (acc1 + 3) * log (6 * q1 * den)) t2 = exp (1. / (acc2 + 3) * log (6 * q2 * den)) if (vdiffflag .eq. 1) vdiff = abs (v1 - v2) if (vdiffflag .eq. 2) vdiff = sqrt ((1.20 * v1 - 0.95 * v2) ** 2. + 0.08 * v1 * v2) if (vdiffflag .eq. 3) vdiff = sqrt ((1.00 * v1 - 1.00 * v2) ** 2. + 0.04 * v1 * v2) - + acr3d = c * vdiff / den - + tmp = 0 do i = 1, 3 tmp = tmp + acco (i) * exp ((6 + acc1 - i) * log (t1)) * exp ((acc2 + i - 1) * log (t2)) enddo - + acr3d = acr3d * tmp - + end function acr3d ! ======================================================================= @@ -5401,20 +5462,20 @@ end function acr3d ! ======================================================================= function vent_coeff (qden, c1, c2, denfac, blin, mu) - + implicit none - + real :: vent_coeff - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + real, intent (in) :: qden, c1, c2, denfac, blin, mu - + vent_coeff = c1 + c2 * exp ((3 + 2 * mu + blin) / (mu + 3) / 2 * log (6 * qden)) * & sqrt (denfac) / exp ((1 + mu) / (mu + 3) * log (6 * qden)) - + end function vent_coeff ! ======================================================================= @@ -5422,23 +5483,23 @@ end function vent_coeff ! ======================================================================= function psub (t2, dq, qden, qsat, c, den, denfac, blin, mu, cpk, cvm) - + implicit none - + real :: psub - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + real, intent (in) :: t2, dq, qden, qsat, c (5), den, denfac, blin, cpk, mu - + real (kind = r8), intent (in) :: cvm - + psub = c (1) * t2 * dq * exp ((1 + mu) / (mu + 3) * log (6 * qden)) * & vent_coeff (qden, c (2), c (3), denfac, blin, mu) / & (c (4) * t2 + c (5) * (cpk * cvm) ** 2 * qsat * den) - + end function psub ! ======================================================================= @@ -5446,24 +5507,24 @@ end function psub ! ======================================================================= function pmlt (tc, dq, qden, pxacw, pxacr, c, den, denfac, blin, mu, lcpk, icpk, cvm) - + implicit none - + real :: pmlt - + ! ----------------------------------------------------------------------- ! input / output arguments ! 
----------------------------------------------------------------------- - + real, intent (in) :: tc, dq, qden, pxacw, pxacr, c (4), den, denfac, blin, lcpk, icpk, mu - + real (kind = r8), intent (in) :: cvm - + pmlt = (c (1) / (icpk * cvm) * tc / den - c (2) * lcpk / icpk * dq) * & exp ((1 + mu) / (mu + 3) * log (6 * qden)) * & vent_coeff (qden, c (3), c (4), denfac, blin, mu) + & c_liq / (icpk * cvm) * tc * (pxacw + pxacr) - + end function pmlt ! ======================================================================= @@ -5471,30 +5532,30 @@ end function pmlt ! ======================================================================= subroutine sedi_uv (ks, ke, m1, dp, u, v) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in), dimension (ks:ke) :: m1, dp - + real, intent (inout), dimension (ks:ke) :: u, v - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + do k = ks + 1, ke u (k) = (dp (k) * u (k) + m1 (k - 1) * u (k - 1)) / (dp (k) + m1 (k - 1)) v (k) = (dp (k) * v (k) + m1 (k - 1) * v (k - 1)) / (dp (k) + m1 (k - 1)) enddo - + end subroutine sedi_uv ! ======================================================================= @@ -5502,31 +5563,31 @@ end subroutine sedi_uv ! ======================================================================= subroutine sedi_w (ks, ke, m1, w, vt, dm) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in), dimension (ks:ke) :: m1, vt, dm - + real, intent (inout), dimension (ks:ke) :: w - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + w (ks) = w (ks) + m1 (ks) * vt (ks) / dm (ks) do k = ks + 1, ke w (k) = (dm (k) * w (k) + m1 (k - 1) * (w (k - 1) - vt (k - 1)) + m1 (k) * vt (k)) / & (dm (k) + m1 (k - 1)) enddo - + end subroutine sedi_w ! ======================================================================= @@ -5534,40 +5595,40 @@ end subroutine sedi_w ! ======================================================================= subroutine sedi_heat (ks, ke, dm, m1, dz, tz, qv, ql, qr, qi, qs, qg, cw) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: cw - + real, intent (in), dimension (ks:ke) :: dm, m1, dz, qv, ql, qr, qi, qs, qg - + real (kind = r8), intent (inout), dimension (ks:ke) :: tz - + ! ----------------------------------------------------------------------- ! local variables ! 
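! editorial note (illustration only): sedi_uv and sedi_w above fold the
! momentum carried by falling condensate into the layer below, weighting by
! the air mass (dp or dm) and the accumulated flux m1 (k - 1).  sedi_heat does
! the matching thermal update: cv0 (k) is the moist heat capacity of the layer
! (dry air, vapor, liquid and ice, plus cw times the net condensate change
! m1 (k) - m1 (k - 1)), dgz (k) = - 0.5 * grav * (dz (k - 1) + dz (k))
! accounts for the geopotential change of condensate crossing from layer
! k - 1 into layer k, and tz (k) is re-weighted with cw * tz (k - 1) + dgz (k)
! so the heat carried by the fall is not lost.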
----------------------------------------------------------------------- - + integer :: k - + real, dimension (ks:ke) :: dgz, cv0 - + do k = ks + 1, ke dgz (k) = - 0.5 * grav * (dz (k - 1) + dz (k)) cv0 (k) = dm (k) * (cv_air + qv (k) * cv_vap + (qr (k) + ql (k)) * c_liq + & (qi (k) + qs (k) + qg (k)) * c_ice) + cw * (m1 (k) - m1 (k - 1)) enddo - + do k = ks + 1, ke tz (k) = (cv0 (k) * tz (k) + m1 (k - 1) * (cw * tz (k - 1) + dgz (k))) / & (cv0 (k) + cw * m1 (k - 1)) enddo - + end subroutine sedi_heat ! ======================================================================= @@ -5576,33 +5637,29 @@ end subroutine sedi_heat subroutine fast_sat_adj (dtm, is, ie, ks, ke, hydrostatic, consv_te, & adj_vmr, te, dte, qv, ql, qr, qi, qs, qg, qa, qnl, qni, hs, delz, & - pt, delp, q_con, cappa, gsize, last_step, condensation, & - evaporation, deposition, sublimation, do_sat_adj) - + pt, delp, q_con, cappa, gsize, last_step, do_sat_adj) + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: is, ie, ks, ke - + logical, intent (in) :: hydrostatic, last_step, consv_te, do_sat_adj - + real, intent (in) :: dtm - + real, intent (in), dimension (is:ie) :: hs, gsize - + real, intent (in), dimension (is:ie, ks:ke) :: qnl, qni - + real, intent (inout), dimension (is:ie, ks:ke) :: delp, delz, pt, te real, intent (inout), dimension (is:ie, ks:ke) :: qv, ql, qr, qi, qs, qg, qa - + real, intent (inout), dimension (is:, ks:) :: q_con, cappa - - real, intent (inout), dimension (is:ie) :: condensation, deposition - real, intent (inout), dimension (is:ie) :: evaporation, sublimation - + real, intent (out), dimension (is:ie, ks:ke) :: adj_vmr real (kind = r8), intent (out), dimension (is:ie) :: dte @@ -5610,49 +5667,40 @@ subroutine fast_sat_adj (dtm, is, ie, ks, ke, hydrostatic, consv_te, & ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + real, dimension (is:ie, ks:ke) :: ua, va, wa, prefluxw, prefluxr, prefluxi, prefluxs, prefluxg - + real, dimension (is:ie) :: water, rain, ice, snow, graupel - - real, dimension (is:ie, ks:ke) :: pcw, edw, oew, rrw, tvw - real, dimension (is:ie, ks:ke) :: pci, edi, oei, rri, tvi - real, dimension (is:ie, ks:ke) :: pcr, edr, oer, rrr, tvr - real, dimension (is:ie, ks:ke) :: pcs, eds, oes, rrs, tvs - real, dimension (is:ie, ks:ke) :: pcg, edg, oeg, rrg, tvg ! ----------------------------------------------------------------------- ! initialization ! ----------------------------------------------------------------------- - + ua = 0.0 va = 0.0 wa = 0.0 - + water = 0.0 rain = 0.0 ice = 0.0 snow = 0.0 graupel = 0.0 - + prefluxw = 0.0 prefluxr = 0.0 prefluxi = 0.0 prefluxs = 0.0 prefluxg = 0.0 - + ! ----------------------------------------------------------------------- ! major cloud microphysics driver ! 
----------------------------------------------------------------------- - + call mpdrv (hydrostatic, ua, va, wa, delp, pt, qv, ql, qr, qi, qs, qg, qa, & qnl, qni, delz, is, ie, ks, ke, dtm, water, rain, ice, snow, graupel, & - gsize, hs, q_con, cappa, consv_te, adj_vmr, te, dte, pcw, edw, oew, rrw, tvw, & - pci, edi, oei, rri, tvi, pcr, edr, oer, rrr, tvr, pcs, eds, oes, rrs, tvs, & - pcg, edg, oeg, rrg, tvg, prefluxw, prefluxr, prefluxi, & - prefluxs, prefluxg, condensation, deposition, evaporation, sublimation, & - last_step, .true., do_sat_adj, .false.) - + gsize, hs, q_con, cappa, consv_te, adj_vmr, te, dte, prefluxw, prefluxr, & + prefluxi, prefluxs, prefluxg, last_step, .true., do_sat_adj, .false.) + end subroutine fast_sat_adj ! ======================================================================= @@ -5661,51 +5709,51 @@ end subroutine fast_sat_adj subroutine pgfr_simp (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, cvm, te8, & lcpk, icpk, tcpk, tcp3) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts - + real (kind = r8), intent (in), dimension (ks:ke) :: te8 - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg real, intent (inout), dimension (ks:ke) :: lcpk, icpk, tcpk, tcp3 - + real (kind = r8), intent (inout), dimension (ks:ke) :: cvm, tz - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: tc, sink, fac_r2g - + fac_r2g = 1. - exp (- dts / tau_r2g) - + do k = ks, ke - + tc = tz (k) - tice - + if (tc .lt. 0. .and. qr (k) .gt. qcmin) then - + sink = (- tc * 0.025) ** 2 * qr (k) sink = min (qr (k), sink, - fac_r2g * tc / icpk (k)) - + call update_qt (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & 0., 0., - sink, 0., 0., sink, te8 (k), cvm (k), tz (k), & lcpk (k), icpk (k), tcpk (k), tcp3 (k)) - + endif - + enddo - + end subroutine pgfr_simp ! ======================================================================= @@ -5714,52 +5762,52 @@ end subroutine pgfr_simp subroutine psmlt_simp (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, cvm, te8, & lcpk, icpk, tcpk, tcp3) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts - + real (kind = r8), intent (in), dimension (ks:ke) :: te8 - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg real, intent (inout), dimension (ks:ke) :: lcpk, icpk, tcpk, tcp3 - + real (kind = r8), intent (inout), dimension (ks:ke) :: cvm, tz - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: tc, tmp, sink, fac_smlt - + fac_smlt = 1. - exp (- dts / tau_smlt) - + do k = ks, ke - + tc = tz (k) - tice - + if (tc .ge. 0. .and. qs (k) .gt. 
qcmin) then - + sink = (tc * 0.1) ** 2 * qs (k) sink = min (qs (k), sink, fac_smlt * tc / icpk (k)) tmp = min (sink, dim (qs_mlt, ql (k))) - + call update_qt (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & 0., tmp, sink - tmp, 0., - sink, 0., te8 (k), cvm (k), tz (k), & lcpk (k), icpk (k), tcpk (k), tcp3 (k)) - + endif - + enddo - + end subroutine psmlt_simp ! ======================================================================= @@ -5767,97 +5815,97 @@ end subroutine psmlt_simp ! ======================================================================= subroutine praut_simp (ks, ke, dts, tz, qv, ql, qr, qi, qs, qg) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg - + real (kind = r8), intent (inout), dimension (ks:ke) :: tz - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: tc, sink, fac_l2r - + fac_l2r = 1. - exp (- dts / tau_l2r) - + do k = ks, ke - + tc = tz (k) - t_wfr - + if (tc .gt. 0 .and. ql (k) .gt. ql0_max) then - + sink = fac_l2r * (ql (k) - ql0_max) - + call update_qq (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & 0., - sink, sink, 0., 0., 0.) - + endif - + enddo end subroutine praut_simp - + ! ======================================================================= ! cloud ice to snow autoconversion, simple version ! ======================================================================= subroutine psaut_simp (ks, ke, dts, qv, ql, qr, qi, qs, qg, tz, den) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in) :: dts - + real, intent (in), dimension (ks:ke) :: den - + real, intent (inout), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg - + real (kind = r8), intent (inout), dimension (ks:ke) :: tz - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: tc, sink, fac_i2s, qim - + fac_i2s = 1. - exp (- dts / tau_i2s) - + do k = ks, ke - + tc = tz (k) - tice - + qim = qi0_max / den (k) - + if (tc .lt. 0. .and. qi (k) .gt. qim) then - + sink = fac_i2s * (qi (k) - qim) - + call update_qq (qv (k), ql (k), qr (k), qi (k), qs (k), qg (k), & 0., 0., 0., - sink, sink, 0.) - + endif - + enddo - + end subroutine psaut_simp ! ======================================================================= @@ -5867,37 +5915,37 @@ end subroutine psaut_simp subroutine cld_eff_rad (is, ie, ks, ke, lsm, p, delp, t, qv, qw, qi, qr, qs, qg, qa, & qcw, qci, qcr, qcs, qcg, rew, rei, rer, res, reg, cld, cloud, snowd, & cnvw, cnvi, cnvc) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! 
----------------------------------------------------------------------- - + integer, intent (in) :: is, ie, ks, ke - + real, intent (in), dimension (is:ie) :: lsm, snowd - + real, intent (in), dimension (is:ie, ks:ke) :: delp, t, p, cloud real, intent (in), dimension (is:ie, ks:ke) :: qv, qw, qi, qr, qs, qg, qa - + real, intent (in), dimension (is:ie, ks:ke), optional :: cnvw, cnvi, cnvc - + real, intent (inout), dimension (is:ie, ks:ke) :: qcw, qci, qcr, qcs, qcg real, intent (inout), dimension (is:ie, ks:ke) :: rew, rei, rer, res, reg real, intent (inout), dimension (is:ie, ks:ke) :: cld - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: i, k, ind - + real, dimension (is:ie, ks:ke) :: qmw, qmr, qmi, qms, qmg - + real :: dpg, rho, ccnw, mask, cor, tc, bw real :: lambdaw, lambdar, lambdai, lambdas, lambdag, rei_fac - + real :: retab (138) = (/ & 0.05000, 0.05000, 0.05000, 0.05000, 0.05000, 0.05000, & 0.05500, 0.06000, 0.07000, 0.08000, 0.09000, 0.10000, & @@ -5922,18 +5970,18 @@ subroutine cld_eff_rad (is, ie, ks, ke, lsm, p, delp, t, qv, qw, qi, qr, qs, qg, 124.954, 130.630, 136.457, 142.446, 148.608, 154.956, & 161.503, 168.262, 175.248, 182.473, 189.952, 197.699, & 205.728, 214.055, 222.694, 231.661, 240.971, 250.639 /) - + qmw = qw qmi = qi qmr = qr qms = qs qmg = qg cld = cloud - + ! ----------------------------------------------------------------------- ! merge convective cloud to total cloud ! ----------------------------------------------------------------------- - + if (present (cnvw)) then qmw = qmw + cnvw endif @@ -5943,11 +5991,11 @@ subroutine cld_eff_rad (is, ie, ks, ke, lsm, p, delp, t, qv, qw, qi, qr, qs, qg, if (present (cnvc)) then cld = cnvc + (1 - cnvc) * cld endif - + ! ----------------------------------------------------------------------- ! combine liquid and solid phases ! ----------------------------------------------------------------------- - + if (liq_ice_combine) then do i = is, ie do k = ks, ke @@ -5959,11 +6007,11 @@ subroutine cld_eff_rad (is, ie, ks, ke, lsm, p, delp, t, qv, qw, qi, qr, qs, qg, enddo enddo endif - + ! ----------------------------------------------------------------------- ! combine snow and graupel ! ----------------------------------------------------------------------- - + if (snow_grauple_combine) then do i = is, ie do k = ks, ke @@ -5972,33 +6020,33 @@ subroutine cld_eff_rad (is, ie, ks, ke, lsm, p, delp, t, qv, qw, qi, qr, qs, qg, enddo enddo endif - - + + do i = is, ie do k = ks, ke - + qmw (i, k) = max (qmw (i, k), qcmin) qmi (i, k) = max (qmi (i, k), qcmin) qmr (i, k) = max (qmr (i, k), qcmin) qms (i, k) = max (qms (i, k), qcmin) qmg (i, k) = max (qmg (i, k), qcmin) - + cld (i, k) = min (max (cld (i, k), 0.0), 1.0) - + mask = min (max (lsm (i), 0.0), 2.0) - + dpg = abs (delp (i, k)) / grav rho = p (i, k) / (rdgas * t (i, k) * (1. + zvir * qv (i, k))) - + tc = t (i, k) - tice - + if (rewflag .eq. 1) then - + ! ----------------------------------------------------------------------- ! cloud water (Martin et al. 1994) ! ----------------------------------------------------------------------- - + if (prog_ccn) then ! boucher and lohmann (1995) ccnw = (1.0 - abs (mask - 1.0)) * & @@ -6008,7 +6056,7 @@ subroutine cld_eff_rad (is, ie, ks, ke, lsm, p, delp, t, qv, qw, qi, qr, qs, qg, else ccnw = ccn_o * abs (mask - 1.0) + ccn_l * (1.0 - abs (mask - 1.0)) endif - + if (qmw (i, k) .gt. 
qcmin) then qcw (i, k) = dpg * qmw (i, k) * 1.0e3 rew (i, k) = exp (1.0 / 3.0 * log ((3.0 * qmw (i, k) * rho) / & @@ -6018,15 +6066,15 @@ subroutine cld_eff_rad (is, ie, ks, ke, lsm, p, delp, t, qv, qw, qi, qr, qs, qg, qcw (i, k) = 0.0 rew (i, k) = rewmin endif - + endif - + if (rewflag .eq. 2) then - + ! ----------------------------------------------------------------------- ! cloud water (Martin et al. 1994, gfdl revision) ! ----------------------------------------------------------------------- - + if (prog_ccn) then ! boucher and lohmann (1995) ccnw = (1.0 - abs (mask - 1.0)) * & @@ -6036,7 +6084,7 @@ subroutine cld_eff_rad (is, ie, ks, ke, lsm, p, delp, t, qv, qw, qi, qr, qs, qg, else ccnw = 1.077 * ccn_o * abs (mask - 1.0) + 1.143 * ccn_l * (1.0 - abs (mask - 1.0)) endif - + if (qmw (i, k) .gt. qcmin) then qcw (i, k) = dpg * qmw (i, k) * 1.0e3 rew (i, k) = exp (1.0 / 3.0 * log ((3.0 * qmw (i, k) * rho) / & @@ -6046,15 +6094,15 @@ subroutine cld_eff_rad (is, ie, ks, ke, lsm, p, delp, t, qv, qw, qi, qr, qs, qg, qcw (i, k) = 0.0 rew (i, k) = rewmin endif - + endif - + if (rewflag .eq. 3) then - + ! ----------------------------------------------------------------------- ! cloud water (Kiehl et al. 1994) ! ----------------------------------------------------------------------- - + if (qmw (i, k) .gt. qcmin) then qcw (i, k) = dpg * qmw (i, k) * 1.0e3 rew (i, k) = 14.0 * abs (mask - 1.0) + & @@ -6067,15 +6115,15 @@ subroutine cld_eff_rad (is, ie, ks, ke, lsm, p, delp, t, qv, qw, qi, qr, qs, qg, qcw (i, k) = 0.0 rew (i, k) = rewmin endif - + endif - + if (rewflag .eq. 4) then - + ! ----------------------------------------------------------------------- ! cloud water derived from PSD ! ----------------------------------------------------------------------- - + if (qmw (i, k) .gt. qcmin) then qcw (i, k) = dpg * qmw (i, k) * 1.0e3 call cal_pc_ed_oe_rr_tv (qmw (i, k), rho, blinw, muw, & @@ -6086,15 +6134,15 @@ subroutine cld_eff_rad (is, ie, ks, ke, lsm, p, delp, t, qv, qw, qi, qr, qs, qg, qcw (i, k) = 0.0 rew (i, k) = rewmin endif - + endif - + if (reiflag .eq. 1) then - + ! ----------------------------------------------------------------------- ! cloud ice (Heymsfield and Mcfarquhar 1996) ! ----------------------------------------------------------------------- - + if (qmi (i, k) .gt. qcmin) then qci (i, k) = dpg * qmi (i, k) * 1.0e3 rei_fac = log (1.0e3 * qmi (i, k) * rho) @@ -6112,15 +6160,15 @@ subroutine cld_eff_rad (is, ie, ks, ke, lsm, p, delp, t, qv, qw, qi, qr, qs, qg, qci (i, k) = 0.0 rei (i, k) = reimin endif - + endif - + if (reiflag .eq. 2) then - + ! ----------------------------------------------------------------------- ! cloud ice (Donner et al. 1997) ! ----------------------------------------------------------------------- - + if (qmi (i, k) .gt. qcmin) then qci (i, k) = dpg * qmi (i, k) * 1.0e3 if (tc .le. - 55) then @@ -6145,15 +6193,15 @@ subroutine cld_eff_rad (is, ie, ks, ke, lsm, p, delp, t, qv, qw, qi, qr, qs, qg, qci (i, k) = 0.0 rei (i, k) = reimin endif - + endif - + if (reiflag .eq. 3) then - + ! ----------------------------------------------------------------------- ! cloud ice (Fu 2007) ! ----------------------------------------------------------------------- - + if (qmi (i, k) .gt. qcmin) then qci (i, k) = dpg * qmi (i, k) * 1.0e3 rei (i, k) = 47.05 + tc * (0.6624 + 0.001741 * tc) @@ -6162,15 +6210,15 @@ subroutine cld_eff_rad (is, ie, ks, ke, lsm, p, delp, t, qv, qw, qi, qr, qs, qg, qci (i, k) = 0.0 rei (i, k) = reimin endif - + endif - + if (reiflag .eq. 
4) then - + ! ----------------------------------------------------------------------- ! cloud ice (Kristjansson et al. 2000) ! ----------------------------------------------------------------------- - + if (qmi (i, k) .gt. qcmin) then qci (i, k) = dpg * qmi (i, k) * 1.0e3 ind = min (max (int (t (i, k) - 136.0), 44), 138 - 1) @@ -6181,15 +6229,15 @@ subroutine cld_eff_rad (is, ie, ks, ke, lsm, p, delp, t, qv, qw, qi, qr, qs, qg, qci (i, k) = 0.0 rei (i, k) = reimin endif - + endif - + if (reiflag .eq. 5) then - + ! ----------------------------------------------------------------------- ! cloud ice (Wyser 1998) ! ----------------------------------------------------------------------- - + if (qmi (i, k) .gt. qcmin) then qci (i, k) = dpg * qmi (i, k) * 1.0e3 bw = - 2. + 1.e-3 * log10 (rho * qmi (i, k) / 50.e-3) * & @@ -6200,15 +6248,15 @@ subroutine cld_eff_rad (is, ie, ks, ke, lsm, p, delp, t, qv, qw, qi, qr, qs, qg, qci (i, k) = 0.0 rei (i, k) = reimin endif - + endif - + if (reiflag .eq. 6) then - + ! ----------------------------------------------------------------------- ! cloud ice (Sun and Rikus 1999, Sun 2001) ! ----------------------------------------------------------------------- - + if (qmi (i, k) .gt. qcmin) then qci (i, k) = dpg * qmi (i, k) * 1.0e3 rei_fac = log (1.0e3 * qmi (i, k) * rho) @@ -6220,15 +6268,15 @@ subroutine cld_eff_rad (is, ie, ks, ke, lsm, p, delp, t, qv, qw, qi, qr, qs, qg, qci (i, k) = 0.0 rei (i, k) = reimin endif - + endif - + if (reiflag .eq. 7) then - + ! ----------------------------------------------------------------------- ! cloud ice derived from PSD ! ----------------------------------------------------------------------- - + if (qmi (i, k) .gt. qcmin) then qci (i, k) = dpg * qmi (i, k) * 1.0e3 call cal_pc_ed_oe_rr_tv (qmi (i, k), rho, blini, mui, & @@ -6239,15 +6287,15 @@ subroutine cld_eff_rad (is, ie, ks, ke, lsm, p, delp, t, qv, qw, qi, qr, qs, qg, qci (i, k) = 0.0 rei (i, k) = reimin endif - + endif - + if (rerflag .eq. 1) then - + ! ----------------------------------------------------------------------- ! rain derived from PSD ! ----------------------------------------------------------------------- - + if (qmr (i, k) .gt. qcmin) then qcr (i, k) = dpg * qmr (i, k) * 1.0e3 call cal_pc_ed_oe_rr_tv (qmr (i, k), rho, blinr, mur, & @@ -6258,15 +6306,15 @@ subroutine cld_eff_rad (is, ie, ks, ke, lsm, p, delp, t, qv, qw, qi, qr, qs, qg, qcr (i, k) = 0.0 rer (i, k) = rermin endif - + endif - + if (resflag .eq. 1) then - + ! ----------------------------------------------------------------------- ! snow derived from PSD ! ----------------------------------------------------------------------- - + if (qms (i, k) .gt. qcmin) then qcs (i, k) = dpg * qms (i, k) * 1.0e3 call cal_pc_ed_oe_rr_tv (qms (i, k), rho, blins, mus, & @@ -6277,15 +6325,15 @@ subroutine cld_eff_rad (is, ie, ks, ke, lsm, p, delp, t, qv, qw, qi, qr, qs, qg, qcs (i, k) = 0.0 res (i, k) = resmin endif - + endif - + if (regflag .eq. 1) then - + ! ----------------------------------------------------------------------- ! graupel derived from PSD ! ----------------------------------------------------------------------- - + if (qmg (i, k) .gt. qcmin) then qcg (i, k) = dpg * qmg (i, k) * 1.0e3 if (do_hail) then @@ -6301,13 +6349,13 @@ subroutine cld_eff_rad (is, ie, ks, ke, lsm, p, delp, t, qv, qw, qi, qr, qs, qg, qcg (i, k) = 0.0 reg (i, k) = regmin endif - + endif - + enddo - + enddo - + end subroutine cld_eff_rad ! 
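=======================================================================
! editorial sketch (not part of the patch): the rewflag = 1 and 2
! branches of cld_eff_rad above follow the Martin et al. (1994) family
! of parameterizations, which start from the volume-mean droplet radius
! implied by the liquid water content and an assumed droplet number
! concentration.  the helper below is a minimal, hypothetical
! illustration of that relation only; the spectral dispersion factor,
! unit conversions, and min/max bounds applied in cld_eff_rad are
! intentionally omitted here.
! -----------------------------------------------------------------------

      real function vol_mean_radius (qw, rho_air, nd)

          implicit none

          ! qw: cloud water mixing ratio (kg/kg), rho_air: air density (kg/m^3)
          ! nd: assumed droplet number concentration (1/m^3); result in metres
          real, intent (in) :: qw, rho_air, nd

          real, parameter :: pi = 3.14159265358979
          real, parameter :: rho_w = 1.0e3 ! density of liquid water (kg/m^3)

          ! r_v ** 3 = 3 * lwc / (4 * pi * rho_w * nd), with lwc = rho_air * qw
          vol_mean_radius = (3.0 * qw * rho_air / (4.0 * pi * rho_w * nd)) ** (1.0 / 3.0)

      end function vol_mean_radius

!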
======================================================================= @@ -6317,73 +6365,73 @@ end subroutine cld_eff_rad subroutine rad_ref (is, ie, js, je, isd, ied, jsd, jed, q, pt, delp, peln, & delz, dbz, maxdbz, allmax, npz, ncnst, hydrostatic, zvir, & do_inline_mp, sphum, liq_wat, ice_wat, rainwat, snowwat, graupel, mp_top) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + logical, intent (in) :: hydrostatic, do_inline_mp - + integer, intent (in) :: is, ie, js, je, isd, ied, jsd, jed integer, intent (in) :: npz, ncnst, mp_top integer, intent (in) :: sphum, liq_wat, ice_wat, rainwat, snowwat, graupel - + real, intent (in) :: zvir - + real, intent (in), dimension (is:, js:, 1:) :: delz - + real, intent (in), dimension (isd:ied, jsd:jed, npz) :: pt, delp - + real, intent (in), dimension (isd:ied, jsd:jed, npz, ncnst) :: q - + real, intent (in), dimension (is:ie, npz + 1, js:je) :: peln - + real, intent (out) :: allmax - + real, intent (out), dimension (is:ie, js:je) :: maxdbz - + real, intent (out), dimension (is:ie, js:je, npz) :: dbz - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: i, j, k - + real, parameter :: alpha = 0.224, mp_const = 200 * exp (1.6 * log (3.6e6)) - + real (kind = r8) :: qden, z_e real :: fac_r, fac_s, fac_g - + real, dimension (npz) :: den, denfac, qmr, qms, qmg, vtr, vts, vtg - + ! ----------------------------------------------------------------------- ! return if the microphysics scheme doesn't include rain ! ----------------------------------------------------------------------- - + if (rainwat .lt. 1) return - + ! ----------------------------------------------------------------------- ! initialization ! ----------------------------------------------------------------------- - + dbz = - 20. maxdbz = - 20. allmax = - 20. - + ! ----------------------------------------------------------------------- ! calculate radar reflectivity ! ----------------------------------------------------------------------- - + do j = js, je do i = is, ie - + ! ----------------------------------------------------------------------- ! air density ! ----------------------------------------------------------------------- - + do k = 1, npz if (hydrostatic) then den (k) = delp (i, j, k) / ((peln (i, k + 1, j) - peln (i, k, j)) * & @@ -6395,27 +6443,27 @@ subroutine rad_ref (is, ie, js, je, isd, ied, jsd, jed, q, pt, delp, peln, & qms (k) = max (qcmin, q (i, j, k, snowwat)) qmg (k) = max (qcmin, q (i, j, k, graupel)) enddo - + do k = 1, npz denfac (k) = sqrt (den (npz) / den (k)) enddo - + ! ----------------------------------------------------------------------- ! fall speed ! ----------------------------------------------------------------------- - + if (radr_flag .eq. 3) then call term_rsg (1, npz, qmr, den, denfac, vr_fac, blinr, & mur, tvar, tvbr, vr_max, const_vr, vtr) vtr = vtr / rhor endif - + if (rads_flag .eq. 3) then call term_rsg (1, npz, qms, den, denfac, vs_fac, blins, & mus, tvas, tvbs, vs_max, const_vs, vts) vts = vts / rhos endif - + if (radg_flag .eq. 3) then if (do_hail .and. .not. do_inline_mp) then call term_rsg (1, npz, qmg, den, denfac, vg_fac, blinh, & @@ -6427,14 +6475,14 @@ subroutine rad_ref (is, ie, js, je, isd, ied, jsd, jed, q, pt, delp, peln, & vtg = vtg / rhog endif endif - + ! 
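! -----------------------------------------------------------------------
! editorial note (not part of the patch): by construction,
!     mp_const * (qden * vt) ** 1.6 = 200. * (3.6e6 * qden * vt) ** 1.6,
! so each hydrometeor contribution accumulated into z_e below has the
! form of a Marshall-Palmer style z = a * r ** b relation with a = 200
! and b = 1.6, where the 3.6e6 factor rescales the sedimentation mass
! flux den * q * vt.  the final conversion to dBZ is the usual
! 10. * log10 (z_e), as coded at the end of the k loop.
!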
----------------------------------------------------------------------- ! radar reflectivity ! ----------------------------------------------------------------------- - + do k = mp_top + 1, npz z_e = 0. - + if (rainwat .gt. 0) then qden = den (k) * qmr (k) if (qmr (k) .gt. qcmin) then @@ -6450,7 +6498,7 @@ subroutine rad_ref (is, ie, js, je, isd, ied, jsd, jed, q, pt, delp, peln, & z_e = z_e + mp_const * exp (1.6 * log (qden * vtr (k))) endif endif - + if (snowwat .gt. 0) then qden = den (k) * qms (k) if (qms (k) .gt. qcmin) then @@ -6477,7 +6525,7 @@ subroutine rad_ref (is, ie, js, je, isd, ied, jsd, jed, q, pt, delp, peln, & z_e = z_e + mp_const * exp (1.6 * log (qden * vts (k))) endif endif - + if (graupel .gt. 0) then qden = den (k) * qmg (k) if (do_hail .and. .not. do_inline_mp) then @@ -6519,19 +6567,19 @@ subroutine rad_ref (is, ie, js, je, isd, ied, jsd, jed, q, pt, delp, peln, & z_e = z_e + mp_const * exp (1.6 * log (qden * vtg (k))) endif endif - + dbz (i, j, k) = 10. * log10 (max (0.01, z_e)) enddo - + do k = mp_top + 1, npz maxdbz (i, j) = max (dbz (i, j, k), maxdbz (i, j)) enddo - + allmax = max (maxdbz (i, j), allmax) - + enddo enddo - + end subroutine rad_ref ! ======================================================================= @@ -6539,23 +6587,23 @@ end subroutine rad_ref ! ======================================================================= function mhc3 (qv, q_liq, q_sol) - + implicit none - + real (kind = r8) :: mhc3 - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + real, intent (in) :: qv, q_liq, q_sol - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + mhc3 = one_r8 + qv * c1_vap + q_liq * c1_liq + q_sol * c1_ice - + end function mhc3 ! ======================================================================= @@ -6563,25 +6611,25 @@ end function mhc3 ! ======================================================================= function mhc4 (qd, qv, q_liq, q_sol) - + implicit none - + real (kind = r8) :: mhc4 - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + real, intent (in) :: qv, q_liq, q_sol - + real (kind = r8), intent (in) :: qd - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + mhc4 = qd + qv * c1_vap + q_liq * c1_liq + q_sol * c1_ice - + end function mhc4 ! ======================================================================= @@ -6589,27 +6637,27 @@ end function mhc4 ! ======================================================================= function mhc6 (qv, ql, qr, qi, qs, qg) - + implicit none - + real (kind = r8) :: mhc6 - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + real, intent (in) :: qv, ql, qr, qi, qs, qg - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + real :: q_liq, q_sol - + q_liq = ql + qr q_sol = qi + qs + qg mhc6 = mhc (qv, q_liq, q_sol) - + end function mhc6 ! 
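=======================================================================
! editorial note (not part of the patch): the mhc* functions above return
! a moist heat capacity of the form
!     cvm = 1 + qv * c1_vap + (ql + qr) * c1_liq + (qi + qs + qg) * c1_ice
! (normalized so that dry air contributes 1), which ties temperature to
! the conserved moist internal energy te8 via
!     te8 = cvm * tz + lv00 * qv - li00 * (qi + qs + qg).
! after any phase change that updates the water species, temperature is
! therefore recovered as
!     tz = (te8 - lv00 * qv + li00 * (qi + qs + qg)) / cvm,
! which is exactly the form used in update_qt further below.
!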
======================================================================= @@ -6617,29 +6665,29 @@ end function mhc6 ! ======================================================================= function mte (qv, ql, qr, qi, qs, qg, tk, dp, moist_q) - + implicit none - + real (kind = r8) :: mte - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + logical, intent (in) :: moist_q - + real, intent (in) :: qv, ql, qr, qi, qs, qg, dp - + real (kind = r8), intent (in) :: tk - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + real :: q_liq, q_sol, q_cond - + real (kind = r8) :: cvm, con_r8 - + q_liq = ql + qr q_sol = qi + qs + qg q_cond = q_liq + q_sol @@ -6650,7 +6698,7 @@ function mte (qv, ql, qr, qi, qs, qg, tk, dp, moist_q) cvm = mhc (qv, q_liq, q_sol) endif mte = rgrav * cvm * c_air * tk * dp - + end function mte ! ======================================================================= @@ -6658,45 +6706,45 @@ end function mte ! ======================================================================= subroutine mtetw (ks, ke, qv, ql, qr, qi, qs, qg, tz, ua, va, wa, delp, & - gsize, dte, vapor, water, rain, ice, snow, graupel, sen, stress, dts, & + dte, vapor, water, rain, ice, snow, graupel, sen, stress, dts, & te, tw, te_b, tw_b, moist_q, hydrostatic, te_loss) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + logical, intent (in) :: moist_q, hydrostatic - - real, intent (in) :: gsize, vapor, water, rain, ice, snow, graupel, dts, sen, stress - + + real, intent (in) :: vapor, water, rain, ice, snow, graupel, dts, sen, stress + real, intent (in), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg, ua, va, wa, delp - + real (kind = r8), intent (in) :: dte - + real (kind = r8), intent (in), dimension (ks:ke) :: tz - + real (kind = r8), intent (out) :: te_b, tw_b - + real (kind = r8), intent (out), optional :: te_loss - + real (kind = r8), intent (out), dimension (ks:ke) :: te, tw - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + real :: q_cond - + real (kind = r8) :: con_r8 - + real, dimension (ks:ke) :: q_liq, q_sol - + real (kind = r8), dimension (ks:ke) :: cvm do k = ks, ke @@ -6715,48 +6763,48 @@ subroutine mtetw (ks, ke, qv, ql, qr, qi, qs, qg, tz, ua, va, wa, delp, & else te (k) = te (k) + 0.5 * (ua (k) ** 2 + va (k) ** 2 + wa (k) ** 2) endif - te (k) = rgrav * te (k) * delp (k) * gsize ** 2.0 - tw (k) = rgrav * (qv (k) + q_cond) * delp (k) * gsize ** 2.0 + te (k) = rgrav * te (k) * delp (k) + tw (k) = rgrav * (qv (k) + q_cond) * delp (k) enddo - te_b = (dte + (lv00 * c_air * vapor - li00 * c_air * (ice + snow + graupel)) * dts / 86400 + sen * dts + stress * dts) * gsize ** 2.0 - tw_b = (vapor + water + rain + ice + snow + graupel) * dts / 86400 * gsize ** 2.0 + te_b = (dte + (lv00 * c_air * vapor - li00 * c_air * (ice + snow + graupel)) * dts / 86400 + sen * dts + stress * dts) + tw_b = (vapor + water + rain + ice + snow + graupel) * dts / 86400 if (present (te_loss)) then ! 
total energy change due to sedimentation and its heating - te_loss = dte * gsize ** 2.0 + te_loss = dte endif end subroutine mtetw - + ! ======================================================================= ! calculate heat capacities and latent heat coefficients ! ======================================================================= subroutine cal_mhc_lhc (ks, ke, qv, ql, qr, qi, qs, qg, q_liq, q_sol, & cvm, te8, tz, lcpk, icpk, tcpk, tcp3) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: ks, ke - + real, intent (in), dimension (ks:ke) :: qv, ql, qr, qi, qs, qg - + real (kind = r8), intent (in), dimension (ks:ke) :: tz - + real, intent (out), dimension (ks:ke) :: q_liq, q_sol, lcpk, icpk, tcpk, tcp3 - + real (kind = r8), intent (out), dimension (ks:ke) :: cvm, te8 - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: k - + do k = ks, ke q_liq (k) = ql (k) + qr (k) q_sol (k) = qi (k) + qs (k) + qg (k) @@ -6769,30 +6817,30 @@ subroutine cal_mhc_lhc (ks, ke, qv, ql, qr, qi, qs, qg, q_liq, q_sol, & enddo end subroutine cal_mhc_lhc - + ! ======================================================================= ! update hydrometeors ! ======================================================================= subroutine update_qq (qv, ql, qr, qi, qs, qg, dqv, dql, dqr, dqi, dqs, dqg) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + real, intent (in) :: dqv, dql, dqr, dqi, dqs, dqg - + real, intent (inout) :: qv, ql, qr, qi, qs, qg - + qv = qv + dqv ql = ql + dql qr = qr + dqr qi = qi + dqi qs = qs + dqs qg = qg + dqg - + end subroutine update_qq ! ======================================================================= @@ -6801,42 +6849,42 @@ end subroutine update_qq subroutine update_qt (qv, ql, qr, qi, qs, qg, dqv, dql, dqr, dqi, dqs, dqg, te8, & cvm, tk, lcpk, icpk, tcpk, tcp3) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + real, intent (in) :: dqv, dql, dqr, dqi, dqs, dqg - + real (kind = r8), intent (in) :: te8 - + real, intent (inout) :: qv, ql, qr, qi, qs, qg - + real, intent (out) :: lcpk, icpk, tcpk, tcp3 - + real (kind = r8), intent (out) :: cvm, tk - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + qv = qv + dqv ql = ql + dql qr = qr + dqr qi = qi + dqi qs = qs + dqs qg = qg + dqg - + cvm = mhc (qv, ql, qr, qi, qs, qg) tk = (te8 - lv00 * qv + li00 * (qi + qs + qg)) / cvm - + lcpk = (lv00 + d1_vap * tk) / cvm icpk = (li00 + d1_ice * tk) / cvm tcpk = (li20 + (d1_vap + d1_ice) * tk) / cvm tcp3 = lcpk + icpk * min (1., dim (tice, tk) / (tice - t_wfr)) - + end subroutine update_qt ! ======================================================================= @@ -6885,31 +6933,31 @@ end subroutine cal_pc_ed_oe_rr_tv ! ======================================================================= subroutine qs_init - + implicit none - + integer :: i - + if (.not. 
tables_are_initialized) then - + allocate (table0 (length)) allocate (table1 (length)) allocate (table2 (length)) allocate (table3 (length)) allocate (table4 (length)) - + allocate (des0 (length)) allocate (des1 (length)) allocate (des2 (length)) allocate (des3 (length)) allocate (des4 (length)) - + call qs_table0 (length) call qs_table1 (length) call qs_table2 (length) call qs_table3 (length) call qs_table4 (length) - + do i = 1, length - 1 des0 (i) = max (0., table0 (i + 1) - table0 (i)) des1 (i) = max (0., table1 (i + 1) - table1 (i)) @@ -6922,11 +6970,11 @@ subroutine qs_init des2 (length) = des2 (length - 1) des3 (length) = des3 (length - 1) des4 (length) = des4 (length - 1) - + tables_are_initialized = .true. endif - + end subroutine qs_init ! ======================================================================= @@ -6934,41 +6982,41 @@ end subroutine qs_init ! ======================================================================= subroutine qs_table_core (n, n_blend, do_smith_table, table) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: n, n_blend - + logical, intent (in) :: do_smith_table - + real, intent (out), dimension (n) :: table - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: i integer, parameter :: n_min = 1600 - + real (kind = r8) :: delt = 0.1 real (kind = r8) :: tmin, tem, esh real (kind = r8) :: wice, wh2o, fac0, fac1, fac2 real (kind = r8) :: esbasw, tbasw, esbasi, a, b, c, d, e real (kind = r8) :: esupc (n_blend) - + esbasw = 1013246.0 tbasw = tice + 100. esbasi = 6107.1 tmin = tice - n_min * delt - + ! ----------------------------------------------------------------------- ! compute es over ice between - (n_min * delt) deg C and 0 deg C ! ----------------------------------------------------------------------- - + if (do_smith_table) then do i = 1, n_min tem = tmin + delt * real (i - 1) @@ -6987,11 +7035,11 @@ subroutine qs_table_core (n, n_blend, do_smith_table, table) table (i) = e00 * exp (fac2) enddo endif - + ! ----------------------------------------------------------------------- ! compute es over water between - (n_blend * delt) deg C and [ (n - n_min - 1) * delt] deg C ! ----------------------------------------------------------------------- - + if (do_smith_table) then do i = 1, n - n_min + n_blend tem = tice + delt * (real (i - 1) - n_blend) @@ -7021,18 +7069,18 @@ subroutine qs_table_core (n, n_blend, do_smith_table, table) endif enddo endif - + ! ----------------------------------------------------------------------- ! derive blended es over ice and supercooled water between - (n_blend * delt) deg C and 0 deg C ! ----------------------------------------------------------------------- - + do i = 1, n_blend tem = tice + delt * (real (i - 1) - n_blend) wice = 1.0 / (delt * n_blend) * (tice - tem) wh2o = 1.0 / (delt * n_blend) * (tem - tice + delt * n_blend) table (i + n_min - n_blend) = wice * table (i + n_min - n_blend) + wh2o * esupc (i) enddo - + end subroutine qs_table_core ! ======================================================================= @@ -7042,30 +7090,30 @@ end subroutine qs_table_core ! ======================================================================= subroutine qs_table0 (n) - + implicit none - + ! 
----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: n - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: i - + real (kind = r8) :: delt = 0.1 real (kind = r8) :: tmin, tem, fac0, fac1, fac2 - + tmin = tice - 160. - + ! ----------------------------------------------------------------------- ! compute es over water only ! ----------------------------------------------------------------------- - + do i = 1, n tem = tmin + delt * real (i - 1) fac0 = (tem - tice) / (tem * tice) @@ -7073,7 +7121,7 @@ subroutine qs_table0 (n) fac2 = (dc_vap * log (tem / tice) + fac1) / rvgas table0 (i) = e00 * exp (fac2) enddo - + end subroutine qs_table0 ! ======================================================================= @@ -7083,17 +7131,17 @@ end subroutine qs_table0 ! ======================================================================= subroutine qs_table1 (n) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: n - + call qs_table_core (n, 200, .false., table1) - + end subroutine qs_table1 ! ======================================================================= @@ -7104,17 +7152,17 @@ end subroutine qs_table1 ! ======================================================================= subroutine qs_table2 (n) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: n - + call qs_table_core (n, 0, .false., table2) - + end subroutine qs_table2 ! ======================================================================= @@ -7124,17 +7172,17 @@ end subroutine qs_table2 ! ======================================================================= subroutine qs_table3 (n) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: n - + call qs_table_core (n, 200, .true., table3) - + end subroutine qs_table3 ! ======================================================================= @@ -7144,17 +7192,17 @@ end subroutine qs_table3 ! ======================================================================= subroutine qs_table4 (n) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: n - + call qs_table_core (n, 0, .true., table4) - + end subroutine qs_table4 ! ======================================================================= @@ -7162,37 +7210,37 @@ end subroutine qs_table4 ! ======================================================================= function es_core (length, tk, table, des) - + implicit none - + real :: es_core - + ! ----------------------------------------------------------------------- ! input / output arguments ! 
----------------------------------------------------------------------- integer, intent (in) :: length - + real, intent (in) :: tk - + real, intent (in), dimension (length) :: table, des - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: it - + real :: ap1, tmin - + if (.not. tables_are_initialized) call qs_init - + tmin = tice - 160. ap1 = 10. * dim (tk, tmin) + 1. ap1 = min (2621., ap1) it = ap1 es_core = table (it) + (ap1 - it) * des (it) - + end function es_core ! ======================================================================= @@ -7200,38 +7248,38 @@ end function es_core ! ======================================================================= function qs_core (length, tk, den, dqdt, table, des) - + implicit none - + real :: qs_core - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- integer, intent (in) :: length - + real, intent (in) :: tk, den - + real, intent (in), dimension (length) :: table, des - + real, intent (out) :: dqdt - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: it - + real :: ap1, tmin - + tmin = tice - 160. ap1 = 10. * dim (tk, tmin) + 1. ap1 = min (2621., ap1) qs_core = es_core (length, tk, table, des) / (rvgas * tk * den) it = ap1 - 0.5 dqdt = 10. * (des (it) + (ap1 - it) * (des (it + 1) - des (it))) / (rvgas * tk * den) - + end function qs_core ! ======================================================================= @@ -7241,19 +7289,19 @@ end function qs_core ! ======================================================================= function wes_t (tk) - + implicit none - + real :: wes_t - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + real, intent (in) :: tk - + wes_t = es_core (length, tk, table0, des0) - + end function wes_t ! ======================================================================= @@ -7262,19 +7310,19 @@ end function wes_t ! ======================================================================= function mes_t (tk) - + implicit none - + real :: mes_t - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + real, intent (in) :: tk - + mes_t = es_core (length, tk, table1, des1) - + end function mes_t ! ======================================================================= @@ -7284,19 +7332,19 @@ end function mes_t ! ======================================================================= function ies_t (tk) - + implicit none - + real :: ies_t - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + real, intent (in) :: tk - + ies_t = es_core (length, tk, table2, des2) - + end function ies_t ! ======================================================================= @@ -7306,21 +7354,21 @@ end function ies_t ! ======================================================================= function wqs_trho (tk, den, dqdt) - + implicit none - + real :: wqs_trho - + ! 
----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + real, intent (in) :: tk, den - + real, intent (out) :: dqdt - + wqs_trho = qs_core (length, tk, den, dqdt, table0, des0) - + end function wqs_trho ! ======================================================================= @@ -7329,21 +7377,21 @@ end function wqs_trho ! ======================================================================= function mqs_trho (tk, den, dqdt) - + implicit none - + real :: mqs_trho - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + real, intent (in) :: tk, den - + real, intent (out) :: dqdt - + mqs_trho = qs_core (length, tk, den, dqdt, table1, des1) - + end function mqs_trho ! ======================================================================= @@ -7353,21 +7401,21 @@ end function mqs_trho ! ======================================================================= function iqs_trho (tk, den, dqdt) - + implicit none - + real :: iqs_trho - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + real, intent (in) :: tk, den - + real, intent (out) :: dqdt - + iqs_trho = qs_core (length, tk, den, dqdt, table2, des2) - + end function iqs_trho ! ======================================================================= @@ -7377,29 +7425,29 @@ end function iqs_trho ! ======================================================================= function wqs_ptqv (tk, pa, qv, dqdt) - + implicit none - + real :: wqs_ptqv - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + real, intent (in) :: tk, pa, qv - + real, intent (out) :: dqdt - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + real :: den - + den = pa / (rdgas * tk * (1. + zvir * qv)) - + wqs_ptqv = wqs (tk, den, dqdt) - + end function wqs_ptqv ! ======================================================================= @@ -7408,29 +7456,29 @@ end function wqs_ptqv ! ======================================================================= function mqs_ptqv (tk, pa, qv, dqdt) - + implicit none - + real :: mqs_ptqv - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + real, intent (in) :: tk, pa, qv - + real, intent (out) :: dqdt - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + real :: den - + den = pa / (rdgas * tk * (1. + zvir * qv)) - + mqs_ptqv = mqs (tk, den, dqdt) - + end function mqs_ptqv ! ======================================================================= @@ -7440,29 +7488,29 @@ end function mqs_ptqv ! ======================================================================= function iqs_ptqv (tk, pa, qv, dqdt) - + implicit none - + real :: iqs_ptqv - + ! ----------------------------------------------------------------------- ! input / output arguments ! 
----------------------------------------------------------------------- - + real, intent (in) :: tk, pa, qv - + real, intent (out) :: dqdt - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + real :: den - + den = pa / (rdgas * tk * (1. + zvir * qv)) - + iqs_ptqv = iqs (tk, den, dqdt) - + end function iqs_ptqv ! ======================================================================= @@ -7472,29 +7520,29 @@ end function iqs_ptqv ! ======================================================================= subroutine mqs3d (im, km, ks, tk, pa, qv, qs, dqdt) - + implicit none - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + integer, intent (in) :: im, km, ks - + real, intent (in), dimension (im, ks:km) :: tk, pa, qv - + real, intent (out), dimension (im, ks:km) :: qs - + real, intent (out), dimension (im, ks:km), optional :: dqdt - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + integer :: i, k - + real :: dqdt0 - + if (present (dqdt)) then do k = ks, km do i = 1, im @@ -7508,7 +7556,7 @@ subroutine mqs3d (im, km, ks, tk, pa, qv, qs, dqdt) enddo enddo endif - + end subroutine mqs3d ! ======================================================================= @@ -7517,37 +7565,37 @@ end subroutine mqs3d ! ======================================================================= function wet_bulb_core (qv, tk, den, lcp) - + implicit none - + real :: wet_bulb_core - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + real, intent (in) :: qv, tk, den, lcp - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + logical :: do_adjust = .false. - + real :: factor = 1. / 3. real :: qsat, tp, dqdt - + wet_bulb_core = tk qsat = wqs (wet_bulb_core, den, dqdt) tp = factor * (qsat - qv) / (1. + lcp * dqdt) * lcp wet_bulb_core = wet_bulb_core - tp - + if (do_adjust .and. tp .gt. 0.0) then qsat = wqs (wet_bulb_core, den, dqdt) tp = (qsat - qv) / (1. + lcp * dqdt) * lcp wet_bulb_core = wet_bulb_core - tp endif - + end function wet_bulb_core ! ======================================================================= @@ -7555,27 +7603,27 @@ end function wet_bulb_core ! ======================================================================= function wet_bulb_dry (qv, tk, den) - + implicit none - + real :: wet_bulb_dry - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + real, intent (in) :: qv, tk, den - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + real :: lcp - + lcp = hlv / cp_air - + wet_bulb_dry = wet_bulb_core (qv, tk, den, lcp) - + end function wet_bulb_dry ! ======================================================================= @@ -7583,32 +7631,32 @@ end function wet_bulb_dry ! 
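=======================================================================
! editorial note (not part of the patch): wet_bulb_core above approximates
! the wet-bulb temperature with a damped Newton-type correction toward
! water saturation,
!     t_wb = tk - f * lcp * (qs (tk) - qv) / (1. + lcp * dqdt),  f = 1/3,
! optionally followed by one undamped correction when the local do_adjust
! switch is enabled (it is off by default in this version).  a minimal,
! hypothetical use of the dry-air wrapper (the saturation tables are
! built on first use through qs_init):
!
!     twb = wet_bulb_dry (qv, tk, den)   ! qv (kg/kg), tk (K), den (kg/m^3)
!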
======================================================================= function wet_bulb_moist (qv, ql, qi, qr, qs, qg, tk, den) - + implicit none - + real :: wet_bulb_moist - + ! ----------------------------------------------------------------------- ! input / output arguments ! ----------------------------------------------------------------------- - + real, intent (in) :: qv, ql, qi, qr, qs, qg, tk, den - + ! ----------------------------------------------------------------------- ! local variables ! ----------------------------------------------------------------------- - + real :: lcp, q_liq, q_sol - + real (kind = r8) :: cvm - + q_liq = ql + qr q_sol = qi + qs + qg cvm = mhc (qv, q_liq, q_sol) lcp = (lv00 + d1_vap * tk) / cvm - + wet_bulb_moist = wet_bulb_core (qv, tk, den, lcp) - + end function wet_bulb_moist end module gfdl_mp_mod diff --git a/model/intermediate_phys.F90 b/model/intermediate_phys.F90 index c0bb24c74..36c0835a0 100644 --- a/model/intermediate_phys.F90 +++ b/model/intermediate_phys.F90 @@ -31,7 +31,6 @@ module intermediate_phys_mod use fv_grid_utils_mod, only: cubed_to_latlon, update_dwinds_phys use fv_arrays_mod, only: fv_grid_type, fv_grid_bounds_type, inline_mp_type use mpp_domains_mod, only: domain2d, mpp_update_domains - use fv_timing_mod, only: timing_on, timing_off use tracer_manager_mod, only: get_tracer_index, get_tracer_names use field_manager_mod, only: model_atmos use gfdl_mp_mod, only: gfdl_mp_driver, fast_sat_adj, mtetw @@ -177,8 +176,6 @@ subroutine intermediate_phys (is, ie, js, je, isd, ied, jsd, jed, km, npx, npy, ! Note: pt at this stage is T_v if ((do_adiabatic_init .or. (.not. do_inline_mp) .or. do_sat_adj) .and. nwat .eq. 6) then - call timing_on ('fast_sat_adj') - allocate (dz (is:ie, kmp:km)) allocate (tz (kmp:km)) @@ -189,9 +186,8 @@ subroutine intermediate_phys (is, ie, js, je, isd, ied, jsd, jed, km, npx, npy, !$OMP liq_wat, ice_wat, snowwat, graupel, q_con, r_vir, & !$OMP sphum, pkz, last_step, consv, te0_2d, gridstruct, & !$OMP q, mdt, cld_amt, cappa, rrg, akap, ccn_cm3, & -!$OMP cin_cm3, aerosol, inline_mp, do_sat_adj, & -!$OMP adj_mass_vmr, conv_vmr_mmr, nq, consv_checker, & -!$OMP te_err, tw_err) & +!$OMP cin_cm3, aerosol, do_sat_adj, adj_mass_vmr, & +!$OMP conv_vmr_mmr, nq, consv_checker, te_err, tw_err) & !$OMP private (q2, q3, gsize, dz, pe, peln, adj_vmr, qliq, qsol, & !$OMP tz, wz, dte, te_beg, tw_beg, te_b_beg, tw_b_beg, & !$OMP te_end, tw_end, te_b_end, tw_b_end, te_loss) @@ -232,7 +228,7 @@ subroutine intermediate_phys (is, ie, js, je, isd, ied, jsd, jed, km, npx, npy, call mtetw (kmp, km, q (i, j, kmp:km, sphum), q (i, j, kmp:km, liq_wat), & q (i, j, kmp:km, rainwat), q (i, j, kmp:km, ice_wat), q (i, j, kmp:km, snowwat), & q (i, j, kmp:km, graupel), tz (kmp:km), ua (i, j, kmp:km), va (i, j, kmp:km), wz (kmp:km), & - delp (i, j, kmp:km), gsize (i), dte (i), 0.0, 0.0, 0.0, 0.0, 0.0, & + delp (i, j, kmp:km), dte (i), 0.0, 0.0, 0.0, 0.0, 0.0, & 0.0, 0.0, 0.0, abs (mdt), te_beg (i, kmp:km), tw_beg (i, kmp:km), & te_b_beg (i), tw_b_beg (i), .true., hydrostatic) enddo @@ -272,8 +268,7 @@ subroutine intermediate_phys (is, ie, js, je, isd, ied, jsd, jed, km, npx, npy, #else cappa (isd:, jsd, 1:), & #endif - gsize, last_step, inline_mp%cond (is:ie, j), inline_mp%reevap (is:ie, j), & - inline_mp%dep (is:ie, j), inline_mp%sub (is:ie, j), do_sat_adj) + gsize, last_step, do_sat_adj) ! update non-microphyiscs tracers due to mass change if (adj_mass_vmr .gt. 
0) then @@ -312,7 +307,7 @@ subroutine intermediate_phys (is, ie, js, je, isd, ied, jsd, jed, km, npx, npy, call mtetw (kmp, km, q (i, j, kmp:km, sphum), q (i, j, kmp:km, liq_wat), & q (i, j, kmp:km, rainwat), q (i, j, kmp:km, ice_wat), q (i, j, kmp:km, snowwat), & q (i, j, kmp:km, graupel), tz (kmp:km), ua (i, j, kmp:km), va (i, j, kmp:km), wz (kmp:km), & - delp (i, j, kmp:km), gsize (i), dte (i), 0.0, 0.0, 0.0, 0.0, 0.0, & + delp (i, j, kmp:km), dte (i), 0.0, 0.0, 0.0, 0.0, 0.0, & 0.0, 0.0, 0.0, abs (mdt), te_end (i, kmp:km), tw_end (i, kmp:km), & te_b_end (i), tw_b_end (i), .true., hydrostatic, te_loss (i)) enddo @@ -333,16 +328,16 @@ subroutine intermediate_phys (is, ie, js, je, isd, ied, jsd, jed, km, npx, npy, if (abs (sum (te_end (i, kmp:km)) + te_b_end (i) - sum (te_beg (i, kmp:km)) - te_b_beg (i)) / & (sum (te_beg (i, kmp:km)) + te_b_beg (i)) .gt. te_err) then print*, "FAST_SAT_ADJ TE: ", & - !(sum (te_beg (i, kmp:km)) + te_b_beg (i)) / (gsize (i) ** 2), & - !(sum (te_end (i, kmp:km)) + te_b_end (i)) / (gsize (i) ** 2), & + !(sum (te_beg (i, kmp:km)) + te_b_beg (i)), & + !(sum (te_end (i, kmp:km)) + te_b_end (i)), & (sum (te_end (i, kmp:km)) + te_b_end (i) - sum (te_beg (i, kmp:km)) - te_b_beg (i)) / & (sum (te_beg (i, kmp:km)) + te_b_beg (i)) endif if (abs (sum (tw_end (i, kmp:km)) + tw_b_end (i) - sum (tw_beg (i, kmp:km)) - tw_b_beg (i)) / & (sum (tw_beg (i, kmp:km)) + tw_b_beg (i)) .gt. tw_err) then print*, "FAST_SAT_ADJ TW: ", & - !(sum (tw_beg (i, kmp:km)) + tw_b_beg (i)) / (gsize (i) ** 2), & - !(sum (tw_end (i, kmp:km)) + tw_b_end (i)) / (gsize (i) ** 2), & + !(sum (tw_beg (i, kmp:km)) + tw_b_beg (i)), & + !(sum (tw_end (i, kmp:km)) + tw_b_end (i)), & (sum (tw_end (i, kmp:km)) + tw_b_end (i) - sum (tw_beg (i, kmp:km)) - tw_b_beg (i)) / & (sum (tw_beg (i, kmp:km)) + tw_b_beg (i)) endif @@ -357,8 +352,6 @@ subroutine intermediate_phys (is, ie, js, je, isd, ied, jsd, jed, km, npx, npy, deallocate (tz) deallocate (wz) - call timing_off ('fast_sat_adj') - endif !----------------------------------------------------------------------- @@ -371,8 +364,6 @@ subroutine intermediate_phys (is, ie, js, je, isd, ied, jsd, jed, km, npx, npy, if ((.not. do_adiabatic_init) .and. do_inline_mp .and. nwat .eq. 6) then - call timing_on ('gfdl_mp') - allocate (u_dt (isd:ied, jsd:jed, km)) allocate (v_dt (isd:ied, jsd:jed, km)) @@ -450,7 +441,6 @@ subroutine intermediate_phys (is, ie, js, je, isd, ied, jsd, jed, km, npx, npy, ! note: the unit of area is m^2 ! note: the unit of prew, prer, prei, pres, preg is mm/day ! note: the unit of prefluxw, prefluxr, prefluxi, prefluxs, prefluxg is mm/day - ! note: the unit of cond, dep, reevap, sub is mm/day ! 
save ua, va for wind tendency calculation u_dt (is:ie, j, kmp:km) = ua (is:ie, j, kmp:km) @@ -501,7 +491,7 @@ subroutine intermediate_phys (is, ie, js, je, isd, ied, jsd, jed, km, npx, npy, call mtetw (kmp, km, q (i, j, kmp:km, sphum), q (i, j, kmp:km, liq_wat), & q (i, j, kmp:km, rainwat), q (i, j, kmp:km, ice_wat), q (i, j, kmp:km, snowwat), & q (i, j, kmp:km, graupel), tz (kmp:km), ua (i, j, kmp:km), va (i, j, kmp:km), wz (kmp:km), & - delp (i, j, kmp:km), gsize (i), dte (i), 0.0, inline_mp%prew (i, j), & + delp (i, j, kmp:km), dte (i), 0.0, inline_mp%prew (i, j), & inline_mp%prer (i, j), inline_mp%prei (i, j), inline_mp%pres (i, j), & inline_mp%preg (i, j), 0.0, 0.0, abs (mdt), te_beg (i, kmp:km), tw_beg (i, kmp:km), & te_b_beg (i), tw_b_beg (i), .true., hydrostatic) @@ -547,26 +537,10 @@ subroutine intermediate_phys (is, ie, js, je, isd, ied, jsd, jed, km, npx, npy, cappa (isd:, jsd, 1:), & #endif consv .gt. consv_min, adj_vmr (is:ie, kmp:km), te (is:ie, j, kmp:km), dte (is:ie), & - inline_mp%pcw (is:ie, j, kmp:km), inline_mp%edw (is:ie, j, kmp:km), & - inline_mp%oew (is:ie, j, kmp:km), & - inline_mp%rrw (is:ie, j, kmp:km), inline_mp%tvw (is:ie, j, kmp:km), & - inline_mp%pci (is:ie, j, kmp:km), inline_mp%edi (is:ie, j, kmp:km), & - inline_mp%oei (is:ie, j, kmp:km), & - inline_mp%rri (is:ie, j, kmp:km), inline_mp%tvi (is:ie, j, kmp:km), & - inline_mp%pcr (is:ie, j, kmp:km), inline_mp%edr (is:ie, j, kmp:km), & - inline_mp%oer (is:ie, j, kmp:km), & - inline_mp%rrr (is:ie, j, kmp:km), inline_mp%tvr (is:ie, j, kmp:km), & - inline_mp%pcs (is:ie, j, kmp:km), inline_mp%eds (is:ie, j, kmp:km), & - inline_mp%oes (is:ie, j, kmp:km), & - inline_mp%rrs (is:ie, j, kmp:km), inline_mp%tvs (is:ie, j, kmp:km), & - inline_mp%pcg (is:ie, j, kmp:km), inline_mp%edg (is:ie, j, kmp:km), & - inline_mp%oeg (is:ie, j, kmp:km), & - inline_mp%rrg (is:ie, j, kmp:km), inline_mp%tvg (is:ie, j, kmp:km), & inline_mp%prefluxw(is:ie, j, kmp:km), & inline_mp%prefluxr(is:ie, j, kmp:km), inline_mp%prefluxi(is:ie, j, kmp:km), & inline_mp%prefluxs(is:ie, j, kmp:km), inline_mp%prefluxg(is:ie, j, kmp:km), & - inline_mp%cond (is:ie, j), inline_mp%dep (is:ie, j), inline_mp%reevap (is:ie, j), & - inline_mp%sub (is:ie, j), last_step, do_inline_mp) + last_step, do_inline_mp) ! update non-microphyiscs tracers due to mass change if (adj_mass_vmr .gt. 0) then @@ -647,7 +621,7 @@ subroutine intermediate_phys (is, ie, js, je, isd, ied, jsd, jed, km, npx, npy, call mtetw (kmp, km, q (i, j, kmp:km, sphum), q (i, j, kmp:km, liq_wat), & q (i, j, kmp:km, rainwat), q (i, j, kmp:km, ice_wat), q (i, j, kmp:km, snowwat), & q (i, j, kmp:km, graupel), tz (kmp:km), ua (i, j, kmp:km), va (i, j, kmp:km), wz (kmp:km), & - delp (i, j, kmp:km), gsize (i), dte (i), 0.0, inline_mp%prew (i, j), & + delp (i, j, kmp:km), dte (i), 0.0, inline_mp%prew (i, j), & inline_mp%prer (i, j), inline_mp%prei (i, j), inline_mp%pres (i, j), & inline_mp%preg (i, j), 0.0, 0.0, abs (mdt), te_end (i, kmp:km), tw_end (i, kmp:km), & te_b_end (i), tw_b_end (i), .true., hydrostatic, te_loss (i)) @@ -669,16 +643,16 @@ subroutine intermediate_phys (is, ie, js, je, isd, ied, jsd, jed, km, npx, npy, if (abs (sum (te_end (i, kmp:km)) + te_b_end (i) - sum (te_beg (i, kmp:km)) - te_b_beg (i)) / & (sum (te_beg (i, kmp:km)) + te_b_beg (i)) .gt. 
te_err) then print*, "GFDL-MP-INTM TE: ", & - !(sum (te_beg (i, kmp:km)) + te_b_beg (i)) / (gsize (i) ** 2), & - !(sum (te_end (i, kmp:km)) + te_b_end (i)) / (gsize (i) ** 2), & + !(sum (te_beg (i, kmp:km)) + te_b_beg (i)), & + !(sum (te_end (i, kmp:km)) + te_b_end (i)), & (sum (te_end (i, kmp:km)) + te_b_end (i) - sum (te_beg (i, kmp:km)) - te_b_beg (i)) / & (sum (te_beg (i, kmp:km)) + te_b_beg (i)) endif if (abs (sum (tw_end (i, kmp:km)) + tw_b_end (i) - sum (tw_beg (i, kmp:km)) - tw_b_beg (i)) / & (sum (tw_beg (i, kmp:km)) + tw_b_beg (i)) .gt. tw_err) then print*, "GFDL-MP-INTM TW: ", & - !(sum (tw_beg (i, kmp:km)) + tw_b_beg (i)) / (gsize (i) ** 2), & - !(sum (tw_end (i, kmp:km)) + tw_b_end (i)) / (gsize (i) ** 2), & + !(sum (tw_beg (i, kmp:km)) + tw_b_beg (i)), & + !(sum (tw_end (i, kmp:km)) + tw_b_end (i)), & (sum (tw_end (i, kmp:km)) + tw_b_end (i) - sum (tw_beg (i, kmp:km)) - tw_b_beg (i)) / & (sum (tw_beg (i, kmp:km)) + tw_b_beg (i)) endif @@ -766,8 +740,6 @@ subroutine intermediate_phys (is, ie, js, je, isd, ied, jsd, jed, km, npx, npy, deallocate (dp0) endif - call timing_off ('gfdl_mp') - endif !----------------------------------------------------------------------- diff --git a/model/nh_core.F90 b/model/nh_core.F90 index df7802cdc..c0bf06a83 100644 --- a/model/nh_core.F90 +++ b/model/nh_core.F90 @@ -29,12 +29,13 @@ module nh_core_mod use nh_utils_mod, only: update_dz_c, update_dz_d, nh_bc use nh_utils_mod, only: sim_solver, sim1_solver, sim3_solver use nh_utils_mod, only: sim3p0_solver, rim_2d - use nh_utils_mod, only: Riem_Solver_c + use nh_utils_mod, only: Riem_Solver_c, imp_diff_w + use nh_utils_mod, only: edge_profile1 implicit none private - public Riem_Solver3, Riem_Solver_c, update_dz_c, update_dz_d, nh_bc + public Riem_Solver3, Riem_Solver_c, update_dz_c, update_dz_d, nh_bc, edge_profile1 real, parameter:: r3 = 1./3. CONTAINS @@ -44,7 +45,7 @@ subroutine Riem_Solver3(ms, dt, is, ie, js, je, km, ng, & ptop, zs, q_con, w, delz, pt, & delp, zh, pe, ppe, pk3, pk, peln, & ws, scale_m, p_fac, a_imp, & - use_logp, last_call, fp_out, fast_tau_w_sec) + use_logp, last_call, fp_out, d2bg_zq, debug, fast_tau_w_sec) !-------------------------------------------- ! !OUTPUT PARAMETERS ! Ouput: gz: grav*height at edges @@ -54,9 +55,9 @@ subroutine Riem_Solver3(ms, dt, is, ie, js, je, km, ng, & integer, intent(in):: ms, is, ie, js, je, km, ng integer, intent(in):: isd, ied, jsd, jed real, intent(in):: dt ! 
the BIG horizontal Lagrangian time step - real, intent(in):: akap, cp, ptop, p_fac, a_imp, scale_m, fast_tau_w_sec + real, intent(in):: akap, cp, ptop, p_fac, a_imp, scale_m, d2bg_zq, fast_tau_w_sec real, intent(in):: zs(isd:ied,jsd:jed) - logical, intent(in):: last_call, use_logp, fp_out + logical, intent(in):: last_call, use_logp, fp_out, debug real, intent(in):: ws(is:ie,js:je) real, intent(in), dimension(isd:,jsd:,1:):: q_con, cappa real, intent(in), dimension(isd:ied,jsd:jed,km):: delp, pt @@ -81,7 +82,7 @@ subroutine Riem_Solver3(ms, dt, is, ie, js, je, km, ng, & !$OMP parallel do default(none) shared(is,ie,js,je,km,delp,ptop,peln1,pk3,ptk,akap,rgrav,zh,pt, & !$OMP w,a_imp,dt,gama,ws,p_fac,scale_m,ms,delz,last_call, & -!$OMP peln,pk,fp_out,ppe,use_logp,zs,pe,cappa,q_con,fast_tau_w_sec ) & +!$OMP peln,pk,fp_out,ppe,use_logp,zs,pe,cappa,q_con,d2bg_zq,debug,fast_tau_w_sec ) & !$OMP private(cp2, gm2, dm, dz2, pm2, pem, peg, pelng, pe2, peln2, w2) do 2000 j=js, je @@ -153,6 +154,10 @@ subroutine Riem_Solver3(ms, dt, is, ie, js, je, km, ng, & a_imp, p_fac, scale_m, fast_tau_w_sec) endif + if (d2bg_zq > 0.0001) then + call imp_diff_w(is, ie, km, d2bg_zq, dz2, ws(is,j), w2) + endif + do k=1, km do i=is, ie w(i,j,k) = w2(i,k) diff --git a/model/nh_utils.F90 b/model/nh_utils.F90 index 6cd65fede..2a636eced 100644 --- a/model/nh_utils.F90 +++ b/model/nh_utils.F90 @@ -36,7 +36,8 @@ module nh_utils_mod public update_dz_c, update_dz_d, nh_bc public sim_solver, sim1_solver, sim3_solver public sim3p0_solver, rim_2d - public Riem_Solver_c, edge_scalar + public Riem_Solver_c, edge_scalar, imp_diff_w + public edge_profile1 #ifdef DZ_MIN_6 real, parameter:: dz_min = 6. @@ -611,13 +612,12 @@ subroutine Riem_Solver3test(ms, dt, is, ie, js, je, km, ng, & end subroutine Riem_Solver3test - subroutine imp_diff_w(j, is, ie, js, je, ng, km, cd, delz, ws, w, w3) - integer, intent(in) :: j, is, ie, js, je, km, ng + subroutine imp_diff_w(is, ie, km, cd, delz, ws, w) + integer, intent(in) :: is, ie, km real, intent(in) :: cd real, intent(in) :: delz(is:ie, km) ! delta-height (m) - real, intent(in) :: w(is:ie, km) ! vertical vel. (m/s) + real, intent(inout) :: w(is:ie, km) ! vertical vel. (m/s) real, intent(in) :: ws(is:ie) - real, intent(out) :: w3(is-ng:ie+ng,js-ng:je+ng,km) ! Local: real, dimension(is:ie,km):: c, gam, dz, wt real:: bet(is:ie) @@ -655,22 +655,23 @@ subroutine imp_diff_w(j, is, ie, js, je, ng, km, cd, delz, ws, w, w3) do i=is,ie gam(i,km) = c(i,km-1) / bet(i) a = cd/(dz(i,km)*delz(i,km)) - wt(i,km) = (w(i,km) + 2.*ws(i)*cd/delz(i,km)**2 & + w(i,km) = (w(i,km) + 2.*ws(i)*cd/delz(i,km)**2 & + a*wt(i,km-1))/(1. + a + (cd+cd)/delz(i,km)**2 + a*gam(i,km)) enddo do k=km-1,1,-1 do i=is,ie - wt(i,k) = wt(i,k) - gam(i,k+1)*wt(i,k+1) - enddo - enddo - - do k=1,km - do i=is,ie - w3(i,j,k) = wt(i,k) + w(i,k) = wt(i,k) - gam(i,k+1)*w(i,k+1) enddo enddo +!!$ +!!$ do k=1,km +!!$ do i=is,ie +!!$ w3(i,j,k) = wt(i,k) +!!$ enddo +!!$ enddo +!!$ end subroutine imp_diff_w @@ -1681,6 +1682,72 @@ subroutine edge_profile(q1, q2, q1e, q2e, i1, i2, j1, j2, j, km, dp0, uniform_gr end subroutine edge_profile + subroutine edge_profile1(q1, q1e, i1, i2, km, dp0, limiter) +! 
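
Illustrative note: the reworked imp_diff_w above now damps w in place, applying an implicit vertical diffusion step whenever d2bg_zq exceeds a small threshold and solving the resulting tridiagonal system by forward elimination and back substitution (the gam/bet recurrence visible in the hunk). The single-column sketch below shows the same backward-Euler/Thomas-algorithm pattern on a uniform grid; the program name, constants, grid spacing and zero-flux boundary closure are illustrative assumptions, not the FV3 implementation.

      ! Sketch only: backward-Euler vertical diffusion of w solved with the
      ! Thomas algorithm, structurally analogous to imp_diff_w (uniform grid,
      ! made-up constants, simplified zero-flux boundaries).
      program implicit_w_diffusion
        implicit none
        integer, parameter :: km  = 64
        real,    parameter :: dz  = 250.       ! layer thickness (m), illustrative
        real,    parameter :: kdt = 50.*900.   ! diffusivity (m2/s) x time step (s), illustrative
        real    :: w(km), b(km), rhs(km), gam(km)
        real    :: r, bet
        integer :: k

        w = 0.
        w(km/2) = 1.                           ! localized w perturbation

        ! (1 - K*dt d2/dz2) w_new = w_old  =>  -r*w(k-1) + (1+2r)*w(k) - r*w(k+1) = rhs(k)
        r     = kdt/dz**2
        b     = 1. + 2.*r
        b(1)  = 1. + r                         ! zero-flux closure at the top
        b(km) = 1. + r                         ! zero-flux closure at the bottom
        rhs   = w

        ! Thomas algorithm: forward elimination ...
        bet  = b(1)
        w(1) = rhs(1)/bet
        do k = 2, km
           gam(k) = -r/bet
           bet    = b(k) + r*gam(k)
           w(k)   = (rhs(k) + r*w(k-1))/bet
        enddo
        ! ... then back substitution
        do k = km-1, 1, -1
           w(k) = w(k) - gam(k+1)*w(k+1)
        enddo

        print '(a,2f10.6)', ' min/max w after one implicit step:', minval(w), maxval(w)
      end program implicit_w_diffusion
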
Edge profiles for a single scalar quantity + integer, intent(in):: i1, i2 + integer, intent(in):: km + integer, intent(in):: limiter + real, intent(in):: dp0(km) + real, intent(in), dimension(i1:i2,km):: q1 + real, intent(out), dimension(i1:i2,km+1):: q1e +!----------------------------------------------------------------------- + real, dimension(i1:i2,km+1):: qe1, gam ! edge values + real gak(km) + real bet, r2o3, r4o3 + real g0, gk, xt1, xt2, a_bot + integer i, k + +! Assuming grid varying in vertical only + g0 = dp0(2) / dp0(1) + xt1 = 2.*g0*(g0+1. ) + bet = g0*(g0+0.5) + do i=i1,i2 + qe1(i,1) = ( xt1*q1(i,1) + q1(i,2) ) / bet + gam(i,1) = ( 1. + g0*(g0+1.5) ) / bet + enddo + + do k=2,km + gk = dp0(k-1) / dp0(k) + do i=i1,i2 + bet = 2. + 2.*gk - gam(i,k-1) + qe1(i,k) = ( 3.*(q1(i,k-1)+gk*q1(i,k)) - qe1(i,k-1) ) / bet + gam(i,k) = gk / bet + enddo + enddo + + a_bot = 1. + gk*(gk+1.5) + xt1 = 2.*gk*(gk+1.) + do i=i1,i2 + xt2 = gk*(gk+0.5) - a_bot*gam(i,km) + qe1(i,km+1) = ( xt1*q1(i,km) + q1(i,km-1) - a_bot*qe1(i,km) ) / xt2 + enddo + + do k=km,1,-1 + do i=i1,i2 + qe1(i,k) = qe1(i,k) - gam(i,k)*qe1(i,k+1) + enddo + enddo + +!------------------ +! Apply constraints +!------------------ + if ( limiter/=0 ) then ! limit the top & bottom winds + do i=i1,i2 +! Top + if ( q1(i,1)*qe1(i,1) < 0. ) qe1(i,1) = 0. +! Surface: + if ( q1(i,km)*qe1(i,km+1) < 0. ) qe1(i,km+1) = 0. + enddo + endif + + do k=1,km+1 + do i=i1,i2 + q1e(i,k) = qe1(i,k) + enddo + enddo + + end subroutine edge_profile1 + subroutine nh_bc(ptop, grav, kappa, cp, delp, delzBC, pt, phis, & #ifdef USE_COND q_con, & diff --git a/model/sw_core.F90 b/model/sw_core.F90 index 69f0c02e1..b1c21facd 100644 --- a/model/sw_core.F90 +++ b/model/sw_core.F90 @@ -25,6 +25,7 @@ module sw_core_mod use fv_mp_mod, only: fill_corners, XDir, YDir use fv_arrays_mod, only: fv_grid_type, fv_grid_bounds_type, fv_flags_type use a2b_edge_mod, only: a2b_ord4 + use mpp_mod, only: mpp_pe !DEBUG #ifdef SW_DYNAMICS use test_cases_mod, only: test_case @@ -67,7 +68,7 @@ module sw_core_mod real, parameter:: b3 = -13./60. real, parameter:: b4 = 0.45 real, parameter:: b5 = -0.05 - + real, parameter :: smag_scalar = r3 private public :: c_sw, d_sw, fill_4corners, del6_vt_flux, divergence_corner, divergence_corner_nest @@ -537,10 +538,13 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & !--- real :: fx2(bd%isd:bd%ied+1,bd%jsd:bd%jed) real :: fy2(bd%isd:bd%ied, bd%jsd:bd%jed+1) + real :: fx3(bd%isd:bd%ied+1,bd%jsd:bd%jed) + real :: fy3(bd%isd:bd%ied, bd%jsd:bd%jed+1) real :: dw(bd%is:bd%ie,bd%js:bd%je) ! work array !--- real, dimension(bd%is:bd%ie+1,bd%js:bd%je+1):: ub, vb real :: wk(bd%isd:bd%ied,bd%jsd:bd%jed) ! work array + real :: smag_q(bd%isd:bd%ied,bd%jsd:bd%jed) real :: ke(bd%isd:bd%ied+1,bd%jsd:bd%jed+1) ! needs this for corner_comm real :: vort(bd%isd:bd%ied,bd%jsd:bd%jed) ! Vorticity real :: fx(bd%is:bd%ie+1,bd%js:bd%je ) ! 
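
Illustrative note: the new edge_profile1 above reconstructs edge (interface) values \hat{q}_k, k = 1,...,km+1, from the layer means \bar{q}_k by a tridiagonal solve; reading the elimination recurrence off the code, the interior rows are

    \hat{q}_{k-1} + 2\,(1 + g_k)\,\hat{q}_k + g_k\,\hat{q}_{k+1}
        = 3\,(\bar{q}_{k-1} + g_k\,\bar{q}_k),
    \qquad g_k = \Delta p_{k-1} / \Delta p_k, \quad k = 2,\ldots,km,

with one-sided rows at the top and bottom edges built from the same layer-thickness ratios. When limiter /= 0, an edge value whose sign disagrees with the adjacent layer mean (the top edge against \bar{q}_1, the surface edge against \bar{q}_{km}) is reset to zero.
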
1-D X-direction Fluxes @@ -908,7 +912,6 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & enddo enddo - call fv_tp_2d(delp, crx_adv, cry_adv, npx, npy, hord_dp, fx, fy, & xfx_adv,yfx_adv, gridstruct, bd, ra_x, ra_y, flagstruct%lim_fac, nord=nord_v, damp_c=damp_v) @@ -956,14 +959,14 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & endif enddo enddo - endif - call fv_tp_2d(w, crx_adv,cry_adv, npx, npy, hord_vt, gx, gy, xfx_adv, yfx_adv, & + endif + call fv_tp_2d(w, crx_adv,cry_adv, npx, npy, hord_vt, gx, gy, xfx_adv, yfx_adv, & gridstruct, bd, ra_x, ra_y, flagstruct%lim_fac, mfx=fx, mfy=fy) - do j=js,je - do i=is,ie - w(i,j) = delp(i,j)*w(i,j) + (gx(i,j)-gx(i+1,j)+gy(i,j)-gy(i,j+1))*rarea(i,j) - enddo - enddo + do j=js,je + do i=is,ie + w(i,j) = delp(i,j)*w(i,j) + (gx(i,j)-gx(i+1,j)+gy(i,j)-gy(i,j+1))*rarea(i,j) + enddo + enddo endif #ifdef USE_COND @@ -1553,7 +1556,7 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & end subroutine d_sw - subroutine del6_vt_flux(nord, npx, npy, damp, q, d2, fx2, fy2, gridstruct, bd) + subroutine del6_vt_flux(nord, npx, npy, damp, q, d2, fx2, fy2, gridstruct, bd, damp_Km) ! Del-nord damping for the relative vorticity ! nord must be <= 2 !------------------ @@ -1568,6 +1571,8 @@ subroutine del6_vt_flux(nord, npx, npy, damp, q, d2, fx2, fy2, gridstruct, bd) type(fv_grid_bounds_type), intent(IN) :: bd real, intent(inout):: q(bd%isd:bd%ied, bd%jsd:bd%jed) ! rel. vorticity ghosted on input type(fv_grid_type), intent(IN), target :: gridstruct + real, OPTIONAL, intent(in) :: damp_Km(bd%isd:bd%ied,bd%jsd:bd%jed) ! variable diffusion coeff for scalars + ! First try adapts cell-centered eddy diffusivities ! Work arrays: real, intent(out):: d2(bd%isd:bd%ied, bd%jsd:bd%jed) real, intent(out):: fx2(bd%isd:bd%ied+1,bd%jsd:bd%jed), fy2(bd%isd:bd%ied,bd%jsd:bd%jed+1) @@ -1666,6 +1671,20 @@ subroutine del6_vt_flux(nord, npx, npy, damp, q, d2, fx2, fy2, gridstruct, bd) enddo endif + if (present(damp_Km)) then !Coefficient multiplied in earlier + do j=js,je + do i=is,ie+1 + fx2(i,j) = fx2(i,j)*0.5*damp_km(i,j) + enddo + enddo + do j=js,je+1 + do i=is,ie + fy2(i,j) = fy2(i,j)*0.5*damp_km(i,j) + enddo + enddo + + endif + end subroutine del6_vt_flux @@ -1956,6 +1975,133 @@ subroutine smag_corner(dt, u, v, ua, va, smag_c, bd, npx, npy, gridstruct, ng) end subroutine smag_corner + subroutine smag_cell(dt, u, v, ua, va, smag_q, bd, npx, npy, gridstruct, ng, do_smag, dudz, dvdz, smag2d) +! Compute the cell-mean Tension_Shear strain for Smagorinsky diffusion +!!! works only if (grid_type==4) (need to add corner handling on cubed sphere) +!!! Next want to add in vertical shear terms + !!! To complete the calculation + type(fv_grid_bounds_type), intent(IN) :: bd + real, intent(in):: dt, smag2d + integer, intent(IN) :: npx, npy, ng + real, intent(in), dimension(bd%isd:bd%ied, bd%jsd:bd%jed+1):: u + real, intent(in), dimension(bd%isd:bd%ied+1,bd%jsd:bd%jed ):: v + real, intent(in), dimension(bd%isd:bd%ied,bd%jsd:bd%jed):: ua, va + real, intent(out), dimension(bd%isd:bd%ied,bd%jsd:bd%jed):: smag_q + type(fv_grid_type), intent(IN), target :: gridstruct + logical, intent(in) :: do_smag + real , intent(IN) :: dudz(bd%isd:bd%ied, bd%jsd:bd%jed+1) + real , intent(IN) :: dvdz(bd%isd:bd%ied+1,bd%jsd:bd%jed) + +! local + real:: ut(bd%isd:bd%ied+1,bd%jsd:bd%jed) + real:: vt(bd%isd:bd%ied, bd%jsd:bd%jed+1) + real:: wk(bd%isd:bd%ied,bd%jsd:bd%jed) ! 
work array + real:: sh(bd%isd:bd%ied,bd%jsd:bd%jed) + integer i,j + integer is2, ie1 + real smag_limit + + real, pointer, dimension(:,:) :: dxc, dyc, dx, dy, rarea, rarea_c + + integer :: is, ie, js, je + integer :: isd, ied, jsd, jed + + is = bd%is + ie = bd%ie + js = bd%js + je = bd%je + + isd = bd%isd + ied = bd%ied + jsd = bd%jsd + jed = bd%jed + + dxc => gridstruct%dxc + dyc => gridstruct%dyc + dx => gridstruct%dx + dy => gridstruct%dy + rarea => gridstruct%rarea + rarea_c => gridstruct%rarea_c + + is2 = max(2,is); ie1 = min(npx-1,ie+1) + + if (smag2d > 1.e-3) then + smag_limit = 0.20/smag2d + elseif (do_smag) then + smag_q = 0.0 + return + endif + +! Smag = sqrt [ T**2 + S**2 ]: unit = 1/s +! where T = du/dx - dv/dy; S = du/dy + dv/dx +! Compute tension strain at corners: + do j=js-1,je+2 + do i=is-2,ie+2 + ut(i,j) = u(i,j)*dyc(i,j) + enddo + enddo + do j=js-2,je+2 + do i=is-1,ie+2 + vt(i,j) = v(i,j)*dxc(i,j) + enddo + enddo + do j=js-1,je+2 + do i=is-1,ie+2 + wk(i,j) = rarea_c(i,j)*(vt(i,j-1)-vt(i,j)-ut(i-1,j)+ut(i,j)) + enddo + enddo +! Fix the corners?? if grid_type /= 4 + do j=js-1,je+1 + do i=is-1,ie+1 + smag_q(i,j) = 0.25*(wk(i,j) + wk(i,j+1) + wk(i+1,j) + wk(i+1,j+1)) + enddo + enddo + + if (do_smag) then + do j=js-1,je+1 + do i=is-1,ie+1 + smag_q(i,j) = smag_q(i,j) - 0.5*(dvdz(i,j-1)+dvdz(i,j)) + smag_q(i,j) = smag_q(i,j) + 0.5*(dudz(i-1,j)+dudz(i,j)) + enddo + enddo + endif + +! Compute shear strain: + do j=js-1,je+2 + do i=is-1,ie+1 + vt(i,j) = u(i,j)*dx(i,j) + enddo + enddo + do j=js-1,je+1 + do i=is-1,ie+2 + ut(i,j) = v(i,j)*dy(i,j) + enddo + enddo + + do j=js-1,je+1 + do i=is-1,ie+1 + wk(i,j) = rarea(i,j)*(vt(i,j)-vt(i,j+1)+ut(i,j)-ut(i+1,j)) + enddo + enddo + if (do_smag) then + do j=js-1,je+1 + do i=is-1,ie+1 + wk(i,j) = wk(i,j) - 0.5*(dvdz(i-1,j)+dvdz(i,j)) + wk(i,j) = wk(i,j) - 0.5*(dudz(i,j-1)+dudz(i,j)) + smag_q(i,j) = min(dt*sqrt( wk(i,j)**2 + smag_q(i,j)**2 ), smag_limit) + enddo + enddo + else + do j=js-1,je+1 + do i=is-1,ie+1 + smag_q(i,j) = dt*sqrt( wk(i,j)**2 + smag_q(i,j)**2 ) + enddo + enddo + endif + + end subroutine smag_cell + + subroutine xtp_u(is,ie,js,je,isd,ied,jsd,jed,c, u, v, flux, iord, dx, rdx, npx, npy, grid_type, bounded_domain, lim_fac) integer, intent(in):: is,ie,js,je, isd,ied,jsd,jed diff --git a/model/tp_core.F90 b/model/tp_core.F90 index 89d2b14e8..4ab2164a6 100644 --- a/model/tp_core.F90 +++ b/model/tp_core.F90 @@ -78,7 +78,8 @@ module tp_core_mod contains subroutine fv_tp_2d(q, crx, cry, npx, npy, hord, fx, fy, xfx, yfx, & - gridstruct, bd, ra_x, ra_y, lim_fac, mfx, mfy, mass, nord, damp_c) + gridstruct, bd, ra_x, ra_y, lim_fac, mfx, mfy, & + mass, nord, damp_c, damp_smag, damp_Km) type(fv_grid_bounds_type), intent(IN) :: bd integer, intent(in):: npx, npy integer, intent(in)::hord @@ -102,6 +103,9 @@ subroutine fv_tp_2d(q, crx, cry, npx, npy, hord, fx, fy, xfx, yfx, & real, OPTIONAL, intent(in):: mass(bd%isd:bd%ied,bd%jsd:bd%jed) real, OPTIONAL, intent(in):: damp_c integer, OPTIONAL, intent(in):: nord + real, OPTIONAL, intent(in) :: damp_smag ! additional 2nd-order flux + real, OPTIONAL, intent(in) :: damp_Km(bd%isd:bd%ied,bd%jsd:bd%jed) ! variable diffusion coeff for scalars + ! First try adapts cell-centered eddy diffusivities ! 
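
Illustrative note: the strain magnitude assembled by smag_cell above follows the comment embedded in the routine,

    |D| = \sqrt{T^2 + S^2}, \qquad
    T = \partial u/\partial x - \partial v/\partial y \ (\text{tension}), \qquad
    S = \partial u/\partial y + \partial v/\partial x \ (\text{shear}), \qquad [\mathrm{s}^{-1}]

with T evaluated at cell corners and averaged to centers, S evaluated at centers, and the vertical-shear inputs dudz/dvdz folded into both components when do_smag is set. The routine returns the nondimensional quantity smag_q = dt*|D|, capped at 0.20/smag2d in the do_smag branch. In a classical Smagorinsky closure this would feed an eddy viscosity of the form K = (c_s*Delta)^2 |D|; here the grid-area scaling is instead applied downstream through damp_smag*da_min in fv_tp_2d (a reading of the wiring shown in this patch, not a complete description of the closure).
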
Local: integer ord_ou, ord_in real q_i(bd%isd:bd%ied,bd%js:bd%je) @@ -194,7 +198,13 @@ subroutine fv_tp_2d(q, crx, cry, npx, npy, hord, fx, fy, xfx, yfx, & damp = (damp_c * gridstruct%da_min)**(nord+1) call deln_flux(nord, is,ie,js,je, npx, npy, damp, q, fx, fy, gridstruct, bd, mass ) endif - endif + endif + if (present(damp_smag) .and. present(damp_Km) .and. present(mass)) then + if (damp_smag > 1.e-3) then + damp = damp_smag * gridstruct%da_min !2nd order + call deln_flux(0, is,ie,js,je, npx, npy, damp, q, fx, fy, gridstruct, bd, mass, damp_Km=damp_Km ) + endif + endif else !--------------------------------- ! For transport of delp, vorticity @@ -215,6 +225,12 @@ subroutine fv_tp_2d(q, crx, cry, npx, npy, hord, fx, fy, xfx, yfx, & call deln_flux(nord, is,ie,js,je, npx, npy, damp, q, fx, fy, gridstruct, bd) endif endif + if (present(damp_smag) .and. present(damp_Km)) then + if (damp_smag > 1.e-3) then + damp = damp_smag * gridstruct%da_min !2nd order + call deln_flux(0, is,ie,js,je, npx, npy, damp, q, fx, fy, gridstruct, bd, damp_Km=damp_Km) + endif + endif endif end subroutine fv_tp_2d @@ -1209,7 +1225,7 @@ subroutine pert_ppm(im, a0, al, ar, iv) end subroutine pert_ppm !TODO lmh 25may18: Need to ensure copy_corners is just ignored if not a global domain - subroutine deln_flux(nord,is,ie,js,je, npx, npy, damp, q, fx, fy, gridstruct, bd, mass ) + subroutine deln_flux(nord,is,ie,js,je, npx, npy, damp, q, fx, fy, gridstruct, bd, mass, damp_Km ) ! Del-n damping for the cell-mean values (A grid) !------------------ ! nord = 0: del-2 @@ -1224,6 +1240,7 @@ subroutine deln_flux(nord,is,ie,js,je, npx, npy, damp, q, fx, fy, gridstruct, bd real, intent(in):: q(bd%isd:bd%ied, bd%jsd:bd%jed) ! q ghosted on input type(fv_grid_type), intent(IN), target :: gridstruct real, optional, intent(in):: mass(bd%isd:bd%ied, bd%jsd:bd%jed) ! q ghosted on input + real, OPTIONAL, intent(in) :: damp_Km(bd%isd:bd%ied,bd%jsd:bd%jed) ! variable diffusion coeff for scalars ! diffusive fluxes: real, intent(inout):: fx(bd%is:bd%ie+1,bd%js:bd%je), fy(bd%is:bd%ie,bd%js:bd%je+1) ! local: @@ -1245,7 +1262,7 @@ subroutine deln_flux(nord,is,ie,js,je, npx, npy, damp, q, fx, fy, gridstruct, bd i1 = is-1-nord; i2 = ie+1+nord j1 = js-1-nord; j2 = je+1+nord - if ( .not. present(mass) ) then + if ( .not. present(mass) .and. .not. present(damp_Km) ) then do j=j1, j2 do i=i1,i2 d2(i,j) = damp*q(i,j) @@ -1332,7 +1349,21 @@ subroutine deln_flux(nord,is,ie,js,je, npx, npy, damp, q, fx, fy, gridstruct, bd !--------------------------------------------- if ( present(mass) ) then -! Apply mass weighting to diffusive fluxes: + ! 
Apply mass weighting to diffusive fluxes: + if (present(damp_Km)) then + do j=js,je + do i=is,ie+1 + damp2 = 0.25*damp*(damp_km(i-1,j)+damp_km(i,j)) + fx(i,j) = fx(i,j) + damp2*(mass(i-1,j)+mass(i,j))*fx2(i,j) + enddo + enddo + do j=js,je+1 + do i=is,ie + damp2 = 0.25*damp*(damp_km(i,j-1)+damp_km(i,j)) + fy(i,j) = fy(i,j) + damp2*(mass(i,j-1)+mass(i,j))*fy2(i,j) + enddo + enddo + else damp2 = 0.5*damp do j=js,je do i=is,ie+1 @@ -1344,7 +1375,23 @@ subroutine deln_flux(nord,is,ie,js,je, npx, npy, damp, q, fx, fy, gridstruct, bd fy(i,j) = fy(i,j) + damp2*(mass(i,j-1)+mass(i,j))*fy2(i,j) enddo enddo + endif else + if (present(damp_Km)) then + do j=js,je + do i=is,ie+1 + damp2 = 0.25*damp*(damp_km(i-1,j)+damp_km(i,j)) + fx(i,j) = fx(i,j) + damp2*fx2(i,j) + enddo + enddo + do j=js,je+1 + do i=is,ie + damp2 = 0.25*damp*(damp_km(i,j-1)+damp_km(i,j)) + fy(i,j) = fy(i,j) + damp2*fy2(i,j) + enddo + enddo + else + do j=js,je do i=is,ie+1 fx(i,j) = fx(i,j) + fx2(i,j) @@ -1355,6 +1402,7 @@ subroutine deln_flux(nord,is,ie,js,je, npx, npy, damp, q, fx, fy, gridstruct, bd fy(i,j) = fy(i,j) + fy2(i,j) enddo enddo + endif endif end subroutine deln_flux diff --git a/tools/external_aero.F90 b/tools/external_aero.F90 index 3f48fa1f9..97dc6c986 100644 --- a/tools/external_aero.F90 +++ b/tools/external_aero.F90 @@ -325,7 +325,7 @@ subroutine read_aero(is, ie, js, je, npz, nq, Time, pe, peln, qa, kord_tr, fill) call map1_q2 (nlev, aero_now_pe (is:ie, j, :), aero_now_a (is:ie, js:je, :), & npz, pe (is:ie, :, j), qa (is:ie, j, :, aero_id), & pe (is:ie, 2:npz+1, j) - pe (is:ie, 1:npz, j), & - is, ie, 0, kord_tr, j, is, ie, js, je, 0., .false.) + is, ie, 0, kord_tr, j, is, ie, js, je, 0.) if (fill) call fillz (ie-is+1, npz, 1, qa (is:ie, j, :, aero_id), & pe (is:ie, 2:npz+1, j) - pe (is:ie, 1:npz, j)) enddo diff --git a/tools/external_ic.F90 b/tools/external_ic.F90 index b69a6d9ad..4c91a29d2 100644 --- a/tools/external_ic.F90 +++ b/tools/external_ic.F90 @@ -34,7 +34,7 @@ module external_ic_mod FmsNetcdfFile_t, FmsNetcdfDomainFile_t, read_restart, & register_restart_field, register_axis, get_dimension_size, & get_variable_dimension_names, get_variable_num_dimensions - use mpp_mod, only: mpp_error, FATAL, NOTE, mpp_pe, mpp_root_pe + use mpp_mod, only: mpp_error, FATAL, NOTE, WARNING, mpp_pe, mpp_root_pe use mpp_mod, only: stdlog, input_nml_file, mpp_npes, mpp_get_current_pelist use mpp_parameter_mod, only: AGRID_PARAM=>AGRID use mpp_domains_mod, only: mpp_get_tile_id, domain2d, mpp_update_domains, NORTH, EAST @@ -55,7 +55,6 @@ module external_ic_mod use fv_surf_map_mod, only: surfdrv, FV3_zs_filter use fv_surf_map_mod, only: sgh_g, oro_g use fv_surf_map_mod, only: del2_cubed_sphere, del4_cubed_sphere - use fv_timing_mod, only: timing_on, timing_off use init_hydro_mod, only: p_var use fv_fill_mod, only: fillz use fv_eta_mod, only: set_eta, set_external_eta @@ -70,7 +69,7 @@ module external_ic_mod use boundary_mod, only: nested_grid_BC, extrapolation_BC use mpp_domains_mod, only: mpp_get_data_domain, mpp_get_global_domain, mpp_get_compute_domain use fv_grid_utils_mod, only: cubed_a2d - + implicit none private @@ -154,9 +153,8 @@ subroutine get_external_ic( Atm, cold_start, icdir ) ! Read in the specified external dataset and do all the needed transformation if ( Atm%flagstruct%ncep_ic ) then nq = 1 - call timing_on('NCEP_IC') + if( is_master() ) write(*,*) 'Calling get_ncep_ic' call get_ncep_ic( Atm, nq ) - call timing_off('NCEP_IC') #ifdef FV_TRACERS if (.not. 
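
Illustrative note: with the new optional damp_Km argument, deln_flux above turns the fixed del-2 damping coefficient into a spatially varying one; the coefficient applied on each face is the arithmetic mean of the two adjacent cell-centered values, which is what the 0.25*damp*(damp_km(i-1,j)+damp_km(i,j)) factors express. Written out for the mass-weighted x-direction update,

    F_x(i,j) \leftarrow F_x(i,j)
        + \mathrm{damp}\,\cdot\,\tfrac12\,(K_{i-1,j}+K_{i,j})\,\cdot\,\tfrac12\,(m_{i-1,j}+m_{i,j})\,\cdot\,\mathrm{fx2}_{i,j},

with K = damp_Km, m = mass, and damp = damp_smag*da_min as set in fv_tp_2d for its del-2 (nord = 0) calls; the y-direction and the branch without mass follow the same face-averaging pattern.
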
cold_start) then call fv_io_read_tracers( Atm ) @@ -164,18 +162,14 @@ subroutine get_external_ic( Atm, cold_start, icdir ) endif #endif elseif ( Atm%flagstruct%nggps_ic ) then - call timing_on('NGGPS_IC') + if( is_master() ) write(*,*) 'Calling get_nggps_ic' call get_nggps_ic( Atm ) - call timing_off('NGGPS_IC') elseif ( Atm%flagstruct%hrrrv3_ic ) then - call timing_on('HRRR_IC') + if( is_master() ) write(*,*) 'Calling get_hrrr_ic' call get_hrrr_ic( Atm ) - call timing_off('HRRR_IC') elseif ( Atm%flagstruct%ecmwf_ic ) then if( is_master() ) write(*,*) 'Calling get_ecmwf_ic' - call timing_on('ECMWF_IC') call get_ecmwf_ic( Atm ) - call timing_off('ECMWF_IC') else ! The following is to read in legacy lat-lon FV core restart file ! is Atm%q defined in all cases? @@ -183,10 +177,15 @@ subroutine get_external_ic( Atm, cold_start, icdir ) call get_fv_ic( Atm, nq ) endif + if (.not. (Atm%flagstruct%ncep_ic .or. Atm%flagstruct%nggps_ic) .and. Atm%flagstruct%fv_land) then + call mpp_error(FATAL, "fv_land = .true. only supported for ncep_ic, nggps_ic, restart run with n_zs_filter > 0, or idealized test.") + endif + call prt_mxm('PS', Atm%ps, is, ie, js, je, ng, 1, 0.01, Atm%gridstruct%area_64, Atm%domain) call prt_mxm('T', Atm%pt, is, ie, js, je, ng, Atm%npz, 1., Atm%gridstruct%area_64, Atm%domain) if (.not.Atm%flagstruct%hydrostatic) call prt_mxm('W', Atm%w, is, ie, js, je, ng, Atm%npz, 1., Atm%gridstruct%area_64, Atm%domain) call prt_mxm('SPHUM', Atm%q(:,:,:,1), is, ie, js, je, ng, Atm%npz, 1., Atm%gridstruct%area_64, Atm%domain) + if ( Atm%flagstruct%nggps_ic .or. Atm%flagstruct%ecmwf_ic .or. Atm%flagstruct%hrrrv3_ic ) then sphum = get_tracer_index(MODEL_ATMOS, 'sphum') liq_wat = get_tracer_index(MODEL_ATMOS, 'liq_wat') @@ -229,8 +228,9 @@ subroutine get_cubed_sphere_terrain( Atm ) real, allocatable :: g_dat2(:,:,:) real, allocatable :: pt_coarse(:,:,:) integer isc_p, iec_p, jsc_p, jec_p, isg, ieg, jsg,jeg + integer :: i,j - integer :: is, ie, js, je + integer :: is, ie, js, je integer :: isd, ied, jsd, jed, ng is = Atm%bd%is @@ -253,16 +253,24 @@ subroutine get_cubed_sphere_terrain( Atm ) call read_data(Fv_core, 'phis', Atm%phis(is:ie,js:je)) call close_file(Fv_core) else - call mpp_error(NOTE, fname//' not found; generating terrain from USGS data') + call mpp_error(NOTE, fname//' not found (forgot your restart files?); generating terrain from USGS data') call surfdrv( Atm%npx, Atm%npy, Atm%gridstruct%grid_64, Atm%gridstruct%agrid_64, & - Atm%gridstruct%area_64, Atm%gridstruct%dx, Atm%gridstruct%dy, & - Atm%gridstruct%dxa, Atm%gridstruct%dya, & - Atm%gridstruct%dxc, Atm%gridstruct%dyc, Atm%gridstruct%sin_sg, & - Atm%phis, Atm%flagstruct%stretch_fac, & - Atm%neststruct%nested, Atm%gridstruct%bounded_domain, & - Atm%neststruct%npx_global, Atm%domain, & - Atm%flagstruct%grid_number, Atm%bd ) + Atm%gridstruct%area_64, Atm%gridstruct%dx, Atm%gridstruct%dy, & + Atm%gridstruct%dxa, Atm%gridstruct%dya, & + Atm%gridstruct%dxc, Atm%gridstruct%dyc, Atm%gridstruct%sin_sg, & + Atm%phis, Atm%flagstruct%stretch_fac, & + Atm%neststruct%nested, Atm%gridstruct%bounded_domain, & + Atm%neststruct%npx_global, Atm%domain, & + Atm%flagstruct%grid_number, Atm%bd ) + if ( Atm%flagstruct%fv_land ) then + do j=js,je + do i=is,ie + Atm%sgh(i,j) = sgh_g(i,j) + Atm%oro(i,j) = oro_g(i,j) + enddo + enddo endif + endif !Needed for reproducibility. DON'T REMOVE THIS!! @@ -302,7 +310,7 @@ subroutine get_nggps_ic (Atm) type(fv_atmos_type), intent(inout) :: Atm ! 
local: real, dimension(:), allocatable:: ak, bk - real, dimension(:,:), allocatable:: wk2, ps, oro_g + real, dimension(:,:), allocatable:: wk2, ps, oro_ic real, dimension(:,:,:), allocatable:: ud, vd, u_w, v_w, u_s, v_s, omga, temp real, dimension(:,:,:), allocatable:: zh(:,:,:) ! 3D height at 65 edges real, dimension(:,:,:,:), allocatable:: q @@ -452,13 +460,13 @@ subroutine get_nggps_ic (Atm) endif if ( Atm%flagstruct%full_zs_filter) then - allocate (oro_g(isd:ied,jsd:jed)) - oro_g = 0. + allocate (oro_ic(isd:ied,jsd:jed)) + oro_ic = 0. ! land-frac - call register_restart_field(ORO_restart, 'land_frac', oro_g, dim_names_2d) - call mpp_update_domains(oro_g, Atm%domain) + call register_restart_field(ORO_restart, 'land_frac', oro_ic, dim_names_2d) + call mpp_update_domains(oro_ic, Atm%domain) if (Atm%neststruct%nested) then - call extrapolation_BC(oro_g, 0, 0, Atm%npx, Atm%npy, Atm%bd, .true.) + call extrapolation_BC(oro_ic, 0, 0, Atm%npx, Atm%npy, Atm%bd, .true.) endif endif @@ -583,8 +591,8 @@ subroutine get_nggps_ic (Atm) Atm%gridstruct%area_64, Atm%gridstruct%dxa, Atm%gridstruct%dya, & Atm%gridstruct%dx, Atm%gridstruct%dy, Atm%gridstruct%dxc, & Atm%gridstruct%dyc, Atm%gridstruct%grid_64, Atm%gridstruct%agrid_64, & - Atm%gridstruct%sin_sg, Atm%phis, oro_g) - deallocate(oro_g) + Atm%gridstruct%sin_sg, Atm%phis, oro_ic) + deallocate(oro_ic) endif @@ -595,7 +603,7 @@ subroutine get_nggps_ic (Atm) Atm%gridstruct%area_64, Atm%gridstruct%dx, Atm%gridstruct%dy, & Atm%gridstruct%dxc, Atm%gridstruct%dyc, Atm%gridstruct%sin_sg, & Atm%flagstruct%n_zs_filter, cnst_0p20*Atm%gridstruct%da_min, & - .false., oro_g, Atm%gridstruct%bounded_domain, & + .false., oro_ic, Atm%gridstruct%bounded_domain, & Atm%domain, Atm%bd) if ( is_master() ) write(*,*) 'Warning !!! del-2 terrain filter has been applied ', & Atm%flagstruct%n_zs_filter, ' times' @@ -603,7 +611,7 @@ subroutine get_nggps_ic (Atm) call del4_cubed_sphere(Atm%npx, Atm%npy, Atm%phis, Atm%gridstruct%area_64, & Atm%gridstruct%dx, Atm%gridstruct%dy, & Atm%gridstruct%dxc, Atm%gridstruct%dyc, Atm%gridstruct%sin_sg, & - Atm%flagstruct%n_zs_filter, .false., oro_g, & + Atm%flagstruct%n_zs_filter, .false., oro_ic, & Atm%gridstruct%bounded_domain, & Atm%domain, Atm%bd) if ( is_master() ) write(*,*) 'Warning !!! del-4 terrain filter has been applied ', & @@ -824,7 +832,7 @@ subroutine get_hrrr_ic (Atm) type(fv_atmos_type), intent(inout) :: Atm ! local: real, dimension(:), allocatable:: ak, bk - real, dimension(:,:), allocatable:: wk2, ps, oro_g + real, dimension(:,:), allocatable:: wk2, ps, oro_ic real, dimension(:,:,:), allocatable:: ud, vd, u_w, v_w, u_s, v_s, w, t real, dimension(:,:,:), allocatable:: zh ! 3D height at 51 edges real, dimension(:,:,:,:), allocatable:: q @@ -963,11 +971,11 @@ subroutine get_hrrr_ic (Atm) endif if ( Atm%flagstruct%full_zs_filter) then - allocate (oro_g(isd:ied,jsd:jed)) - oro_g = 0. + allocate (oro_ic(isd:ied,jsd:jed)) + oro_ic = 0. ! 
land-frac - call register_restart_field(ORO_restart, 'land_frac', oro_g, dim_names_2d) - call mpp_update_domains(oro_g, Atm%domain) + call register_restart_field(ORO_restart, 'land_frac', oro_ic, dim_names_2d) + call mpp_update_domains(oro_ic, Atm%domain) endif if ( Atm%flagstruct%fv_land ) then @@ -1114,8 +1122,8 @@ subroutine get_hrrr_ic (Atm) Atm%gridstruct%area_64, Atm%gridstruct%dxa, Atm%gridstruct%dya, & Atm%gridstruct%dx, Atm%gridstruct%dy, Atm%gridstruct%dxc, & Atm%gridstruct%dyc, Atm%gridstruct%grid_64, Atm%gridstruct%agrid_64, & - Atm%gridstruct%sin_sg, Atm%phis, oro_g) - deallocate(oro_g) + Atm%gridstruct%sin_sg, Atm%phis, oro_ic) + deallocate(oro_ic) endif @@ -1126,7 +1134,7 @@ subroutine get_hrrr_ic (Atm) Atm%gridstruct%area_64, Atm%gridstruct%dx, Atm%gridstruct%dy, & Atm%gridstruct%dxc, Atm%gridstruct%dyc, Atm%gridstruct%sin_sg, & Atm%flagstruct%n_zs_filter, cnst_0p20*Atm%gridstruct%da_min, & - .false., oro_g, Atm%gridstruct%bounded_domain, & + .false., oro_ic, Atm%gridstruct%bounded_domain, & Atm%domain, Atm%bd) if ( is_master() ) write(*,*) 'Warning !!! del-2 terrain filter has been applied ', & Atm%flagstruct%n_zs_filter, ' times' @@ -1134,7 +1142,7 @@ subroutine get_hrrr_ic (Atm) call del4_cubed_sphere(Atm%npx, Atm%npy, Atm%phis, Atm%gridstruct%area_64, & Atm%gridstruct%dx, Atm%gridstruct%dy, & Atm%gridstruct%dxc, Atm%gridstruct%dyc, Atm%gridstruct%sin_sg, & - Atm%flagstruct%n_zs_filter, .false., oro_g, & + Atm%flagstruct%n_zs_filter, .false., oro_ic, & Atm%gridstruct%bounded_domain, & Atm%domain, Atm%bd) if ( is_master() ) write(*,*) 'Warning !!! del-4 terrain filter has been applied ', & @@ -1757,7 +1765,7 @@ subroutine get_ecmwf_ic( Atm ) real(kind=4), allocatable:: uec(:,:,:), vec(:,:,:), tec(:,:,:), wec(:,:,:) real(kind=4), allocatable:: psec(:,:), zsec(:,:), zhec(:,:,:), qec(:,:,:,:) real(kind=4), allocatable:: psc(:,:) - real(kind=4), allocatable:: sphumec(:,:,:) + real(kind=4), allocatable:: sphumec(:,:,:),o3ec(:,:,:) real, allocatable:: psc_r8(:,:), zhc(:,:,:), qc(:,:,:,:) real, allocatable:: lat(:), lon(:), ak0(:), bk0(:) real, allocatable:: pt_c(:,:,:), pt_d(:,:,:) @@ -1881,48 +1889,53 @@ subroutine get_ecmwf_ic( Atm ) if(is_master()) write(*,*) 'done reading model terrain from oro_data.nc' call mpp_update_domains( Atm%phis, Atm%domain ) -!! Read in o3mr, ps and zh from GFS_data.tile?.nc - allocate (o3mr_gfs(is:ie,js:je,levp_gfs)) - allocate (ps_gfs(is:ie,js:je)) - allocate (zh_gfs(is:ie,js:je,levp_gfs+1)) - - if( open_file(GFS_restart, fn_gfs_ics, "read", Atm%domain_for_read, is_restart=.true., dont_add_res_to_filename=.true.) ) then - call register_axis(GFS_restart, "lat", "y") - call register_axis(GFS_restart, "lon", "x") - call register_axis(GFS_restart, "lev", size(o3mr_gfs,3)) - call register_axis(GFS_restart, "levp", size(zh_gfs,3)) - call register_restart_field(GFS_restart, 'o3mr', o3mr_gfs, dim_names_3d3, is_optional=.true.) - call register_restart_field(GFS_restart, 'ps', ps_gfs, dim_names_2d) - call register_restart_field(GFS_restart, 'zh', zh_gfs, dim_names_3d4) - call read_restart(GFS_restart) - call close_file(GFS_restart) - endif - ! 
Get GFS ak, bk for o3mr vertical interpolation - allocate (wk2(levp_gfs+1,2)) - allocate (ak_gfs(levp_gfs+1)) - allocate (bk_gfs(levp_gfs+1)) - allocate(pes(mpp_npes())) - call mpp_get_current_pelist(pes) - if( open_file(Gfs_ctl, fn_gfs_ctl, "read", pelist=pes) ) then - call read_data(Gfs_ctl,'vcoord',wk2) - call close_file(Gfs_ctl) - endif - deallocate(pes) - ak_gfs(1:levp_gfs+1) = wk2(1:levp_gfs+1,1) - bk_gfs(1:levp_gfs+1) = wk2(1:levp_gfs+1,2) - deallocate (wk2) +!! No O3 in IFS IC before the DIMOSIC period (201806). + if ( Atm%flagstruct%use_gfsO3 ) then + if( is_master() ) write(*,*) 'using GFS O3 with other ECMWF ICs:' + !! Read in o3mr, ps and zh from GFS_data.tile?.nc + allocate (o3mr_gfs(is:ie,js:je,levp_gfs)) + allocate (ps_gfs(is:ie,js:je)) + allocate (zh_gfs(is:ie,js:je,levp_gfs+1)) + + if( open_file(GFS_restart, fn_gfs_ics, "read", Atm%domain_for_read, is_restart=.true., dont_add_res_to_filename=.true.) ) then + call register_axis(GFS_restart, "lat", "y") + call register_axis(GFS_restart, "lon", "x") + call register_axis(GFS_restart, "lev", size(o3mr_gfs,3)) + call register_axis(GFS_restart, "levp", size(zh_gfs,3)) + call register_restart_field(GFS_restart, 'o3mr', o3mr_gfs, dim_names_3d3, is_optional=.true.) + call register_restart_field(GFS_restart, 'ps', ps_gfs, dim_names_2d) + call register_restart_field(GFS_restart, 'zh', zh_gfs, dim_names_3d4) + call read_restart(GFS_restart) + call close_file(GFS_restart) + endif + + ! Get GFS ak, bk for o3mr vertical interpolation + allocate (wk2(levp_gfs+1,2)) + allocate (ak_gfs(levp_gfs+1)) + allocate (bk_gfs(levp_gfs+1)) + allocate(pes(mpp_npes())) + call mpp_get_current_pelist(pes) + if( open_file(Gfs_ctl, fn_gfs_ctl, "read", pelist=pes) ) then + call read_data(Gfs_ctl,'vcoord',wk2) + call close_file(Gfs_ctl) + endif + deallocate(pes) + ak_gfs(1:levp_gfs+1) = wk2(1:levp_gfs+1,1) + bk_gfs(1:levp_gfs+1) = wk2(1:levp_gfs+1,2) + deallocate (wk2) - if ( bk_gfs(1) < 1.E-9 ) ak_gfs(1) = max(1.e-9, ak_gfs(1)) + if ( bk_gfs(1) < 1.E-9 ) ak_gfs(1) = max(1.e-9, ak_gfs(1)) - iq = o3mr - if(is_master()) write(*,*) 'Reading o3mr from GFS_data.nc:' - if(is_master()) write(*,*) 'o3mr =', iq - call remap_scalar_single(Atm, levp_gfs, npz, ak_gfs, bk_gfs, ps_gfs, o3mr_gfs, zh_gfs, iq) + iq = o3mr + if(is_master()) write(*,*) 'Reading o3mr from GFS_data.nc:' + if(is_master()) write(*,*) 'o3mr =', iq + call remap_scalar_single(Atm, levp_gfs, npz, ak_gfs, bk_gfs, ps_gfs, o3mr_gfs, zh_gfs, iq) - deallocate (ak_gfs, bk_gfs) - deallocate (ps_gfs, zh_gfs) - deallocate (o3mr_gfs) + deallocate (ak_gfs, bk_gfs) + deallocate (ps_gfs, zh_gfs) + deallocate (o3mr_gfs) + endif !! Start to read EC data fname = Atm%flagstruct%res_latlon_dynamics @@ -1978,7 +1991,7 @@ subroutine get_ecmwf_ic( Atm ) if ( bk0(1) < 1.E-9 ) ak0(1) = max(1.e-9, ak0(1)) else - call mpp_error(FATAL,'==> Error in get_external_ic: Expected file '//trim(fname)//' for NCEP IC does not exist') + call mpp_error(FATAL,'==> Error in get_external_ic: Expected file '//trim(fname)//' for ECMWF IC does not exist') endif ! Initialize lat-lon to Cubed bi-linear interpolation coeff: @@ -2024,6 +2037,17 @@ subroutine get_ecmwf_ic( Atm ) tec(:,:,:) = tec(:,:,:)*scale_value + offset if(is_master()) write(*,*) 'done reading tec' +! read in ozone: + if ( .not. 
Atm%flagstruct%use_gfsO3 ) then + allocate ( o3ec(1:im,jbeg:jend, 1:km) ) + + call get_var3_r4( ncid, 'o3', 1,im, jbeg,jend, 1,km, o3ec(:,:,:) ) + call get_var_att_double ( ncid, 'o3', 'scale_factor', scale_value ) + call get_var_att_double ( ncid, 'o3', 'add_offset', offset ) + o3ec(:,:,:) = o3ec(:,:,:)*scale_value + offset + if(is_master()) write(*,*) 'done reading o3mr ec' + endif + ! read in specific humidity: allocate ( sphumec(1:im,jbeg:jend, 1:km) ) @@ -2034,9 +2058,9 @@ subroutine get_ecmwf_ic( Atm ) if(is_master()) write(*,*) 'done reading sphum ec' ! Read in other tracers from EC data and remap them into cubic sphere grid: - allocate ( qec(1:im,jbeg:jend,1:km,5) ) + allocate ( qec(1:im,jbeg:jend,1:km,ntracers) ) - do n = 1, 5 + do n = 1, ntracers if (n == sphum) then qec(:,:,:,sphum) = sphumec(:,:,:) deallocate ( sphumec ) @@ -2064,8 +2088,12 @@ subroutine get_ecmwf_ic( Atm ) call get_var_att_double ( ncid, 'cswc', 'add_offset', offset ) qec(:,:,:,snowwat) = qec(:,:,:,snowwat)*scale_value + offset if(is_master()) write(*,*) 'done reading cswc ec' + else if (n == o3mr .and. (.not. Atm%flagstruct%use_gfsO3)) then + qec(:,:,:,o3mr) = o3ec(:,:,:) + deallocate ( o3ec ) else - if(is_master()) write(*,*) 'nq is more then 5!' + qec(:,:,:,n) = 0.0 + if(is_master()) write(*,*) 'tracer number = ', n, 'is not in the IFS IC.' endif enddo @@ -2126,10 +2154,10 @@ subroutine get_ecmwf_ic( Atm ) if(is_master()) write(*,*) 'done interpolate psec/zhec into cubic grid psc/zhc!' -! Read in other tracers from EC data and remap them into cubic sphere grid: - allocate ( qc(is:ie,js:je,km,6) ) +! Remap hydrometeor tracers and ozone (if not using GFS ozone) from EC grid into cubic sphere grid: + allocate ( qc(is:ie,js:je,km,ntracers) ) - do n = 1, 5 + do n = 1, ntracers !$OMP parallel do default(none) shared(n,is,ie,js,je,km,s2c,id1,id2,jdc,qc,qec) & !$OMP private(i1,i2,j1) do k=1,km @@ -2145,11 +2173,10 @@ subroutine get_ecmwf_ic( Atm ) enddo enddo - qc(:,:,:,graupel) = 0. ! note Graupel must be tracer #6 - deallocate ( qec ) if(is_master()) write(*,*) 'done interpolate tracers (qec) into cubic (qc)' + ! Read in vertical wind from EC data and remap them into cubic sphere grid: allocate ( wec(1:im,jbeg:jend, 1:km) ) allocate ( wc(is:ie,js:je,km)) @@ -2182,7 +2209,7 @@ subroutine get_ecmwf_ic( Atm ) psc_r8(:,:) = psc(:,:) deallocate ( psc ) - call remap_scalar(Atm, km, npz, 6, ak0, bk0, psc_r8, qc, zhc, wc) + call remap_scalar(Atm, km, npz, ntracers, ak0, bk0, psc_r8, qc, zhc, wc) call mpp_update_domains(Atm%phis, Atm%domain) if(is_master()) write(*,*) 'done remap_scalar' @@ -2352,7 +2379,7 @@ subroutine get_ecmwf_ic( Atm ) Atm%q(i,j,k,graupel)) endif m_fac = wt / qt - do iq=1,ntracers + do iq=1,Atm%flagstruct%nwat Atm%q(i,j,k,iq) = m_fac * Atm%q(i,j,k,iq) enddo Atm%delp(i,j,k) = qt @@ -2780,7 +2807,7 @@ subroutine remap_scalar(Atm, km, npz, ncnst, ak0, bk0, psc, qa, zh, omga, t_in) #endif !$OMP parallel do default(none) & -!$OMP shared(sphum,liq_wat,rainwat,ice_wat,snowwat,graupel,source_fv3gfs,& +!$OMP shared(sphum,o3mr,liq_wat,rainwat,ice_wat,snowwat,graupel,source_fv3gfs,& !$OMP cld_amt,ncnst,npz,is,ie,js,je,km,k2,ak0,bk0,psc,zh,omga,qa,Atm,z500,t_in) & !$OMP private(l,m,pst,pn,gz,pe0,pn0,pe1,pn1,dp2,qp,qn1,gz_fv) @@ -2863,7 +2890,15 @@ subroutine remap_scalar(Atm, km, npz, ncnst, ak0, bk0, psc, qa, zh, omga, t_in) ! The HiRam step of blending model sphum with NCEP data is obsolete because nggps is always cold starting... 
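
Illustrative note: two conventions behind the new ozone handling above, stated for orientation. First, the IFS fields (o3, sphum and the hydrometeor species) are packed NetCDF variables, so each is unpacked as

    value = packed * scale_factor + add_offset,

which is what the get_var_att_double calls followed by o3ec = o3ec*scale_value + offset implement. Second, when use_gfsO3 = T the GFS ozone is remapped vertically using the vcoord ak/bk coefficients, whose usual hybrid sigma-pressure meaning is interface pressures p_{k+1/2} = ak(k) + bk(k)*ps (remap_scalar_single itself is not shown in this hunk).
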
do k=1,npz do i=is,ie - Atm%q(i,j,k,iq) = qn1(i,k) + + if ( iq==o3mr ) then + if (.not. Atm%flagstruct%use_gfsO3) then + Atm%q(i,j,k,iq) = qn1(i,k) + endif + else + Atm%q(i,j,k,iq) = qn1(i,k) + endif + enddo enddo enddo @@ -3914,4 +3949,3 @@ end subroutine get_staggered_grid end module external_ic_mod - diff --git a/tools/fv_diag_column.F90 b/tools/fv_diag_column.F90 index 8c021a1e0..127df3022 100644 --- a/tools/fv_diag_column.F90 +++ b/tools/fv_diag_column.F90 @@ -175,6 +175,7 @@ subroutine read_column_table print*, ' read_column_table: error on line ', nline call mpp_error(FATAL,'error in column_table format') endif + else !debug or sonde record with specified lat-lon if (index(lowercase(record), "debug") .ne. 0 ) then if (num_diag_debug >= MAX_DIAG_COLUMN) continue @@ -241,7 +242,7 @@ subroutine find_diagnostic_column(diag_class, diag_names, diag_i, diag_j, diag_t !Index specified if (diag_i(m) >= -10 .and. diag_j(m) >= -10) then - if ((diag_tile(m) < 0 .or. diag_tile(m) > ntiles)) then + if ((diag_tile(m) < 0)) then if (ntiles > 1) then call mpp_error(FATAL, ' find_diagnostic_column: diag_tile must be specified for '//trim(diag_class)//' point '//trim(diag_names(m))//' since ntiles > 1') else @@ -377,12 +378,30 @@ subroutine debug_column(pt, delp, delz, u, v, w, q, npz, ncnst, sphum, nwat, zvi write(unit, '(A, I8, A, I6, A, I6, A, I3)') ' on processor # ', mpp_pe(), ' : local i = ', i, ', local j = ', j, ' tile = ', diag_debug_tile(n) write(unit, *) - write(unit,500) 'k', 'T', 'delp', 'delz', 'u', 'v', 'w', 'sphum', 'cond', 'pres', 'NHprime'!, 'pdry', 'NHpdry' - write(unit,500) ' ', 'K', 'mb', 'm', 'm/s', 'm/s', 'm/s', 'g/kg', 'g/kg', 'mb', 'mb'!, ! 'mb', 'mb' -500 format(A4, A7, A8, A6, A8, A8, A8, A8, A9, A9, A9) if (hydrostatic) then - call mpp_error(NOTE, 'Hydrostatic debug sounding not yet supported') + write(unit,500) 'k', 'T', 'delp', 'u', 'v', 'sphum', 'cond', 'pres' !, 'pdry', 'NHpdry' + write(unit,500) ' ', 'K', 'mb', 'm/s', 'm/s', 'g/kg', 'g/kg', 'mb' ! 'mb', 'mb' +500 format(A4, A7, A8, A8, A8, A8, A9, A9) + pehyd = ptop + do k=1,npz + pehyd(k+1) = pehyd(k) + delp(i,j,k) + preshyd(k) = (pehyd(k+1) - pehyd(k))/log(pehyd(k+1)/pehyd(k)) + enddo + + do k=max(diag_debug_kbottom-diag_debug_nlevels,1),min(diag_debug_kbottom,npz) + cond = 0. + do l=2,nwat + cond = cond + q(i,j,k,l) + enddo + write(unit,'(I4, F7.2, F8.3, F8.3, F8.3, F8.3, F9.5, F9.3)') & + k, pt(i,j,k), delp(i,j,k)*0.01, u(i,j,k), v(i,j,k), & + q(i,j,k,sphum)*1000., cond*1000., preshyd(k)*1.e-2!, presdry*1.e-2, (presdry-preshyddry(k))*1.e-2 + enddo + else + write(unit,501) 'k', 'T', 'delp', 'delz', 'u', 'v', 'w', 'sphum', 'cond', 'pres', 'NHprime'!, 'pdry', 'NHpdry' + write(unit,501) ' ', 'K', 'mb', 'm', 'm/s', 'm/s', 'm/s', 'g/kg', 'g/kg', 'mb', 'mb'!, ! 
'mb', 'mb' +501 format(A4, A7, A8, A6, A8, A8, A8, A8, A9, A9, A9) pehyd = ptop pehyddry = ptop do k=1,npz @@ -411,7 +430,6 @@ subroutine debug_column(pt, delp, delz, u, v, w, q, npz, ncnst, sphum, nwat, zvi call flush(unit) - enddo end subroutine debug_column @@ -466,42 +484,68 @@ subroutine debug_column_dyn(pt, delp, delz, u, v, w, q, heat_source, cappa, akap write(unit, '(A, I8, A, I6, A, I6)') ' on processor # ', mpp_pe(), ' : local i = ', i, ', local j = ', j write(unit, *) - write(unit,500) 'k', 'T', 'delp', 'delz', 'u', 'v', 'w', 'sphum', 'cond', 'pres', 'NHprime', 'heat' - write(unit,500) ' ', 'K', 'mb', 'm', 'm/s', 'm/s', 'm/s', 'g/kg', 'g/kg', 'mb', 'mb', 'K' -500 format(A4, A7, A8, A6, A8, A8, A8, A8, A9, A9, A9, A8) - if (hydrostatic) then - call mpp_error(NOTE, 'Hydrostatic debug sounding not yet supported') - else - pehyd = ptop - do k=1,npz - pehyd(k+1) = pehyd(k) + delp(i,j,k) - preshyd(k) = (pehyd(k+1) - pehyd(k))/log(pehyd(k+1)/pehyd(k)) + if (hydrostatic) then + write(unit,501) 'k', 'T', 'delp', 'u', 'v', 'sphum', 'cond', 'pres' !, 'pdry', 'NHpdry' + write(unit,501) ' ', 'K', 'mb', 'm/s', 'm/s', 'g/kg', 'g/kg', 'mb' ! 'mb', 'mb' +501 format(A4, A7, A8, A8, A8, A8, A9, A9) + pehyd = ptop + do k=1,npz + pehyd(k+1) = pehyd(k) + delp(i,j,k) + preshyd(k) = (pehyd(k+1) - pehyd(k))/log(pehyd(k+1)/pehyd(k)) + enddo + + do k=max(diag_debug_kbottom-diag_debug_nlevels,1),min(diag_debug_kbottom,npz) + cond = 0. + do l=2,nwat + cond = cond + q(i,j,k,l) enddo - !do k=2*npz/3,npz - do k=max(diag_debug_kbottom-diag_debug_nlevels,1),min(diag_debug_kbottom,npz) - cond = 0. - do l=2,nwat - cond = cond + q(i,j,k,l) - enddo - virt = (1.+zvir*q(i,j,k,sphum)) + virt = (1.+zvir*q(i,j,k,sphum)) + !NOTE: Moist cappa not implemented for hydrostatic dynamics. + pk = exp(akap*log(preshyd(k))) + temp = pt(i,j,k)*pk/virt + if (use_heat_source) then + heats = heat_source(i,j,k) / (cp_air*delp(i,j,k)) + else + heats = 0.0 + endif + write(unit,'(I4, F7.2, F8.3, F8.3, F8.3, F8.3, F9.5, F9.3, 1x, G9.3)') & + k, temp, delp(i,j,k)*0.01, u(i,j,k), v(i,j,k), & + q(i,j,k,sphum)*1000., cond*1000., preshyd(k)*1.e-2, heats!, presdry*1.e-2, (presdry-preshyddry(k))*1.e-2 + enddo + else + write(unit,500) 'k', 'T', 'delp', 'delz', 'u', 'v', 'w', 'sphum', 'cond', 'pres', 'NHprime', 'heat' + write(unit,500) ' ', 'K', 'mb', 'm', 'm/s', 'm/s', 'm/s', 'g/kg', 'g/kg', 'mb', 'mb', 'K' +500 format(A4, A7, A8, A6, A8, A8, A8, A8, A9, A9, A9, A8) + pehyd = ptop + do k=1,npz + pehyd(k+1) = pehyd(k) + delp(i,j,k) + preshyd(k) = (pehyd(k+1) - pehyd(k))/log(pehyd(k+1)/pehyd(k)) + enddo + !do k=2*npz/3,npz + do k=max(diag_debug_kbottom-diag_debug_nlevels,1),min(diag_debug_kbottom,npz) + cond = 0. 
+ do l=2,nwat + cond = cond + q(i,j,k,l) + enddo + virt = (1.+zvir*q(i,j,k,sphum)) #ifdef MOIST_CAPPA - pres = exp(1./(1.-cappa(i,j,k))*log(rdg*(delp(i,j,k)-cond)/delz(i,j,k)*pt(i,j,k)) ) - pk = exp(cappa(i,j,k)*log(pres)) + pres = exp(1./(1.-cappa(i,j,k))*log(rdg*(delp(i,j,k)-cond)/delz(i,j,k)*pt(i,j,k)) ) + pk = exp(cappa(i,j,k)*log(pres)) #else - pres = exp(1./(1.-akap)*log(rdg*(delp(i,j,k))/delz(i,j,k)*pt(i,j,k)) ) - pk = exp(akap*log(pres)) + pres = exp(1./(1.-akap)*log(rdg*(delp(i,j,k))/delz(i,j,k)*pt(i,j,k)) ) + pk = exp(akap*log(pres)) #endif - temp = pt(i,j,k)*pk/virt - if (use_heat_source) then - heats = heat_source(i,j,k) / (cv_air*delp(i,j,k)) - else - heats = 0.0 - endif - write(unit,'(I4, F7.2, F8.3, I6, F8.3, F8.3, F8.3, F8.3, F9.5, F9.3, F9.3, G9.3 )') & - k, temp, delp(i,j,k)*0.01, -int(delz(i,j,k)), u(i,j,k), v(i,j,k), w(i,j,k), & - q(i,j,k,sphum)*1000., cond*1000., pres*1.e-2, (pres-preshyd(k))*1.e-2, heats - enddo - endif + temp = pt(i,j,k)*pk/virt + if (use_heat_source) then + heats = heat_source(i,j,k) / (cv_air*delp(i,j,k)) + else + heats = 0.0 + endif + write(unit,'(I4, F7.2, F8.3, I6, F8.3, F8.3, F8.3, F8.3, F9.5, F9.3, F9.3, 1x, G9.3 )') & + k, temp, delp(i,j,k)*0.01, -int(delz(i,j,k)), u(i,j,k), v(i,j,k), w(i,j,k), & + q(i,j,k,sphum)*1000., cond*1000., pres*1.e-2, (pres-preshyd(k))*1.e-2, heats + enddo + endif write(unit, *) '===================================================================' write(unit, *) diff --git a/tools/fv_diagnostics.F90 b/tools/fv_diagnostics.F90 index e14e70554..daec854e3 100644 --- a/tools/fv_diagnostics.F90 +++ b/tools/fv_diagnostics.F90 @@ -91,12 +91,11 @@ module fv_diagnostics_mod real :: sphum_ll_fix = 0. real :: qcly0 ! initial value for terminator test - logical :: is_ideal_case = .false. public :: fv_diag_init, fv_time, fv_diag, prt_mxm, prt_maxmin, range_check public :: prt_mass, prt_minmax, ppme, fv_diag_init_gn, z_sum, sphum_ll_fix, eqv_pot, qcly0, gn public :: prt_height, prt_gb_nh_sh, interpolate_vertical, rh_calc, get_height_field, get_height_given_pressure - public :: cs3_interpolator, get_vorticity, is_ideal_case + public :: cs3_interpolator, get_vorticity ! needed by fv_nggps_diag public :: max_vv, max_uh, bunkers_vector, helicity_relative_CAPS public :: max_vorticity @@ -236,7 +235,9 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) exit endif enddo - if ( is_master() ) write(*,*) 'mp_top=', mp_top, 'pfull=', pfull(mp_top) + if ( Atm(1)%flagstruct%fv_debug .and. is_master() ) then + write(*,*) 'radar reflectivity: mp_top=', mp_top, 'pfull=', pfull(mp_top) + endif ! allocate(grid_xt(npx-1), grid_yt(npy-1), grid_xe(npx), grid_ye(npy-1), grid_xn(npx-1), grid_yn(npy)) allocate(grid_xt(npx-1), grid_yt(npy-1)) @@ -344,10 +345,10 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) levs = 0 #ifdef FEWER_PLEVS levs(1:nplev) = (/50,70,100,200,250,300,500,750,850,925,1000/) ! lmh mini-levs for MJO simulations - k100 = 2 - k200 = 3 - k300 = 5 - k500 = 6 + k100 = 3 + k200 = 4 + k300 = 6 + k500 = 7 #else levs(1:nplev) = (/1,2,3,5,7,10,20,30,50,70,100,150,200,250,300,350,400,450,500,550,600,650,700,750,800,850,900,925,950,975,1000/) k100 = 11 @@ -367,20 +368,20 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) endif endif levs(nplev+1:MAX_PLEVS) = -1. - if (abs(levs(k100)-100.) > 1.0) then - call mpp_error(NOTE, "fv_diag_plevs_nml: k100 set incorrectly, finding closest entry in plevs") + if (abs(levs(k100)-100.) > 10.) 
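
Illustrative note: the hydrostatic branches added to debug_column and debug_column_dyn above reconstruct the sounding pressure from delp alone; the value printed in the 'pres' column is the logarithmic mean of the bounding interface pressures,

    p_{1/2} = p_\mathrm{top}, \qquad p_{k+1/2} = p_{k-1/2} + \Delta p_k, \qquad
    \bar{p}_k = \frac{p_{k+1/2} - p_{k-1/2}}{\ln\!\left(p_{k+1/2}/p_{k-1/2}\right)},

a common definition of the full-level pressure in hydrostatic finite-volume models.
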
then + !call mpp_error(NOTE, "fv_diag_plevs_nml: k100 set incorrectly, finding closest entry in plevs") k100 = minloc(abs(levs(1:nplev)-100),1) endif - if (abs(levs(k200)-200.) > 1.0) then - call mpp_error(NOTE, "fv_diag_plevs_nml: k200 set incorrectly, finding closest entry in plevs") + if (abs(levs(k200)-200.) > 10.) then + !call mpp_error(NOTE, "fv_diag_plevs_nml: k200 set incorrectly, finding closest entry in plevs") k200 = minloc(abs(levs(1:nplev)-200),1) endif - if (abs(levs(k300)-300.) > 1.0) then - call mpp_error(NOTE, "fv_diag_plevs_nml: k300 set incorrectly, finding closest entry in plevs") + if (abs(levs(k300)-300.) > 10.) then + !call mpp_error(NOTE, "fv_diag_plevs_nml: k300 set incorrectly, finding closest entry in plevs") k300 = minloc(abs(levs(1:nplev)-300),1) endif - if (abs(levs(k500)-500.) > 1.0) then - call mpp_error(NOTE, "fv_diag_plevs_nml: k500 set incorrectly, finding closest entry in plevs") + if (abs(levs(k500)-500.) > 10.) then + !call mpp_error(NOTE, "fv_diag_plevs_nml: k500 set incorrectly, finding closest entry in plevs") k500 = minloc(abs(levs(1:nplev)-500),1) endif @@ -502,6 +503,7 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) #ifndef DYNAMICS_ZS if (id_zsurf > 0) used = send_data(id_zsurf, zsurf, Time) + call prt_mxm('ZS', zsurf, isc, iec, jsc, jec, 0, 1, 1.0, Atm(n)%gridstruct%area_64, Atm(n)%domain) #endif if ( Atm(n)%flagstruct%fv_land ) then if (id_zs > 0) used = send_data(id_zs , zs_g, Time) @@ -629,74 +631,6 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) 'snow precipitation', 'mm/day', missing_value=missing_value ) id_preg = register_diag_field ( trim(field), 'preg', axes(1:2), Time, & 'graupel precipitation', 'mm/day', missing_value=missing_value ) - id_prefluxw = register_diag_field ( trim(field), 'prefluxw', axes(1:3), Time, & - 'water precipitation flux', 'mm/day', missing_value=missing_value ) - id_prefluxr = register_diag_field ( trim(field), 'prefluxr', axes(1:3), Time, & - 'rain precipitation flux', 'mm/day', missing_value=missing_value ) - id_prefluxi = register_diag_field ( trim(field), 'prefluxi', axes(1:3), Time, & - 'ice precipitation flux', 'mm/day', missing_value=missing_value ) - id_prefluxs = register_diag_field ( trim(field), 'prefluxs', axes(1:3), Time, & - 'snow precipitation flux', 'mm/day', missing_value=missing_value ) - id_prefluxg = register_diag_field ( trim(field), 'prefluxg', axes(1:3), Time, & - 'graupel precipitation flux', 'mm/day', missing_value=missing_value ) - id_cond = register_diag_field ( trim(field), 'cond', axes(1:2), Time, & - 'condensation', 'mm/day', missing_value=missing_value ) - id_dep = register_diag_field ( trim(field), 'dep', axes(1:2), Time, & - 'deposition', 'mm/day', missing_value=missing_value ) - id_reevap = register_diag_field ( trim(field), 'reevap', axes(1:2), Time, & - 'evaporation', 'mm/day', missing_value=missing_value ) - id_sub = register_diag_field ( trim(field), 'sub', axes(1:2), Time, & - 'sublimation', 'mm/day', missing_value=missing_value ) - id_pcw = register_diag_field ( trim(field), 'pcw', axes(1:3), Time, & - 'water particle concentration', '1/m^3', missing_value=missing_value ) - id_edw = register_diag_field ( trim(field), 'edw', axes(1:3), Time, & - 'water effective diameter', 'm', missing_value=missing_value ) - id_oew = register_diag_field ( trim(field), 'oew', axes(1:3), Time, & - 'water optical extinction', '1/m', missing_value=missing_value ) - id_rrw = register_diag_field ( trim(field), 'rrw', axes(1:3), Time, & - 'water radar 
reflectivity factor', 'm^3', missing_value=missing_value ) - id_tvw = register_diag_field ( trim(field), 'tvw', axes(1:3), Time, & - 'water terminal velocity', 'm/s', missing_value=missing_value ) - id_pci = register_diag_field ( trim(field), 'pci', axes(1:3), Time, & - 'ice particle concentration', '1/m^3', missing_value=missing_value ) - id_edi = register_diag_field ( trim(field), 'edi', axes(1:3), Time, & - 'ice effective diameter', 'm', missing_value=missing_value ) - id_oei = register_diag_field ( trim(field), 'oei', axes(1:3), Time, & - 'ice optical extinction', '1/m', missing_value=missing_value ) - id_rri = register_diag_field ( trim(field), 'rri', axes(1:3), Time, & - 'ice radar reflectivity factor', 'm^3', missing_value=missing_value ) - id_tvi = register_diag_field ( trim(field), 'tvi', axes(1:3), Time, & - 'ice terminal velocity', 'm/s', missing_value=missing_value ) - id_pcr = register_diag_field ( trim(field), 'pcr', axes(1:3), Time, & - 'rain particle concentration', '1/m^3', missing_value=missing_value ) - id_edr = register_diag_field ( trim(field), 'edr', axes(1:3), Time, & - 'rain effective diameter', 'm', missing_value=missing_value ) - id_oer = register_diag_field ( trim(field), 'oer', axes(1:3), Time, & - 'rain optical extinction', '1/m', missing_value=missing_value ) - id_rrr = register_diag_field ( trim(field), 'rrr', axes(1:3), Time, & - 'rain radar reflectivity factor', 'm^3', missing_value=missing_value ) - id_tvr = register_diag_field ( trim(field), 'tvr', axes(1:3), Time, & - 'rain terminal velocity', 'm/s', missing_value=missing_value ) - id_pcs = register_diag_field ( trim(field), 'pcs', axes(1:3), Time, & - 'snow particle concentration', '1/m^3', missing_value=missing_value ) - id_eds = register_diag_field ( trim(field), 'eds', axes(1:3), Time, & - 'snow effective diameter', 'm', missing_value=missing_value ) - id_oes = register_diag_field ( trim(field), 'oes', axes(1:3), Time, & - 'snow optical extinction', '1/m', missing_value=missing_value ) - id_rrs = register_diag_field ( trim(field), 'rrs', axes(1:3), Time, & - 'snow radar reflectivity factor', 'm^3', missing_value=missing_value ) - id_tvs = register_diag_field ( trim(field), 'tvs', axes(1:3), Time, & - 'snow terminal velocity', 'm/s', missing_value=missing_value ) - id_pcg = register_diag_field ( trim(field), 'pcg', axes(1:3), Time, & - 'graupel particle concentration', '1/m^3', missing_value=missing_value ) - id_edg = register_diag_field ( trim(field), 'edg', axes(1:3), Time, & - 'graupel effective diameter', 'm', missing_value=missing_value ) - id_oeg = register_diag_field ( trim(field), 'oeg', axes(1:3), Time, & - 'graupel optical extinction', '1/m', missing_value=missing_value ) - id_rrg = register_diag_field ( trim(field), 'rrg', axes(1:3), Time, & - 'graupel radar reflectivity factor', 'm^3', missing_value=missing_value ) - id_tvg = register_diag_field ( trim(field), 'tvg', axes(1:3), Time, & - 'graupel terminal velocity', 'm/s', missing_value=missing_value ) !------------------- !! 
3D Tendency terms from GFDL MP and physics !------------------- @@ -892,7 +826,7 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) 'zonal wind', 'm/sec', missing_value=missing_value, range=vrange ) id_v_plev = register_diag_field ( trim(field), 'v_plev', axe2(1:3), Time, & 'meridional wind', 'm/sec', missing_value=missing_value, range=vrange ) - if (is_ideal_case) then + if (Atm(n)%flagstruct%is_ideal_case) then id_t_plev = register_diag_field ( trim(field), 't_plev', axe2(1:3), Time, & 'temperature', 'K', missing_value=missing_value ) else @@ -999,7 +933,7 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) if ( .not. Atm(n)%flagstruct%hydrostatic ) & id_w = register_diag_field ( trim(field), 'w', axes(1:3), Time, & 'vertical wind', 'm/sec', missing_value=missing_value, range=wrange ) - if (is_ideal_case) then + if (Atm(n)%flagstruct%is_ideal_case) then id_pt = register_diag_field ( trim(field), 'temp', axes(1:3), Time, & 'temperature', 'K', missing_value=missing_value ) else @@ -1353,6 +1287,28 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) id_dp1000 = register_diag_field ( trim(field), 'dp1000', axes(1:2), Time, & '1000-mb dew point', 'K', missing_value=missing_value ) !-------------------------- +! equivalent potential temperature: +!-------------------------- + id_theta_e100 = register_diag_field ( trim(field), 'theta_e100', axes(1:2), Time, & + '100-mb equivalent potential temperature', 'K', missing_value=missing_value ) + id_theta_e200 = register_diag_field ( trim(field), 'theta_e200', axes(1:2), Time, & + '200-mb equivalent potential temperature', 'K', missing_value=missing_value ) + id_theta_e250 = register_diag_field ( trim(field), 'theta_e250', axes(1:2), Time, & + '250-mb equivalent potential temperature', 'K', missing_value=missing_value ) + id_theta_e300 = register_diag_field ( trim(field), 'theta_e300', axes(1:2), Time, & + '300-mb equivalent potential temperature', 'K', missing_value=missing_value ) + id_theta_e500 = register_diag_field ( trim(field), 'theta_e500', axes(1:2), Time, & + '500-mb equivalent potential temperature', 'K', missing_value=missing_value ) + id_theta_e700 = register_diag_field ( trim(field), 'theta_e700', axes(1:2), Time, & + '700-mb equivalent potential temperature', 'K', missing_value=missing_value ) + id_theta_e850 = register_diag_field ( trim(field), 'theta_e850', axes(1:2), Time, & + '850-mb equivalent potential temperature', 'K', missing_value=missing_value ) + id_theta_e925 = register_diag_field ( trim(field), 'theta_e925', axes(1:2), Time, & + '925-mb equivalent potential temperature', 'K', missing_value=missing_value ) + id_theta_e1000 = register_diag_field ( trim(field), 'theta_e1000', axes(1:2), Time, & + '1000-mb equivalent potential temperature', 'K', missing_value=missing_value ) + +!-------------------------- ! 
relative humidity (CMIP definition): !-------------------------- id_rh10_cmip = register_diag_field ( trim(field), 'rh10_cmip', axes(1:2), Time, & @@ -1756,14 +1712,14 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) #endif elseif ( Atm(n)%flagstruct%range_warn ) then - call range_check('DELP', Atm(n)%delp, isc, iec, jsc, jec, ngc, npz, Atm(n)%gridstruct%agrid, & - 0.01*ptop, 200.E2, bad_range, Time) + if (ptop < 200.e2) call range_check('DELP', Atm(n)%delp, isc, iec, jsc, jec, ngc, npz, Atm(n)%gridstruct%agrid, & + 0.01*ptop, 200.E2, bad_range, Time) call range_check('UA', Atm(n)%ua, isc, iec, jsc, jec, ngc, npz, Atm(n)%gridstruct%agrid, & -250., 250., bad_range, Time) call range_check('VA', Atm(n)%va, isc, iec, jsc, jec, ngc, npz, Atm(n)%gridstruct%agrid, & -250., 250., bad_range, Time) #ifndef SW_DYNAMICS - if (is_ideal_case) then + if (Atm(n)%flagstruct%is_ideal_case) then call range_check('TA', Atm(n)%pt, isc, iec, jsc, jec, ngc, npz, Atm(n)%gridstruct%agrid, & 100., 500., bad_range, Time) !DCMIP ICs have very wide range of temperatures else @@ -1804,54 +1760,23 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) enddo if(id_zsurf > 0) used=send_data(id_zsurf, zsurf, Time) + call prt_mxm('ZS', zsurf, isc, iec, jsc, jec, 0, 1, 1.0, Atm(n)%gridstruct%area_64, Atm(n)%domain) #endif if(id_ps > 0) used=send_data(id_ps, Atm(n)%ps(isc:iec,jsc:jec), Time) - if(id_pret > 0) used=send_data(id_pret, & - Atm(n)%inline_mp%prew(isc:iec,jsc:jec)+& - Atm(n)%inline_mp%prer(isc:iec,jsc:jec)+& - Atm(n)%inline_mp%prei(isc:iec,jsc:jec)+& - Atm(n)%inline_mp%pres(isc:iec,jsc:jec)+& - Atm(n)%inline_mp%preg(isc:iec,jsc:jec), Time) - if(id_prew > 0) used=send_data(id_prew, Atm(n)%inline_mp%prew(isc:iec,jsc:jec), Time) - if(id_prer > 0) used=send_data(id_prer, Atm(n)%inline_mp%prer(isc:iec,jsc:jec), Time) - if(id_prei > 0) used=send_data(id_prei, Atm(n)%inline_mp%prei(isc:iec,jsc:jec), Time) - if(id_pres > 0) used=send_data(id_pres, Atm(n)%inline_mp%pres(isc:iec,jsc:jec), Time) - if(id_preg > 0) used=send_data(id_preg, Atm(n)%inline_mp%preg(isc:iec,jsc:jec), Time) - if(id_prefluxw > 0) used=send_data(id_prefluxw, Atm(n)%inline_mp%prefluxw(isc:iec,jsc:jec,1:npz), Time) - if(id_prefluxr > 0) used=send_data(id_prefluxr, Atm(n)%inline_mp%prefluxr(isc:iec,jsc:jec,1:npz), Time) - if(id_prefluxi > 0) used=send_data(id_prefluxi, Atm(n)%inline_mp%prefluxi(isc:iec,jsc:jec,1:npz), Time) - if(id_prefluxs > 0) used=send_data(id_prefluxs, Atm(n)%inline_mp%prefluxs(isc:iec,jsc:jec,1:npz), Time) - if(id_prefluxg > 0) used=send_data(id_prefluxg, Atm(n)%inline_mp%prefluxg(isc:iec,jsc:jec,1:npz), Time) - if(id_cond > 0) used=send_data(id_cond, Atm(n)%inline_mp%cond(isc:iec,jsc:jec), Time) - if(id_dep > 0) used=send_data(id_dep, Atm(n)%inline_mp%dep(isc:iec,jsc:jec), Time) - if(id_reevap > 0) used=send_data(id_reevap, Atm(n)%inline_mp%reevap(isc:iec,jsc:jec), Time) - if(id_sub > 0) used=send_data(id_sub, Atm(n)%inline_mp%sub(isc:iec,jsc:jec), Time) - if(id_pcw > 0) used=send_data(id_pcw, Atm(n)%inline_mp%pcw(isc:iec,jsc:jec,1:npz), Time) - if(id_edw > 0) used=send_data(id_edw, Atm(n)%inline_mp%edw(isc:iec,jsc:jec,1:npz), Time) - if(id_oew > 0) used=send_data(id_oew, Atm(n)%inline_mp%oew(isc:iec,jsc:jec,1:npz), Time) - if(id_rrw > 0) used=send_data(id_rrw, Atm(n)%inline_mp%rrw(isc:iec,jsc:jec,1:npz), Time) - if(id_tvw > 0) used=send_data(id_tvw, Atm(n)%inline_mp%tvw(isc:iec,jsc:jec,1:npz), Time) - if(id_pci > 0) used=send_data(id_pci, Atm(n)%inline_mp%pci(isc:iec,jsc:jec,1:npz), Time) - if(id_edi > 0) used=send_data(id_edi, 
Atm(n)%inline_mp%edi(isc:iec,jsc:jec,1:npz), Time) - if(id_oei > 0) used=send_data(id_oei, Atm(n)%inline_mp%oei(isc:iec,jsc:jec,1:npz), Time) - if(id_rri > 0) used=send_data(id_rri, Atm(n)%inline_mp%rri(isc:iec,jsc:jec,1:npz), Time) - if(id_tvi > 0) used=send_data(id_tvi, Atm(n)%inline_mp%tvi(isc:iec,jsc:jec,1:npz), Time) - if(id_pcr > 0) used=send_data(id_pcr, Atm(n)%inline_mp%pcr(isc:iec,jsc:jec,1:npz), Time) - if(id_edr > 0) used=send_data(id_edr, Atm(n)%inline_mp%edr(isc:iec,jsc:jec,1:npz), Time) - if(id_oer > 0) used=send_data(id_oer, Atm(n)%inline_mp%oer(isc:iec,jsc:jec,1:npz), Time) - if(id_rrr > 0) used=send_data(id_rrr, Atm(n)%inline_mp%rrr(isc:iec,jsc:jec,1:npz), Time) - if(id_tvr > 0) used=send_data(id_tvr, Atm(n)%inline_mp%tvr(isc:iec,jsc:jec,1:npz), Time) - if(id_pcs > 0) used=send_data(id_pcs, Atm(n)%inline_mp%pcs(isc:iec,jsc:jec,1:npz), Time) - if(id_eds > 0) used=send_data(id_eds, Atm(n)%inline_mp%eds(isc:iec,jsc:jec,1:npz), Time) - if(id_oes > 0) used=send_data(id_oes, Atm(n)%inline_mp%oes(isc:iec,jsc:jec,1:npz), Time) - if(id_rrs > 0) used=send_data(id_rrs, Atm(n)%inline_mp%rrs(isc:iec,jsc:jec,1:npz), Time) - if(id_tvs > 0) used=send_data(id_tvs, Atm(n)%inline_mp%tvs(isc:iec,jsc:jec,1:npz), Time) - if(id_pcg > 0) used=send_data(id_pcg, Atm(n)%inline_mp%pcg(isc:iec,jsc:jec,1:npz), Time) - if(id_edg > 0) used=send_data(id_edg, Atm(n)%inline_mp%edg(isc:iec,jsc:jec,1:npz), Time) - if(id_oeg > 0) used=send_data(id_oeg, Atm(n)%inline_mp%oeg(isc:iec,jsc:jec,1:npz), Time) - if(id_rrg > 0) used=send_data(id_rrg, Atm(n)%inline_mp%rrg(isc:iec,jsc:jec,1:npz), Time) - if(id_tvg > 0) used=send_data(id_tvg, Atm(n)%inline_mp%tvg(isc:iec,jsc:jec,1:npz), Time) + if (Atm(n)%flagstruct%do_inline_mp) then + if(id_pret > 0) used=send_data(id_pret, & + Atm(n)%inline_mp%prew(isc:iec,jsc:jec)+& + Atm(n)%inline_mp%prer(isc:iec,jsc:jec)+& + Atm(n)%inline_mp%prei(isc:iec,jsc:jec)+& + Atm(n)%inline_mp%pres(isc:iec,jsc:jec)+& + Atm(n)%inline_mp%preg(isc:iec,jsc:jec), Time) + if(id_prew > 0) used=send_data(id_prew, Atm(n)%inline_mp%prew(isc:iec,jsc:jec), Time) + if(id_prer > 0) used=send_data(id_prer, Atm(n)%inline_mp%prer(isc:iec,jsc:jec), Time) + if(id_prei > 0) used=send_data(id_prei, Atm(n)%inline_mp%prei(isc:iec,jsc:jec), Time) + if(id_pres > 0) used=send_data(id_pres, Atm(n)%inline_mp%pres(isc:iec,jsc:jec), Time) + if(id_preg > 0) used=send_data(id_preg, Atm(n)%inline_mp%preg(isc:iec,jsc:jec), Time) + endif if (id_qv_dt_gfdlmp > 0) used=send_data(id_qv_dt_gfdlmp, Atm(n)%inline_mp%qv_dt(isc:iec,jsc:jec,1:npz), Time) if (id_ql_dt_gfdlmp > 0) used=send_data(id_ql_dt_gfdlmp, Atm(n)%inline_mp%ql_dt(isc:iec,jsc:jec,1:npz), Time) @@ -1905,7 +1830,9 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) if ( id_vort200>0 .or. id_vort500>0 .or. id_vort850>0 .or. id_vorts>0 & .or. id_vort>0 .or. id_pv>0 .or. id_pv350k>0 .or. id_pv550k>0 & - .or. id_rh>0 .or. id_x850>0 .or. id_uh03>0 .or. id_uh25>0) then + .or. id_rh>0 .or. id_x850>0 .or. id_uh03>0 .or. id_uh25>0 & + .or. id_srh1 > 0 .or. id_srh3 > 0 .or. id_srh25 > 0 & + .or. id_ustm > 0 .or. 
id_vstm > 0) then call get_vorticity(isc, iec, jsc, jec, isd, ied, jsd, jed, npz, Atm(n)%u, Atm(n)%v, wk, & Atm(n)%gridstruct%dx, Atm(n)%gridstruct%dy, Atm(n)%gridstruct%rarea) @@ -2362,7 +2289,9 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) endif used = send_data (id_slp, slp, Time) if( prt_minmax ) then - call prt_mxm('SLP (Pa): ', slp, isc, iec, jsc, jec, 0, 1, 1., Atm(n)%gridstruct%area_64, Atm(n)%domain) + call prt_mxm('SLP (Pa): ', slp, isc, iec, jsc, jec, 0, 1, 1., Atm(n)%gridstruct%area_64, Atm(n)%domain) + call prt_maxmin('SLP', slp, isc, iec, jsc, jec, 0, 1, 1.) + if ( .not. Atm(n)%gridstruct%bounded_domain ) then ! US Potential Landfall TCs (PLT): do j=jsc,jec do i=isc,iec @@ -2375,6 +2304,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) enddo enddo call prt_mxm('SLP_ATL (Pa): ', a2, isc, iec, jsc, jec, 0, 1, 1., Atm(n)%gridstruct%area_64, Atm(n)%domain) + endif endif endif @@ -2418,7 +2348,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) if (Atm(n)%gridstruct%bounded_domain) then call prt_mxm('Z500 (m): ',a3(isc:iec,jsc:jec,k500),isc,iec,jsc,jec,0,1,1.,Atm(n)%gridstruct%area_64,Atm(n)%domain) else - call prt_gb_nh_sh('fv_GFS Z500 (m): ', isc,iec, jsc,jec, a3(isc,jsc,k500), Atm(n)%gridstruct%area_64(isc:iec,jsc:jec), & + call prt_gb_nh_sh('Z500', isc,iec, jsc,jec, a3(isc,jsc,k500), Atm(n)%gridstruct%area_64(isc:iec,jsc:jec), & Atm(n)%gridstruct%agrid_64(isc:iec,jsc:jec,2)) endif endif @@ -3788,7 +3718,10 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) if(id_diss > 0) used=send_data(id_diss, Atm(n)%diss_est(isc:iec,jsc:jec,:), Time) allocate( a3(isc:iec,jsc:jec,npz) ) - if(id_theta_e > 0 ) then + if(id_theta_e > 0 .or. & + id_theta_e100>0 .or. id_theta_e200>0 .or. id_theta_e250>0 .or. id_theta_e300>0 .or. & + id_theta_e500>0 .or. id_theta_e700>0 .or. id_theta_e850>0 .or. id_theta_e925>0 .or. & + id_theta_e1000>0) then if ( Atm(n)%flagstruct%adiabatic .and. 
Atm(n)%flagstruct%kord_tm>0 ) then do k=1,npz @@ -3803,6 +3736,44 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) isc, iec, jsc, jec, ngc, npz, Atm(n)%flagstruct%hydrostatic, Atm(n)%flagstruct%moist_phys) endif + + if (id_theta_e100>0) then + call interpolate_vertical(isc, iec, jsc, jec, npz, 100.e2, Atm(n)%peln, a3(isc:iec,jsc:jec,:), a2) + used=send_data(id_theta_e100, a2, Time) + endif + if (id_theta_e200>0) then + call interpolate_vertical(isc, iec, jsc, jec, npz, 200.e2, Atm(n)%peln, a3(isc:iec,jsc:jec,:), a2) + used=send_data(id_theta_e200, a2, Time) + endif + if (id_theta_e250>0) then + call interpolate_vertical(isc, iec, jsc, jec, npz, 250.e2, Atm(n)%peln, a3(isc:iec,jsc:jec,:), a2) + used=send_data(id_theta_e250, a2, Time) + endif + if (id_theta_e300>0) then + call interpolate_vertical(isc, iec, jsc, jec, npz, 300.e2, Atm(n)%peln, a3(isc:iec,jsc:jec,:), a2) + used=send_data(id_theta_e300, a2, Time) + endif + if (id_theta_e500>0) then + call interpolate_vertical(isc, iec, jsc, jec, npz, 500.e2, Atm(n)%peln, a3(isc:iec,jsc:jec,:), a2) + used=send_data(id_theta_e500, a2, Time) + endif + if (id_theta_e700>0) then + call interpolate_vertical(isc, iec, jsc, jec, npz, 700.e2, Atm(n)%peln, a3(isc:iec,jsc:jec,:), a2) + used=send_data(id_theta_e700, a2, Time) + endif + if (id_theta_e850>0) then + call interpolate_vertical(isc, iec, jsc, jec, npz, 850.e2, Atm(n)%peln, a3(isc:iec,jsc:jec,:), a2) + used=send_data(id_theta_e850, a2, Time) + endif + if (id_theta_e925>0) then + call interpolate_vertical(isc, iec, jsc, jec, npz, 925.e2, Atm(n)%peln, a3(isc:iec,jsc:jec,:), a2) + used=send_data(id_theta_e925, a2, Time) + endif + if (id_theta_e1000>0) then + call interpolate_vertical(isc, iec, jsc, jec, npz, 1000.e2, Atm(n)%peln, a3(isc:iec,jsc:jec,:), a2) + used=send_data(id_theta_e1000, a2, Time) + endif + if (id_theta_e > 0) then if( prt_minmax ) call prt_mxm('Theta_E', a3, isc, iec, jsc, jec, 0, npz, 1., Atm(n)%gridstruct%area_64, Atm(n)%domain) used=send_data(id_theta_e, a3, Time) @@ -4149,7 +4120,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) enddo enddo enddo - call prt_gb_nh_sh('Max_cld GB_NH_SH_EQ',isc,iec, jsc,jec, a2, Atm(n)%gridstruct%area_64(isc:iec,jsc:jec), & + call prt_gb_nh_sh('Max_cld',isc,iec, jsc,jec, a2, Atm(n)%gridstruct%area_64(isc:iec,jsc:jec), & Atm(n)%gridstruct%agrid_64(isc:iec,jsc:jec,2)) endif endif @@ -4398,9 +4369,16 @@ subroutine range_check_2d(qname, q, is, ie, js, je, n_g, pos, q_low, q_hi, bad_r if( qmin < q_low .or. qmax > q_hi ) then if(master) write(*,*) 'Range_check Warning:', qname, ' max = ', qmax, ' min = ', qmin if (present(Time)) then - call get_date(Time, year, month, day, hour, minute, second) - if (master) write(*,999) year, month, day, hour, minute, second -999 format(' Range violation on: ', I4, '/', I02, '/', I02, ' ', I02, ':', I02, ':', I02) + if (m_calendar) then + call get_date(Time, year, month, day, hour, minute, second) + if (master) write(*,999) year, month, day, hour, minute, second +999 format(' Range violation on: ', I4, '/', I02, '/', I02, ' ', I02, ':', I02, ':', I02) + else + call get_time(Time, second, day) + year = 0 ; month = 0 ; hour = 0 ; minute = 0 + if (master) write(*,996) day, second +996 format(' Range violation on: ', I6, ' days ', I05, ' seconds') + endif endif if ( present(bad_range) ) then bad_range = .true.
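Note on the nine theta_e pressure-level blocks added above: each block interpolates the same equivalent-potential-temperature field a3 to one fixed pressure and sends it to its own diagnostic id. A loop-based sketch of the same pattern is given here purely for illustration; the plevs_theta_e and id_theta_e_plev arrays are hypothetical and are not part of this patch.

! Illustrative sketch only -- hypothetical arrays, not code in this patch.
real,    parameter :: plevs_theta_e(9) = (/ 100.e2, 200.e2, 250.e2, 300.e2, &
                                            500.e2, 700.e2, 850.e2, 925.e2, 1000.e2 /)
integer :: id_theta_e_plev(9)   ! assumed filled by register_diag_field at init time
integer :: lev
do lev = 1, size(plevs_theta_e)
   if (id_theta_e_plev(lev) > 0) then
      call interpolate_vertical(isc, iec, jsc, jec, npz, plevs_theta_e(lev), &
                                Atm(n)%peln, a3(isc:iec,jsc:jec,:), a2)
      used = send_data(id_theta_e_plev(lev), a2, Time)
   endif
enddo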
@@ -4413,7 +4391,11 @@ subroutine range_check_2d(qname, q, is, ie, js, je, n_g, pos, q_low, q_hi, bad_r do j=js,je do i=is,ie if( q(i,j) < q_low .or. q(i,j) > q_hi ) then - write(*,*) 'Warn_(i,j)=',i,j, pos(i,j,1)*rad2deg, pos(i,j,2)*rad2deg, q(i,j) + write(*,995) i, j, pos(i,j,1)*rad2deg, pos(i,j,2)*rad2deg, qname, q(i,j) +! write(*,*) 'Warn_(i,j)=',i,j, pos(i,j,1)*rad2deg, pos(i,j,2)*rad2deg, q(i,j) +! write(*,998) k,i,j, pos(i,j,1)*rad2deg, pos(i,j,2)*rad2deg, qname, q(i,j,k) + +995 format('Warn_2D: (i,j)=',2I5,' (lon,lat)=',f7.3,1x,f7.3,1x, A,' =',G10.5) endif enddo enddo @@ -4431,6 +4413,7 @@ subroutine prt_maxmin(qname, q, is, ie, js, je, n_g, km, fac) real qmin, qmax integer i,j,k + character(len=12) :: display_name !mpp_root_pe doesn't appear to recognize nested grid master = (mpp_pe()==mpp_root_pe()) .or. is_master() @@ -4455,7 +4438,9 @@ subroutine prt_maxmin(qname, q, is, ie, js, je, n_g, km, fac) call mp_reduce_max(qmax) if(master) then - write(*,*) qname//trim(gn), ' max = ', qmax*fac, ' min = ', qmin*fac + j = min(len(trim(qname)),12) + display_name = qname(1:j) + write(*,*) display_name, ' ', trim(gn), ' max=', qmax*fac, 'min=',qmin*fac endif end subroutine prt_maxmin @@ -4473,6 +4458,7 @@ subroutine prt_mxm(qname, q, is, ie, js, je, n_g, km, fac, area, domain) ! real qmin, qmax, gmean integer i,j,k + character(len=8) :: display_name !mpp_root_pe doesn't appear to recognize nested grid master = (mpp_pe()==mpp_root_pe()) .or. is_master() @@ -4501,7 +4487,11 @@ subroutine prt_mxm(qname, q, is, ie, js, je, n_g, km, fac, area, domain) ! gmean = g_sum(domain, q(is,js,km), is, ie, js, je, 3, area, 1) gmean = g_sum(domain, q(is:ie,js:je,km), is, ie, js, je, 3, area, 1) - if(master) write(6,*) qname, gn, qmax*fac, qmin*fac, gmean*fac + if(master) then + j = min(len(trim(qname)),8) + display_name = qname(1:j) + write(6,*) display_name, trim(gn), qmax*fac, qmin*fac, gmean*fac + endif end subroutine prt_mxm @@ -4531,6 +4521,8 @@ subroutine prt_mass(km, nq, is, ie, js, je, n_g, nwat, ps, delp, q, area, domain snowwat = get_tracer_index (MODEL_ATMOS, 'snowwat') graupel = get_tracer_index (MODEL_ATMOS, 'graupel') + if (master) write(*,*) '--- Mass Diagnostics ------------------------' + if ( nwat==0 ) then psmo = g_sum(domain, ps(is:ie,js:je), is, ie, js, je, n_g, area, 1) if( master ) write(*,*) 'Total surface pressure (mb)', trim(gn), ' = ', 0.01*psmo @@ -4816,6 +4808,7 @@ subroutine prt_gb_nh_sh(qname, is,ie, js,je, a2, area, lat) real:: t_eq, t_nh, t_sh, t_gb real:: area_eq, area_nh, area_sh, area_gb integer:: i,j + character(len=12) :: display_name t_eq = 0. ; t_nh = 0.; t_sh = 0.; t_gb = 0. area_eq = 0.; area_nh = 0.; area_sh = 0.; area_gb = 0. @@ -4849,7 +4842,13 @@ subroutine prt_gb_nh_sh(qname, is,ie, js,je, a2, area, lat) if (area_nh <= 1.) area_nh = -1.0 if (area_sh <= 1.) area_sh = -1.0 if (area_eq <= 1.)
area_eq = -1.0 - if (is_master()) write(*,*) qname, t_gb/area_gb, t_nh/area_nh, t_sh/area_sh, t_eq/area_eq + if (is_master()) then + j = min(len(trim(qname)),12) + display_name = qname(1:j) + write(*,*) display_name, 'GB=',t_gb/area_gb, 'NH=',t_nh/area_nh + display_name='' + write(*,*) display_name, 'SH=',t_sh/area_sh, 'EQ=',t_eq/area_eq + endif end subroutine prt_gb_nh_sh diff --git a/tools/fv_diagnostics.h b/tools/fv_diagnostics.h index 3037ba3c3..8914f30c3 100644 --- a/tools/fv_diagnostics.h +++ b/tools/fv_diagnostics.h @@ -52,6 +52,8 @@ integer:: id_ql_plev, id_qi_plev, id_qr_plev, id_qs_plev, id_qg_plev, id_cf_plev integer:: id_t_plev_ave, id_q_plev_ave, id_qv_dt_gfdlmp_plev_ave, id_t_dt_gfdlmp_plev_ave, id_qv_dt_phys_plev_ave, id_t_dt_phys_plev_ave + integer :: id_theta_e100, id_theta_e200, id_theta_e250, id_theta_e300, & + id_theta_e500, id_theta_e700, id_theta_e850, id_theta_e925, id_theta_e1000 ! IPCC diag integer :: id_rh10, id_rh50, id_rh100, id_rh200, id_rh250, id_rh300, & id_rh500, id_rh700, id_rh850, id_rh925, id_rh1000 @@ -83,13 +85,8 @@ real, allocatable :: zsurf(:,:) real, allocatable :: pt1(:) - integer :: id_pret, id_prew, id_prer, id_prei, id_pres, id_preg, id_cond, id_dep, id_reevap, id_sub + integer :: id_pret, id_prew, id_prer, id_prei, id_pres, id_preg integer :: id_prefluxw, id_prefluxr, id_prefluxi, id_prefluxs, id_prefluxg - integer :: id_pcw, id_edw, id_oew, id_rrw, id_tvw - integer :: id_pci, id_edi, id_oei, id_rri, id_tvi - integer :: id_pcr, id_edr, id_oer, id_rrr, id_tvr - integer :: id_pcs, id_eds, id_oes, id_rrs, id_tvs - integer :: id_pcg, id_edg, id_oeg, id_rrg, id_tvg integer :: id_qv_dt_gfdlmp, id_T_dt_gfdlmp, id_ql_dt_gfdlmp, id_qi_dt_gfdlmp integer :: id_qr_dt_gfdlmp, id_qg_dt_gfdlmp, id_qs_dt_gfdlmp integer :: id_liq_wat_dt_gfdlmp, id_ice_wat_dt_gfdlmp diff --git a/tools/fv_eta.F90 b/tools/fv_eta.F90 index 3315846f1..4b4a86dca 100644 --- a/tools/fv_eta.F90 +++ b/tools/fv_eta.F90 @@ -796,6 +796,7 @@ subroutine set_eta(km, ks, ptop, ak, bk, npz_type,fv_eta_file) call check_eta_levels (ak, bk) if (is_master()) then + print*, ' SET_ETA: vertical level interfaces' write(*, '(A4, A13, A13, A11)') 'klev', 'ak', 'bk', 'p_ref' do k=1,km+1 write(*,'(I4, F13.5, F13.5, F11.2)') k, ak(k), bk(k), 1000.E2*bk(k) + ak(k) @@ -979,7 +980,7 @@ subroutine var_les(km, ak, bk, ptop, ks, pint, s_rate) do k=ks+1,km tmp1 = max(tmp1, (ak(k)-ak(k+1))/max(1.E-5, (bk(k+1)-bk(k))) ) enddo - write(*,*) 'Hybrid Sigma-P: minimum allowable surface pressure (hpa)=', tmp1/100. + write(*,'(A, F8.2)') 'Hybrid Sigma-P: minimum allowable surface pressure (hpa)=', tmp1/100. write(*,800) (pm(k), k=km,1,-1) endif @@ -1903,7 +1904,7 @@ subroutine check_eta_levels(ak, bk) if (.not. 
monotonic) then if (is_master()) then - write(*, '(A4, A13, A13, A11)') 'klev', 'ak', 'bk', 'p_ref' + write(*, '(A4, A13, A13, A11)') 'klev', 'ak', 'bk', 'p_ref (Pa)' do k=1,nlev write(*,'(I4, F13.5, F13.5, F11.2)') k, ak(k), bk(k), ak(k) + bk(k)*1000.E2 enddo diff --git a/tools/fv_grid_tools.F90 b/tools/fv_grid_tools.F90 index bfee63519..f90e53dd3 100644 --- a/tools/fv_grid_tools.F90 +++ b/tools/fv_grid_tools.F90 @@ -30,7 +30,6 @@ module fv_grid_tools_mod cell_center2, get_area, inner_prod, fill_ghost, & direct_transform, cube_transform, dist2side_latlon, & spherical_linear_interpolation, big_number - use fv_timing_mod, only: timing_on, timing_off use fv_mp_mod, only: is_master, fill_corners, XDir, YDir use fv_mp_mod, only: grids_master_procs use sorted_index_mod, only: sorted_inta, sorted_intb @@ -586,7 +585,7 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, if (Atm%flagstruct%grid_type>3) then if (Atm%flagstruct%grid_type == 4) then call setup_cartesian(npx, npy, Atm%flagstruct%dx_const, Atm%flagstruct%dy_const, & - Atm%flagstruct%deglat, Atm%bd) + Atm%flagstruct%deglat, Atm%flagstruct%domain_deg, Atm%bd, Atm) elseif (Atm%flagstruct%grid_type == 5) then call setup_orthogonal_grid(npx, npy, Atm%bd, grid_file) else @@ -1016,6 +1015,8 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, 202 format(A,A,i4.4,A,i4.4,A) ! Get and print Grid Statistics + !NOTE: This is only computed over a small part of the global domain, so the results can be inaccurate + ! for non-uniform grids. dxAV =0.0 angAV=0.0 aspAV=0.0 @@ -1074,7 +1075,6 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, write(*,* ) '' write(*,*) ' Radius is ', radius, ', omega is ', omega!, ' small_earth_scale = ', small_earth_scale write(*,* ) ' Cubed-Sphere Grid Stats : ', npx,'x',npy,'x',nregions - print*, dxN, dxM, dxAV, dxN, dxM write(*,'(A,f11.2,A,f11.2,A,f11.2,A,f11.2)') ' Grid Length : min: ', dxN,' max: ', dxM,' avg: ', dxAV, ' min/max: ',dxN/dxM write(*,'(A,e21.14,A,e21.14,A,e21.14)') ' Deviation from Orthogonal : min: ',angN,' max: ',angM,' avg: ',angAV write(*,'(A,e21.14,A,e21.14,A,e21.14)') ' Aspect Ratio : min: ',aspN,' max: ',aspM,' avg: ',aspAV @@ -1153,12 +1153,16 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, contains - subroutine setup_cartesian(npx, npy, dx_const, dy_const, deglat, bd) + subroutine setup_cartesian(npx, npy, dx_const, dy_const, deglat, domain_deg, bd, Atm) + type(fv_atmos_type), intent(INOUT), target :: Atm type(fv_grid_bounds_type), intent(IN) :: bd integer, intent(in):: npx, npy - real(kind=R_GRID), intent(IN) :: dx_const, dy_const, deglat - real(kind=R_GRID) lat_rad, lon_rad, domain_rad + real(kind=R_GRID), intent(IN) :: deglat + real(kind=R_GRID), intent(INOUT) :: dx_const, dy_const + real(kind=R_GRID), intent(IN) :: domain_deg + + real(kind=R_GRID) domain_rad, lat_rad, lon_rad integer i,j integer :: is, ie, js, je integer :: isd, ied, jsd, jed @@ -1172,9 +1176,24 @@ subroutine setup_cartesian(npx, npy, dx_const, dy_const, deglat, bd) jsd = bd%jsd jed = bd%jed - domain_rad = pi/16. ! arbitrary + if (domain_deg > 0.05) then + domain_rad = pi/180. * domain_deg + else + domain_rad = pi/16. ! arbitrary + endif + lat_rad = deglat * pi/180. - lon_rad = 0. ! arbitrary + !lon_rad = 0. ! arbitrary + lon_rad = - 50. * pi /180. !
careful: weird physics IC (tsc) when this is around 0 + + !added by Joseph + if (domain_deg > 0.05) then + dx_const = domain_deg*100000/(npx-1) + dy_const = dx_const + if (is_master()) print*,"Warning: Recalculating dx:", dx_const + if (is_master()) print*,"Creating a square doubly periodic domain of size", & + domain_deg, "degrees, a dx:", dx_const, ", centered at lonlat (deg): ", lon_rad *180./pi, deglat + endif dx(:,:) = dx_const rdx(:,:) = 1./dx_const @@ -1197,16 +1216,51 @@ subroutine setup_cartesian(npx, npy, dx_const, dy_const, deglat, bd) area_c(:,:) = dx_const*dy_const rarea_c(:,:) = 1./(dx_const*dy_const) -! The following is a hack to get pass the am2 phys init: - do j=max(1,jsd),min(jed,npy) - do i=max(1,isd),min(ied,npx) - grid(i,j,1) = lon_rad - 0.5*domain_rad + real(i-1)/real(npx-1)*domain_rad - grid(i,j,2) = lat_rad - 0.5*domain_rad + real(j-1)/real(npy-1)*domain_rad + + !call mpp_update_domains( dy, dx, Atm%domain, flags=SCALAR_PAIR, & + ! gridtype=CGRID_NE_PARAM, complete=.true.) !CHECK + + !generate grid_global for the top level parent grid + if (.not. atm%neststruct%nested)then + + if (is_master())then !! compute the grids as a function of !!npx&npy!! on master then broadcast to other pes + do j=1,npy + do i=1,npx + grid_global(i,j,1,1) = lon_rad - 0.5*domain_rad + real(i-1)/real(npx-1)*domain_rad + grid_global(i,j,2,1) = lat_rad - 0.5*domain_rad + real(j-1)/real(npy-1)*domain_rad + ! for long between 0 and 2pi + ! if (grid_global(i,j,1,1) > 2.*pi) grid_global(i,j,1,1) = grid_global(i,j,1,1) - 2.*pi + ! if (grid_global(i,j,1,1) < 0.) grid_global(i,j,1,1) = grid_global(i,j,1,1) + 2.*pi + enddo + enddo + endif + + call mpp_broadcast(grid_global, size(grid_global), mpp_root_pe()) !! this grid_global will be sent at the end of init_grid to the nested pes as p_grid to generate the nested grid. + + do j=js,je+1 + do i=is,ie+1 + grid(i,j,1)=grid_global(i,j,1,1) + grid(i,j,2)=grid_global(i,j,2,1) enddo enddo - agrid(:,:,1) = lon_rad - agrid(:,:,2) = lat_rad + call mpp_update_domains( grid, Atm%domain, position=CORNER) !CHECK + + agrid(:,:,1) = 99999999999 + agrid(:,:,2) = 99999999999 + + do j=jsd,jed + do i=isd,ied + call cell_center2(grid(i,j, 1:2), grid(i+1,j, 1:2), & + grid(i,j+1,1:2), grid(i+1,j+1,1:2), & + agrid(i,j,1:2) ) + enddo + enddo + + call mpp_update_domains( agrid, Atm%domain, position=CENTER, complete=.true. ) + + endif !if not nested + sina(:,:) = 1. cosa(:,:) = 0. @@ -1219,6 +1273,16 @@ subroutine setup_cartesian(npx, npy, dx_const, dy_const, deglat, bd) e2(2,:,:) = 1. e2(3,:,:) = 0. + call mpp_update_domains( area, Atm%domain, complete=.true. ) + + !############## + !SETUP THE NEST + !############## + + if (Atm%neststruct%nested) then + call setup_aligned_nest(Atm) + endif !if nested + end subroutine setup_cartesian subroutine setup_orthogonal_grid(npx, npy, bd, grid_file) @@ -1837,19 +1901,21 @@ subroutine setup_aligned_nest(Atm) call mpp_update_domains( agrid, Atm%domain, position=CENTER, complete=.true. ) - ! Compute dx - do j=jsd,jed+1 - do i=isd,ied - dx(i,j) = great_circle_dist(grid_global(i,j,:,1), grid_global(i+1,j,:,1), radius) - enddo - enddo + if (Atm%flagstruct%grid_type /= 4) then !already computed for a cartesian grid + ! Compute dx + do j=jsd,jed+1 + do i=isd,ied + dx(i,j) = great_circle_dist(grid_global(i,j,:,1), grid_global(i+1,j,:,1), radius) + enddo + enddo - ! Compute dy - do j=jsd,jed - do i=isd,ied+1 - dy(i,j) = great_circle_dist(grid_global(i,j,:,1), grid_global(i,j+1,:,1), radius) - enddo - enddo + ! 
Compute dy + do j=jsd,jed + do i=isd,ied+1 + dy(i,j) = great_circle_dist(grid_global(i,j,:,1), grid_global(i,j+1,:,1), radius) + enddo + enddo + endif !We will use Michael Herzog's algorithm for computing the weights. @@ -1921,18 +1987,19 @@ subroutine setup_aligned_nest(Atm) end do - do j=jsd,jed - do i=isd,ied - dxa(i,j) = great_circle_dist(c_grid_u(i,j,:), c_grid_u(i+1,j,:), radius) + if (Atm%flagstruct%grid_type /= 4) then !already computed for a cartesian grid + do j=jsd,jed + do i=isd,ied + dxa(i,j) = great_circle_dist(c_grid_u(i,j,:), c_grid_u(i+1,j,:), radius) + end do end do - end do - do j=jsd,jed - do i=isd,ied - dya(i,j) = great_circle_dist(c_grid_v(i,j,:), c_grid_v(i,j+1,:), radius) + do j=jsd,jed + do i=isd,ied + dya(i,j) = great_circle_dist(c_grid_v(i,j,:), c_grid_v(i,j+1,:), radius) + end do end do - end do - + endif !Compute interpolation weights. (Recall that the weights are defined with respect to a d-grid) diff --git a/tools/fv_io.F90 b/tools/fv_io.F90 index d8a310c4d..639799bdc 100644 --- a/tools/fv_io.F90 +++ b/tools/fv_io.F90 @@ -276,6 +276,12 @@ subroutine fv_io_register_restart(Atm) call fv_io_register_axis(Atm%Fv_restart_tile, numx=numx_2d, numy=numy_2d, xpos=xpos_2d, ypos=ypos_2d, numz=numz, zsize=zsize) !--- optionally include D-grid winds even if restarting from A-grid winds + if (Atm%flagstruct%is_ideal_case) then + call register_restart_field(Atm%Fv_restart_tile, 'u0', Atm%u0, & + dim_names_4d, is_optional=.true.) + call register_restart_field(Atm%Fv_restart_tile, 'v0', Atm%v0, & + dim_names_4d2, is_optional=.true.) + endif if (Atm%flagstruct%write_optional_dgrid_vel_rst .and. Atm%flagstruct%restart_from_agrid_winds) then call register_restart_field(Atm%Fv_restart_tile, 'u', Atm%u, & dim_names_4d, is_optional=.true.) @@ -318,6 +324,16 @@ subroutine fv_io_register_restart(Atm) call register_restart_field(Atm%Fv_restart_tile, 'phis', Atm%phis, dim_names_3d) if (.not. Atm%Fv_restart_tile%is_readonly) then !if writing file + if (Atm%flagstruct%is_ideal_case) then + if (variable_exists(Atm%Fv_restart_tile, 'u0')) then + call register_variable_attribute(Atm%Fv_restart_tile, 'u0', "long_name", "u0", str_len=len("u0")) + call register_variable_attribute(Atm%Fv_restart_tile, 'u0', "units", "none", str_len=len("none")) + endif + if (variable_exists(Atm%Fv_restart_tile, 'v0')) then + call register_variable_attribute(Atm%Fv_restart_tile, 'v0', "long_name", "v0", str_len=len("v0")) + call register_variable_attribute(Atm%Fv_restart_tile, 'v0', "units", "none", str_len=len("none")) + endif + endif if (variable_exists(Atm%Fv_restart_tile, 'u')) then call register_variable_attribute(Atm%Fv_restart_tile, 'u', "long_name", "u", str_len=len("u")) call register_variable_attribute(Atm%Fv_restart_tile, 'u', "units", "none", str_len=len("none")) @@ -615,7 +631,7 @@ subroutine remap_restart(Atm) ! 
!------------------------------------------------------------------------- - real, allocatable:: ak_r(:), bk_r(:) + real, allocatable:: ak_r(:), bk_r(:), u0_r(:,:,:), v0_r(:,:,:) real, allocatable:: u_r(:,:,:), v_r(:,:,:), pt_r(:,:,:), delp_r(:,:,:) real, allocatable:: w_r(:,:,:), delz_r(:,:,:), ze0_r(:,:,:) real, allocatable:: q_r(:,:,:,:), qdiag_r(:,:,:,:) @@ -645,6 +661,8 @@ subroutine remap_restart(Atm) allocate ( ak_r(npz_rst+1) ) allocate ( bk_r(npz_rst+1) ) + allocate ( u0_r(isc:iec, jsc:jec+1,npz_rst) ) + allocate ( v0_r(isc:iec+1,jsc:jec ,npz_rst) ) allocate ( u_r(isc:iec, jsc:jec+1,npz_rst) ) allocate ( v_r(isc:iec+1,jsc:jec ,npz_rst) ) @@ -684,6 +702,10 @@ subroutine remap_restart(Atm) fname = 'INPUT/fv_core.res'//trim(stile_name)//'.nc' if (open_file(Fv_tile_restart_r, fname, "read", fv_domain, is_restart=.true.)) then + if (Atm(1)%flagstruct%is_ideal_case) then + call read_data(Fv_tile_restart_r, 'u0', u0_r) + call read_data(Fv_tile_restart_r, 'v0', v0_r) + endif call read_data(Fv_tile_restart_r, 'u', u_r) call read_data(Fv_tile_restart_r, 'v', v_r) if (.not.Atm(1)%flagstruct%hydrostatic) then @@ -766,15 +788,17 @@ subroutine remap_restart(Atm) ! ====== end PJP added DA functionailty====== call rst_remap(npz_rst, npz, isc, iec, jsc, jec, isd, ied, jsd, jed, ntracers, ntprog, & - delp_r, u_r, v_r, w_r, delz_r, pt_r, q_r, qdiag_r,& - Atm(1)%delp, Atm(1)%u, Atm(1)%v, Atm(1)%w, Atm(1)%delz, Atm(1)%pt, Atm(1)%q, & - Atm(1)%qdiag, ak_r, bk_r, Atm(1)%ptop, Atm(1)%ak, Atm(1)%bk, & + delp_r, u0_r, v0_r, u_r, v_r, w_r, delz_r, pt_r, q_r, qdiag_r, Atm(1)%delp, & + Atm(1)%u0, Atm(1)%v0, Atm(1)%u, Atm(1)%v, Atm(1)%w, Atm(1)%delz, Atm(1)%pt, & + Atm(1)%q, Atm(1)%qdiag, ak_r, bk_r, Atm(1)%ptop, Atm(1)%ak, Atm(1)%bk, & Atm(1)%flagstruct%hydrostatic, Atm(1)%flagstruct%make_nh, Atm(1)%domain, & - Atm(1)%gridstruct%square_domain) + Atm(1)%gridstruct%square_domain, Atm(1)%flagstruct%is_ideal_case) !end do deallocate( ak_r ) deallocate( bk_r ) + deallocate( u0_r ) + deallocate( v0_r ) deallocate( u_r ) deallocate( v_r ) deallocate( pt_r ) @@ -1391,6 +1415,12 @@ subroutine fv_io_register_restart_BCs(Atm) #endif #endif #endif + if (Atm%flagstruct%is_ideal_case) then + call register_bcs_3d(Atm, Atm%neststruct%BCfile_ne, Atm%neststruct%BCfile_sw, & + fname_ne, fname_sw, 'u0', Atm%u0, Atm%neststruct%u_BC, jstag=1) + call register_bcs_3d(Atm, Atm%neststruct%BCfile_ne, Atm%neststruct%BCfile_sw, & + fname_ne, fname_sw, 'v0', Atm%v0, Atm%neststruct%v_BC, istag=1) + endif call register_bcs_3d(Atm, Atm%neststruct%BCfile_ne, Atm%neststruct%BCfile_sw, & fname_ne, fname_sw, 'u', Atm%u, Atm%neststruct%u_BC, jstag=1) call register_bcs_3d(Atm, Atm%neststruct%BCfile_ne, Atm%neststruct%BCfile_sw, & diff --git a/tools/fv_mp_mod.F90 b/tools/fv_mp_mod.F90 index 96d9a3e2b..49ddc77b8 100644 --- a/tools/fv_mp_mod.F90 +++ b/tools/fv_mp_mod.F90 @@ -28,10 +28,9 @@ module fv_mp_mod #if defined(SPMD) ! 
!USES: use fms_mod, only : fms_end - use mpp_mod, only : FATAL, MPP_DEBUG, NOTE, MPP_CLOCK_SYNC,MPP_CLOCK_DETAILED, WARNING + use mpp_mod, only : FATAL, MPP_DEBUG, NOTE, WARNING use mpp_mod, only : mpp_pe, mpp_npes, mpp_root_pe, mpp_error, mpp_set_warn_level use mpp_mod, only : mpp_declare_pelist, mpp_set_current_pelist, mpp_sync - use mpp_mod, only : mpp_clock_begin, mpp_clock_end, mpp_clock_id use mpp_mod, only : mpp_chksum, stdout, stderr, mpp_broadcast use mpp_mod, only : mpp_min, mpp_max, mpp_sum use mpp_mod, only : mpp_send, mpp_recv, mpp_sync_self, EVENT_RECV, mpp_gather @@ -323,7 +322,6 @@ subroutine domain_decomp(grid_num,npx,npy,nregions,grid_type,nested,layout,io_la nregions = 1 num_contact = 0 npes_per_tile = npes_x*npes_y !/nregions !Set up for concurrency - is_symmetry = .true. call mpp_define_layout( (/1,npx-1,1,npy-1/), npes_per_tile, layout ) if ( npes_x == 0 ) then @@ -333,7 +331,7 @@ subroutine domain_decomp(grid_num,npx,npy,nregions,grid_type,nested,layout,io_la npes_y = layout(2) endif - if ( npes_x==npes_y .and. (npx-1)==((npx-1)/npes_x)*npes_x ) square_domain = .true. + if ( npx==npy .and. npes_x==npes_y .and. (npx-1)==((npx-1)/npes_x)*npes_x ) square_domain = .true. if ( (npx/npes_x < ng) .or. (npy/npes_y < ng) ) then write(*,310) npes_x, npes_y, npx/npes_x, npy/npes_y @@ -357,8 +355,15 @@ subroutine domain_decomp(grid_num,npx,npy,nregions,grid_type,nested,layout,io_la case (4) ! Cartesian, double periodic type="Cartesian: double periodic" nregions = 1 - num_contact = 2 - npes_per_tile = npes/nregions + if (.not. nested) then + num_contact = 2 + else !accommodate a Cartesian nest + num_contact = 0 + if ( npx==npy .and. npes_x==npes_y .and. (npx-1)==((npx-1)/npes_x)*npes_x ) square_domain = .true. + endif + !npes_per_tile = npes/nregions + !the previous line will crash if there is a nest, since all "npes" will be distributed on the first grid only + npes_per_tile = npes_x*npes_y if(npes_x*npes_y == npes_per_tile) then layout = (/npes_x,npes_y/) else @@ -466,6 +471,7 @@ subroutine domain_decomp(grid_num,npx,npy,nregions,grid_type,nested,layout,io_la istart2(8) = nx; iend2(8) = nx; jstart2(8) = 1; jend2(8) = ny is_symmetry = .false. case (4) ! Cartesian, double periodic + if (.not. nested) then !--- Contact line 1, between tile 1 (EAST) and tile 1 (WEST) tile1(1) = 1; tile2(1) = 1 istart1(1) = nx; iend1(1) = nx; jstart1(1) = 1; jend1(1) = ny @@ -474,6 +480,7 @@ subroutine domain_decomp(grid_num,npx,npy,nregions,grid_type,nested,layout,io_la tile1(2) = 1; tile2(2) = 1 istart1(2) = 1; iend1(2) = nx; jstart1(2) = 1; jend1(2) = 1 istart2(2) = 1; iend2(2) = nx; jstart2(2) = ny; jend2(2) = ny + endif case (5) ! latlon patch case (6) !latlon strip diff --git a/tools/fv_nudge.F90 b/tools/fv_nudge.F90 index 15cbe5f4e..c8aae969e 100644 --- a/tools/fv_nudge.F90 +++ b/tools/fv_nudge.F90 @@ -3532,9 +3532,9 @@ subroutine del2_scalar(qdt, cd, kmd, nmax, bd, npx, npy, gridstruct, domain) enddo enddo enddo - call timing_on('COMM_TOTAL') + call timing_on('COMM_TOTAL') call mpp_update_domains(q, domain, complete=.true.)
- call timing_off('COMM_TOTAL') + call timing_off('COMM_TOTAL') do n=1,ntimes diff --git a/tools/fv_restart.F90 b/tools/fv_restart.F90 index f045c3f61..5edb281f9 100644 --- a/tools/fv_restart.F90 +++ b/tools/fv_restart.F90 @@ -37,7 +37,7 @@ module fv_restart_mod remap_restart, fv_io_write_BCs, fv_io_read_BCs use fv_grid_utils_mod, only: ptop_min, fill_ghost, g_sum, & make_eta_level, cubed_to_latlon, great_circle_dist - use fv_diagnostics_mod, only: prt_maxmin + use fv_diagnostics_mod, only: prt_maxmin, gn use init_hydro_mod, only: p_var use mpp_domains_mod, only: mpp_update_domains, domain2d, DGRID_NE use mpp_domains_mod, only: mpp_get_compute_domain, mpp_get_data_domain, mpp_get_global_domain @@ -257,6 +257,7 @@ subroutine fv_restart(fv_domain, Atm, dt_atmos, seconds, days, cold_start, grid_ !3. External_ic if (Atm(n)%flagstruct%external_ic) then + if( is_master() ) write(*,*) 'Calling get_external_ic' call get_external_ic(Atm(n), .not. do_read_restart) if( is_master() ) write(*,*) 'IC generated from the specified external source' @@ -322,6 +323,14 @@ subroutine fv_restart(fv_domain, Atm, dt_atmos, seconds, days, cold_start, grid_ if ( is_master() ) write(*,*) 'Warning !!! del-4 terrain filter has been applied ', & Atm(n)%flagstruct%n_zs_filter, ' times' endif + if ( Atm(n)%flagstruct%fv_land .and. allocated(sgh_g) .and. allocated(oro_g) ) then + do j=jsc,jec + do i=isc,iec + Atm(n)%sgh(i,j) = sgh_g(i,j) + Atm(n)%oro(i,j) = oro_g(i,j) + enddo + enddo + endif endif call mpp_update_domains( Atm(n)%phis, Atm(n)%domain, complete=.true. ) else @@ -329,10 +338,8 @@ subroutine fv_restart(fv_domain, Atm, dt_atmos, seconds, days, cold_start, grid_ if( is_master() ) write(*,*) 'phis set to zero' endif !mountain - - !5. Idealized test case - else + elseif (Atm(n)%flagstruct%is_ideal_case) then ideal_test_case(n) = 1 @@ -378,7 +385,7 @@ subroutine fv_restart(fv_domain, Atm, dt_atmos, seconds, days, cold_start, grid_ !Turn this off on the nested grid if you are just interpolating topography from the coarse grid! !These parameters are needed in LM3/LM4, and are communicated through restart files - if ( Atm(n)%flagstruct%fv_land ) then + if ( Atm(n)%flagstruct%fv_land .and. allocated(sgh_g) .and. allocated(oro_g)) then do j=jsc,jec do i=isc,iec Atm(n)%sgh(i,j) = sgh_g(i,j) @@ -387,6 +394,13 @@ subroutine fv_restart(fv_domain, Atm, dt_atmos, seconds, days, cold_start, grid_ enddo endif + Atm(n)%u0 = Atm(n)%u + Atm(n)%v0 = Atm(n)%v + + else + + call mpp_error(FATAL, "If there is no restart file, either external_ic or is_ideal_case must be set true.") + endif !external_ic vs. restart vs. idealized @@ -679,8 +693,16 @@ subroutine fv_restart(fv_domain, Atm, dt_atmos, seconds, days, cold_start, grid_ 1., Atm(n)%gridstruct%area_64, Atm(n)%domain) enddo #endif - call prt_maxmin('U ', Atm(n)%u(isc:iec,jsc:jec,1:npz), isc, iec, jsc, jec, 0, npz, 1.) - call prt_maxmin('V ', Atm(n)%v(isc:iec,jsc:jec,1:npz), isc, iec, jsc, jec, 0, npz, 1.) + call prt_maxmin('U (local) ', Atm(n)%u(isc:iec,jsc:jec,1:npz), isc, iec, jsc, jec, 0, npz, 1.) + call prt_maxmin('V (local) ', Atm(n)%v(isc:iec,jsc:jec,1:npz), isc, iec, jsc, jec, 0, npz, 1.) + ! compute ua, va + call cubed_to_latlon(Atm(n)%u, Atm(n)%v, Atm(n)%ua, Atm(n)%va, & + Atm(n)%gridstruct, & + Atm(n)%npx, Atm(n)%npy, npz, 1, & + Atm(n)%gridstruct%grid_type, Atm(n)%domain, & + Atm(n)%gridstruct%bounded_domain, Atm(n)%flagstruct%c2l_ord, Atm(n)%bd) + call prt_maxmin('UA ', Atm(n)%ua, isc, iec, jsc, jec, Atm(n)%ng, npz, 1.) 
+ call prt_maxmin('VA ', Atm(n)%va, isc, iec, jsc, jec, Atm(n)%ng, npz, 1.) if ( (.not.Atm(n)%flagstruct%hydrostatic) .and. Atm(n)%flagstruct%make_nh ) then call mpp_error(NOTE, " Initializing w to 0") @@ -777,6 +799,7 @@ subroutine fill_nested_grid_topo(Atm, proc_in) isd_p, ied_p, jsd_p, jed_p ) allocate(g_dat( isg:ieg, jsg:jeg, 1) ) + call timing_on('COMM_TOTAL') !!! FIXME: For whatever reason this code CRASHES if the lower-left corner @@ -803,6 +826,7 @@ subroutine fill_nested_grid_topo(Atm, proc_in) endif call timing_off('COMM_TOTAL') + if (process) call fill_nested_grid(Atm%phis, g_dat(isg:,jsg:,1), & Atm%neststruct%ind_h, Atm%neststruct%wt_h, & 0, 0, isg, ieg, jsg, jeg, Atm%bd) @@ -876,22 +900,23 @@ subroutine fill_nested_grid_data(Atm, proc_in) call timing_on('COMM_TOTAL') !Call mpp_global_field on the procs that have the required data. - !Then broadcast from the head PE to the receiving PEs - if (Atm(1)%neststruct%parent_proc .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%global_tile) then - call mpp_global_field( & - Atm(1)%parent_grid%domain, & - Atm(1)%parent_grid%delp(isd_p:ied_p,jsd_p:jed_p,:), g_dat, position=CENTER) - if (gid == sending_proc) then !crazy logic but what we have for now - do p=1,size(Atm(1)%pelist) - call mpp_send(g_dat,size(g_dat),Atm(1)%pelist(p)) - enddo - endif - endif - if (ANY(Atm(1)%pelist == gid)) then - call mpp_recv(g_dat, size(g_dat), sending_proc) + !Then broadcast from the head PE to the receiving PEs + if (Atm(1)%neststruct%parent_proc .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%global_tile) then + call mpp_global_field( & + Atm(1)%parent_grid%domain, & + Atm(1)%parent_grid%delp(isd_p:ied_p,jsd_p:jed_p,:), g_dat, position=CENTER) + if (gid == sending_proc) then !crazy logic but what we have for now + do p=1,size(Atm(1)%pelist) + call mpp_send(g_dat,size(g_dat),Atm(1)%pelist(p)) + enddo endif + endif + if (ANY(Atm(1)%pelist == gid)) then + call mpp_recv(g_dat, size(g_dat), sending_proc) + endif call timing_off('COMM_TOTAL') + if (process) call fill_nested_grid(Atm(1)%delp, g_dat, & Atm(1)%neststruct%ind_h, Atm(1)%neststruct%wt_h, & 0, 0, isg, ieg, jsg, jeg, npz, Atm(1)%bd) @@ -903,26 +928,27 @@ subroutine fill_nested_grid_data(Atm, proc_in) call timing_on('COMM_TOTAL') - if (ANY(Atm(1)%parent_grid%pelist == gid) .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%global_tile) then - call mpp_global_field( & - Atm(1)%parent_grid%domain, & - Atm(1)%parent_grid%q(isd_p:ied_p,jsd_p:jed_p,:,nq), g_dat, position=CENTER) - if (gid == sending_proc) then - do p=1,size(Atm(1)%pelist) - call mpp_send(g_dat,size(g_dat),Atm(1)%pelist(p)) - enddo - endif - endif - if (ANY(Atm(1)%pelist == gid)) then - call mpp_recv(g_dat, size(g_dat), sending_proc) + if (ANY(Atm(1)%parent_grid%pelist == gid) .and. 
Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%global_tile) then + call mpp_global_field( & + Atm(1)%parent_grid%domain, & + Atm(1)%parent_grid%q(isd_p:ied_p,jsd_p:jed_p,:,nq), g_dat, position=CENTER) + if (gid == sending_proc) then + do p=1,size(Atm(1)%pelist) + call mpp_send(g_dat,size(g_dat),Atm(1)%pelist(p)) + enddo endif + endif + if (ANY(Atm(1)%pelist == gid)) then + call mpp_recv(g_dat, size(g_dat), sending_proc) + endif call timing_off('COMM_TOTAL') + if (process) call fill_nested_grid(Atm(1)%q(isd:ied,jsd:jed,:,nq), g_dat, & Atm(1)%neststruct%ind_h, Atm(1)%neststruct%wt_h, & 0, 0, isg, ieg, jsg, jeg, npz, Atm(1)%bd) - call mpp_sync_self + call mpp_sync_self end do @@ -935,23 +961,24 @@ subroutine fill_nested_grid_data(Atm, proc_in) call timing_on('COMM_TOTAL') - if (ANY(Atm(1)%parent_grid%pelist == gid) .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%global_tile) then - call mpp_global_field( & - Atm(1)%parent_grid%domain, & - Atm(1)%parent_grid%pt(isd_p:ied_p,jsd_p:jed_p,:), g_dat, position=CENTER) - if (gid == sending_proc) then - do p=1,size(Atm(1)%pelist) - call mpp_send(g_dat,size(g_dat),Atm(1)%pelist(p)) - enddo - endif - endif - if (ANY(Atm(1)%pelist == gid)) then - call mpp_recv(g_dat, size(g_dat), sending_proc) + if (ANY(Atm(1)%parent_grid%pelist == gid) .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%global_tile) then + call mpp_global_field( & + Atm(1)%parent_grid%domain, & + Atm(1)%parent_grid%pt(isd_p:ied_p,jsd_p:jed_p,:), g_dat, position=CENTER) + if (gid == sending_proc) then + do p=1,size(Atm(1)%pelist) + call mpp_send(g_dat,size(g_dat),Atm(1)%pelist(p)) + enddo endif + endif + if (ANY(Atm(1)%pelist == gid)) then + call mpp_recv(g_dat, size(g_dat), sending_proc) + endif call mpp_sync_self call timing_off('COMM_TOTAL') + if (process) call fill_nested_grid(Atm(1)%pt, g_dat, & Atm(1)%neststruct%ind_h, Atm(1)%neststruct%wt_h, & 0, 0, isg, ieg, jsg, jeg, npz, Atm(1)%bd) @@ -970,23 +997,24 @@ subroutine fill_nested_grid_data(Atm, proc_in) call timing_on('COMM_TOTAL') - if (ANY(Atm(1)%parent_grid%pelist == gid) .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%global_tile) then - call mpp_global_field( & - Atm(1)%parent_grid%domain, & - Atm(1)%parent_grid%pkz(isc_p:iec_p,jsc_p:jec_p,:), g_dat, position=CENTER) - if (gid == sending_proc) then - do p=1,size(Atm(1)%pelist) - call mpp_send(g_dat,size(g_dat),Atm(1)%pelist(p)) - enddo - endif - endif - if (ANY(Atm(1)%pelist == gid)) then - call mpp_recv(g_dat, size(g_dat), sending_proc) + if (ANY(Atm(1)%parent_grid%pelist == gid) .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%global_tile) then + call mpp_global_field( & + Atm(1)%parent_grid%domain, & + Atm(1)%parent_grid%pkz(isc_p:iec_p,jsc_p:jec_p,:), g_dat, position=CENTER) + if (gid == sending_proc) then + do p=1,size(Atm(1)%pelist) + call mpp_send(g_dat,size(g_dat),Atm(1)%pelist(p)) + enddo endif + endif + if (ANY(Atm(1)%pelist == gid)) then + call mpp_recv(g_dat, size(g_dat), sending_proc) + endif call mpp_sync_self call timing_off('COMM_TOTAL') + if (process) then allocate(pt_coarse(isd:ied,jsd:jed,npz)) call fill_nested_grid(pt_coarse, g_dat, & @@ -1064,23 +1092,24 @@ subroutine fill_nested_grid_data(Atm, proc_in) !delz call timing_on('COMM_TOTAL') - if (ANY(Atm(1)%parent_grid%pelist == gid) .and. 
Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%global_tile) then - call mpp_global_field( & - Atm(1)%parent_grid%domain, & - Atm(1)%parent_grid%delz(isd_p:ied_p,jsd_p:jed_p,:), g_dat, position=CENTER) - if (gid == sending_proc) then - do p=1,size(Atm(1)%pelist) - call mpp_send(g_dat,size(g_dat),Atm(1)%pelist(p)) - enddo - endif - endif - if (ANY(Atm(1)%pelist == gid)) then - call mpp_recv(g_dat, size(g_dat), sending_proc) + if (ANY(Atm(1)%parent_grid%pelist == gid) .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%global_tile) then + call mpp_global_field( & + Atm(1)%parent_grid%domain, & + Atm(1)%parent_grid%delz(isd_p:ied_p,jsd_p:jed_p,:), g_dat, position=CENTER) + if (gid == sending_proc) then + do p=1,size(Atm(1)%pelist) + call mpp_send(g_dat,size(g_dat),Atm(1)%pelist(p)) + enddo endif + endif + if (ANY(Atm(1)%pelist == gid)) then + call mpp_recv(g_dat, size(g_dat), sending_proc) + endif - call mpp_sync_self + call mpp_sync_self call timing_off('COMM_TOTAL') + if (process) call fill_nested_grid(Atm(1)%delz, g_dat, & Atm(1)%neststruct%ind_h, Atm(1)%neststruct%wt_h, & 0, 0, isg, ieg, jsg, jeg, npz, Atm(1)%bd) @@ -1089,23 +1118,24 @@ subroutine fill_nested_grid_data(Atm, proc_in) call timing_on('COMM_TOTAL') - if (ANY(Atm(1)%parent_grid%pelist == gid) .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%global_tile) then - call mpp_global_field( & - Atm(1)%parent_grid%domain, & - Atm(1)%parent_grid%w(isd_p:ied_p,jsd_p:jed_p,:), g_dat, position=CENTER) - if (gid == sending_proc) then - do p=1,size(Atm(1)%pelist) - call mpp_send(g_dat,size(g_dat),Atm(1)%pelist(p)) - enddo - endif - endif - if (ANY(Atm(1)%pelist == gid)) then - call mpp_recv(g_dat, size(g_dat), sending_proc) + if (ANY(Atm(1)%parent_grid%pelist == gid) .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%global_tile) then + call mpp_global_field( & + Atm(1)%parent_grid%domain, & + Atm(1)%parent_grid%w(isd_p:ied_p,jsd_p:jed_p,:), g_dat, position=CENTER) + if (gid == sending_proc) then + do p=1,size(Atm(1)%pelist) + call mpp_send(g_dat,size(g_dat),Atm(1)%pelist(p)) + enddo endif + endif + if (ANY(Atm(1)%pelist == gid)) then + call mpp_recv(g_dat, size(g_dat), sending_proc) + endif - call mpp_sync_self + call mpp_sync_self call timing_off('COMM_TOTAL') + if (process) call fill_nested_grid(Atm(1)%w, g_dat, & Atm(1)%neststruct%ind_h, Atm(1)%neststruct%wt_h, & 0, 0, isg, ieg, jsg, jeg, npz, Atm(1)%bd) @@ -1123,23 +1153,24 @@ subroutine fill_nested_grid_data(Atm, proc_in) call timing_on('COMM_TOTAL') - if (ANY(Atm(1)%parent_grid%pelist == gid) .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%global_tile) then - call mpp_global_field( & - Atm(1)%parent_grid%domain, & - Atm(1)%parent_grid%u(isd_p:ied_p,jsd_p:jed_p+1,:), g_dat, position=NORTH) - if (gid == sending_proc) then - do p=1,size(Atm(1)%pelist) - call mpp_send(g_dat,size(g_dat),Atm(1)%pelist(p)) - enddo - endif - endif - if (ANY(Atm(1)%pelist == gid)) then - call mpp_recv(g_dat, size(g_dat), sending_proc) + if (ANY(Atm(1)%parent_grid%pelist == gid) .and. 
Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%global_tile) then + call mpp_global_field( & + Atm(1)%parent_grid%domain, & + Atm(1)%parent_grid%u(isd_p:ied_p,jsd_p:jed_p+1,:), g_dat, position=NORTH) + if (gid == sending_proc) then + do p=1,size(Atm(1)%pelist) + call mpp_send(g_dat,size(g_dat),Atm(1)%pelist(p)) + enddo endif + endif + if (ANY(Atm(1)%pelist == gid)) then + call mpp_recv(g_dat, size(g_dat), sending_proc) + endif call mpp_sync_self call timing_off('COMM_TOTAL') + call mpp_sync_self if (process) call fill_nested_grid(Atm(1)%u, g_dat, & Atm(1)%neststruct%ind_u, Atm(1)%neststruct%wt_u, & @@ -1153,22 +1184,23 @@ subroutine fill_nested_grid_data(Atm, proc_in) call timing_on('COMM_TOTAL') - if (ANY(Atm(1)%parent_grid%pelist == gid) .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%global_tile) then - call mpp_global_field( & - Atm(1)%parent_grid%domain, & - Atm(1)%parent_grid%v(isd_p:ied_p+1,jsd_p:jed_p,:), g_dat, position=EAST) - if (gid == sending_proc) then - do p=1,size(Atm(1)%pelist) - call mpp_send(g_dat,size(g_dat),Atm(1)%pelist(p)) - enddo - endif - endif - if (ANY(Atm(1)%pelist == gid)) then - call mpp_recv(g_dat, size(g_dat), sending_proc) + if (ANY(Atm(1)%parent_grid%pelist == gid) .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%global_tile) then + call mpp_global_field( & + Atm(1)%parent_grid%domain, & + Atm(1)%parent_grid%v(isd_p:ied_p+1,jsd_p:jed_p,:), g_dat, position=EAST) + if (gid == sending_proc) then + do p=1,size(Atm(1)%pelist) + call mpp_send(g_dat,size(g_dat),Atm(1)%pelist(p)) + enddo endif + endif + if (ANY(Atm(1)%pelist == gid)) then + call mpp_recv(g_dat, size(g_dat), sending_proc) + endif call mpp_sync_self - call timing_off('COMM_TOTAL') + + call timing_off('COMM_TOTAL') if (process) call fill_nested_grid(Atm(1)%v, g_dat, & Atm(1)%neststruct%ind_v, Atm(1)%neststruct%wt_v, & @@ -1354,8 +1386,10 @@ subroutine fv_restart_end(Atm) call pmaxmn_g('ZS', Atm%phis, isc, iec, jsc, jec, 1, 1./grav, Atm%gridstruct%area_64, Atm%domain) call pmaxmn_g('PS ', Atm%ps, isc, iec, jsc, jec, 1, 0.01 , Atm%gridstruct%area_64, Atm%domain) call prt_maxmin('PS*', Atm%ps, isc, iec, jsc, jec, Atm%ng, 1, 0.01) - call prt_maxmin('U ', Atm%u(isd:ied,jsd:jed,1:npz), isc, iec, jsc, jec, Atm%ng, npz, 1.) - call prt_maxmin('V ', Atm%v(isd:ied,jsd:jed,1:npz), isc, iec, jsc, jec, Atm%ng, npz, 1.) + call prt_maxmin('U (local) ', Atm%u(isc:iec,jsc:jec,1:npz), isc, iec, jsc, jec, 0, npz, 1.) + call prt_maxmin('V (local) ', Atm%v(isc:iec,jsc:jec,1:npz), isc, iec, jsc, jec, 0, npz, 1.) + call prt_maxmin('UA ', Atm%ua, isc, iec, jsc, jec, Atm%ng, npz, 1.) + call prt_maxmin('VA ', Atm%va, isc, iec, jsc, jec, Atm%ng, npz, 1.) if ( .not. Atm%flagstruct%hydrostatic ) & call prt_maxmin('W ', Atm%w , isc, iec, jsc, jec, Atm%ng, npz, 1.) call prt_maxmin('T ', Atm%pt, isc, iec, jsc, jec, Atm%ng, npz, 1.) @@ -1409,6 +1443,9 @@ subroutine pmaxmn_g(qname, q, is, ie, js, je, km, fac, area, domain) ! real qmin, qmax, gmean integer i,j,k + character(len=8) :: display_name + + logical, SAVE :: first_time = .true. qmin = q(is,js,1) qmax = qmin @@ -1432,7 +1469,12 @@ subroutine pmaxmn_g(qname, q, is, ie, js, je, km, fac, area, domain) call mp_reduce_max(qmax) gmean = g_sum(domain, q(is:ie,js:je,km), is, ie, js, je, 3, area, 1, .true.) 
- if(is_master()) write(6,*) qname, qmax*fac, qmin*fac, gmean*fac + + if(is_master()) then + j = min(len(trim(qname)),8) + display_name = qname(1:j) + write(6,*) display_name, trim(gn), qmax*fac, qmin*fac, gmean*fac + endif end subroutine pmaxmn_g diff --git a/tools/fv_surf_map.F90 b/tools/fv_surf_map.F90 index 5a634dec5..1ffa8d0a3 100644 --- a/tools/fv_surf_map.F90 +++ b/tools/fv_surf_map.F90 @@ -72,7 +72,6 @@ module fv_surf_map_mod character(len=6) :: surf_format = 'netcdf' logical :: namelist_read = .false. - real(kind=R_GRID) da_min real cos_grid character(len=3) :: grid_string = '' @@ -123,7 +122,7 @@ subroutine surfdrv(npx, npy, grid, agrid, area, dx, dy, dxa, dya, dxc, dyc, sin_ real(kind=4), allocatable :: ft(:,:), zs(:,:) real, allocatable :: lon1(:), lat1(:) real dx1, dx2, dy1, dy2, lats, latn, r2d - real(kind=R_GRID) da_max + real(kind=R_GRID) da_max, da_min real zmean, z2mean, delg, rgrav ! real z_sp, f_sp, z_np, f_np integer i, j, n, mdim @@ -331,11 +330,11 @@ subroutine surfdrv(npx, npy, grid, agrid, area, dx, dy, dxa, dya, dxc, dyc, sin_ allocate ( oro_g(isd:ied, jsd:jed) ) allocate ( sgh_g(isd:ied, jsd:jed) ) - call timing_on('map_to_cubed') + call timing_on('map_to_cubed') call map_to_cubed_raw(igh, nlon, jt, lat1(jstart:jend+1), lon1, zs, ft, grid, agrid, & phis, oro_g, sgh_g, npx, npy, jstart, jend, stretch_fac, bounded_domain, npx_global, bd) if (is_master()) write(*,*) 'map_to_cubed_raw: master PE done' - call timing_off('map_to_cubed') + call timing_off('map_to_cubed') deallocate ( zs ) deallocate ( ft ) @@ -386,7 +385,7 @@ subroutine surfdrv(npx, npy, grid, agrid, area, dx, dy, dxa, dya, dxc, dyc, sin_ if ( is_master() ) write(*,*) 'ORO', trim(grid_string), ' min=', da_min, ' Max=', da_max call global_mx(area, ng, da_min, da_max, bd) - call timing_on('Terrain_filter') + call timing_on('Terrain_filter') ! Del-2: high resolution only if ( zs_filter ) then if(is_master()) then @@ -449,9 +448,9 @@ subroutine surfdrv(npx, npy, grid, agrid, area, dx, dy, dxa, dya, dxc, dyc, sin_ if ( is_master() ) write(*,*) 'Before filter SGH', trim(grid_string), ' min=', da_min, ' Max=', da_max -!----------------------------------------------- -! Filter the standard deviation of mean terrain: -!----------------------------------------------- + !----------------------------------------------- + ! Filter the standard deviation of mean terrain: + !----------------------------------------------- call global_mx(area, ng, da_min, da_max, bd) if(zs_filter) call del4_cubed_sphere(npx, npy, sgh_g, area, dx, dy, dxc, dyc, sin_sg, 1, zero_ocean, oro_g, bounded_domain, domain, bd) @@ -485,7 +484,7 @@ subroutine FV3_zs_filter (bd, isd, ied, jsd, jed, npx, npy, npx_global, & real, intent(inout):: oro(isd:ied,jsd,jed) type(domain2d), intent(INOUT) :: domain integer mdim - real(kind=R_GRID) da_max + real(kind=R_GRID) da_max, da_min if (is_master()) print*, ' Calling FV3_zs_filter...' @@ -1150,9 +1149,8 @@ subroutine del4_cubed_sphere(npx, npy, q, area, dx, dy, dxc, dyc, sin_sg, nmax, end subroutine del4_cubed_sphere - subroutine map_to_cubed_raw(igh, im, jt, lat1, lon1, zs, ft, grid, agrid, & - q2, f2, h2, npx, npy, jstart, jend, stretch_fac, & + phis, oro, sgh, npx, npy, jstart, jend, stretch_fac, & bounded_domain, npx_global, bd) ! Input @@ -1168,9 +1166,9 @@ subroutine map_to_cubed_raw(igh, im, jt, lat1, lon1, zs, ft, grid, agrid, & real(kind=R_GRID), intent(IN) :: stretch_fac logical, intent(IN) :: bounded_domain ! Output - real, intent(out):: q2(bd%isd:bd%ied,bd%jsd:bd%jed) ! 
Mapped data at the target resolution - real, intent(out):: f2(bd%isd:bd%ied,bd%jsd:bd%jed) ! oro - real, intent(out):: h2(bd%isd:bd%ied,bd%jsd:bd%jed) ! variances of terrain + real, intent(out):: phis(bd%isd:bd%ied,bd%jsd:bd%jed) ! phis (surface geopotential) mapped data at the target resolution + real, intent(out):: oro(bd%isd:bd%ied,bd%jsd:bd%jed) ! oro (land mask) + real, intent(out):: sgh(bd%isd:bd%ied,bd%jsd:bd%jed) ! sgh variances of terrain ! Local real :: lon_g(-igh:im+igh) real lat_g(jt), cos_g(jt) @@ -1307,23 +1305,23 @@ subroutine map_to_cubed_raw(igh, im, jt, lat1, lon1, zs, ft, grid, agrid, & (i < is .and. j > je) .or. & (i > ie .and. j < js) .or. & (i > ie .and. j > je)) .and. .not. bounded_domain) then - q2(i,j) = 1.e25 - f2(i,j) = 1.e25 - h2(i,j) = 1.e25 + phis(i,j) = 1.e25 + oro(i,j) = 1.e25 + sgh(i,j) = 1.e25 goto 4444 end if if ( agrid(i,j,2) < -pi5+stretch_fac*pi5/real(npx_global-1) ) then ! SP: - q2(i,j) = qsp - f2(i,j) = fsp - h2(i,j) = hsp + phis(i,j) = qsp + oro(i,j) = fsp + sgh(i,j) = hsp goto 4444 elseif ( agrid(i,j,2) > pi5-stretch_fac*pi5/real(npx_global-1) ) then ! NP: - q2(i,j) = qnp - f2(i,j) = fnp - h2(i,j) = hnp + phis(i,j) = qnp + oro(i,j) = fnp + sgh(i,j) = hnp goto 4444 endif @@ -1422,9 +1420,9 @@ subroutine map_to_cubed_raw(igh, im, jt, lat1, lon1, zs, ft, grid, agrid, & enddo if ( np > 0 ) then - q2(i,j) = qsum / asum - f2(i,j) = fsum / asum - h2(i,j) = hsum / real(np) - q2(i,j)**2 + phis(i,j) = qsum / asum + oro(i,j) = fsum / asum + sgh(i,j) = hsum / real(np) - phis(i,j)**2 min_pts = min(min_pts, np) else write(*,*) 'min and max lat_g is ', r2d*minval(lat_g), r2d*maxval(lat_g), mpp_pe() @@ -1436,7 +1434,7 @@ subroutine map_to_cubed_raw(igh, im, jt, lat1, lon1, zs, ft, grid, agrid, & enddo enddo - if(is_master()) write(*,*) 'surf_map: minimum pts per cell (master PE)=', min_pts + if(is_master()) write(*,*) 'surf_map: minimum pts per cell (master PE)=', min_pts if ( min_pts <3 ) then if(is_master()) write(*,*) 'Warning: too few points used in creating the cell mean terrain !!!' endif @@ -1493,7 +1491,7 @@ subroutine handle_err(status) if (status .ne. 
nf_noerr) then print *, nf_strerror(status) - stop 'Stopped' + stop 'fv_surf_map_mod: Stopped due to file error' endif end subroutine handle_err diff --git a/tools/fv_timing.F90 b/tools/fv_timing.F90 index 6b55b2fdb..2067ec852 100644 --- a/tools/fv_timing.F90 +++ b/tools/fv_timing.F90 @@ -268,11 +268,11 @@ subroutine timing_prt(gid) print * print *, & - ' -----------------------------------------------------' + ' ---------------------------------------------------------------------' print *, & - ' Block User time System Time Total Time GID ' + ' Block User time System Time Total Time GID' print *, & - ' -----------------------------------------------------' + ' ---------------------------------------------------------------------' do n = 1, tblk print '(3x,a20,2x,3(1x,f12.4), 2x, I6)', blkname(n), & diff --git a/tools/test_cases.F90 b/tools/test_cases.F90 index 03760411a..7973e60b4 100644 --- a/tools/test_cases.F90 +++ b/tools/test_cases.F90 @@ -43,11 +43,12 @@ module test_cases_mod use mpp_parameter_mod, only: AGRID_PARAM=>AGRID,CGRID_NE_PARAM=>CGRID_NE, & SCALAR_PAIR use gfdl_mp_mod, only: mqs3d - use fv_diagnostics_mod, only: prt_maxmin, ppme, eqv_pot, qcly0, is_ideal_case + use fv_diagnostics_mod, only: prt_maxmin, ppme, eqv_pot, qcly0 use mpp_mod, only: mpp_pe, mpp_chksum, stdout use fv_arrays_mod, only: fv_grid_type, fv_flags_type, fv_grid_bounds_type, R_GRID use tracer_manager_mod, only: get_tracer_index use field_manager_mod, only: MODEL_ATMOS + use w_forcing_mod, only: init_w_forcing implicit none private @@ -110,8 +111,8 @@ module test_cases_mod ! 19 = LJZ update to 17 with Cetrone-Houze marine sounding ! and several bubble and sounding options ! 101 = LES with isothermal atmosphere (not implemented) - - +! 102 = Beare et al. (Boundary-Layer Meteorol. 2006) SBL LES case +! 103 = DYCOMS II (Stevens et al. 2003, MWR) SCu case (requires forcing terms) @@ -122,13 +123,16 @@ module test_cases_mod logical :: no_wind = .false. logical :: gaussian_dt = .false. logical :: do_marine_sounding = .false. - real :: dt_amp = 2.1 + real :: dt_amp = 2.1 ! K + real :: dt_rad = 2000. !m real :: alpha = 0.0 integer :: Nsolitons = 2 real :: soliton_size = 750.e3, soliton_Umax = 50. logical :: checker_tr real :: small_earth_scale = 1.0 real :: umean = 0.0 + real :: vmean = 0.0 + logical :: w_forcing ! Case 0 parameters real :: p0_c0 = 3.0 @@ -160,6 +164,10 @@ module test_cases_mod real , allocatable :: case9_B(:,:) real :: AofT(2) + ! case 20 -doubly periodic- idealized TC + real :: dp_TC = 1115. + real :: rp_TC = 100000. + real :: Ts_TC = 300. ! Validating fields used in statistics real , allocatable :: phi0(:,:,:) ! Validating Field @@ -182,7 +190,7 @@ module test_cases_mod public :: case9_forcing1, case9_forcing2, case51_forcing public :: init_double_periodic public :: checker_tracers - public :: radius, omega, small_earth_scale + public :: radius, omega, small_earth_scale, w_forcing INTERFACE mp_update_dwinds MODULE PROCEDURE mp_update_dwinds_2d @@ -634,7 +642,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, real :: gz(bd%isd:bd%ied,bd%jsd:bd%jed,npz+1), zt, zdist real :: zvir - integer :: Cl, Cl2 + integer :: Cl, Cl2, itrac ! Super-Cell real :: us0 = 30. 
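The test_cases.F90 hunks below change the vortex surface-pressure assignment from ps(i,j) = p00 - dp*exp(-(r/rp)**1.5) to an accumulation onto the existing field, which is what allows test_case 58 to superpose a second Atlantic vortex on the first. A minimal sketch of that accumulation pattern is given here for illustration only; the loop over vortex centers, p0_list, and N_tc are hypothetical and not part of this patch, while dp, rp, p00, agrid, and great_circle_dist are as in the surrounding code.

! Illustrative sketch only: superpose N_tc vortices by accumulating each
! surface-pressure deficit onto ps; not code in this patch.
ps(is:ie,js:je) = p00                       ! background pressure, set once
do nv = 1, N_tc                             ! hypothetical list of vortex centers
   do j = js, je
      do i = is, ie
         r = great_circle_dist(p0_list(:,nv), agrid(i,j,1:2), radius)
         ps(i,j) = ps(i,j) - dp*exp(-(r/rp)**1.5)
      enddo
   enddo
enddo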
@@ -682,6 +690,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, integer :: is, ie, js, je integer :: isd, ied, jsd, jed + integer :: counter is = bd%is ie = bd%ie @@ -1703,7 +1712,8 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, delta_T = 480000.0 lapse_rate = 0.005 !$OMP parallel do default(none) shared(is,ie,js,je,npz,eta,ak,bk,T_0,lapse_rate,eta_t, & -!$OMP delta_T,ptop,delp,Ubar,eta_v,agrid,grid,pcen,pt,r0,radius,omega) & +!$OMP delta_T,ptop,delp,Ubar,eta_v,agrid,grid,pcen,pt,& +!$OMP r0,radius,omega,flagstruct) & !$OMP private(T_mean,press,pt1,pt2,pt3,pt4,pt5,pt6,pt7,pt8,pt9,p1,r) do z=1,npz eta(z) = 0.5*( (ak(z)+ak(z+1))/1.e5 + bk(z)+bk(z+1) ) @@ -1716,7 +1726,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, do zz=1,z press = press + delp(is,js,zz) enddo - if (is_master()) write(*,230) z, eta(z), press/100., T_mean + if (is_master() .and. flagstruct%fv_debug) write(*,230) z, eta(z), press/100., T_mean do j=js,je do i=is,ie ! A-grid cell center: i,j @@ -1870,8 +1880,10 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, ! call get_vorticity(is, ie, js, je, isd, ied, jsd, jed, npz, u, v, q(is:ie,js:je,:,2)) ! call pv_entropy(is, ie, js, je, ng, npz, q(is:ie,js:je,:,2), f0, pt, pkz, delp, grav) - write(stdout(), *) 'PI:', pi - write(stdout(), *) 'PHIS:', mpp_chksum(phis(is:ie,js:je)) + if (flagstruct%fv_debug) then + write(stdout(), *) 'PI:', pi + write(stdout(), *) 'PHIS:', mpp_chksum(phis(is:ie,js:je)) + endif else if ( (test_case==-12) .or. (test_case==-13) ) then @@ -1880,7 +1892,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, pk,peln,pe,pkz,gz,phis,ps,grid,agrid,hydrostatic, & nwat, adiabatic, test_case == -13, domain, bd) - write(stdout(), *) 'PHIS:', mpp_chksum(phis(is:ie,js:je)) + if (flagstruct%fv_debug) write(stdout(), *) 'PHIS:', mpp_chksum(phis(is:ie,js:je)) else if ( test_case==15 .or. test_case==19 ) then !------------------------------------ @@ -2533,7 +2545,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, ak(k) = (((t00-height*gamma)/t00)**(1./exponent)-1.)/(px - 1.)*px*p00 bk(k) = (((t00-height*gamma)/t00)**(1./exponent)-px)/(1.-px) endif - if (is_master()) write(*,*) k, ak(k), bk(k), height, ak(k)+bk(k)*p00 + if (flagstruct%fv_debug .and. is_master()) write(*,*) k, ak(k), bk(k), height, ak(k)+bk(k)*p00 enddo ptop = ak(1) @@ -2681,7 +2693,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, vbar = vtmp - 4.25 endif - if( is_master() ) then + if( is_master() .and. flagstruct%fv_debug) then write(6,*) k, utmp, vtmp endif @@ -3332,7 +3344,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, enddo enddo endif - else if (test_case == 55 .or. test_case == 56 .or. test_case == 57) then + else if (test_case == 55 .or. test_case == 56 .or. test_case == 57 .or. test_case == 58) then !Tropical cyclone test case: DCMIP 5X @@ -3341,6 +3353,10 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, !test_case 57 uses a globally-uniform f-plane + ! test_case 58 same as 57 with a second TC in the Atlantic + ! to test multiple moving nests + counter = 0 + ! Initialize surface Pressure !Vortex perturbation p0(1) = 180. * pi / 180. @@ -3356,12 +3372,18 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, p00 = 101500.
ps = p00 + ps_u = p00 + ps_v = p00 + + 58 continue ! create second TC + + if (is_master() .and. test_case == 58) print*, 'INITIALIZING TC at: ', p0(1), p0(2) do j=js,je do i=is,ie p2(:) = agrid(i,j,1:2) r = great_circle_dist( p0, p2, radius ) - ps(i,j) = p00 - dp*exp(-(r/rp)**1.5) + ps(i,j) = ps(i,j) - dp*exp(-(r/rp)**1.5) phis(i,j) = 0. enddo enddo @@ -3394,14 +3416,14 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, do i=is,ie+1 p2(:) = 0.5*(grid(i,j,1:2)+grid(i,j+1,1:2)) r = great_circle_dist( p0, p2, radius ) - ps_v(i,j) = p00 - dp*exp(-(r/rp)**1.5) + ps_v(i,j) = ps_v(i,j) - dp*exp(-(r/rp)**1.5) enddo enddo do j=js,je+1 do i=is,ie p2(:) = 0.5*(grid(i,j,1:2)+grid(i+1,j,1:2)) r = great_circle_dist( p0, p2, radius ) - ps_u(i,j) = p00 - dp*exp(-(r/rp)**1.5) + ps_u(i,j) = ps_u(i,j) - dp*exp(-(r/rp)**1.5) enddo enddo @@ -3434,18 +3456,24 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, zvir = rvgas/rdgas - 1. !endif - p0 = (/ pi, pi/18. /) + ! p0 = (/ pi, pi/18. /) + + if (counter ==0) then + exppr = 1.5 + exppz = 2. + gamma = 0.007 + Ts0 = 302.15 + q00 = 0.021 + t00 = Ts0*(1.+zvir*q00) + exponent = rdgas*gamma/grav + ztrop = 15000. + zp = 7000. + dp = 1115. + u=0. + v=0. + pt=1. + endif - exppr = 1.5 - exppz = 2. - gamma = 0.007 - Ts0 = 302.15 - q00 = 0.021 - t00 = Ts0*(1.+zvir*q00) - exponent = rdgas*gamma/grav - ztrop = 15000. - zp = 7000. - dp = 1115. cor = 2.*omega*sin(p0(2)) !Coriolis at vortex center !Initialize winds separately on the D-grid @@ -3476,7 +3504,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, vtmp = utmp*d2 utmp = utmp*d1 - v(i,j,k) = utmp*inner_prod(e2,ex) + vtmp*inner_prod(e2,ey) + v(i,j,k) = v(i,j,k) + utmp*inner_prod(e2,ex) + vtmp*inner_prod(e2,ey) endif enddo @@ -3509,7 +3537,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, vtmp = utmp*d2 utmp = utmp*d1 - u(i,j,k) = utmp*inner_prod(e1,ex) + vtmp*inner_prod(e1,ey) + u(i,j,k) = u(i,j,k) + utmp*inner_prod(e1,ex) + vtmp*inner_prod(e1,ey) endif enddo @@ -3535,13 +3563,26 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, q(i,j,k,1) = q00*exp(-height/zq1)*exp(-(height/zq2)**exppz) p2(:) = agrid(i,j,1:2) r = great_circle_dist( p0, p2, radius ) - pt(i,j,k) = (T00-gamma*height)/(1.d0+zvir*q(i,j,k,1))/(1.d0+exppz*Rdgas*(T00-gamma*height)*height & - /(grav*zp**exppz*(1.d0-p00/dp*exp((r/rp)**exppr)*exp((height/zp)**exppz)))) + if (counter == 0) then + pt(i,j,k) = (T00-gamma*height)/(1.d0+zvir*q(i,j,k,1))/(1.d0+exppz*Rdgas*(T00-gamma*height)*height & + /(grav*zp**exppz*(1.d0-p00/dp*exp((r/rp)**exppr)*exp((height/zp)**exppz)))) + elseif (counter > 0 .and. r < 4*rp) then ! find a cleaner way + pt(i,j,k) = (T00-gamma*height)/(1.d0+zvir*q(i,j,k,1))/(1.d0+exppz*Rdgas*(T00-gamma*height)*height & + /(grav*zp**exppz*(1.d0-p00/dp*exp((r/rp)**exppr)*exp((height/zp)**exppz)))) + endif end if enddo enddo enddo + if (test_case == 58 .and. counter==0) then + p0(1) = -30. * pi / 180. + p0(2) = 10. * pi / 180. + counter=1 + if (is_master()) print*, 'Initializing second TC' + goto 58 + endif + !Note that this is already the moist pressure do j=js,je do i=is,ie @@ -3616,6 +3657,55 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, if (cl > 0 .and. 
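Note on the two-vortex logic (test_case 58): the initialization works by accumulating rather than overwriting the fields. Surface pressure starts from ps = ps_u = ps_v = p00, and each pass through label 58 subtracts another depression centred on the current p0,

   ps(i,j) = ps(i,j) - dp*exp(-(r/rp)**1.5)

while the D-grid winds are likewise incremented (u = u + ..., v = v + ...). The background state (u = 0, v = 0, pt = 1 and the DCMIP 5X constants exppr, exppz, gamma, Ts0, q00, ztrop, zp, dp) is set only inside the counter == 0 block, so the goto 58 re-entry for the Atlantic vortex at 30W, 10N does not wipe out the first vortex; on the second pass the temperature perturbation is applied only within r < 4*rp of the new centre. The tangential wind being projected onto the grid is the DCMIP 5X gradient-wind vortex; written out with exppr = 1.5 and exppz = 2 it reads

   vT(r,z) = -f*r/2 + sqrt( (f*r/2)**2
             - 1.5*(r/rp)**1.5 * Rdgas*(T00 - gamma*z)
               / ( 2.*z*Rdgas*(T00 - gamma*z)/(grav*zp**2)
                   + 1. - (p00/dp)*exp((r/rp)**1.5)*exp((z/zp)**2) ) )

with f = 2*omega*sin(lat of vortex centre) and T00 = Ts0*(1 + zvir*q00); the 1/d factor and the d1, d2 terms in the code effectively resolve vT into local eastward/northward components before projecting onto the D-grid unit vectors.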
cl2 > 0) then call terminator_tracers(is,ie,js,je,isd,ied,jsd,jed,npz, & q, delp,ncnst,agrid(isd:ied,jsd:jed,1),agrid(isd:ied,jsd:jed,2),bd) + endif + + itrac = get_tracer_index(MODEL_ATMOS, 'qdry') + if (itrac > 0) then + + do k=1,npz + do j=js,je + do i=is,ie + dum = 1. + do n=1,nwat + dum = dum - q(i,j,k,n) + enddo + q(i,j,k,itrac) = dum + enddo + enddo + enddo + + call mpp_update_domains(q,domain) + endif + + itrac = get_tracer_index(MODEL_ATMOS, 'qmoist') + if (itrac > 0) then + + do k=1,npz + do j=js,je + do i=is,ie + dum = 1. + do n=2,nwat + dum = dum - q(i,j,k,n) + enddo + q(i,j,k,itrac) = dum + enddo + enddo + enddo + + call mpp_update_domains(q,domain) + endif + + itrac = get_tracer_index(MODEL_ATMOS, 'qtotal') + if (itrac > 0) then + + do k=1,npz + do j=js,je + do i=is,ie + q(i,j,k,itrac) = 1. + enddo + enddo + enddo + call mpp_update_domains(q,domain) endif @@ -3623,8 +3713,6 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, call mp_update_dwinds(u, v, npx, npy, npz, domain, bd) - is_ideal_case = .true. - nullify(agrid) nullify(grid) @@ -4490,20 +4578,39 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, type(fv_flags_type), target :: flagstruct real, dimension(bd%is:bd%ie):: pm, qs - real, dimension(1:npz):: pk1, ts1, qs1 + real, dimension(1:npz):: ts1, qs1 + real, dimension(npz+1):: pk1, pe1 real :: us0 = 30. - real :: dist, r0, f0_const, prf, rgrav - real :: ptmp, ze, zc, zm, utmp, vtmp, xr, yr + real :: dist, dist0, r0, f0_const, prf, rgrav + real :: ptmp, ze, zc, zm, utmp, vtmp, xr, yr, tmp real :: t00, p00, xmax, xc, xx, yy, pk0, pturb, ztop real :: ze1(npz+1) - real:: dz1(npz) + real:: dz1(npz), qc1(npz), qv1(npz) real :: gz(bd%isd:bd%ied,bd%jsd:bd%jed,npz+1) real:: zvir real :: sigma, mu, amp, zint, zmid, qsum, pint, pmid real :: N2, N2b, th0, ths, pks, rkap, ampb, thl - real :: dz, thp, pp, zt, p_t, pkp - integer :: o3mr + real :: dz, thp, pp, zt, p_t, pkp, dlogp, logpb, lcl, tl, qt + integer :: o3mr, liq_wat + +!Test case 20 + real, dimension(npz+1) :: pe0, gz0, ue, ve, we, pte, qe + real :: d, dp, cor, exppr, exppz, gamma, Ts0, q00, exponent, ztrop, height, zp, rp + real :: qtrop, ttrop, zq1, zq2, r + real :: dum, dum1, dum2, dum3, dum4, dum5, dum6, uetmp, vetmp + real :: pe_u(bd%is:bd%ie,npz+1,bd%js:bd%je+1) + real :: pe_v(bd%is:bd%ie+1,npz+1,bd%js:bd%je) + real :: ps_u(bd%is:bd%ie,bd%js:bd%je+1) + real :: ps_v(bd%is:bd%ie+1,bd%js:bd%je) + + real(kind=R_GRID) :: p1(2), p2(2), p3(2), p4(2) + real(kind=R_GRID) :: e1(3), e2(3), ex(3), ey(3) + integer :: z + real(kind=R_GRID) :: p0(2) ! Temporary Point + real :: d1, d2 + integer :: i, j, k, m, icenter, jcenter + real, parameter :: hlv = 2.5e6 ! gfs: latent heat of evaporation real, pointer, dimension(:,:,:) :: agrid, grid real(kind=R_GRID), pointer, dimension(:,:) :: area @@ -4636,27 +4743,6 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, call hydro_eq(npz, is, ie, js, je, ps, phis, dry_mass, & delp, ak, bk, pt, delz, area, ng, .false., hydrostatic, hybrid_z, domain) - ! *** Add Initial perturbation *** - if (bubble_do) then - r0 = 100.*sqrt(dx_const**2 + dy_const**2) - icenter = npx/2 - jcenter = npy/2 - - do j=js,je - do i=is,ie - dist = (i-icenter)*dx_const*(i-icenter)*dx_const & - +(j-jcenter)*dy_const*(j-jcenter)*dy_const - dist = min(r0, sqrt(dist)) - do k=1,npz - prf = ak(k) + ps(i,j)*bk(k) - if ( prf > 100.E2 ) then - pt(i,j,k) = pt(i,j,k) + 2.0*(1. - (dist/r0)) * prf/ps(i,j) -! pt(i,j,k) = pt(i,j,k) + 0.01*(1. 
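Note on the optional diagnostic tracers initialized above: they are pure consistency checks on tracer transport. With sphum as tracer 1 and the condensate species occupying tracers 2..nwat (the usual FV3 ordering), they are set to

   qdry   = 1. - sum( q(n), n = 1..nwat )   ! dry-air mass fraction
   qmoist = 1. - sum( q(n), n = 2..nwat )   ! dry air plus vapour (gas-phase) mass fraction
   qtotal = 1.                              ! total mass fraction

so under a mass-conserving advection scheme each should stay smooth and sum-consistent, and qtotal should remain exactly 1; the mpp_update_domains calls simply fill the halos after initialization.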
- (dist/r0)) * prf/ps(i,j) - endif - enddo - enddo - enddo - endif if ( hydrostatic ) then call p_var(npz, is, ie, js, je, ptop, ptop_min, delp, delz, pt, ps, & pe, peln, pk, pkz, kappa, q, ng, ncnst, area, dry_mass, .false., .false., & @@ -4668,6 +4754,28 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, moist_phys, hydrostatic, nwat, domain, flagstruct%adiabatic, .true. ) endif + ! *** Add Initial perturbation *** + if (bubble_do) then + !r0 = 100.*sqrt(dx_const**2 + dy_const**2) + icenter = (npx-1)/2 + jcenter = (npy-1)/2 + zc = 1500. !1500 m center AGL + + do j=js,je + do i=is,ie + dist0 = (real(i-icenter)*dx_const/dt_rad)**2 + (real(j-jcenter)*dy_const/dt_rad)**2 + ze = 0. + do k=npz,1,-1 + zm = ze - 0.5*delz(i,j,k) ! layer center + ze = ze - delz(i,j,k) + dist = dist0 + ((zm-zc)/zc)**2 !1500 m height + dist = min(1.0,dist) + pt(i,j,k) = pt(i,j,k) + dt_amp*cos(0.5*pi*dist) + enddo + enddo + enddo + endif + q = 0. do k=1,npz do j=js,je @@ -4712,7 +4820,7 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, case ( 15 ) !--------------------------- -! Doubly periodic bubble +! Doubly periodic warm bubble !--------------------------- t00 = 250. @@ -4826,6 +4934,10 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, enddo enddo + call p_var(npz, is, ie, js, je, ptop, ptop_min, delp, delz, pt, ps, & + pe, peln, pk, pkz, kappa, q, ng, ncnst, area, dry_mass, .false., .false., & + moist_phys, .false., nwat, domain, flagstruct%adiabatic, .true.) + pturb = 15. xmax = 51.2E3 xc = xmax / 2. @@ -5011,13 +5123,13 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ! u-wind do j=js,je+1 do i=is,ie - u(i,j,k) = utmp - 8. + u(i,j,k) = utmp - 8. + Umean enddo enddo ! v-wind do j=js,je do i=is,ie+1 - v(i,j,k) = vtmp - 4. + v(i,j,k) = vtmp - 4. + Vmean enddo enddo enddo @@ -5128,7 +5240,7 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ! *** Add Initial perturbation (Gaussian) *** pturb = dt_amp - r0 = 10.e3 + r0 = dt_rad ! 10.e3 zc = 1.4e3 ! center of bubble from surface icenter = (npx-1)/2 + 1 jcenter = (npy-1)/2 + 1 @@ -5149,7 +5261,7 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ! *** Add Initial perturbation (Ellipse) *** pturb = dt_amp - r0 = 10.e3 + r0 = dt_rad ! 10.e3 zc = 1.4e3 ! center of bubble from surface icenter = (npx-1)/2 + 1 jcenter = (npy-1)/2 + 1 @@ -5158,7 +5270,7 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ptmp = ( (zm-zc)/zc ) **2 do j=js,je do i=is,ie - dist = ptmp+((i-icenter)*dx_const/r0)**2+((j-jcenter)*dy_const/r0)**2 + dist = ptmp+((i-icenter)*dx_const/r0)**2+((j-jcenter)*dy_const/(r0*8))**2 pt(i,j,k) = pt(i,j,k) + pturb*max(1.-sqrt(dist),0.) enddo enddo @@ -5166,6 +5278,246 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, endif + + + case ( 20 ) +!--------------------------------------------------------- +! Tropical cyclone +! adapted from case 55 - Joseph M. +!--------------------------------------------------------- + + !p0(1) = (0.) * pi / 180. + p0(1) = (-50.) * pi / 180. !weird physics IC (tsc) when this is around 0 + p0(2) = (flagstruct%deglat) * pi / 180. + + !original + !dp = 1115. + !rp = 100000. + + dp = dp_TC + rp = rp_TC + Ts0 = Ts_TC + + if (is_master()) print*, "Initializing TC (dp,rp):", dp, rp, & + "in a doubly periodic domain at: lon/lat (deg)", p0(1) * 180./pi, p0(2)*180. /pi + + p00 = 101500. 
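Note on the relocated bubble block above: it now perturbs pt with a smooth cosine profile rather than the old pressure-weighted linear ramp, and it runs after p_var, evidently so that delz is defined for computing layer heights in the non-hydrostatic case. In the notation of the loop,

   d2 = ((i-icenter)*dx_const/dt_rad)**2 + ((j-jcenter)*dy_const/dt_rad)**2 + ((zm-zc)/zc)**2
   pt(i,j,k) = pt(i,j,k) + dt_amp*cos(0.5*pi*min(1., d2))

with zc = 1500 m the bubble centre height AGL and zm the layer mid-point height accumulated from the surface, so the perturbation peaks at dt_amp (default 2.1 K) at the bubble centre and goes smoothly to zero where d2 reaches 1. The same dt_rad radius now also sets r0 for the Gaussian and ellipse perturbations further down, and the ellipse case is stretched in y (its y half-width becomes r0*8).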
+ + ps = p00 + + do j=js,je + do i=is,ie + p2(:) = agrid(i,j,1:2) + r = great_circle_dist( p0, p2, radius ) + ps(i,j) = p00 - dp*exp(-(r/rp)**1.5) + phis(i,j) = 0. + enddo + enddo + + call prt_maxmin('PS', ps(is:ie,js:je), is, ie, js, je, 0, 1, 0.01) + + ! Initialize delta-P + do z=1,npz + do j=js,je + do i=is,ie + delp(i,j,z) = ak(z+1)-ak(z) + ps(i,j)*(bk(z+1)-bk(z)) + enddo + enddo + enddo + + !Pressure + do j=js,je + do i=is,ie + pe(i,1,j) = ptop + enddo + do k=2,npz+1 + do i=is,ie + pe(i,k,j) = pe(i,k-1,j) + delp(i,j,k-1) + enddo + enddo + enddo + + !Pressure on v-grid and u-grid points + do j=js,je + do i=is,ie+1 + p2(:) = 0.5*(grid(i,j,1:2)+grid(i,j+1,1:2)) + r = great_circle_dist( p0, p2, radius ) + ps_v(i,j) = p00 - dp*exp(-(r/rp)**1.5) + enddo + enddo + do j=js,je+1 + do i=is,ie + p2(:) = 0.5*(grid(i,j,1:2)+grid(i+1,j,1:2)) + r = great_circle_dist( p0, p2, radius ) + ps_u(i,j) = p00 - dp*exp(-(r/rp)**1.5) + enddo + enddo + + !Pressure + do j=js,je + do i=is,ie+1 + pe_v(i,1,j) = ptop + enddo + do k=2,npz+1 + do i=is,ie+1 + pe_v(i,k,j) = ak(k) + ps_v(i,j)*bk(k) + enddo + enddo + enddo + do j=js,je+1 + do i=is,ie + pe_u(i,1,j) = ptop + enddo + do k=2,npz+1 + do i=is,ie + pe_u(i,k,j) = ak(k) + ps_u(i,j)*bk(k) + enddo + enddo + enddo + + !Everything else + !if (adiabatic) then + ! zvir = 0. + !else + zvir = rvgas/rdgas - 1. + !endif + + ! Use p0 from above + !p0 = (/ pi, pi/18. /) + + exppr = 1.5 + exppz = 2. + gamma = 0.007 + !Ts0 = 302.15 + q00 = 0.021 + t00 = Ts0*(1.+zvir*q00) + exponent = rdgas*gamma/grav + ztrop = 15000. + zp = 7000. + cor = 2.*omega*sin(p0(2)) !Coriolis at vortex center + + !Initialize winds separately on the D-grid + do j=js,je + do i=is,ie+1 + p1(:) = grid(i ,j ,1:2) + p2(:) = grid(i,j+1 ,1:2) + call mid_pt_sphere(p1, p2, p3) + call get_unit_vect2(p1, p2, e2) + call get_latlon_vector(p3, ex, ey) + + d1 = sin(p0(2))*cos(p3(2)) - cos(p0(2))*sin(p3(2))*cos(p3(1)-p0(1)) + d2 = cos(p0(2))*sin(p3(1)-p0(1)) + d = max(1.e-15,sqrt(d1**2+d2**2)) + + r = great_circle_dist( p0, p3, radius ) + + do k=1,npz + ptmp = 0.5*(pe_v(i,k,j)+pe_v(i,k+1,j)) + height = (t00/gamma)*(1.-(ptmp/ps_v(i,j))**exponent) + if (height > ztrop) then + v(i,j,k) = 0. + else + utmp = 1.d0/d*(-cor*r/2.d0+sqrt((cor*r/2.d0)**(2.d0) & + - exppr*(r/rp)**exppr*rdgas*(t00-gamma*height) & + /(exppz*height*rdgas*(t00-gamma*height)/(grav*zp**exppz) & + +(1.d0-p00/dp*exp((r/rp)**exppr)*exp((height/zp)**exppz))))) + vtmp = utmp*d2 + utmp = utmp*d1 + + v(i,j,k) = utmp*inner_prod(e2,ex) + vtmp*inner_prod(e2,ey) + + endif + enddo + enddo + enddo + do j=js,je+1 + do i=is,ie + p1(:) = grid(i, j,1:2) + p2(:) = grid(i+1,j,1:2) + call mid_pt_sphere(p1, p2, p3) + call get_unit_vect2(p1, p2, e1) + call get_latlon_vector(p3, ex, ey) + + d1 = sin(p0(2))*cos(p3(2)) - cos(p0(2))*sin(p3(2))*cos(p3(1)-p0(1)) + d2 = cos(p0(2))*sin(p3(1)-p0(1)) + d = max(1.e-15,sqrt(d1**2+d2**2)) + + r = great_circle_dist( p0, p3, radius ) + + do k=1,npz + ptmp = 0.5*(pe_u(i,k,j)+pe_u(i,k+1,j)) + height = (t00/gamma)*(1.-(ptmp/ps_u(i,j))**exponent) + if (height > ztrop) then + v(i,j,k) = 0. + else + utmp = 1.d0/d*(-cor*r/2.d0+sqrt((cor*r/2.d0)**(2.d0) & + - exppr*(r/rp)**exppr*rdgas*(t00-gamma*height) & + /(exppz*height*rdgas*(t00-gamma*height)/(grav*zp**exppz) & + +(1.d0-p00/dp*exp((r/rp)**exppr)*exp((height/zp)**exppz))))) + vtmp = utmp*d2 + utmp = utmp*d1 + + u(i,j,k) = utmp*inner_prod(e1,ex) + vtmp*inner_prod(e1,ey) + endif + enddo + + enddo + enddo + + qtrop = 1.e-11 + ttrop = t00 - gamma*ztrop + zq1 = 3000. + zq2 = 8000. 
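Note on the height/pressure relation used throughout case 20 (and in case 55 above): below the tropopause the background is a constant-lapse-rate, hydrostatically balanced moist profile, so the height of a pressure level is obtained by inverting

   p(z) = ps * (1. - gamma*z/T00)**(grav/(Rdgas*gamma))
   z(p) = (T00/gamma) * (1. - (p/ps)**(Rdgas*gamma/grav))      ! "exponent" = Rdgas*gamma/grav

with gamma = 0.007 K/m, T00 = Ts0*(1 + zvir*q00) the virtual surface temperature, and ps the local (vortex-perturbed) surface pressure on the A-, u-, or v-grid column in question; above ztrop = 15 km the winds are set to zero and the temperature and moisture are held at their tropopause values (Ttrop, qtrop).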
+ + q(:,:,:,:) = 0. + + do k=1,npz + do j=js,je + do i=is,ie + ptmp = 0.5*(pe(i,k,j)+pe(i,k+1,j)) + height = (t00/gamma)*(1.-(ptmp/ps(i,j))**exponent) + if (height > ztrop) then + q(i,j,k,1) = qtrop + pt(i,j,k) = Ttrop + else + q(i,j,k,1) = q00*exp(-height/zq1)*exp(-(height/zq2)**exppz) + p2(:) = agrid(i,j,1:2) + r = great_circle_dist( p0, p2, radius ) + pt(i,j,k) = (T00-gamma*height)/(1.d0+zvir*q(i,j,k,1))/(1.d0+exppz*Rdgas*(T00-gamma*height)*height & + /(grav*zp**exppz*(1.d0-p00/dp*exp((r/rp)**exppr)*exp((height/zp)**exppz)))) + end if + enddo + enddo + enddo + + + !Note that this is already the moist pressure + do j=js,je + do i=is,ie + ps(i,j) = pe(i,npz+1,j) + enddo + enddo + + if (.not.hydrostatic) then + do k=1,npz + do j=js,je + do i=is,ie + delz(i,j,k) = rdgas*pt(i,j,k)*(1.+zvir*q(i,j,k,1))/grav*log(pe(i,k,j)/pe(i,k+1,j)) + w(i,j,k) = 0.0 + enddo + enddo + enddo + endif + + call dtoa(u , v , ua, va, dx,dy,dxa,dya,dxc,dyc,npx, npy, ng, bd) + + call prt_maxmin('PS end', ps(is:ie,js:je), is, ie, js, je, 0, 1, 0.01) + call prt_maxmin('Delz', delz(is:ie,js:je,:), is, ie, js, je, 0, 1, 0.01) + + call p_var(npz, is, ie, js, je, ptop, ptop_min, delp, delz, pt, ps, & + pe, peln, pk, pkz, kappa, q, ng, ncnst, area, dry_mass, .false., .false., & + .true., hydrostatic, nwat, domain, flagstruct%adiabatic) + case ( 21 ) !--------------------------------------------------------- ! Mountain wave @@ -5297,8 +5649,8 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, pks = pk0 + amp*(1./ths - 1./th0) p_t = exp(1./kappa*log(pks)) - if (is_master()) write(*,'(I, 2F)') npz+1, ak(npz+1), bk(npz+1) - if (is_master()) write(*,'(2F)') ths*pk0, p_t + if (is_master()) write(*,'(I3, 2F11.3)') npz+1, ak(npz+1), bk(npz+1) + if (is_master()) write(*,'(2F11.3)') ths*pk0, p_t do k=npz,1,-1 ze = ze+dz @@ -5319,7 +5671,7 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, endif thp = ths pkp = pks - if (is_master()) write(*,'(I, 5F)') k, ak(k), bk(k), ak(k+1)-ak(k) + p00*(bk(k+1)-bk(k)), ths*pk0, pp + if (is_master()) write(*,'(I3, 5F11.3)') k, ak(k), bk(k), ak(k+1)-ak(k) + p00*(bk(k+1)-bk(k)), ths*pk0, pp enddo @@ -5527,9 +5879,226 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, enddo enddo + case ( 102 ) + + !No topography (simpler) + + t00 = 265. + N2 = 0.0 + N2b = 0.01**2 + p00 = 1.e5 + pk0 = exp(kappa*log(p00)) + th0 = t00/pk0 + !NOTE since lowest 100 m is well mixed amp is different + amp = -grav/(cp_air*th0) + ampb = grav*grav/(cp_air*N2b) + rkap = 1./kappa + + !Uniform 5 m grid spacing up to 300 m + dz = 5. + ze = 0.0 + zt = 300. + thp = th0 + pkp = pk0 + ak(npz+1) = 0.0 + bk(npz+1) = 1.0 + if (is_master()) print*, 'SBL Test case (102)' + if (is_master()) write(*,'(I3, 2F11.3)') npz+1, ak(npz+1), bk(npz+1) + ze1(npz+1) = ze + pk1(npz+1) = pk0 + pe1(npz+1) = p00 + + do k=npz,1,-1 + ze = ze+dz + ze1(k) = ze + if (ze >= 100.) then + ths = thp*exp(dz*N2b/grav) + pks = pkp + ampb*(1./ths - 1./thp) + else + ths = thp*exp(dz*N2/grav) + pks = pkp + amp*dz + endif + pk1(k) = pks + ts1(k) = ths*pks + pp = exp(1./kappa*log(pks)) + pe1(k) = pp + if (ze >= zt) then + ak(k) = pp + bk(k) = 0.0 + else + bk(k) = ((zt-ze)/zt)**2 + ak(k) = pp - bk(k)*p00 + endif + thp = ths + pkp = pks + + if (is_master()) write(*,'(I3, 6(2x,F11.3))') k, ak(k), bk(k), ak(k+1)-ak(k) + p00*(bk(k+1)-bk(k)), ths*pks, pp, ze1(k) + + enddo + + call mpp_sync() + + + phis = 0. + u = 8. !m/s + v = 0. + w = 0. 
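Note on the case-102 sounding above: it is built layer by layer from a constant Brunt-Vaisala frequency, using the routine's unnormalized Exner variable pk = p**kappa and potential temperature th = T/pk. Hydrostatic balance in these variables is d(pk)/dz = -grav/(cp_air*th), which integrates exactly for the two regimes used here:

   th(z2) = th(z1)*exp(N2*(z2-z1)/grav)
   pk(z2) = pk(z1) + (grav**2/(cp_air*N2)) * (1./th(z2) - 1./th(z1))   ! N2 = 0.01**2 above 100 m ("ampb")
   pk(z2) = pk(z1) - (grav/(cp_air*th0)) * (z2 - z1)                   ! N2 = 0 in the well-mixed lowest 100 m ("amp")

The vertical grid uses uniform 5 m spacing, with pure pressure levels (bk = 0) above zt = 300 m and bk = ((zt-ze)/zt)**2 below, so ak(k) = pe1(k) - bk(k)*p00 reproduces the reference column exactly when the surface pressure is p00.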
+ + do j=js,je + do i=is,ie + ps(i,j) = p00 + pe(i,npz+1,j) = p00 + pk(i,j,npz+1) = pk0 + peln(i,npz+1,j) = log(p00) + enddo + enddo + + do k=npz,1,-1 + do j=js,je + do i=is,ie + peln(i,k,j) = log(pe1(k)) + delp(i,j,k) = pe1(k+1) - pe1(k) + delz(i,j,k) = ze1(k+1) - ze1(k) + pe(i,k,j) = pe1(k) + pk(i,j,k) = pk1(k) + pkz(i,j,k) = delp(i,j,k)/(peln(i,k+1,j)-peln(i,k,j)) + pkz(i,j,k) = exp(kappa*log(pkz(i,j,k))) + pt(i,j,k) = ts1(k) + enddo + enddo + enddo + ptop = ak(1) + + call p_var(npz, is, ie, js, je, ptop, ptop_min, delp, delz, pt, ps, & + pe, peln, pk, pkz, kappa, q, ng, ncnst, area, dry_mass, .false., .false., & + moist_phys, hydrostatic, nwat, domain, flagstruct%adiabatic, .not. hydrostatic ) + + case ( 103 ) !DYCOMS II SCu + zvir = rvgas/rdgas - 1. + + sphum = get_tracer_index (MODEL_ATMOS, 'sphum') + liq_wat = get_tracer_index (MODEL_ATMOS, 'liq_wat') + if (liq_wat <= 0) call mpp_error(FATAL, "liq_wat not defined") + + !No topography (simpler) + + t00 = 289. + !N2 = 0.0 + !N2b = 0.01**2 + p00 = 101780. + pk0 = exp(kappa*log(p00)) + th0 = t00/pk0 + + !Variable grid spacing: 10 m near surface, 5m near cloud top + dz = 10. + ze = 0. + zt = 840. !cloud top + thp = th0 + pkp = pk0 + ak(npz+1) = 0.0 + bk(npz+1) = 1.0 + if (is_master()) print*, 'DYCOMS II SCu Test case (103)' + if (is_master()) write(*,'(I3, 2F11.3)') npz+1, ak(npz+1), bk(npz+1) + ze1(npz+1) = ze + pk1(npz+1) = pk0 + pe1(npz+1) = p00 + + do k=npz,1,-1 + dz1(k) = 10.-5.*sin(pi*ze/1600.)**8 + ze = ze+dz1(k) + ze1(k) = ze + enddo + + do k=1,npz + zmid = 0.5*(ze1(k)+ze1(k+1)) + lcl = 600. + !Temperature, moisture is in z-coordinates + if ( zmid > zt) then + tl = 297.5 + exp(log(zmid-zt)*1./3.) + qt = 1.5e-3 + qc1(k) = 0.0 + else + tl = t00 + qt = 9.e-3 + qc1(k) = max(0.0,0.45e-3*(zmid-lcl)/200.) + endif + qv1(k)= qt - qc1(k) + + !Liquid water temperature + !new constants! + ts1(k) = tl + (hlv*qc1(k) - grav*zmid)/cp_air + if (is_master()) write(*,'(I3, 4(2x,F11.3))') k, ts1(k), qv1(k), ze1(k), qc1(k) + enddo + + !Compute pressure, integrating upward + pp = p00 + do k=npz,1,-1 + dlogp = dz1(k)*grav/(Rdgas * ts1(k)*(1.+zvir*qv1(k))) + logpb = log(pp) + pp = exp(logpb - dlogp) + + if (ze1(k) >= zt) then + ak(k) = pp + bk(k) = 0.0 + else + bk(k) = ((zt-ze1(k))/zt)**2 + ak(k) = pp - bk(k)*p00 + endif + + pe1(k) = pp + pk1(k) = exp(kappa*log(pp)) + + if (is_master()) write(*,'(I3, 4(2x,F11.3))') k, ak(k), bk(k), pp, ze1(k) + + enddo + + call mpp_sync() + + + phis = 0. + u = 7. !m/s + v = -5.5 + w = 0. + + do j=js,je + do i=is,ie + ps(i,j) = p00 + pe(i,npz+1,j) = p00 + pk(i,j,npz+1) = pk0 + peln(i,npz+1,j) = log(p00) + enddo + enddo + + do k=npz,1,-1 + do j=js,je + do i=is,ie + peln(i,k,j) = log(pe1(k)) + delp(i,j,k) = pe1(k+1) - pe1(k) + delz(i,j,k) = ze1(k+1) - ze1(k) + pe(i,k,j) = pe1(k) + pk(i,j,k) = pk1(k) + pkz(i,j,k) = delp(i,j,k)/(peln(i,k+1,j)-peln(i,k,j)) + pkz(i,j,k) = exp(kappa*log(pkz(i,j,k))) + pt(i,j,k) = ts1(k) + q(i,j,k,sphum) = qv1(k) + q(i,j,k,liq_wat) = qc1(k) + enddo + enddo + enddo + ptop = ak(1) + + call p_var(npz, is, ie, js, je, ptop, ptop_min, delp, delz, pt, ps, & + pe, peln, pk, pkz, kappa, q, ng, ncnst, area, dry_mass, .false., .false., & + moist_phys, hydrostatic, nwat, domain, flagstruct%adiabatic, .not. hydrostatic ) + + end select - is_ideal_case = .true. + if (w_forcing) then + call init_w_forcing(bd, npx, npy, npz, flagstruct%grid_type, agrid, flagstruct) + endif + + flagstruct%is_ideal_case = .true. 
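Note on the case-103 column above: it follows a DYCOMS-II RF01-style specification, with a well-mixed layer (liquid-water temperature tl = 289 K, total water 9 g/kg) below the zt = 840 m inversion, tl = 297.5 + (z-zt)**(1./3.) K and 1.5 g/kg above it, and cloud water ramping up linearly above the 600 m LCL (0.45 g/kg per 200 m). Two relations in the loops are worth spelling out. The conversion from the specified liquid-water temperature tl to the absolute temperature stored in ts1 uses the shallow-layer approximation

   T(z) ~ tl(z) - (grav/cp_air)*z + (hlv/cp_air)*qc(z)

and the pressure column is then integrated hydrostatically upward with the virtual temperature,

   log(p(k)) = log(p(k+1)) - grav*dz1(k)/(Rdgas*ts1(k)*(1. + zvir*qv1(k)))

after which the hybrid coefficients are set as in case 102 (pure pressure levels above the inversion, bk = ((zt-ze1)/zt)**2 below). The layer thickness itself is stretched, dz1(k) = 10. - 5.*sin(pi*ze/1600.)**8, giving roughly 10 m spacing near the surface and 5 m near cloud top.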
nullify(grid) nullify(agrid) @@ -5575,7 +6144,8 @@ subroutine read_namelist_test_case_nml() integer :: ierr, f_unit, unit, ios namelist /test_case_nml/test_case, bubble_do, alpha, nsolitons, soliton_Umax, soliton_size, & - no_wind, gaussian_dt, dt_amp, do_marine_sounding, checker_tr, small_earth_scale, Umean + no_wind, gaussian_dt, dt_amp, dt_rad, do_marine_sounding, checker_tr, small_earth_scale, & + Umean, Vmean, w_forcing, dp_TC, rp_TC, Ts_TC #include @@ -6111,21 +6681,11 @@ subroutine SuperCell_Sounding_Marine(km, ps, pk1, tp, qp) ! if ( (is_master()) ) write(*,*) k, temp1, rh(k) if ( pk(k) > 0. ) then pp(k) = exp(log(pk(k))/kappa) -!#ifdef SUPER_K qs(k) = 380./pp(k)*exp(17.27*(temp1-273.)/(temp1-36.)) qs(k) = min( qv0, rh(k)*qs(k) ) - if ( (is_master()) ) write(*,*) 0.01*pp(k), qs(k) -!#else -! -!#ifdef USE_MIXED_TABLE -! qs(k) = min(qv0, rh(k)*mqs(temp1, pp(k), qs(k))) -!#else -! qs(k) = min(qv0, rh(k)*wqs(temp1, pp(k), qs(k))) -!#endif -! -!#endif + !if ( (is_master()) .and. n=1 ) write(*,*) 0.01*pp(k), qs(k) else - if ( (is_master()) ) write(*,*) n, k, pk(k) + !if ( (is_master()) ) write(*,*) n, k, pk(k) call mpp_error(FATAL, 'Super-Cell case: pk < 0') endif enddo @@ -6241,7 +6801,7 @@ subroutine Marine_Sounding(km, ps, pk1, tp, qp) #ifdef SUPER_K qs(k) = 380./pp(k)*exp(17.27*(temp1-273.)/(temp1-36.)) qs(k) = min( qv0, rh(k)*qs(k) ) - if ( (is_master()) ) write(*,*) 0.01*pp(k), qs(k) + !if ( (is_master()) ) write(*,*) 0.01*pp(k), qs(k) #else #ifdef USE_MIXED_TABLE @@ -6252,7 +6812,7 @@ subroutine Marine_Sounding(km, ps, pk1, tp, qp) #endif else - if ( (is_master()) ) write(*,*) n, k, pk(k) + !if ( (is_master()) ) write(*,*) n, k, pk(k) call mpp_error(FATAL, 'Super-Cell case: pk < 0') endif enddo @@ -7978,6 +8538,4 @@ subroutine sm1_edge(is, ie, js, je, km, i, j, ze, ntimes) end subroutine sm1_edge - - end module test_cases_mod diff --git a/tools/w_forcing.F90 b/tools/w_forcing.F90 new file mode 100644 index 000000000..a60b3c7e0 --- /dev/null +++ b/tools/w_forcing.F90 @@ -0,0 +1,180 @@ + +module w_forcing_mod + + use fv_arrays_mod, only: fv_grid_type, fv_flags_type, fv_grid_bounds_type, R_GRID + use mpp_domains_mod, only: mpp_update_domains, domain2d + use mpp_mod, only: mpp_error, FATAL, mpp_root_pe, mpp_broadcast, mpp_sum, mpp_sync + use fv_mp_mod, only: is_master + implicit none + public + + !settings + integer :: w_forcing_type = 101 + real :: w_forcing_L = 40000. !m + real :: w_forcing_R = 2000. !m --- ?!? + real :: w_forcing_D = 4000. !m, depth + real :: w_forcing_H = 0. + real :: w_forcing_start = 0.0 !s + real :: w_forcing_end = -1. !2400.0 !s + real :: w_forcing_a = 2.0 !acceleration, m/s**2 + + real :: w_forcing_Divg = 3.75e-6 !1/s + real :: w_forcing_tau = 3600. !s + + !saved data + real :: w_forcing_i0 + real :: w_forcing_j0 + + +contains + + subroutine init_w_forcing(bd, npx, npy, npz, grid_type, agrid, flagstruct)!, wft) + + type(fv_grid_bounds_type), intent(IN) :: bd + real , intent(IN) :: agrid(bd%isd:bd%ied, bd%jsd:bd%jed) + integer,intent(IN) :: npx, npy, npz, grid_type!, wft + type(fv_flags_type), target, intent(IN) :: flagstruct + + !w_forcing_type = wft + + if (grid_type == 4) then + + select case (w_forcing_type) + case(1) ! half-ellipse acceleration (Ziegler et al., 2010; Prein et al. 
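Note on the w_forcing_mod defaults above: they feed the two forcing modes implemented in do_w_forcing in the next hunk. For w_forcing_type = 1, the forcing is the half-ellipse updraft acceleration of the cited Ziegler/Prein squall-line setup: wherever |x - x0| <= w_forcing_L, the vertical velocity is accelerated by

   w = w + w_forcing_a * dt * max(0., 1. - sqrt(e))**2
   e = ((y - y0)/w_forcing_R)**2 + ((zm - w_forcing_H)/w_forcing_D)**2

with (x0, y0) the domain centre set in init_w_forcing and zm the layer mid-point height, i.e. an acceleration confined to an elliptical (y,z) cross-section extruded along x, and active only between w_forcing_start and w_forcing_end when w_forcing_end > 0 (the default of -1 leaves it always on).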
2021) + w_forcing_i0 = real(npx-1)*0.5 + w_forcing_j0 = real(npy-1)*0.5 + case default + + end select + + endif + + if (is_master()) print*, ' CALLING INIT_W_FORCING ', w_forcing_type, w_forcing_i0, w_forcing_j0 + + end subroutine init_w_forcing + + subroutine do_w_forcing(bd, npx, npy, npz, w, delz, phis, grid_type, agrid, domain, flagstruct, dt, time) + + implicit none + + type(fv_grid_bounds_type), intent(IN) :: bd + real , intent(INOUT) :: w(bd%isd:, bd%jsd:,1:) + real , intent(IN) :: delz(bd%is: , bd%js: ,1:) + real , intent(IN) :: phis(bd%isd:bd%ied, bd%jsd:bd%jed) + real , intent(IN) :: agrid(bd%isd:bd%ied, bd%jsd:bd%jed,2) + integer,intent(IN) :: npx, npy, npz, grid_type + real , intent(IN) :: dt, time + type(fv_flags_type), target, intent(IN) :: flagstruct + type(domain2d), intent(INOUT) :: domain + + real :: Htop(bd%is:bd%ie,bd%js:bd%je) !height at the top of the current layer + real :: rad,radm1,ht,xL,wls,forc,dttau,lev + + integer :: i,j,k + + integer :: is, ie, js, je + integer :: isd, ied, jsd, jed + + logical, SAVE :: first_time = .true. + + is = bd%is + ie = bd%ie + js = bd%js + je = bd%je + isd = bd%isd + ied = bd%ied + jsd = bd%jsd + jed = bd%jed + + if (grid_type < 4) then + call mpp_error(FATAL, "Not implemented for grid_type < 4 yet.") + endif + + if (w_forcing_end > 0) then + if (time < w_forcing_start .or. time > w_forcing_end) return + endif + + if (first_time .and. is_master()) print*, ' CALLING DO_W_FORCING ' + + if (grid_type == 4) then + + select case (w_forcing_type) + case(1) + + + do j=js,je + do i=is,ie + Htop(i,j) = phis(i,j) !Height above MSL + enddo + enddo + do k=npz,1,-1 + do j=js,je + do i=is,ie + Htop(i,j) = Htop(i,j) - delz(i,j,k) + + xL = abs(i-w_forcing_i0)*flagstruct%dx_const + if (xL <= w_forcing_L) then + rad = (j-w_forcing_j0)*flagstruct%dx_const + rad = rad*rad/(w_forcing_R*w_forcing_R) + ht = Htop(i,j) + 0.5*delz(i,j,k) - w_forcing_H + rad = rad + ht*ht/(w_forcing_D*w_forcing_D) + radm1 = max(1.-sqrt(rad),0.) + w(i,j,k) = w(i,j,k) + w_forcing_a*radm1*radm1*dt + endif + + enddo + enddo + enddo + + case(101) + !PBL simulations with specified divergence + !Nudging domain to w = Dz + !do not apply in sponge layer + + dttau=dt/w_forcing_tau + forc = 1./(1.+dttau) + do j=js,je + do i=is,ie + Htop(i,j) = -delz(i,j,npz)*0.5 + wls = -w_forcing_Divg*Htop(i,j) + w(i,j,npz) = (w(i,j,npz) + dttau*wls)*forc + enddo + enddo + do k=npz-1,3,-1 + do j=js,je + do i=is,ie + Htop(i,j) = Htop(i,j) - 0.5*(delz(i,j,k-1)+delz(i,j,k)) + wls = -w_forcing_Divg*Htop(i,j) + w(i,j,k) = (w(i,j,k) + dttau*wls)*forc + enddo + enddo + enddo + + if (first_time .and. is_master()) then + i=is + j=js + lev=-delz(i,j,npz)*0.5 + wls = -w_forcing_Divg*lev + print*, npz, wls, w(i,j,npz), dttau + do k=npz,3,-1 + lev = lev - 0.5*(delz(i,j,k-1)+delz(i,j,k)) + wls = -w_forcing_divg*lev + print*, k, wls, w(i,j,k) + enddo + endif + + case default + call mpp_error(FATAL, " Value of w_forcing_type not implemented.") + + end select + + end if + + call mpp_update_domains(w, domain) + + first_time = .false. + + end subroutine do_w_forcing + + +end module w_forcing_mod
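Note on w_forcing_type = 101 (the default): do_w_forcing relaxes the doubly-periodic domain toward the large-scale subsidence implied by a prescribed horizontal divergence, w_ls(z) = -w_forcing_Divg * z, using a time-implicit (backward-Euler) Newtonian relaxation that is stable for any dt/tau:

   w(i,j,k) = ( w(i,j,k) + (dt/w_forcing_tau)*w_ls(z_k) ) / ( 1. + dt/w_forcing_tau )

with defaults w_forcing_Divg = 3.75e-6 1/s (the DYCOMS-II RF01 large-scale divergence) and w_forcing_tau = 3600 s. The layer heights z_k are mid-point heights accumulated from the column's delz, the k loop deliberately stops at k = 3 so the top two layers are left untouched ("do not apply in sponge layer"), and the mpp_update_domains call at the end keeps w consistent across halos before the next dynamics step.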