diff --git a/Externals.cfg b/Externals.cfg
index a17f8e2ec6..d36ba61489 100644
--- a/Externals.cfg
+++ b/Externals.cfg
@@ -98,4 +98,4 @@ tag = v1.0.8
required = False
[externals_description]
-schema_version = 1.0.0
+schema_version = 1.0.0
\ No newline at end of file
diff --git a/bld/CLMBuildNamelist.pm b/bld/CLMBuildNamelist.pm
index dae7b5f7f0..9b0af55452 100755
--- a/bld/CLMBuildNamelist.pm
+++ b/bld/CLMBuildNamelist.pm
@@ -1577,6 +1577,7 @@ sub process_namelist_inline_logic {
setup_logic_glacier($opts, $nl_flags, $definition, $defaults, $nl, $envxml_ref);
setup_logic_dynamic_plant_nitrogen_alloc($opts, $nl_flags, $definition, $defaults, $nl, $physv);
setup_logic_luna($opts, $nl_flags, $definition, $defaults, $nl, $physv);
+ setup_logic_hillslope($opts, $nl_flags, $definition, $defaults, $nl);
setup_logic_o3_veg_stress_method($opts, $nl_flags, $definition, $defaults, $nl,$physv);
setup_logic_hydrstress($opts, $nl_flags, $definition, $defaults, $nl);
setup_logic_dynamic_roots($opts, $nl_flags, $definition, $defaults, $nl, $physv);
@@ -2706,6 +2707,8 @@ sub setup_logic_do_transient_pfts {
$cannot_be_true = "$var cannot be combined with use_cndv";
} elsif (&value_is_true($nl->get_value('use_fates'))) {
$cannot_be_true = "$var cannot be combined with use_fates";
+ } elsif (&value_is_true($nl->get_value('use_hillslope'))) {
+ $cannot_be_true = "$var cannot be combined with use_hillslope";
}
if ($cannot_be_true) {
@@ -2781,6 +2784,8 @@ sub setup_logic_do_transient_crops {
# do_transient_crops. However, this hasn't been tested, so to be safe,
# we are not allowing this combination for now.
$cannot_be_true = "$var has not been tested with FATES, so for now these two options cannot be combined";
+ } elsif (&value_is_true($nl->get_value('use_hillslope'))) {
+ $cannot_be_true = "$var cannot be combined with use_hillslope";
}
if ($cannot_be_true) {
@@ -2876,6 +2881,8 @@ sub setup_logic_do_transient_lakes {
if (&value_is_true($nl->get_value($var))) {
if (&value_is_true($nl->get_value('collapse_urban'))) {
$log->fatal_error("$var cannot be combined with collapse_urban");
+ } elsif (&value_is_true($nl->get_value('use_hillslope'))) {
+ $log->fatal_error("$var cannot be combined with use_hillslope");
}
if ($n_dom_pfts > 0 || $n_dom_landunits > 0 || $toosmall_soil > 0 || $toosmall_crop > 0 || $toosmall_glacier > 0 || $toosmall_lake > 0 || $toosmall_wetland > 0 || $toosmall_urban > 0) {
$log->fatal_error("$var cannot be combined with any of the of the following > 0: n_dom_pfts > 0, n_dom_landunit > 0, toosmall_soil > 0._r8, toosmall_crop > 0._r8, toosmall_glacier > 0._r8, toosmall_lake > 0._r8, toosmall_wetland > 0._r8, toosmall_urban > 0._r8");
@@ -2939,6 +2946,8 @@ sub setup_logic_do_transient_urban {
if (&value_is_true($nl->get_value($var))) {
if (&value_is_true($nl->get_value('collapse_urban'))) {
$log->fatal_error("$var cannot be combined with collapse_urban");
+ } elsif (&value_is_true($nl->get_value('use_hillslope'))) {
+ $log->fatal_error("$var cannot be combined with use_hillslope");
}
if ($n_dom_pfts > 0 || $n_dom_landunits > 0 || $toosmall_soil > 0 || $toosmall_crop > 0 || $toosmall_glacier > 0 || $toosmall_lake > 0 || $toosmall_wetland > 0 || $toosmall_urban > 0) {
$log->fatal_error("$var cannot be combined with any of the of the following > 0: n_dom_pfts > 0, n_dom_landunit > 0, toosmall_soil > 0._r8, toosmall_crop > 0._r8, toosmall_glacier > 0._r8, toosmall_lake > 0._r8, toosmall_wetland > 0._r8, toosmall_urban > 0._r8");
@@ -3268,12 +3277,8 @@ sub setup_logic_hydrology_switches {
add_default($opts, $nl_flags->{'inputdata_rootdir'}, $definition, $defaults, $nl, 'use_subgrid_fluxes');
add_default($opts, $nl_flags->{'inputdata_rootdir'}, $definition, $defaults, $nl, 'snow_cover_fraction_method');
my $subgrid = $nl->get_value('use_subgrid_fluxes' );
- my $origflag = $nl->get_value('origflag' );
my $h2osfcflag = $nl->get_value('h2osfcflag' );
my $scf_method = $nl->get_value('snow_cover_fraction_method');
- if ( $origflag == 1 && &value_is_true($subgrid) ) {
- $log->fatal_error("if origflag is ON, use_subgrid_fluxes can NOT also be on!");
- }
if ( $h2osfcflag == 1 && ! &value_is_true($subgrid) ) {
$log->fatal_error("if h2osfcflag is ON, use_subgrid_fluxes can NOT be off!");
}
@@ -3297,9 +3302,6 @@ sub setup_logic_hydrology_switches {
if ( defined($use_vic) && defined($lower) && (&value_is_true($use_vic)) && $lower != 3 && $lower != 4) {
$log->fatal_error( "If use_vichydro is on -- lower_boundary_condition can only be table or aquifer" );
}
- if ( defined($origflag) && defined($use_vic) && (&value_is_true($use_vic)) && $origflag == 1 ) {
- $log->fatal_error( "If use_vichydro is on -- origflag can NOT be equal to 1" );
- }
if ( defined($h2osfcflag) && defined($lower) && $h2osfcflag == 0 && $lower != 4 ) {
$log->fatal_error( "If h2osfcflag is 0 lower_boundary_condition can only be aquifer" );
}
@@ -3481,6 +3483,28 @@ sub setup_logic_luna {
#-------------------------------------------------------------------------------
+sub setup_logic_hillslope {
+ #
+ # Hillslope model
+ #
+ my ($opts, $nl_flags, $definition, $defaults, $nl) = @_;
+
+ add_default($opts, $nl_flags->{'inputdata_rootdir'}, $definition, $defaults, $nl, 'use_hillslope' );
+ add_default($opts, $nl_flags->{'inputdata_rootdir'}, $definition, $defaults, $nl, 'downscale_hillslope_meteorology' );
+ add_default($opts, $nl_flags->{'inputdata_rootdir'}, $definition, $defaults, $nl, 'hillslope_head_gradient_method' );
+ add_default($opts, $nl_flags->{'inputdata_rootdir'}, $definition, $defaults, $nl, 'hillslope_transmissivity_method' );
+ add_default($opts, $nl_flags->{'inputdata_rootdir'}, $definition, $defaults, $nl, 'hillslope_pft_distribution_method' );
+ add_default($opts, $nl_flags->{'inputdata_rootdir'}, $definition, $defaults, $nl, 'hillslope_soil_profile_method' );
+ add_default($opts, $nl_flags->{'inputdata_rootdir'}, $definition, $defaults, $nl, 'use_hillslope_routing', 'use_hillslope'=>$nl_flags->{'use_hillslope'} );
+ my $use_hillslope = $nl->get_value('use_hillslope');
+ my $use_hillslope_routing = $nl->get_value('use_hillslope_routing');
+ if ( (! &value_is_true($use_hillslope)) && &value_is_true($use_hillslope_routing) ) {
+ $log->fatal_error("Cannot turn on use_hillslope_routing when use_hillslope is off\n" );
+ }
+}
+
+#-------------------------------------------------------------------------------
+
sub setup_logic_hydrstress {
#
# Plant hydraulic stress model
@@ -4209,7 +4233,6 @@ sub setup_logic_soil_resis {
add_default($opts, $nl_flags->{'inputdata_rootdir'}, $definition, $defaults, $nl, 'soil_resis_method' );
}
-#-------------------------------------------------------------------------------
sub setup_logic_canopyfluxes {
#
@@ -4585,6 +4608,7 @@ sub write_output_files {
# CLM component
my @groups;
+
@groups = qw(clm_inparm ndepdyn_nml popd_streams urbantv_streams light_streams
soil_moisture_streams lai_streams atm2lnd_inparm lnd2atm_inparm clm_canopyhydrology_inparm cnphenology
cropcal_streams
@@ -4594,7 +4618,7 @@ sub write_output_files {
soilhydrology_inparm luna friction_velocity mineral_nitrogen_dynamics
soilwater_movement_inparm rooting_profile_inparm
soil_resis_inparm bgc_shared canopyfluxes_inparm aerosol
- clmu_inparm clm_soilstate_inparm clm_nitrogen clm_snowhydrology_inparm
+ clmu_inparm clm_soilstate_inparm clm_nitrogen clm_snowhydrology_inparm hillslope_hydrology_inparm hillslope_properties_inparm
cnprecision_inparm clm_glacier_behavior crop_inparm irrigation_inparm
surfacealbedo_inparm water_tracers_inparm tillage_inparm);
diff --git a/bld/namelist_files/namelist_defaults_ctsm.xml b/bld/namelist_files/namelist_defaults_ctsm.xml
index d3b3cc9715..3ef6615b73 100644
--- a/bld/namelist_files/namelist_defaults_ctsm.xml
+++ b/bld/namelist_files/namelist_defaults_ctsm.xml
@@ -611,6 +611,18 @@ attributes from the config_cache.xml file (with keys converted to upper-case).
-6.d+2
-6.d+1
+
+
+.false.
+.false.
+.false.
+.false.
+Darcy
+LayerSum
+Standard
+Uniform
+.true.
+
.false.
.true.
diff --git a/bld/namelist_files/namelist_definition_ctsm.xml b/bld/namelist_files/namelist_definition_ctsm.xml
index 3e3735b903..0469af2344 100644
--- a/bld/namelist_files/namelist_definition_ctsm.xml
+++ b/bld/namelist_files/namelist_definition_ctsm.xml
@@ -800,6 +800,41 @@ LUNA operates on C3 and non-crop vegetation (see vcmax_opt for how other veg is
LUNA: Leaf Utilization of Nitrogen for Assimilation
+
+Toggle to turn on the hillslope model
+
+
+
+Toggle to turn on meteorological downscaling in hillslope model
+
+
+
+Toggle to turn on surface water routing in the hillslope hydrology model
+
+
+
+Method for calculating hillslope saturated head gradient
+
+
+
+Method for calculating transmissivity of hillslope columns
+
+
+
+Method for distributing pfts across hillslope columns
+
+
+
+Method for distributing soil thickness across hillslope columns
+
+
Toggle to turn on the plant hydraulic stress model
@@ -2499,12 +2534,6 @@ If surface water is active or not
(deprecated -- will be removed)
-
-Use original CLM4 soil hydraulic properties
-(deprecated -- will be removed)
-
-
diff --git a/bld/unit_testers/build-namelist_test.pl b/bld/unit_testers/build-namelist_test.pl
index 9b579dd9ce..58b3056ef8 100755
--- a/bld/unit_testers/build-namelist_test.pl
+++ b/bld/unit_testers/build-namelist_test.pl
@@ -811,21 +811,6 @@ sub cat_and_create_namelistinfile {
GLC_TWO_WAY_COUPLING=>"FALSE",
phys=>"clm4_5",
},
- "-vic with origflag=1" =>{ options=>"-vichydro -envxml_dir .",
- namelst=>"origflag=1",
- GLC_TWO_WAY_COUPLING=>"FALSE",
- phys=>"clm4_5",
- },
- "l_bnd=flux with origflag=0"=>{ options=>"-envxml_dir .",
- namelst=>"origflag=0, lower_boundary_condition=1",
- GLC_TWO_WAY_COUPLING=>"FALSE",
- phys=>"clm4_5",
- },
- "l_bnd=zflux with origflag=0"=>{ options=>"-envxml_dir .",
- namelst=>"origflag=0, lower_boundary_condition=2",
- GLC_TWO_WAY_COUPLING=>"FALSE",
- phys=>"clm4_5",
- },
"bedrock with l_bnc=flux" =>{ options=>"-envxml_dir .",
namelst=>"use_bedrock=.true., lower_boundary_condition=1",
GLC_TWO_WAY_COUPLING=>"FALSE",
diff --git a/cime_config/testdefs/testlist_clm.xml b/cime_config/testdefs/testlist_clm.xml
index 4761a2111f..c3341952bb 100644
--- a/cime_config/testdefs/testlist_clm.xml
+++ b/cime_config/testdefs/testlist_clm.xml
@@ -3308,8 +3308,8 @@
-
-
+
+
@@ -3451,4 +3451,45 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/cime_config/testdefs/testmods_dirs/clm/Hillslope/include_user_mods b/cime_config/testdefs/testmods_dirs/clm/Hillslope/include_user_mods
new file mode 100644
index 0000000000..fe0e18cf88
--- /dev/null
+++ b/cime_config/testdefs/testmods_dirs/clm/Hillslope/include_user_mods
@@ -0,0 +1 @@
+../default
diff --git a/cime_config/testdefs/testmods_dirs/clm/Hillslope/shell_commands b/cime_config/testdefs/testmods_dirs/clm/Hillslope/shell_commands
new file mode 100644
index 0000000000..6f3602d2e6
--- /dev/null
+++ b/cime_config/testdefs/testmods_dirs/clm/Hillslope/shell_commands
@@ -0,0 +1,4 @@
+./xmlchange CLM_BLDNML_OPTS="-bgc sp"
+DIN_LOC_ROOT=$(./xmlquery --value DIN_LOC_ROOT)
+meshfile=$DIN_LOC_ROOT/lnd/clm2/testdata/ESMFmesh_10x15_synthetic_cosphill_1.0.nc
+./xmlchange ATM_DOMAIN_MESH=${meshfile},LND_DOMAIN_MESH=${meshfile}
diff --git a/cime_config/testdefs/testmods_dirs/clm/Hillslope/user_nl_clm b/cime_config/testdefs/testmods_dirs/clm/Hillslope/user_nl_clm
new file mode 100644
index 0000000000..afdcf4d1fc
--- /dev/null
+++ b/cime_config/testdefs/testmods_dirs/clm/Hillslope/user_nl_clm
@@ -0,0 +1,11 @@
+use_hillslope = .true.
+use_hillslope_routing = .true.
+downscale_hillslope_meteorology = .false.
+hillslope_head_gradient_method = 'Darcy'
+hillslope_transmissivity_method = 'LayerSum'
+hillslope_pft_distribution_method = 'PftLowlandUpland'
+hillslope_soil_profile_method = 'Uniform'
+
+fsurdat = '$DIN_LOC_ROOT/lnd/clm2/testdata/surfdata_10x15_78pfts_simyr2000_synthetic_cosphill_1.3.nc'
+
+use_ssre = .false.
diff --git a/cime_config/testdefs/testmods_dirs/clm/HillslopeC/include_user_mods b/cime_config/testdefs/testmods_dirs/clm/HillslopeC/include_user_mods
new file mode 100644
index 0000000000..fa2e50a80d
--- /dev/null
+++ b/cime_config/testdefs/testmods_dirs/clm/HillslopeC/include_user_mods
@@ -0,0 +1 @@
+../Hillslope
diff --git a/cime_config/testdefs/testmods_dirs/clm/HillslopeC/user_nl_clm b/cime_config/testdefs/testmods_dirs/clm/HillslopeC/user_nl_clm
new file mode 100644
index 0000000000..10450766d0
--- /dev/null
+++ b/cime_config/testdefs/testmods_dirs/clm/HillslopeC/user_nl_clm
@@ -0,0 +1,7 @@
+! Various hillslope options not exercised by other testmods
+use_hillslope_routing = .false.
+downscale_hillslope_meteorology = .true.
+hillslope_head_gradient_method = 'Kinematic'
+hillslope_transmissivity_method = 'Uniform'
+hillslope_pft_distribution_method = 'DominantPftUniform'
+hillslope_soil_profile_method = 'SetLowlandUpland'
diff --git a/cime_config/testdefs/testmods_dirs/clm/HillslopeD/include_user_mods b/cime_config/testdefs/testmods_dirs/clm/HillslopeD/include_user_mods
new file mode 100644
index 0000000000..fa2e50a80d
--- /dev/null
+++ b/cime_config/testdefs/testmods_dirs/clm/HillslopeD/include_user_mods
@@ -0,0 +1 @@
+../Hillslope
diff --git a/cime_config/testdefs/testmods_dirs/clm/HillslopeD/user_nl_clm b/cime_config/testdefs/testmods_dirs/clm/HillslopeD/user_nl_clm
new file mode 100644
index 0000000000..04a2332df7
--- /dev/null
+++ b/cime_config/testdefs/testmods_dirs/clm/HillslopeD/user_nl_clm
@@ -0,0 +1,3 @@
+! Various hillslope options not exercised by other testmods
+hillslope_pft_distribution_method = 'DominantPftLowland'
+hillslope_soil_profile_method = 'Linear'
diff --git a/cime_config/testdefs/testmods_dirs/clm/HillslopeFromFile/include_user_mods b/cime_config/testdefs/testmods_dirs/clm/HillslopeFromFile/include_user_mods
new file mode 100644
index 0000000000..fa2e50a80d
--- /dev/null
+++ b/cime_config/testdefs/testmods_dirs/clm/HillslopeFromFile/include_user_mods
@@ -0,0 +1 @@
+../Hillslope
diff --git a/cime_config/testdefs/testmods_dirs/clm/HillslopeFromFile/user_nl_clm b/cime_config/testdefs/testmods_dirs/clm/HillslopeFromFile/user_nl_clm
new file mode 100644
index 0000000000..7be761eccc
--- /dev/null
+++ b/cime_config/testdefs/testmods_dirs/clm/HillslopeFromFile/user_nl_clm
@@ -0,0 +1,2 @@
+hillslope_pft_distribution_method = 'FromFile'
+hillslope_soil_profile_method = 'FromFile'
diff --git a/cime_config/testdefs/testmods_dirs/clm/oldhyd/user_nl_clm b/cime_config/testdefs/testmods_dirs/clm/oldhyd/user_nl_clm
index 351bce0a82..5ef1fc660a 100644
--- a/cime_config/testdefs/testmods_dirs/clm/oldhyd/user_nl_clm
+++ b/cime_config/testdefs/testmods_dirs/clm/oldhyd/user_nl_clm
@@ -1,4 +1,3 @@
snow_cover_fraction_method = 'NiuYang2007'
h2osfcflag = 0
- origflag = 1
use_subgrid_fluxes = .false.
diff --git a/doc/ChangeLog b/doc/ChangeLog
index 9659db297a..395ab0d929 100644
--- a/doc/ChangeLog
+++ b/doc/ChangeLog
@@ -64,6 +64,231 @@ Other details
Pull Requests that document the changes (include PR ids):
https://github.com/ESCOMP/ctsm/pull/2355
+===============================================================
+===============================================================
+Tag name: ctsm5.1.dev170
+Originator(s): samrabin (Sam Rabin, UCAR/TSS, samrabin@ucar.edu)
+Date: Wed Feb 28 11:01:43 MST 2024
+One-line Summary: Add hillslope hydrology
+
+Purpose and description of changes
+----------------------------------
+
+Changes include multiple soil columns per vegetated landunit, additional meteorological downscaling, new subsurface lateral flow equations, and a hillslope routing parameterization.
+
+Described in:
+Swenson, S. C., Clark, M., Fan, Y., Lawrence, D. M., & Perket, J. (2019). Representing intra-hillslope lateral subsurface flow in the community land model. Journal of Advances in Modeling Earth Systems, 11, 4044–4065. https://doi.org/10.1029/2019MS001833
+
+
+Significant changes to scientifically-supported configurations
+--------------------------------------------------------------
+
+Does this tag change answers significantly for any of the following physics configurations?
+(Details of any changes will be given in the "Answer changes" section below.)
+
+ [Put an [X] in the box for any configuration with significant answer changes.]
+
+[ ] clm5_1
+
+[ ] clm5_0
+
+[ ] ctsm5_0-nwp
+
+[ ] clm4_5
+
+
+Notes of particular relevance for developers:
+---------------------------------------------
+
+Changes to tests or testing:
+* oldhyd test changes answers due to removal of origflag parameter
+* Adds several hillslope-specific tests
+
+
+Testing summary:
+----------------
+
+ regular tests (aux_clm: https://github.com/ESCOMP/CTSM/wiki/System-Testing-Guide#pre-merge-system-testing):
+
+ derecho ----- DIFF
+ izumi ------- DIFF
+
+
+Answer changes
+--------------
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: all
+ - what platforms/compilers: all
+ - nature of change: roundoff
+
+ If bitwise differences were observed, how did you show they were no worse
+ than roundoff? Roundoff differences means one or more lines of code change results
+ only by roundoff level (because order of operation changes for example). Roundoff
+ changes to state fields usually grow to greater than roundoff as the simulation progresses.
+ * FSDS answers change due to rounding differences, since the history field now uses a column-level variable instead of a gridcell-level one. Note that this is JUST the history field that's affected, which is why there are no diffs in any other variable. (Confirmed using branch at https://github.com/samsrabin/CTSM/tree/hillslope-revert-fsds-diffs.)
+ * The origflag parameter (used to reproduce CLM4 behavior) was removed, so anything using that will break. This includes the oldhyd test.
+
+
+Other details
+-------------
+
+Pull Requests that document the changes (include PR ids):
+* ESCOMP/CTSM#1715: Hillslope hydrology (https://github.com/ESCOMP/CTSM/pull/1715)
+* ESCOMP/CTSM#2390: Hillslope merge (https://github.com/ESCOMP/CTSM/pull/2390)
+
+===============================================================
+===============================================================
+Tag name: ctsm5.1.dev169
+Originator(s): samrabin (Sam Rabin, UCAR/TSS, samrabin@ucar.edu)
+Date: Thu 22 Feb 2024 09:42:57 AM MST
+One-line Summary: Merge b4b-dev
+
+Purpose and description of changes
+----------------------------------
+
+Brings in 3 PRs from b4b-dev to master:
+- Do not crash "make all" even if pylint isn't clean (ESCOMP/CTSM#2353; Sam Rabin)
+- Resolve pylint issues (ESCOMP/CTSM#2354; Sam Rabin)
+- Move FSURDATMODIFYCTSM test to Derecho (ESCOMP/CTSM#2364; Sam Rabin)
+
+Significant changes to scientifically-supported configurations
+--------------------------------------------------------------
+
+Does this tag change answers significantly for any of the following physics configurations?
+(Details of any changes will be given in the "Answer changes" section below.)
+
+[ ] clm5_1
+
+[ ] clm5_0
+
+[ ] ctsm5_0-nwp
+
+[ ] clm4_5
+
+
+Bugs fixed
+----------
+
+CTSM issues fixed:
+- Fixes ESCOMP/CTSM#2255: make lint is not clean in ctsm5.1.dev152
+- Fixes ESCOMP/CTSM#2316: "make all" doesn't run black if lint fails
+- Fixes ESCOMP/CTSM#2362: FSURDATMODIFYCTSM test should be moved to Derecho or Izumi
+
+
+Notes of particular relevance for developers:
+---------------------------------------------
+
+Changes to tests or testing:
+- FSURDATMODIFYCTSM test changed from derecho_intel (didn't work in debug mode) to derecho_gnu. I.e., from
+ FSURDATMODIFYCTSM_D_Mmpi-serial_Ld1.5x5_amazon.I2000Clm50SpRs.derecho_intel
+ to
+ FSURDATMODIFYCTSM_D_Mmpi-serial_Ld1.5x5_amazon.I2000Clm50SpRs.derecho_gnu
+
+
+Testing summary:
+----------------
+
+ [PASS means all tests PASS; OK means tests PASS other than expected fails.]
+
+ regular tests (aux_clm: https://github.com/ESCOMP/CTSM/wiki/System-Testing-Guide#pre-merge-system-testing):
+
+ derecho ----- OK
+ izumi ------- OK
+
+ any other testing (give details below):
+ - "make all" in python/ is clean.
+
+
+Other details
+-------------
+
+Pull Requests that document the changes (include PR ids):
+- ESCOMP/CTSM#2353: Do not crash "make all" even if pylint isn't clean (https://github.com/ESCOMP/CTSM/pull/2353)
+- ESCOMP/CTSM#2354: Resolve pylint issues (https://github.com/ESCOMP/CTSM/pull/2354)
+- ESCOMP/CTSM#2364: Move FSURDATMODIFYCTSM test to Derecho (https://github.com/ESCOMP/CTSM/pull/2364)
+
+===============================================================
+===============================================================
+Tag name: ctsm5.1.dev168
+Originator(s): slevis (Samuel Levis,UCAR/TSS,303-665-1310)
+Date: Fri 16 Feb 2024 01:27:41 PM MST
+One-line Summary: Remove a source of negative snocan in CanopyFluxesMod
+
+Purpose and description of changes
+----------------------------------
+
+In ctsm5.2 testing, this test
+LWISO_Ld10.f10_f10_mg37.I2000Clm50BgcCrop.derecho_gnu.clm-coldStart
+complained of a tiny negative ice1_grc tracer not matching the bulk
+value. My troubleshooting led me to more than tiny negative snocan
+originating in a line of code that this PR now changes to prevent
+negative values.
+
+Significant changes to scientifically-supported configurations
+--------------------------------------------------------------
+
+Does this tag change answers significantly for any of the following physics configurations?
+(Details of any changes will be given in the "Answer changes" section below.)
+
+ [Put an [X] in the box for any configuration with significant answer changes.]
+
+[ ] clm5_1
+
+[ ] clm5_0
+
+[ ] ctsm5_0-nwp
+
+[ ] clm4_5
+
+
+Bugs fixed
+----------
+CTSM issues fixed (include CTSM Issue #):
+Fixes #2366
+
+Notes of particular relevance for developers:
+---------------------------------------------
+Caveats for developers (e.g., code that is duplicated that requires double maintenance):
+ It was suggested at the ctsm software meeting yesterday that, in addition to
+ including "max(0._r8," in this line of code, I reorder the code
+ by bringing "liqcan(p) =" before "snocan(p) =". I have decided against this
+ because the existing order repeats in a following paragraph of code right
+ after this one. It's likely that the group's suggestion would have worked, but
+ I did not want to delay this PR for a longer evaluation because CTSM5.2 is
+ waiting for this merge, in order to proceed with next steps.
+
+
+Testing summary:
+----------------
+
+ regular tests (aux_clm: https://github.com/ESCOMP/CTSM/wiki/System-Testing-Guide#pre-merge-system-testing):
+
+ derecho ----- OK
+ izumi ------- OK
+
+
+Answer changes
+--------------
+
+Changes answers relative to baseline: YES
+
+ Summarize any changes to answers, i.e.,
+ - what code configurations: all
+ - what platforms/compilers: all
+ - nature of change: roundoff
+ A short test, e.g.
+ SMS_Ln9.ne30pg2_ne30pg2_mg17.I1850Clm50Sp.derecho_intel.clm-clm50cam6LndTuningMode
+ has these maximum differences:
+RMS H2OCAN 4.7359E-19 NORMALIZED 4.0163E-18
+RMS SNOCAN 4.4873E-19 NORMALIZED 9.1036E-18
+ while the differences grow in longer tests.
+
+Other details
+-------------
+Pull Requests that document the changes (include PR ids):
+ https://github.com/ESCOMP/ctsm/pull/2371
+
===============================================================
===============================================================
Tag name: ctsm5.1.dev167
diff --git a/doc/ChangeSum b/doc/ChangeSum
index ee031b2fd8..32b60a502f 100644
--- a/doc/ChangeSum
+++ b/doc/ChangeSum
@@ -1,6 +1,9 @@
Tag Who Date Summary
============================================================================================================================
ctsm5.1.dev171 slevis 03/01/2024 Set initial t_soisno=272 for soils and 274K for urban road
+ ctsm5.1.dev170 samrabin 02/28/2024 Add hillslope hydrology
+ ctsm5.1.dev169 samrabin 02/22/2024 Merge b4b-dev
+ ctsm5.1.dev168 slevis 02/16/2024 Remove a source of negative snocan in CanopyFluxesMod
ctsm5.1.dev167 samrabin 02/08/2024 Delete _FillValue and history from parameter files
ctsm5.1.dev166 multiple 01/24/2024 BFB merge tag
ctsm5.1.dev165 slevis 01/19/2024 Turn Meier2022, tillage, residue removal on for ctsm5.1, fix #2212
diff --git a/python/Makefile b/python/Makefile
index 271e977046..b43e1c5e53 100644
--- a/python/Makefile
+++ b/python/Makefile
@@ -19,7 +19,7 @@ ifneq ($(verbose), not-set)
endif
PYLINT=pylint
-PYLINT_ARGS=-j 4 --rcfile=ctsm/.pylintrc
+PYLINT_ARGS=-j 4 --rcfile=ctsm/.pylintrc --fail-under=0
PYLINT_SRC = \
ctsm
# NOTE: These don't pass pylint checking and should be added when we put into effort to get them to pass
@@ -27,7 +27,7 @@ PYLINT_SRC = \
# ../cime_config/buildlib \
# ../cime_config/buildnml
-all: test lint black
+all: test black lint
@echo
@echo
@echo "Successfully ran all standard tests"
diff --git a/python/ctsm/.pylintrc b/python/ctsm/.pylintrc
index 2087913e8a..ceff04c7d8 100644
--- a/python/ctsm/.pylintrc
+++ b/python/ctsm/.pylintrc
@@ -436,7 +436,10 @@ good-names=i,
_,
# --- default list is above here, our own list is below here ---
# Allow logger as a global name in each module, because this seems to follow general recommended convention:
- logger
+ logger,
+# Allow these names, which are commonly used in matplotlib instructions
+ ax,
+ im
# Include a hint for the correct naming format with invalid-name.
include-naming-hint=no
diff --git a/python/ctsm/crop_calendars/check_constant_vars.py b/python/ctsm/crop_calendars/check_constant_vars.py
new file mode 100644
index 0000000000..aa25a412fe
--- /dev/null
+++ b/python/ctsm/crop_calendars/check_constant_vars.py
@@ -0,0 +1,385 @@
+"""
+For variables that should stay constant, make sure they are
+"""
+
+import numpy as np
+from ctsm.crop_calendars.cropcal_module import import_rx_dates
+
+
+def check_one_constant_var_setup(this_ds, case, var):
+ """
+ Various setup steps for check_one_constant_var()
+ """
+ if "gs" in this_ds[var].dims:
+ time_coord = "gs"
+ elif "time" in this_ds[var].dims:
+ time_coord = "time"
+ else:
+ raise RuntimeError(f"Which of these is the time coordinate? {this_ds[var].dims}")
+ i_time_coord = this_ds[var].dims.index(time_coord)
+
+ this_da = this_ds[var]
+ ra_sp = np.moveaxis(this_da.copy().values, i_time_coord, 0)
+ incl_patches = []
+ bad_patches = np.array([])
+ str_list = []
+
+ # Read prescription file, if needed
+ rx_ds = None
+ if isinstance(case, dict):
+ if var == "GDDHARV" and "rx_gdds_file" in case:
+ rx_ds = import_rx_dates(
+ "gdd", case["rx_gdds_file"], this_ds, set_neg1_to_nan=False
+ ).squeeze()
+
+ return time_coord, this_da, ra_sp, incl_patches, str_list, rx_ds, bad_patches
+
+
+def loop_through_bad_patches(
+ verbose,
+ emojus,
+ var,
+ everything_ok,
+ str_list,
+ rx_ds,
+ time_1,
+ t1_yr,
+ t1_vals,
+ timestep,
+ t_yr,
+ t_vals,
+ bad_patches_this_time,
+ found_in_rx,
+ vary_patches,
+ vary_lons,
+ vary_lats,
+ vary_crops,
+ vary_crops_int,
+ any_bad,
+):
+ """
+ Loop through and check any patches that were "bad" according to check_constant_vars().
+
+ This is pretty inefficient, but it works.
+ """
+ patch = None # In case bad_patches_this_time is empty
+ for i, patch in enumerate(bad_patches_this_time):
+ this_patch = vary_patches[i]
+ this_lon = vary_lons[i]
+ this_lat = vary_lats[i]
+ this_crop = vary_crops[i]
+ this_crop_int = vary_crops_int[i]
+
+ # If prescribed input had missing value (-1), it's fine for it to vary.
+ if rx_ds:
+ rx_var = f"gs1_{this_crop_int}"
+ if this_lon in rx_ds.lon.values and this_lat in rx_ds.lat.values:
+ rx_vals = rx_ds[rx_var].sel(lon=this_lon, lat=this_lat).values
+ n_unique = len(np.unique(rx_vals))
+ if n_unique == 1:
+ found_in_rx[i] = True
+ if rx_vals == -1:
+ continue
+ elif n_unique > 1:
+ raise RuntimeError(
+ f"How does lon {this_lon} lat {this_lat} {this_crop} have "
+ + f"time-varying {var}?"
+ )
+ else:
+ raise RuntimeError(f"lon {this_lon} lat {this_lat} {this_crop} not in rx dataset?")
+
+ # Print info (or save to print later)
+ any_bad = True
+ if verbose:
+ this_str = (
+ f" Patch {this_patch} (lon {this_lon} lat {this_lat}) "
+ + f"{this_crop} ({this_crop_int})"
+ )
+ if rx_ds and not found_in_rx[i]:
+ this_str = this_str.replace("(lon", "* (lon")
+ if not np.isnan(t1_vals[patch]):
+ t1_val_print = int(t1_vals[patch])
+ else:
+ t1_val_print = "NaN"
+ if not np.isnan(t_vals[patch]):
+ t_val_print = int(t_vals[patch])
+ else:
+ t_val_print = "NaN"
+ if var == "SDATES":
+ str_list.append(
+ f"{this_str}: Sowing {t1_yr} jday {t1_val_print}, {t_yr} "
+ + f"jday {t_val_print}"
+ )
+ else:
+ str_list.append(
+ f"{this_str}: {t1_yr} {var} {t1_val_print}, {t_yr} {var} " + f"{t_val_print}"
+ )
+ else:
+ if everything_ok:
+ print(f"{emojus} CLM output {var} unexpectedly vary over time:")
+ everything_ok = False
+ print(f"{var} timestep {timestep} does not match timestep {time_1}")
+ break
+ return any_bad, patch
+
+
+def ensure_all_patches_checked(this_ds, this_da, ra_sp, incl_patches):
+ """
+ In check_one_constant_var(), make sure every patch was checked once (or is all-NaN except
+ possibly final season)
+ """
+ incl_patches = np.sort(incl_patches)
+ if not np.array_equal(incl_patches, np.unique(incl_patches)):
+ raise RuntimeError("Patch(es) checked more than once!")
+ incl_patches = list(incl_patches)
+ incl_patches += list(
+ np.where(
+ np.all(
+ np.isnan(
+ ra_sp[
+ :-1,
+ ]
+ ),
+ axis=0,
+ )
+ )[0]
+ )
+ incl_patches = np.sort(incl_patches)
+ if not np.array_equal(incl_patches, np.unique(incl_patches)):
+ raise RuntimeError("Patch(es) checked but also all-NaN??")
+ if not np.array_equal(incl_patches, np.arange(this_ds.dims["patch"])):
+ for patch in np.arange(this_ds.dims["patch"]):
+ if patch not in incl_patches:
+ raise RuntimeError(
+ f"Not all patches checked! E.g., {patch}: {this_da.isel(patch=patch).values}"
+ )
+
+
+def check_one_constant_var_loop_through_timesteps(
+ this_ds,
+ ignore_nan,
+ verbose,
+ emojus,
+ var,
+ everything_ok,
+ time_coord,
+ this_da,
+ str_list,
+ rx_ds,
+ time_1,
+ these_patches,
+ t1_yr,
+ t1_vals,
+ any_bad,
+ any_bad_before_checking_rx,
+ bad_patches,
+):
+ """
+ In check_one_constant_var(), loop through timesteps
+ """
+ found_in_rx = None
+ for timestep in np.arange(time_1 + 1, this_ds.dims[time_coord]):
+ t_yr = this_ds[time_coord].values[timestep]
+ t_vals = np.squeeze(this_da.isel({time_coord: timestep, "patch": these_patches}).values)
+ ok_p = t1_vals == t_vals
+
+ # If allowed, ignore where either t or t1 is NaN. Should only be used for runs where
+ # land use varies over time.
+ if ignore_nan:
+ ok_p = np.squeeze(np.bitwise_or(ok_p, np.isnan(t1_vals + t_vals)))
+
+ if not np.all(ok_p):
+ any_bad_before_checking_rx = True
+ bad_patches_this_time = list(np.where(np.bitwise_not(ok_p))[0])
+ bad_patches = np.concatenate(
+ (bad_patches, np.array(these_patches)[bad_patches_this_time])
+ )
+ if rx_ds:
+ found_in_rx = np.array([False for x in bad_patches])
+ vary_patches = list(np.array(these_patches)[bad_patches_this_time])
+ vary_lons = this_ds.patches1d_lon.values[bad_patches_this_time]
+ vary_lats = this_ds.patches1d_lat.values[bad_patches_this_time]
+ vary_crops = this_ds.patches1d_itype_veg_str.values[bad_patches_this_time]
+ vary_crops_int = this_ds.patches1d_itype_veg.values[bad_patches_this_time]
+
+ any_bad_any_crop = False
+ for crop_int in np.unique(vary_crops_int):
+ rx_var = f"gs1_{crop_int}"
+ vary_lons_this_crop = vary_lons[np.where(vary_crops_int == crop_int)]
+ vary_lats_this_crop = vary_lats[np.where(vary_crops_int == crop_int)]
+ these_rx_vals = np.diag(
+ rx_ds[rx_var].sel(lon=vary_lons_this_crop, lat=vary_lats_this_crop).values
+ )
+ if len(these_rx_vals) != len(vary_lats_this_crop):
+ raise RuntimeError(
+ f"Expected {len(vary_lats_this_crop)} rx values; got "
+ + f"{len(these_rx_vals)}"
+ )
+ if not np.any(these_rx_vals != -1):
+ continue
+ any_bad_any_crop = True
+ break
+ if not any_bad_any_crop:
+ continue
+
+ # Loop through and check any patches that were "bad"
+ any_bad = loop_through_bad_patches(
+ verbose,
+ emojus,
+ var,
+ everything_ok,
+ str_list,
+ rx_ds,
+ time_1,
+ t1_yr,
+ t1_vals,
+ timestep,
+ t_yr,
+ t_vals,
+ bad_patches_this_time,
+ found_in_rx,
+ vary_patches,
+ vary_lons,
+ vary_lats,
+ vary_crops,
+ vary_crops_int,
+ any_bad,
+ )
+
+ return any_bad_before_checking_rx, bad_patches, found_in_rx, any_bad
+
+
+def check_one_constant_var(
+    this_ds, case, ignore_nan, verbose, emojus, var, any_bad, any_bad_before_checking_rx
+):
+    """
+    Ensure that a variable that should be constant actually is
+
+    For each patch, find the first timestep in which it's active, then compare every later
+    timestep's values against that baseline (delegated to
+    check_one_constant_var_loop_through_timesteps()).
+
+    Returns:
+        (any_bad, any_bad_before_checking_rx, bad_patches): updated accumulator flags and the
+        array of patch indices that failed the check.
+    """
+    everything_ok = True
+
+    (
+        time_coord,
+        this_da,
+        ra_sp,
+        incl_patches,
+        str_list,
+        rx_ds,
+        bad_patches,
+    ) = check_one_constant_var_setup(this_ds, case, var)
+
+    # Bug fix: found_in_rx was previously assigned only inside the loop below. If any_bad came
+    # in True (from a previous variable) but no patches were checked here, referencing it after
+    # the loop raised NameError instead of the intended RuntimeError diagnostic.
+    found_in_rx = None
+
+    for time_1 in np.arange(this_ds.dims[time_coord] - 1):
+        # Patches active at time_1 that were all-NaN (inactive) at every earlier timestep,
+        # i.e., patches whose first active season is time_1
+        condn = ~np.isnan(ra_sp[time_1, ...])
+        if time_1 > 0:
+            condn = np.bitwise_and(condn, np.all(np.isnan(ra_sp[:time_1, ...]), axis=0))
+        these_patches = np.where(condn)[0]
+        if these_patches.size == 0:
+            continue
+        these_patches = list(np.where(condn)[0])
+        incl_patches += these_patches
+
+        # Baseline: the values at each patch's first active timestep
+        t1_yr = this_ds[time_coord].values[time_1]
+        t1_vals = np.squeeze(this_da.isel({time_coord: time_1, "patch": these_patches}).values)
+
+        (
+            any_bad_before_checking_rx,
+            bad_patches,
+            found_in_rx,
+            any_bad,
+        ) = check_one_constant_var_loop_through_timesteps(
+            this_ds,
+            ignore_nan,
+            verbose,
+            emojus,
+            var,
+            everything_ok,
+            time_coord,
+            this_da,
+            str_list,
+            rx_ds,
+            time_1,
+            these_patches,
+            t1_yr,
+            t1_vals,
+            any_bad,
+            any_bad_before_checking_rx,
+            bad_patches,
+        )
+
+    if verbose and any_bad:
+        print(f"{emojus} CLM output {var} unexpectedly vary over time:")
+        str_list.sort()
+        if found_in_rx is None:
+            raise RuntimeError("Somehow any_bad True but found_in_rx None")
+        if rx_ds and np.any(~found_in_rx):  # pylint: disable=invalid-unary-operand-type
+            str_list = [
+                "*: Not found in prescribed input file (maybe minor lon/lat mismatch)"
+            ] + str_list
+        elif not rx_ds:
+            str_list = ["(No rx file checked)"] + str_list
+        print("\n".join(str_list))
+
+    # Make sure every patch was checked once (or is all-NaN except possibly final season)
+    ensure_all_patches_checked(this_ds, this_da, ra_sp, incl_patches)
+
+    if not any_bad:
+        if any_bad_before_checking_rx:
+            print(
+                f"✅ CLM output {var} do not vary through {this_ds.dims[time_coord]} growing "
+                + "seasons of output (except for patch(es) with missing rx)."
+            )
+        else:
+            print(
+                f"✅ CLM output {var} do not vary through {this_ds.dims[time_coord]} growing "
+                + "seasons of output."
+            )
+
+    return any_bad, any_bad_before_checking_rx, bad_patches
+
+
+def check_constant_vars(
+    this_ds, case, ignore_nan, const_growing_seasons=None, verbose=True, throw_error=True
+):
+    """
+    For variables that should stay constant, make sure they are
+
+    Args:
+        this_ds: Dataset of CLM outputs with "gs" (growing season) coordinate
+        case: str (a single variable name), list of variable names, or dict with key
+            "const_vars" giving the list of variables to check
+        ignore_nan: if True, ignore timesteps where either value being compared is NaN
+            (intended for runs where land use varies over time)
+        const_growing_seasons: optional slice restricting which growing seasons are checked
+        verbose: print details about each failing patch
+        throw_error: raise RuntimeError on failure (otherwise just warn)
+
+    Returns:
+        list of int patch indices that failed the check, or None if nothing to check
+    """
+    if isinstance(case, str):
+        const_vars = [case]
+    elif isinstance(case, list):
+        const_vars = case
+    elif isinstance(case, dict):
+        const_vars = case["const_vars"]
+    else:
+        # Bug fix: the message previously omitted list from the accepted types.
+        raise TypeError(f"case must be str, list, or dict, not {type(case)}")
+
+    if not const_vars:
+        return None
+
+    if const_growing_seasons:
+        # Warn if the requested check window doesn't cover the whole run
+        gs_0 = this_ds.gs.values[0]
+        gs_n = this_ds.gs.values[-1]
+        if const_growing_seasons.start > gs_0 or const_growing_seasons.stop < gs_n:
+            print(
+                f"❗ Only checking const_vars over {const_growing_seasons.start}-"
+                + f"{const_growing_seasons.stop} (run includes {gs_0}-{gs_n})"
+            )
+        this_ds = this_ds.sel(gs=const_growing_seasons)
+
+    any_bad = False
+    any_bad_before_checking_rx = False
+    # ❌ means failure is fatal (throw_error); ❗ means warn-only
+    if throw_error:
+        emojus = "❌"
+    else:
+        emojus = "❗"
+    if not isinstance(const_vars, list):
+        const_vars = [const_vars]
+
+    # any_bad and any_bad_before_checking_rx accumulate across variables because each call
+    # receives the previous values and folds its own results in.
+    for var in const_vars:
+        any_bad, any_bad_before_checking_rx, bad_patches = check_one_constant_var(
+            this_ds, case, ignore_nan, verbose, emojus, var, any_bad, any_bad_before_checking_rx
+        )
+
+    if any_bad and throw_error:
+        raise RuntimeError("Stopping due to failed check_constant_vars().")
+
+    bad_patches = np.unique(bad_patches)
+    return [int(p) for p in bad_patches]
diff --git a/python/ctsm/crop_calendars/check_rx_obeyed.py b/python/ctsm/crop_calendars/check_rx_obeyed.py
new file mode 100644
index 0000000000..99b8d80bde
--- /dev/null
+++ b/python/ctsm/crop_calendars/check_rx_obeyed.py
@@ -0,0 +1,216 @@
+"""
+Check that prescribed crop calendars were obeyed
+"""
+
+import numpy as np
+
+import ctsm.crop_calendars.cropcal_utils as utils
+from ctsm.crop_calendars.cropcal_constants import DEFAULT_GDD_MIN
+
+
+def get_pct_harv_at_mature(harvest_reason_da):
+    """
+    Get percentage of harvests that happened at maturity
+
+    Harvest reason code 1 means "harvested at maturity." Returns np.nan if no harvests
+    occurred; otherwise a string rounded to 2 significant digits.
+    """
+    reasons = harvest_reason_da.values
+    n_at_maturity = np.count_nonzero(reasons == 1)
+    # NaN-vs-number comparisons would warn; suppress while counting real harvests
+    with np.errstate(invalid="ignore"):
+        n_harvests = np.count_nonzero(reasons > 0)
+    if n_harvests == 0:
+        return np.nan
+    # Round to 2 significant digits (note: returns a string)
+    return np.format_float_positional(
+        n_at_maturity / n_harvests * 100, precision=2, unique=False, fractional=False, trim="k"
+    )
+
+
+def check_rx_obeyed_handle_gdharv(output_var, gdd_min, ds_thisveg, rx_array):
+    """
+    In check_rx_obeyed(), account for the GDD harvest threshold minimum set in PlantCrop()
+
+    NOTE: rx_array is modified IN PLACE (values in [0, gdd_min) are raised to gdd_min);
+    the caller relies on this side effect since rx_array is not returned.
+
+    Returns:
+        (gdd_min, unique_harvest_reasons, pct_harv_at_mature)
+    """
+    if gdd_min is None:
+        # Fall back to the model's hard-coded minimum if the caller didn't supply one
+        gdd_min = DEFAULT_GDD_MIN
+        print(
+            f"gdd_min not provided when doing check_rx_obeyed() for {output_var}; using "
+            + f"default {gdd_min}"
+        )
+    with np.errstate(invalid="ignore"):
+        rx_array[(rx_array >= 0) & (rx_array < gdd_min)] = gdd_min
+
+    # ...harvest reason
+    # 0: Should never happen in any simulation
+    # 1: Harvesting at maturity
+    # 2: Harvesting at max season length (mxmat)
+    # 3: Crop was incorrectly planted in last time step of Dec. 31
+    # 4: Today was supposed to be the planting day, but the previous crop still hasn't been
+    #    harvested.
+    # 5: Harvest the day before the next sowing date this year.
+    # 6: Same as #5.
+    # 7: Harvest the day before the next sowing date (today is Dec. 31 and the sowing date
+    #    is Jan. 1)
+    harvest_reason_da = ds_thisveg["HARVEST_REASON"]
+    # Unique reason codes actually present (NaNs excluded)
+    unique_harvest_reasons = np.unique(
+        harvest_reason_da.values[np.where(~np.isnan(harvest_reason_da.values))]
+    )
+    pct_harv_at_mature = get_pct_harv_at_mature(harvest_reason_da)
+    return gdd_min, unique_harvest_reasons, pct_harv_at_mature
+
+
+def check_rx_obeyed_setup(dates_ds, which_ds, output_var, verbose):
+    """
+    Various setup steps for check_rx_obeyed()
+
+    Returns:
+        (all_ok, diff_str_list, gdd_tolerance): the initial status code (2 = all OK so far),
+        an empty list for accumulating difference descriptions, and the GDD tolerance used
+        to decide whether differences are acceptable.
+    """
+    if verbose and "GDDHARV" in output_var:
+        # Summarize which harvest-reason codes appear, and how often harvest was at maturity
+        reason_da = dates_ds["HARVEST_REASON"]
+        reasons_present = np.unique(reason_da.values[np.where(~np.isnan(reason_da.values))])
+        pct_at_maturity = get_pct_harv_at_mature(reason_da)
+        print(
+            f"{which_ds} harvest reasons: {reasons_present} ({pct_at_maturity}% harv at "
+            + "maturity)"
+        )
+
+    return 2, [], 1
+
+
+def get_extreme_info(diff_array, rx_array, mxn, dims, gs_da, patches1d_lon, patches1d_lat):
+    """
+    Get information about extreme gridcells (for debugging)
+
+    mxn is the reduction function (e.g., np.nanmin or np.nanmax) used to pick the extreme
+    difference; returns that value plus the lon, lat, growing season, and prescribed value of
+    the first patch-gs where it occurs.
+    """
+    if mxn == np.min:  # pylint: disable=comparison-with-callable
+        # When looking for the minimum, mask out exact zeros so a real difference is reported.
+        # NOTE(review): callers in this file pass np.nanmin/np.nanmax, which are different
+        # function objects from np.min, so this branch never fires for them — confirm whether
+        # np.nanmin should also be matched here.
+        diff_array = np.ma.masked_array(diff_array, mask=np.abs(diff_array) == 0)
+    themxn = mxn(diff_array)
+
+    # Find the first patch-gs that has the mxn value
+    matching_indices = np.where(diff_array == themxn)
+    first_indices = [x[0] for x in matching_indices]
+
+    # Get the lon, lat, and growing season of that patch-gs
+    patch_index = first_indices[dims.index("patch")]
+    this_lon = patches1d_lon.values[patch_index]
+    this_lat = patches1d_lat.values[patch_index]
+    season_index = first_indices[dims.index("gs")]
+    this_gs = gs_da.values[season_index]
+
+    # Get the prescribed value for this patch-gs
+    this_rx = rx_array[patch_index][0]
+
+    return round(themxn, 3), round(this_lon, 3), round(this_lat, 3), this_gs, round(this_rx)
+
+
+def check_rx_obeyed(
+    vegtype_list, rx_ds, dates_ds, which_ds, output_var, gdd_min=None, verbose=False
+):
+    """
+    Check that prescribed crop calendars were obeyed
+
+    Compares simulated values of output_var against the prescribed values in rx_ds for each
+    vegetation type, printing a ✅/🟨/❌ summary. Status codes in all_ok:
+    2 = fully obeyed; 1 = differences within gdd_tolerance; 0 = real violation.
+    """
+    all_ok, diff_str_list, gdd_tolerance = check_rx_obeyed_setup(
+        dates_ds, which_ds, output_var, verbose
+    )
+
+    for vegtype_str in vegtype_list:
+        thisveg_patches = np.where(dates_ds.patches1d_itype_veg_str == vegtype_str)[0]
+        if thisveg_patches.size == 0:
+            continue
+        ds_thisveg = dates_ds.isel(patch=thisveg_patches)
+
+        # Look up prescribed values for these patches' gridcells (1-based jxy/ixy → 0-based)
+        vegtype_int = utils.vegtype_str2int(vegtype_str)[0]
+        rx_da = rx_ds[f"gs1_{vegtype_int}"]
+        rx_array = rx_da.values[
+            ds_thisveg.patches1d_jxy.values.astype(int) - 1,
+            ds_thisveg.patches1d_ixy.values.astype(int) - 1,
+        ]
+        rx_array = np.expand_dims(rx_array, axis=1)
+        sim_array = ds_thisveg[output_var].values
+        sim_array_dims = ds_thisveg[output_var].dims
+
+        # Ignore patches without prescribed value
+        with np.errstate(invalid="ignore"):
+            rx_array[np.where(rx_array < 0)] = np.nan
+
+        # Account for...
+        if "GDDHARV" in output_var:
+            # ...GDD harvest threshold minimum set in PlantCrop()
+            # (note: this also raises sub-minimum values of rx_array in place)
+            gdd_min, unique_harvest_reasons, pct_harv_at_mature = check_rx_obeyed_handle_gdharv(
+                output_var, gdd_min, ds_thisveg, rx_array
+            )
+
+        if np.any(sim_array != rx_array):
+            diff_array = sim_array - rx_array
+
+            # Allow negative GDDHARV values when harvest occurred because sowing was scheduled for
+            # the next day
+            if output_var == "GDDHARV_PERHARV":
+                diff_array = np.ma.masked_array(
+                    diff_array,
+                    mask=(diff_array < 0) & (ds_thisveg["HARVEST_REASON_PERHARV"].values == 5),
+                )
+            elif output_var == "GDDHARV":
+                with np.errstate(invalid="ignore"):
+                    diff_lt_0 = diff_array < 0
+                    harv_reason_5 = ds_thisveg["HARVEST_REASON"].values == 5
+                    diff_array = np.ma.masked_array(diff_array, mask=diff_lt_0 & harv_reason_5)
+
+            with np.errstate(invalid="ignore"):
+                abs_gt_0 = abs(diff_array) > 0
+            if np.any(np.abs(diff_array[abs_gt_0]) > 0):
+                # Describe the extreme (min and max) differences for diagnostics
+                min_diff, min_lon, min_lat, min_gs, min_rx = get_extreme_info(
+                    diff_array,
+                    rx_array,
+                    np.nanmin,
+                    sim_array_dims,
+                    dates_ds.gs,
+                    ds_thisveg.patches1d_lon,
+                    ds_thisveg.patches1d_lat,
+                )
+                max_diff, max_lon, max_lat, max_gs, max_rx = get_extreme_info(
+                    diff_array,
+                    rx_array,
+                    np.nanmax,
+                    sim_array_dims,
+                    dates_ds.gs,
+                    ds_thisveg.patches1d_lon,
+                    ds_thisveg.patches1d_lat,
+                )
+
+                diffs_eg_txt = (
+                    f"{vegtype_str} ({vegtype_int}): diffs range {min_diff} (lon {min_lon}, lat "
+                    + f"{min_lat}, gs {min_gs}, rx ~{min_rx}) to {max_diff} (lon {max_lon}, lat "
+                    + f"{max_lat}, gs {max_gs}, rx ~{max_rx})"
+                )
+                if "GDDHARV" in output_var:
+                    diffs_eg_txt += (
+                        f"; harvest reasons: {unique_harvest_reasons} ({pct_harv_at_mature}"
+                        + "% harvested at maturity)"
+                    )
+                if "GDDHARV" in output_var and np.nanmax(abs(diff_array)) <= gdd_tolerance:
+                    # Differences small enough to be acceptable
+                    # NOTE(review): diff_str_list is accumulated here but only used by the
+                    # commented-out printout below — confirm whether it should be reported.
+                    if all_ok > 0:
+                        all_ok = 1
+                    diff_str_list.append(f"   {diffs_eg_txt}")
+                else:
+                    all_ok = 0
+                    if verbose:
+                        print(
+                            f"❌ {which_ds}: Prescribed {output_var} *not* always obeyed. E.g., "
+                            + f"{diffs_eg_txt}"
+                        )
+                    else:
+                        # Without verbose output there's nothing more to learn; stop early
+                        break
+
+    if all_ok == 2:
+        print(f"✅ {which_ds}: Prescribed {output_var} always obeyed")
+    elif all_ok == 1:
+        # print(f"🟨 {which_ds}: Prescribed {output_var} *not* always obeyed, but acceptable:")
+        # for x in diff_str_list: print(x)
+        print(
+            f"🟨 {which_ds}: Prescribed {output_var} *not* always obeyed, but acceptable (diffs <= "
+            + f"{gdd_tolerance})"
+        )
+    elif not verbose:
+        # all_ok == 0 implies diffs_eg_txt was set in the loop above
+        print(f"❌ {which_ds}: Prescribed {output_var} *not* always obeyed. E.g., {diffs_eg_txt}")
diff --git a/python/ctsm/crop_calendars/check_rxboth_run.py b/python/ctsm/crop_calendars/check_rxboth_run.py
index 6dae071937..ae4decde30 100644
--- a/python/ctsm/crop_calendars/check_rxboth_run.py
+++ b/python/ctsm/crop_calendars/check_rxboth_run.py
@@ -1,12 +1,32 @@
-# %% Setup
-
+"""
+Check the results of a run with prescribed sowing dates and maturity requirements
+"""
+import sys
+import argparse
+import glob
+import os
import numpy as np
-import sys, argparse
-import cropcal_module as cc
-import glob, os
+
+# Import the CTSM Python utilities.
+# sys.path.insert() is necessary for RXCROPMATURITY to work. The fact that it's calling this script
+# in the RUN phase seems to require the python/ directory to be manually added to path.
+_CTSM_PYTHON = os.path.join(
+ os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir, os.pardir, "python"
+)
+sys.path.insert(1, _CTSM_PYTHON)
+import ctsm.crop_calendars.cropcal_module as cc # pylint: disable=wrong-import-position
+from ctsm.crop_calendars.check_rx_obeyed import ( # pylint: disable=wrong-import-position
+ check_rx_obeyed,
+)
+from ctsm.crop_calendars.check_constant_vars import ( # pylint: disable=wrong-import-position
+ check_constant_vars,
+)
def main(argv):
+ """
+ Main method: Check the results of a run with prescribed sowing dates and maturity requirements
+ """
# Set arguments
parser = argparse.ArgumentParser(description="ADD DESCRIPTION HERE")
parser.add_argument(
@@ -40,7 +60,7 @@ def main(argv):
args = parser.parse_args(argv)
# Note that _PERHARV will be stripped off upon import
- myVars = [
+ my_vars = [
"GRAINC_TO_FOOD_PERHARV",
"GRAINC_TO_FOOD_ANN",
"SDATES",
@@ -60,18 +80,18 @@ def main(argv):
# These should be constant in a Prescribed Calendars (rxboth) run, as long as the inputs were
# static.
case = {
- "constantVars": ["SDATES", "GDDHARV"],
+ "const_vars": ["SDATES", "GDDHARV"],
"rx_sdates_file": args.rx_sdates_file,
"rx_gdds_file": args.rx_gdds_file,
}
case["ds"] = cc.import_output(
annual_outfiles,
- myVars=myVars,
- y1=args.first_usable_year,
- yN=args.last_usable_year,
+ my_vars=my_vars,
+ year_1=args.first_usable_year,
+ year_n=args.last_usable_year,
)
- cc.check_constant_vars(case["ds"], case, ignore_nan=True, verbose=True, throw_error=True)
+ check_constant_vars(case["ds"], case, ignore_nan=True, verbose=True, throw_error=True)
# Import GGCMI sowing and harvest dates, and check sims
casename = "Prescribed Calendars"
@@ -84,24 +104,31 @@ def main(argv):
# Equalize lons/lats
lonlat_tol = 1e-4
- for v in ["rx_sdates_ds", "rx_gdds_ds"]:
- if v in case:
- for l in ["lon", "lat"]:
- max_diff_orig = np.max(np.abs(case[v][l].values - case["ds"][l].values))
+ for ds_name in ["rx_sdates_ds", "rx_gdds_ds"]:
+ if ds_name in case:
+ for coord_name in ["lon", "lat"]:
+ max_diff_orig = np.max(
+ np.abs(case[ds_name][coord_name].values - case["ds"][coord_name].values)
+ )
if max_diff_orig > lonlat_tol:
raise RuntimeError(
- f"{v} {l} values differ too much ({max_diff_orig} > {lonlat_tol})"
+ f"{ds_name} {coord_name} values differ too much ({max_diff_orig} > "
+ + f"{lonlat_tol})"
+ )
+ if max_diff_orig > 0:
+ case[ds_name] = case[ds_name].assign_coords(
+ {coord_name: case["ds"][coord_name].values}
+ )
+ max_diff = np.max(
+ np.abs(case[ds_name][coord_name].values - case["ds"][coord_name].values)
)
- elif max_diff_orig > 0:
- case[v] = case[v].assign_coords({l: case["ds"][l].values})
- max_diff = np.max(np.abs(case[v][l].values - case["ds"][l].values))
- print(f"{v} {l} max_diff {max_diff_orig} → {max_diff}")
+ print(f"{ds_name} {coord_name} max_diff {max_diff_orig} → {max_diff}")
else:
- print(f"{v} {l} max_diff {max_diff_orig}")
+ print(f"{ds_name} {coord_name} max_diff {max_diff_orig}")
# Check
if case["rx_sdates_file"]:
- cc.check_rx_obeyed(
+ check_rx_obeyed(
case["ds"].vegtype_str.values,
case["rx_sdates_ds"].isel(time=0),
case["ds"],
@@ -109,7 +136,7 @@ def main(argv):
"SDATES",
)
if case["rx_gdds_file"]:
- cc.check_rx_obeyed(
+ check_rx_obeyed(
case["ds"].vegtype_str.values,
case["rx_gdds_ds"].isel(time=0),
case["ds"],
diff --git a/python/ctsm/crop_calendars/convert_axis_time2gs.py b/python/ctsm/crop_calendars/convert_axis_time2gs.py
new file mode 100644
index 0000000000..d48514370d
--- /dev/null
+++ b/python/ctsm/crop_calendars/convert_axis_time2gs.py
@@ -0,0 +1,622 @@
+"""
+Convert time*mxharvests axes to growingseason axis
+"""
+import warnings
+import sys
+import numpy as np
+import xarray as xr
+
+try:
+ import pandas as pd
+except ModuleNotFoundError:
+ pass
+
+
+def pym_to_pg(pym_array, quiet=False):
+    """
+    In convert_axis_time2gs(), convert year x month array to growingseason axis
+
+    Flattens the trailing (year, mxharvests) axes of a (patch, year, mxharvests) array into a
+    single growing-season axis, optionally printing how many patch-seasons are included.
+    """
+    pg_array = pym_array.reshape(pym_array.shape[0], -1)
+    if not quiet:
+        ok_pg = pg_array[~np.isnan(pg_array)]
+        print(
+            f"{ok_pg.size} included; unique N seasons = "
+            + f"{np.unique(np.sum(~np.isnan(pg_array), axis=1))}"
+        )
+    return pg_array
+
+
+def ignore_lastyear_complete_season(pg_array, excl, mxharvests):
+    """
+    Helper function for convert_axis_time2gs()
+
+    NaN out the final mxharvests patch-season columns wherever excl is True, returning the
+    reassembled array. The trailing columns are modified through a view, so pg_array itself is
+    mutated in place (callers pass copies).
+    """
+    head = pg_array[:, :-mxharvests]
+    tail = pg_array[:, -mxharvests:]
+    tail[np.where(excl)] = np.nan  # in-place on the view into pg_array
+    return np.concatenate((head, tail), axis=1)
+
+
+def convert_axis_time2gs_setup(this_ds, verbose):
+    """
+    Various setup steps for convert_axis_time2gs_setup()
+
+    Computes the expected number of valid patch-seasons and extracts harvest/sowing date
+    arrays in both (year, mxharvests, patch) and (patch, year, mxharvests) orderings, with
+    non-positive ("non-season") values masked to NaN.
+    """
+    # How many non-NaN patch-seasons do we expect to have once we're done organizing things?
+    n_patch = this_ds.dims["patch"]
+    # Because some patches will be planted in the last year but not complete, we have to ignore any
+    # finalyear-planted seasons that do complete.
+    n_gs = this_ds.dims["time"] - 1
+    expected_valid = n_patch * n_gs
+
+    mxharvests = this_ds.dims["mxharvests"]
+
+    if verbose:
+        print(
+            f"Start: discrepancy of {np.sum(~np.isnan(this_ds.HDATES.values)) - expected_valid} "
+            + "patch-seasons"
+        )
+
+    # Set all non-positive date values to NaN. These are seasons that were never harvested
+    # (or never started): "non-seasons."
+    if this_ds.HDATES.dims != ("time", "mxharvests", "patch"):
+        raise RuntimeError(
+            "This code relies on HDATES dims ('time', 'mxharvests', 'patch'), not "
+            + f"{this_ds.HDATES.dims}"
+        )
+    # .where(> 0) already masks non-positive values to NaN in the *_ymp arrays
+    hdates_ymp = this_ds.HDATES.copy().where(this_ds.HDATES > 0).values
+    hdates_pym = np.transpose(hdates_ymp.copy(), (2, 0, 1))
+    sdates_ymp = this_ds.SDATES_PERHARV.copy().where(this_ds.SDATES_PERHARV > 0).values
+    sdates_pym = np.transpose(sdates_ymp.copy(), (2, 0, 1))
+    # NOTE(review): this re-mask is redundant with the .where() above — harmless, but confirm
+    with np.errstate(invalid="ignore"):
+        hdates_pym[hdates_pym <= 0] = np.nan
+    return n_patch, n_gs, expected_valid, mxharvests, hdates_ymp, hdates_pym, sdates_ymp, sdates_pym
+
+
+def set_up_ds_with_gs_axis(ds_in):
+    """
+    Set up empty Dataset with time axis as "gs" (growing season) instead of what CLM puts out.
+
+    Includes all the same variables as the input dataset, minus any that had dimensions mxsowings or
+    mxharvests.
+    """
+    # Get the data variables to include in the new dataset
+    data_vars = {}
+    for var in ds_in.data_vars:
+        if not any(x in ["mxsowings", "mxharvests"] for x in ds_in[var].dims):
+            data_vars[var] = ds_in[var]
+    # Set up the new dataset
+    # Growing season labels: the year each season was sown (time axis lags by one year),
+    # dropping the final, possibly-incomplete season
+    gs_years = [t.year - 1 for t in ds_in.time.values[:-1]]
+    # NOTE(review): ds_in.coords is a live view, so assigning "gs" here also adds the
+    # coordinate to ds_in itself — confirm this side effect is intended.
+    coords = ds_in.coords
+    coords["gs"] = gs_years
+    ds_out = xr.Dataset(data_vars=data_vars, coords=coords, attrs=ds_in.attrs)
+    return ds_out
+
+
+def print_onepatch_wrong_n_gs(
+    patch_index,
+    this_ds_orig,
+    sdates_ymp,
+    hdates_ymp,
+    sdates_pym,
+    hdates_pym,
+    sdates_pym2,
+    hdates_pym2,
+    sdates_pym3,
+    hdates_pym3,
+    sdates_pg,
+    hdates_pg,
+    sdates_pg2,
+    hdates_pg2,
+):
+    """
+    Print information about a patch (for debugging)
+
+    Dumps the sowing/harvest date arrays for one patch at every stage of the
+    convert_axis_time2gs() pipeline, using pandas tables when available and plain numpy
+    otherwise.
+    """
+
+    print(
+        f"patch {patch_index}: {this_ds_orig.patches1d_itype_veg_str.values[patch_index]}, lon "
+        f"{this_ds_orig.patches1d_lon.values[patch_index]} lat "
+        f"{this_ds_orig.patches1d_lat.values[patch_index]}"
+    )
+
+    print("Original SDATES (per sowing):")
+    print(this_ds_orig.SDATES.values[:, :, patch_index])
+
+    print("Original HDATES (per harvest):")
+    print(this_ds_orig.HDATES.values[:, :, patch_index])
+
+    if "pandas" in sys.modules:
+
+        # Tabulate (year, mxharvests)-shaped arrays side by side, one column per harvest slot
+        def print_pandas_ymp(msg, cols, arrs_tuple):
+            print(f"{msg} ({np.sum(~np.isnan(arrs_tuple[0]))})")
+            mxharvests = arrs_tuple[0].shape[1]
+            arrs_list2 = []
+            cols2 = []
+            for harvest_index in np.arange(mxharvests):
+                for i, array in enumerate(arrs_tuple):
+                    arrs_list2.append(array[:, harvest_index])
+                    cols2.append(cols[i] + str(harvest_index))
+            arrs_tuple2 = tuple(arrs_list2)
+            dataframe = pd.DataFrame(np.stack(arrs_tuple2, axis=1))
+            dataframe.columns = cols2
+            print(dataframe)
+
+        print_pandas_ymp(
+            "Original",
+            ["sdate", "hdate"],
+            (
+                this_ds_orig.SDATES_PERHARV.values[:, :, patch_index],
+                this_ds_orig.HDATES.values[:, :, patch_index],
+            ),
+        )
+
+        print_pandas_ymp(
+            "Masked",
+            ["sdate", "hdate"],
+            (sdates_ymp[:, :, patch_index], hdates_ymp[:, :, patch_index]),
+        )
+
+        print_pandas_ymp(
+            'After "Ignore harvests from before this output began"',
+            ["sdate", "hdate"],
+            (
+                np.transpose(sdates_pym, (1, 2, 0))[:, :, patch_index],
+                np.transpose(hdates_pym, (1, 2, 0))[:, :, patch_index],
+            ),
+        )
+
+        print_pandas_ymp(
+            'After "In years with no sowing, pretend the first no-harvest is meaningful"',
+            ["sdate", "hdate"],
+            (
+                np.transpose(sdates_pym2, (1, 2, 0))[:, :, patch_index],
+                np.transpose(hdates_pym2, (1, 2, 0))[:, :, patch_index],
+            ),
+        )
+
+        print_pandas_ymp(
+            (
+                'After "In years with sowing that are followed by inactive years, check whether the'
+                " last sowing was harvested before the patch was deactivated. If not, pretend the"
+                ' LAST no-harvest is meaningful."'
+            ),
+            ["sdate", "hdate"],
+            (
+                np.transpose(sdates_pym3, (1, 2, 0))[:, :, patch_index],
+                np.transpose(hdates_pym3, (1, 2, 0))[:, :, patch_index],
+            ),
+        )
+
+        # Tabulate (patch, gs)-shaped arrays for one patch, one row per growing season
+        def print_pandas_pg(msg, cols, arrs_tuple):
+            print(f"{msg} ({np.sum(~np.isnan(arrs_tuple[0]))})")
+            arrs_list = list(arrs_tuple)
+            for i, array in enumerate(arrs_tuple):
+                arrs_list[i] = np.reshape(array, (-1))
+            arrs_tuple2 = tuple(arrs_list)
+            dataframe = pd.DataFrame(np.stack(arrs_tuple2, axis=1))
+            dataframe.columns = cols
+            print(dataframe)
+
+        print_pandas_pg(
+            "Same, but converted to gs axis",
+            ["sdate", "hdate"],
+            (sdates_pg[patch_index, :], hdates_pg[patch_index, :]),
+        )
+
+        print_pandas_pg(
+            (
+                'After "Ignore any harvests that were planted in the final year, because some cells'
+                ' will have incomplete growing seasons for the final year"'
+            ),
+            ["sdate", "hdate"],
+            (sdates_pg2[patch_index, :], hdates_pg2[patch_index, :]),
+        )
+    else:
+        print("Couldn't import pandas, so not displaying example bad patch ORIGINAL.")
+
+        # Fallback formatting without pandas: raw numpy arrays side by side
+        def print_nopandas(array_1, array_2, msg):
+            print(msg)
+            if array_1.ndim == 1:
+                # I don't know why these aren't side-by-side!
+                print(np.stack((array_1, array_2), axis=1))
+            else:
+                print(np.concatenate((array_1, array_2), axis=1))
+
+        print_nopandas(sdates_ymp[:, :, patch_index], hdates_ymp[:, :, patch_index], "Masked:")
+
+        print_nopandas(
+            np.transpose(sdates_pym, (1, 2, 0))[:, :, patch_index],
+            np.transpose(hdates_pym, (1, 2, 0))[:, :, patch_index],
+            'After "Ignore harvests from before this output began"',
+        )
+
+        print_nopandas(
+            np.transpose(sdates_pym2, (1, 2, 0))[:, :, patch_index],
+            np.transpose(hdates_pym2, (1, 2, 0))[:, :, patch_index],
+            'After "In years with no sowing, pretend the first no-harvest is meaningful"',
+        )
+
+        print_nopandas(
+            np.transpose(sdates_pym3, (1, 2, 0))[:, :, patch_index],
+            np.transpose(hdates_pym3, (1, 2, 0))[:, :, patch_index],
+            (
+                'After "In years with sowing that are followed by inactive years, check whether the'
+                " last sowing was harvested before the patch was deactivated. If not, pretend the"
+                ' LAST [easier to implement!] no-harvest is meaningful."'
+            ),
+        )
+
+        print_nopandas(
+            sdates_pg[patch_index, :], hdates_pg[patch_index, :], "Same, but converted to gs axis"
+        )
+
+        print_nopandas(
+            sdates_pg2[patch_index, :],
+            hdates_pg2[patch_index, :],
+            (
+                'After "Ignore any harvests that were planted in the final year, because some cells'
+                ' will have incomplete growing seasons for the final year"'
+            ),
+        )
+
+    print("\n\n")
+
+
+def handle_years_with_no_sowing(this_ds, mxharvests, hdates_pym, sdates_pym):
+    """
+    In years with no sowing, pretend the first no-harvest is meaningful, unless that was
+    intentionally ignored earlier in convert_axis_time2gs().
+
+    Marks the relevant no-harvest slots with -inf (a "fake" season placeholder that is later
+    converted back to NaN after reshaping). Returns (sdates_orig_pym, hdates_pym2, sdates_pym2).
+    """
+    sdates_orig_ymp = this_ds.SDATES.copy().values
+    sdates_orig_pym = np.transpose(sdates_orig_ymp.copy(), (2, 0, 1))
+    hdates_pym2 = hdates_pym.copy()
+    sdates_pym2 = sdates_pym.copy()
+    with np.errstate(invalid="ignore"):
+        sdates_gt_0 = sdates_orig_pym > 0
+    # Patch-years where nothing was sown at all
+    nosow_py = np.all(~sdates_gt_0, axis=2)
+    # ...and where the first harvest slot is empty: mark it as a fake season
+    nosow_py_1st = nosow_py & np.isnan(hdates_pym[:, :, 0])
+    where_nosow_py_1st = np.where(nosow_py_1st)
+    hdates_pym2[where_nosow_py_1st[0], where_nosow_py_1st[1], 0] = -np.inf
+    sdates_pym2[where_nosow_py_1st[0], where_nosow_py_1st[1], 0] = -np.inf
+    for harvest_index in np.arange(mxharvests - 1):
+        if harvest_index == 0:
+            continue
+        if harvest_index == 1:
+            print("Warning: Untested with mxharvests > 2")
+        # NOTE(review): the NaN check is on slot harvest_index but the fake marker is written
+        # to slot harvest_index + 1 — looks like a possible off-by-one; confirm. This loop body
+        # only runs when mxharvests > 2 (hence the warning above).
+        where_nosow_py = np.where(
+            nosow_py
+            & ~np.any(np.isnan(hdates_pym[:, :, 0:harvest_index]), axis=2)
+            & np.isnan(hdates_pym[:, :, harvest_index])
+        )
+        hdates_pym2[where_nosow_py[0], where_nosow_py[1], harvest_index + 1] = -np.inf
+        sdates_pym2[where_nosow_py[0], where_nosow_py[1], harvest_index + 1] = -np.inf
+    return sdates_orig_pym, hdates_pym2, sdates_pym2
+
+
+def handle_years_with_sowing_then_inactive(
+    verbose,
+    n_patch,
+    n_gs,
+    expected_valid,
+    mxharvests,
+    inactive_py,
+    sdates_orig_pym,
+    hdates_pym2,
+    sdates_pym2,
+):
+    """
+    In years with sowing that are followed by inactive years, check whether the last sowing was
+    harvested before the patch was deactivated. If not, pretend the LAST [easier to implement!]
+    no-harvest is meaningful.
+
+    Returns (hdates_pym3, sdates_pym3, hdates_pg, sdates_pg): the patched
+    (patch, year, mxharvests) arrays and their (patch, growingseason) reshapes.
+    """
+    # Mask non-positive original sowing dates so nanmax ignores them
+    sdates_orig_masked_pym = sdates_orig_pym.copy()
+    with np.errstate(invalid="ignore"):
+        sdates_le_0 = sdates_orig_masked_pym <= 0
+    sdates_orig_masked_pym[np.where(sdates_le_0)] = np.nan
+    with warnings.catch_warnings():
+        warnings.filterwarnings(action="ignore", message="All-NaN slice encountered")
+        last_sdate_first_n_gs_py = np.nanmax(sdates_orig_masked_pym[:, :-1, :], axis=2)
+        last_hdate_first_n_gs_py = np.nanmax(hdates_pym2[:, :-1, :], axis=2)
+    with np.errstate(invalid="ignore"):
+        hdate_lt_sdate = last_hdate_first_n_gs_py < last_sdate_first_n_gs_py
+    # Last sowing of the year was never harvested that same year (or never harvested at all)
+    last_sowing_not_harvested_sameyear_first_n_gs_py = hdate_lt_sdate | np.isnan(
+        last_hdate_first_n_gs_py
+    )
+    inactive_last_n_gs_py = inactive_py[:, 1:]
+    last_sowing_never_harvested_first_n_gs_py = (
+        last_sowing_not_harvested_sameyear_first_n_gs_py & inactive_last_n_gs_py
+    )
+    last_sowing_never_harvested_py = np.concatenate(
+        (last_sowing_never_harvested_first_n_gs_py, np.full((n_patch, 1), False)), axis=1
+    )
+    last_sowing_never_harvested_pym = np.concatenate(
+        (
+            np.full((n_patch, n_gs + 1, mxharvests - 1), False),
+            np.expand_dims(last_sowing_never_harvested_py, axis=2),
+        ),
+        axis=2,
+    )
+    where_last_sowing_never_harvested_pym = last_sowing_never_harvested_pym
+    hdates_pym3 = hdates_pym2.copy()
+    sdates_pym3 = sdates_pym2.copy()
+    # Mark those "fake" seasons with -inf (converted back to NaN after reshaping)
+    hdates_pym3[where_last_sowing_never_harvested_pym] = -np.inf
+    sdates_pym3[where_last_sowing_never_harvested_pym] = -np.inf
+
+    # Bug fix: this previously passed quiet=~verbose. Bitwise NOT of a Python bool gives -2
+    # (for True) or -1 (for False), both of which are truthy, so the printout in pym_to_pg()
+    # was ALWAYS suppressed. "not verbose" is the intended logical negation.
+    hdates_pg = pym_to_pg(hdates_pym3.copy(), quiet=not verbose)
+    sdates_pg = pym_to_pg(sdates_pym3.copy(), quiet=True)
+    if verbose:
+        # NOTE(review): this message describes the *previous* pipeline step — confirm wording
+        print(
+            "After 'In years with no sowing, pretend the first no-harvest is meaningful: "
+            + f"discrepancy of {np.sum(~np.isnan(hdates_pg)) - expected_valid} patch-seasons"
+        )
+
+    return hdates_pym3, sdates_pym3, hdates_pg, sdates_pg
+
+
+def ignore_harvests_planted_in_final_year(
+    this_ds, verbose, n_gs, expected_valid, mxharvests, hdates_pg, sdates_pg
+):
+    """
+    Ignore any harvests that were planted in the final year, because some cells will have
+    incomplete growing seasons for the final year.
+
+    Returns (hdates_pg2, sdates_pg2, is_valid, is_fake, discrepancy, unique_n_seasons), where
+    discrepancy is the difference between the number of valid patch-seasons found and the
+    number expected (0 means the axis conversion can proceed).
+    """
+    # A final-year season is "complete" if harvested on/after its sowing date, or if it's a
+    # fake (-inf) placeholder
+    with np.errstate(invalid="ignore"):
+        hdates_ge_sdates = hdates_pg[:, -mxharvests:] >= sdates_pg[:, -mxharvests:]
+    lastyear_complete_season = hdates_ge_sdates | np.isinf(hdates_pg[:, -mxharvests:])
+
+    hdates_pg2 = ignore_lastyear_complete_season(
+        hdates_pg.copy(), lastyear_complete_season, mxharvests
+    )
+    sdates_pg2 = ignore_lastyear_complete_season(
+        sdates_pg.copy(), lastyear_complete_season, mxharvests
+    )
+    is_valid = ~np.isnan(hdates_pg2)
+    # "Fake" seasons were marked with -inf earlier in the pipeline
+    is_fake = np.isneginf(hdates_pg2)
+    is_fake = np.reshape(is_fake[is_valid], (this_ds.dims["patch"], n_gs))
+    discrepancy = np.sum(is_valid) - expected_valid
+    unique_n_seasons = np.unique(np.sum(is_valid, axis=1))
+    if verbose:
+        print(
+            "After 'Ignore any harvests that were planted in the final year, because other cells "
+            + "will have incomplete growing seasons for the final year': discrepancy of "
+            + f"{discrepancy} patch-seasons"
+        )
+        if "pandas" in sys.modules:
+            bincount = np.bincount(np.sum(is_valid, axis=1))
+            bincount = bincount[bincount > 0]
+            dataframe = pd.DataFrame({"Ngs": unique_n_seasons, "Count": bincount})
+            print(dataframe)
+        else:
+            print(f"unique N seasons = {unique_n_seasons}")
+        print(" ")
+    return hdates_pg2, sdates_pg2, is_valid, is_fake, discrepancy, unique_n_seasons
+
+
+def create_dataset(
+    this_ds,
+    my_vars,
+    n_gs,
+    hdates_ymp,
+    hdates_pym,
+    sdates_ymp,
+    sdates_pym,
+    hdates_pym2,
+    sdates_pym2,
+    hdates_pym3,
+    sdates_pym3,
+    hdates_pg,
+    sdates_pg,
+    hdates_pg2,
+    sdates_pg2,
+    is_valid,
+    is_fake,
+    discrepancy,
+    unique_n_seasons,
+):
+    """
+    Create Dataset with time axis as "gs" (growing season) instead of what CLM puts out
+
+    Only proceeds when discrepancy == 0 (valid patch-seasons exactly match expectations);
+    otherwise prints debugging info for an example bad patch and raises RuntimeError. The many
+    intermediate-array arguments are only used for that debugging printout.
+    """
+    if discrepancy == 0:
+        this_ds_gs = set_up_ds_with_gs_axis(this_ds)
+        for var in this_ds.data_vars:
+            # Only convert (time, mxharvests, patch) variables; optionally restrict to my_vars
+            if this_ds[var].dims != ("time", "mxharvests", "patch") or (
+                my_vars and var not in my_vars
+            ):
+                continue
+
+            # Set invalid values to NaN
+            da_yhp = this_ds[var].copy()
+            da_yhp = da_yhp.where(~np.isneginf(da_yhp))
+
+            # Remove the nans and reshape to patches*growingseasons
+            da_pyh = da_yhp.transpose("patch", "time", "mxharvests")
+            ar_pg = np.reshape(da_pyh.values, (this_ds.dims["patch"], -1))
+            ar_valid_pg = np.reshape(ar_pg[is_valid], (this_ds.dims["patch"], n_gs))
+            # Change -infs to nans
+            ar_valid_pg[is_fake] = np.nan
+            # Save as DataArray to new Dataset, stripping _PERHARV from variable name
+            newname = var.replace("_PERHARV", "")
+            if newname in this_ds_gs:
+                raise RuntimeError(f"{newname} already in dataset!")
+            da_pg = xr.DataArray(
+                data=ar_valid_pg,
+                coords=[this_ds_gs.coords["patch"], this_ds_gs.coords["gs"]],
+                name=newname,
+                attrs=da_yhp.attrs,
+            )
+            this_ds_gs[newname] = da_pg
+            this_ds_gs[newname].attrs["units"] = this_ds[var].attrs["units"]
+    else:
+        # Print details about example bad patch(es)
+        if min(unique_n_seasons) < n_gs:
+            print(f"Too few seasons (min {min(unique_n_seasons)} < {n_gs})")
+            patch_index = np.where(np.sum(~np.isnan(hdates_pg2), axis=1) == min(unique_n_seasons))[
+                0
+            ][0]
+            print_onepatch_wrong_n_gs(
+                patch_index,
+                this_ds,
+                sdates_ymp,
+                hdates_ymp,
+                sdates_pym,
+                hdates_pym,
+                sdates_pym2,
+                hdates_pym2,
+                sdates_pym3,
+                hdates_pym3,
+                sdates_pg,
+                hdates_pg,
+                sdates_pg2,
+                hdates_pg2,
+            )
+        if max(unique_n_seasons) > n_gs:
+            print(f"Too many seasons (max {max(unique_n_seasons)} > {n_gs})")
+            patch_index = np.where(np.sum(~np.isnan(hdates_pg2), axis=1) == max(unique_n_seasons))[
+                0
+            ][0]
+            print_onepatch_wrong_n_gs(
+                patch_index,
+                this_ds,
+                sdates_ymp,
+                hdates_ymp,
+                sdates_pym,
+                hdates_pym,
+                sdates_pym2,
+                hdates_pym2,
+                sdates_pym3,
+                hdates_pym3,
+                sdates_pg,
+                hdates_pg,
+                sdates_pg2,
+                hdates_pg2,
+            )
+        raise RuntimeError(
+            "Can't convert time*mxharvests axes to growingseason axis: discrepancy of "
+            + f"{discrepancy} patch-seasons"
+        )
+
+    # Preserve units
+    for var_1 in this_ds_gs:
+        var_0 = var_1
+        if var_0 not in this_ds:
+            var_0 += "_PERHARV"
+        if var_0 not in this_ds:
+            continue
+        if "units" in this_ds[var_0].attrs:
+            this_ds_gs[var_1].attrs["units"] = this_ds[var_0].attrs["units"]
+    return this_ds_gs
+
+
+def convert_axis_time2gs(this_ds, verbose=False, my_vars=None, incl_orig=False):
+    """
+    Convert time*mxharvests axes to growingseason axis
+
+    Pipeline: mask invalid seasons, insert -inf placeholders for "fake" (never-happened)
+    seasons so the arrays stay rectangular, reshape to patch x growingseason, then build a
+    new Dataset on the "gs" axis. Returns this_ds_gs, or (this_ds_gs, this_ds) if incl_orig.
+    """
+
+    (
+        n_patch,
+        n_gs,
+        expected_valid,
+        mxharvests,
+        hdates_ymp,
+        hdates_pym,
+        sdates_ymp,
+        sdates_pym,
+    ) = convert_axis_time2gs_setup(this_ds, verbose)
+
+    # Find years where patch was inactive
+    inactive_py = np.transpose(
+        np.isnan(this_ds.HDATES).all(dim="mxharvests").values
+        & np.isnan(this_ds.SDATES_PERHARV).all(dim="mxharvests").values
+    )
+    # Find seasons that were planted while the patch was inactive
+    # (harvest date before sowing date implies the sowing happened in a prior, inactive year)
+    with np.errstate(invalid="ignore"):
+        sown_inactive_py = inactive_py[:, :-1] & (hdates_pym[:, 1:, 0] < sdates_pym[:, 1:, 0])
+    sown_inactive_py = np.concatenate((np.full((n_patch, 1), False), sown_inactive_py), axis=1)
+
+    # "Ignore harvests from seasons sown (a) before this output began or (b) when the crop was
+    # inactive"
+    with np.errstate(invalid="ignore"):
+        first_season_before_first_year_p = hdates_pym[:, 0, 0] < sdates_pym[:, 0, 0]
+    first_season_before_first_year_py = np.full(hdates_pym.shape[:-1], fill_value=False)
+    first_season_before_first_year_py[:, 0] = first_season_before_first_year_p
+    sown_prerun_or_inactive_py = first_season_before_first_year_py | sown_inactive_py
+    sown_prerun_or_inactive_pym = np.concatenate(
+        (
+            np.expand_dims(sown_prerun_or_inactive_py, axis=2),
+            np.full((n_patch, n_gs + 1, mxharvests - 1), False),
+        ),
+        axis=2,
+    )
+    where_sown_prerun_or_inactive_pym = np.where(sown_prerun_or_inactive_pym)
+    hdates_pym[where_sown_prerun_or_inactive_pym] = np.nan
+    sdates_pym[where_sown_prerun_or_inactive_pym] = np.nan
+    if verbose:
+        print(
+            "After 'Ignore harvests from before this output began: discrepancy of "
+            + f"{np.sum(~np.isnan(hdates_pym)) - expected_valid} patch-seasons'"
+        )
+
+    # We need to keep some non-seasons---it's possible that "the yearY growing season" never
+    # happened (sowing conditions weren't met), but we still need something there so that we can
+    # make an array of dimension Npatch*Ngs. We do this by changing those non-seasons from NaN to
+    # -Inf before doing the filtering and reshaping, after which we'll convert them back to NaNs.
+
+    # "In years with no sowing, pretend the first no-harvest is meaningful, unless that was
+    # intentionally ignored above."
+    sdates_orig_pym, hdates_pym2, sdates_pym2 = handle_years_with_no_sowing(
+        this_ds, mxharvests, hdates_pym, sdates_pym
+    )
+
+    # "In years with sowing that are followed by inactive years, check whether the last sowing was
+    # harvested before the patch was deactivated. If not, pretend the LAST [easier to implement!]
+    # no-harvest is meaningful."
+    hdates_pym3, sdates_pym3, hdates_pg, sdates_pg = handle_years_with_sowing_then_inactive(
+        verbose,
+        n_patch,
+        n_gs,
+        expected_valid,
+        mxharvests,
+        inactive_py,
+        sdates_orig_pym,
+        hdates_pym2,
+        sdates_pym2,
+    )
+
+    # "Ignore any harvests that were planted in the final year, because some cells will have
+    # incomplete growing seasons for the final year."
+    (
+        hdates_pg2,
+        sdates_pg2,
+        is_valid,
+        is_fake,
+        discrepancy,
+        unique_n_seasons,
+    ) = ignore_harvests_planted_in_final_year(
+        this_ds, verbose, n_gs, expected_valid, mxharvests, hdates_pg, sdates_pg
+    )
+
+    # Create Dataset with time axis as "gs" (growing season) instead of what CLM puts out
+    this_ds_gs = create_dataset(
+        this_ds,
+        my_vars,
+        n_gs,
+        hdates_ymp,
+        hdates_pym,
+        sdates_ymp,
+        sdates_pym,
+        hdates_pym2,
+        sdates_pym2,
+        hdates_pym3,
+        sdates_pym3,
+        hdates_pg,
+        sdates_pg,
+        hdates_pg2,
+        sdates_pg2,
+        is_valid,
+        is_fake,
+        discrepancy,
+        unique_n_seasons,
+    )
+
+    if incl_orig:
+        return this_ds_gs, this_ds
+    return this_ds_gs
diff --git a/python/ctsm/crop_calendars/cropcal_constants.py b/python/ctsm/crop_calendars/cropcal_constants.py
new file mode 100644
index 0000000000..f015ac7db1
--- /dev/null
+++ b/python/ctsm/crop_calendars/cropcal_constants.py
@@ -0,0 +1,26 @@
+"""
+Constants used in crop calendar scripts
+"""
+
+# Define conversion multipliers, {from: {to1, to2, ...}, ...}
+multiplier_dict = {
+ # Mass
+ "g": {
+ "Mt": 1e-12,
+ },
+ "t": {
+ "Mt": 1e-6,
+ },
+ # Volume
+ "m3": {
+ "km3": 1e-9,
+ },
+ # Yield
+ "g/m2": {
+ "t/ha": 1e-6 * 1e4,
+ },
+}
+
+# Minimum GDD harvest threshold allowed in PlantCrop()
+# (This was 50 before the cropcal runs of 2023-01-28.)
+DEFAULT_GDD_MIN = 1.0
diff --git a/python/ctsm/crop_calendars/cropcal_figs_module.py b/python/ctsm/crop_calendars/cropcal_figs_module.py
index 8d7f472fec..d820460175 100644
--- a/python/ctsm/crop_calendars/cropcal_figs_module.py
+++ b/python/ctsm/crop_calendars/cropcal_figs_module.py
@@ -1,5 +1,11 @@
+"""
+Functions for making crop calendar figures
+"""
+
import numpy as np
+# It's fine if these can't be imported. The script using these will handle it.
+# pylint: disable=import-error
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
@@ -23,6 +29,9 @@
# Cases (line and scatter plots)
def cropcal_colors_cases(casename):
+ """
+ Define colors for each case
+ """
case_color_dict = {
"clm default": [x / 255 for x in [92, 219, 219]],
"prescribed calendars": [x / 255 for x in [250, 102, 240]],
@@ -32,11 +41,8 @@ def cropcal_colors_cases(casename):
case_color_dict["5.0 lu"] = case_color_dict["clm default"]
case_color_dict["5.2 lu"] = case_color_dict["prescribed calendars"]
- case_color = None
casename_for_colors = casename.lower().replace(" (0)", "").replace(" (1)", "")
- if casename_for_colors in case_color_dict:
- case_color = case_color_dict[casename_for_colors]
- return case_color
+ return case_color_dict.get(casename_for_colors, None)
def make_map(
@@ -65,6 +71,9 @@ def make_map(
vmin=None,
vrange=None,
):
+ """
+ Make map
+ """
if underlay is not None:
if underlay_color is None:
underlay_color = cropcal_colors["underlay"]
@@ -147,23 +156,25 @@ def make_map(
# Need to do this for subplot row labels
set_ticks(-1, fontsize, "y")
plt.yticks([])
- for x in ax.spines:
- ax.spines[x].set_visible(False)
+ for spine in ax.spines:
+ ax.spines[spine].set_visible(False)
if show_cbar:
return im, cbar
- else:
- return im, None
+ return im, None
def deal_with_ticklabels(cbar, cbar_max, ticklabels, ticklocations, units, im):
+ """
+ Handle settings related to ticklabels
+ """
if ticklocations is not None:
cbar.set_ticks(ticklocations)
if units is not None and units.lower() == "month":
cbar.set_ticklabels(
["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
)
- units == "Month"
+ units = "Month"
elif ticklabels is not None:
cbar.set_ticklabels(ticklabels)
if isinstance(im, mplcol.QuadMesh):
@@ -173,7 +184,7 @@ def deal_with_ticklabels(cbar, cbar_max, ticklabels, ticklocations, units, im):
if cbar_max is not None and clim_max > cbar_max:
if ticklabels is not None:
raise RuntimeError(
- "How to handle this now that you are specifying ticklocations separate from ticklabels?"
+ "How to handle this now that ticklocations is specified separately from ticklabels?"
)
ticks = cbar.get_ticks()
if ticks[-2] > cbar_max:
@@ -182,24 +193,28 @@ def deal_with_ticklabels(cbar, cbar_max, ticklabels, ticklocations, units, im):
)
ticklabels = ticks.copy()
ticklabels[-1] = cbar_max
- for i, x in enumerate(ticklabels):
- if x == int(x):
- ticklabels[i] = str(int(x))
+ for i, ticklabel in enumerate(ticklabels):
+ if ticklabel == int(ticklabel):
+ ticklabels[i] = str(int(ticklabel))
cbar.set_ticks(
ticks
- ) # Calling this before set_xticklabels() avoids "UserWarning: FixedFormatter should only be used together with FixedLocator" (https://stackoverflow.com/questions/63723514/userwarning-fixedformatter-should-only-be-used-together-with-fixedlocator)
+ ) # Calling this before set_xticklabels() avoids "UserWarning: FixedFormatter should only
+ # be used together with FixedLocator" (https://stackoverflow.com/questions/63723514)
cbar.set_ticklabels(ticklabels)
def set_ticks(lonlat_bin_width, fontsize, x_or_y):
+ """
+ Plot tick marks
+ """
if x_or_y == "x":
ticks = np.arange(-180, 181, lonlat_bin_width)
else:
ticks = np.arange(-60, 91, lonlat_bin_width)
ticklabels = [str(x) for x in ticks]
- for i, x in enumerate(ticks):
- if x % 2:
+ for i, tick in enumerate(ticks):
+ if tick % 2:
ticklabels[i] = ""
if x_or_y == "x":
diff --git a/python/ctsm/crop_calendars/cropcal_module.py b/python/ctsm/crop_calendars/cropcal_module.py
index 76c295974d..3fe6942f94 100644
--- a/python/ctsm/crop_calendars/cropcal_module.py
+++ b/python/ctsm/crop_calendars/cropcal_module.py
@@ -1,46 +1,23 @@
-import numpy as np
-import xarray as xr
-import warnings
-import sys
+"""
+Helper functions for various crop calendar stuff
+"""
+
import os
import glob
+import numpy as np
+import xarray as xr
-# Import the CTSM Python utilities.
-# sys.path.insert() is necessary for RXCROPMATURITY to work. The fact that it's calling this script in the RUN phase seems to require the python/ directory to be manually added to path.
-_CTSM_PYTHON = os.path.join(
- os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir, os.pardir, "python"
-)
-sys.path.insert(1, _CTSM_PYTHON)
import ctsm.crop_calendars.cropcal_utils as utils
-
-try:
- import pandas as pd
-except:
- pass
-
-
-# Define conversion multipliers, {from: {to1, to2, ...}, ...}
-multiplier_dict = {
- # Mass
- "g": {
- "Mt": 1e-12,
- },
- "t": {
- "Mt": 1e-6,
- },
- # Volume
- "m3": {
- "km3": 1e-9,
- },
- # Yield
- "g/m2": {
- "t/ha": 1e-6 * 1e4,
- },
-}
+from ctsm.crop_calendars.convert_axis_time2gs import convert_axis_time2gs
+from ctsm.crop_calendars.check_rx_obeyed import check_rx_obeyed
+from ctsm.crop_calendars.cropcal_constants import DEFAULT_GDD_MIN
+from ctsm.crop_calendars.import_ds import import_ds
-# After importing a file, restrict it to years of interest.
-def check_and_trim_years(y1, yN, ds_in):
+def check_and_trim_years(year_1, year_n, ds_in):
+ """
+ After importing a file, restrict it to years of interest.
+ """
### In annual outputs, file with name Y is actually results from year Y-1.
### Note that time values refer to when it was SAVED. So 1981-01-01 is for year 1980.
@@ -49,65 +26,80 @@ def get_year_from_cftime(cftime_date):
return cftime_date.year - 1
# Check that all desired years are included
- if get_year_from_cftime(ds_in.time.values[0]) > y1:
+ if get_year_from_cftime(ds_in.time.values[0]) > year_1:
raise RuntimeError(
- f"Requested y1 is {y1} but first year in outputs is {get_year_from_cftime(ds_in.time.values[0])}"
+ f"Requested year_1 is {year_1} but first year in outputs is "
+ + f"{get_year_from_cftime(ds_in.time.values[0])}"
)
- elif get_year_from_cftime(ds_in.time.values[-1]) < y1:
+ if get_year_from_cftime(ds_in.time.values[-1]) < year_1:
raise RuntimeError(
- f"Requested yN is {yN} but last year in outputs is {get_year_from_cftime(ds_in.time.values[-1])}"
+ f"Requested year_n is {year_n} but last year in outputs is "
+ + f"{get_year_from_cftime(ds_in.time.values[-1])}"
)
# Remove years outside range of interest
### Include an extra year at the end to finish out final seasons.
- ds_in = utils.safer_timeslice(ds_in, slice(f"{y1+1}-01-01", f"{yN+2}-01-01"))
+ ds_in = utils.safer_timeslice(ds_in, slice(f"{year_1+1}-01-01", f"{year_n+2}-01-01"))
# Make sure you have the expected number of timesteps (including extra year)
- Nyears_expected = yN - y1 + 2
- if ds_in.dims["time"] != Nyears_expected:
+ n_years_expected = year_n - year_1 + 2
+ if ds_in.dims["time"] != n_years_expected:
raise RuntimeError(
- f"Expected {Nyears_expected} timesteps in output but got {ds_in.dims['time']}"
+ f"Expected {n_years_expected} timesteps in output but got {ds_in.dims['time']}"
)
return ds_in
-def open_lu_ds(filename, y1, yN, existing_ds, ungrid=True):
+def open_lu_ds(filename, year_1, year_n, existing_ds, ungrid=True):
+ """
+ Open land-use dataset
+ """
# Open and trim to years of interest
- dsg = xr.open_dataset(filename).sel(time=slice(y1, yN))
+ this_ds_gridded = xr.open_dataset(filename).sel(time=slice(year_1, year_n))
# Assign actual lon/lat coordinates
- dsg = dsg.assign_coords(
+ this_ds_gridded = this_ds_gridded.assign_coords(
lon=("lsmlon", existing_ds.lon.values), lat=("lsmlat", existing_ds.lat.values)
)
- dsg = dsg.swap_dims({"lsmlon": "lon", "lsmlat": "lat"})
-
- if "AREA" in dsg:
- dsg["AREA_CFT"] = dsg.AREA * 1e6 * dsg.LANDFRAC_PFT * dsg.PCT_CROP / 100 * dsg.PCT_CFT / 100
- dsg["AREA_CFT"].attrs = {"units": "m2"}
- dsg["AREA_CFT"].load()
+ this_ds_gridded = this_ds_gridded.swap_dims({"lsmlon": "lon", "lsmlat": "lat"})
+
+ if "AREA" in this_ds_gridded:
+ this_ds_gridded["AREA_CFT"] = (
+ this_ds_gridded.AREA
+ * 1e6
+ * this_ds_gridded.LANDFRAC_PFT
+ * this_ds_gridded.PCT_CROP
+ / 100
+ * this_ds_gridded.PCT_CFT
+ / 100
+ )
+ this_ds_gridded["AREA_CFT"].attrs = {"units": "m2"}
+ this_ds_gridded["AREA_CFT"].load()
else:
print("Warning: AREA missing from Dataset, so AREA_CFT will not be created")
if not ungrid:
- return dsg
+ return this_ds_gridded
# Un-grid
query_ilons = [int(x) - 1 for x in existing_ds["patches1d_ixy"].values]
query_ilats = [int(x) - 1 for x in existing_ds["patches1d_jxy"].values]
- query_ivts = [list(dsg.cft.values).index(x) for x in existing_ds["patches1d_itype_veg"].values]
+ query_ivts = [
+ list(this_ds_gridded.cft.values).index(x) for x in existing_ds["patches1d_itype_veg"].values
+ ]
- ds = xr.Dataset(attrs=dsg.attrs)
- for v in ["AREA", "LANDFRAC_PFT", "PCT_CFT", "PCT_CROP", "AREA_CFT"]:
- if v not in dsg:
+ this_ds = xr.Dataset(attrs=this_ds_gridded.attrs)
+ for var in ["AREA", "LANDFRAC_PFT", "PCT_CFT", "PCT_CROP", "AREA_CFT"]:
+ if var not in this_ds_gridded:
continue
- if "time" in dsg[v].dims:
+ if "time" in this_ds_gridded[var].dims:
new_coords = existing_ds["GRAINC_TO_FOOD_ANN"].coords
else:
new_coords = existing_ds["patches1d_lon"].coords
- if "cft" in dsg[v].dims:
- ds[v] = (
- dsg[v]
+ if "cft" in this_ds_gridded[var].dims:
+ this_ds[var] = (
+ this_ds_gridded[var]
.isel(
lon=xr.DataArray(query_ilons, dims="patch"),
lat=xr.DataArray(query_ilats, dims="patch"),
@@ -117,8 +109,8 @@ def open_lu_ds(filename, y1, yN, existing_ds, ungrid=True):
.assign_coords(new_coords)
)
else:
- ds[v] = (
- dsg[v]
+ this_ds[var] = (
+ this_ds_gridded[var]
.isel(
lon=xr.DataArray(query_ilons, dims="patch"),
lat=xr.DataArray(query_ilats, dims="patch"),
@@ -126,715 +118,79 @@ def open_lu_ds(filename, y1, yN, existing_ds, ungrid=True):
)
.assign_coords(new_coords)
)
- for v in existing_ds:
- if "patches1d_" in v or "grid1d_" in v:
- ds[v] = existing_ds[v]
- ds["lon"] = dsg["lon"]
- ds["lat"] = dsg["lat"]
+ for var in existing_ds:
+ if "patches1d_" in var or "grid1d_" in var:
+ this_ds[var] = existing_ds[var]
+ this_ds["lon"] = this_ds_gridded["lon"]
+ this_ds["lat"] = this_ds_gridded["lat"]
# Which crops are irrigated?
- is_irrigated = np.full_like(ds["patches1d_itype_veg"], False)
- for vegtype_str in np.unique(ds["patches1d_itype_veg_str"].values):
+ is_irrigated = np.full_like(this_ds["patches1d_itype_veg"], False)
+ for vegtype_str in np.unique(this_ds["patches1d_itype_veg_str"].values):
if "irrigated" not in vegtype_str:
continue
vegtype_int = utils.ivt_str2int(vegtype_str)
- is_this_vegtype = np.where(ds["patches1d_itype_veg"].values == vegtype_int)[0]
+ is_this_vegtype = np.where(this_ds["patches1d_itype_veg"].values == vegtype_int)[0]
is_irrigated[is_this_vegtype] = True
- ["irrigated" in x for x in ds["patches1d_itype_veg_str"].values]
- ds["IRRIGATED"] = xr.DataArray(
+ this_ds["IRRIGATED"] = xr.DataArray(
data=is_irrigated,
- coords=ds["patches1d_itype_veg_str"].coords,
+ coords=this_ds["patches1d_itype_veg_str"].coords,
attrs={"long_name": "Is patch irrigated?"},
)
# How much area is irrigated?
- ds["IRRIGATED_AREA_CFT"] = ds["IRRIGATED"] * ds["AREA_CFT"]
- ds["IRRIGATED_AREA_CFT"].attrs = {
+ this_ds["IRRIGATED_AREA_CFT"] = this_ds["IRRIGATED"] * this_ds["AREA_CFT"]
+ this_ds["IRRIGATED_AREA_CFT"].attrs = {
"long name": "CFT area (irrigated types only)",
"units": "m^2",
}
- ds["IRRIGATED_AREA_GRID"] = (
- ds["IRRIGATED_AREA_CFT"]
- .groupby(ds["patches1d_gi"])
+ this_ds["IRRIGATED_AREA_GRID"] = (
+ this_ds["IRRIGATED_AREA_CFT"]
+ .groupby(this_ds["patches1d_gi"])
.sum()
.rename({"patches1d_gi": "gridcell"})
)
- ds["IRRIGATED_AREA_GRID"].attrs = {"long name": "Irrigated area in gridcell", "units": "m^2"}
-
- return ds
-
-
-def check_constant_vars(
- this_ds, case, ignore_nan, constantGSs=None, verbose=True, throw_error=True
-):
- if isinstance(case, str):
- constantVars = [case]
- elif isinstance(case, list):
- constantVars = case
- elif isinstance(case, dict):
- constantVars = case["constantVars"]
- else:
- raise TypeError(f"case must be str or dict, not {type(case)}")
-
- if not constantVars:
- return None
-
- if constantGSs:
- gs0 = this_ds.gs.values[0]
- gsN = this_ds.gs.values[-1]
- if constantGSs.start > gs0 or constantGSs.stop < gsN:
- print(
- f"❗ Only checking constantVars over {constantGSs.start}-{constantGSs.stop} (run includes {gs0}-{gsN})"
- )
- this_ds = this_ds.sel(gs=constantGSs)
-
- any_bad = False
- any_bad_before_checking_rx = False
- if throw_error:
- emojus = "❌"
- else:
- emojus = "❗"
- if not isinstance(constantVars, list):
- constantVars = [constantVars]
-
- for v in constantVars:
- ok = True
-
- if "gs" in this_ds[v].dims:
- time_coord = "gs"
- elif "time" in this_ds[v].dims:
- time_coord = "time"
- else:
- raise RuntimeError(f"Which of these is the time coordinate? {this_ds[v].dims}")
- i_time_coord = this_ds[v].dims.index(time_coord)
-
- this_da = this_ds[v]
- ra_sp = np.moveaxis(this_da.copy().values, i_time_coord, 0)
- incl_patches = []
- bad_patches = np.array([])
- strList = []
-
- # Read prescription file, if needed
- rx_ds = None
- if isinstance(case, dict):
- if v == "GDDHARV" and "rx_gdds_file" in case:
- rx_ds = import_rx_dates(
- "gdd", case["rx_gdds_file"], this_ds, set_neg1_to_nan=False
- ).squeeze()
-
- for t1 in np.arange(this_ds.dims[time_coord] - 1):
- condn = ~np.isnan(ra_sp[t1, ...])
- if t1 > 0:
- condn = np.bitwise_and(condn, np.all(np.isnan(ra_sp[:t1, ...]), axis=0))
- thesePatches = np.where(condn)[0]
- if thesePatches.size == 0:
- continue
- thesePatches = list(np.where(condn)[0])
- incl_patches += thesePatches
- # print(f't1 {t1}: {thesePatches}')
-
- t1_yr = this_ds[time_coord].values[t1]
- t1_vals = np.squeeze(this_da.isel({time_coord: t1, "patch": thesePatches}).values)
-
- for t in np.arange(t1 + 1, this_ds.dims[time_coord]):
- t_yr = this_ds[time_coord].values[t]
- t_vals = np.squeeze(this_da.isel({time_coord: t, "patch": thesePatches}).values)
- ok_p = t1_vals == t_vals
-
- # If allowed, ignore where either t or t1 is NaN. Should only be used for runs where land use varies over time.
- if ignore_nan:
- ok_p = np.squeeze(np.bitwise_or(ok_p, np.isnan(t1_vals + t_vals)))
-
- if not np.all(ok_p):
- any_bad_before_checking_rx = True
- bad_patches_thisT = list(np.where(np.bitwise_not(ok_p))[0])
- bad_patches = np.concatenate(
- (bad_patches, np.array(thesePatches)[bad_patches_thisT])
- )
- if rx_ds:
- found_in_rx = np.array([False for x in bad_patches])
- varyPatches = list(np.array(thesePatches)[bad_patches_thisT])
- varyLons = this_ds.patches1d_lon.values[bad_patches_thisT]
- varyLats = this_ds.patches1d_lat.values[bad_patches_thisT]
- varyCrops = this_ds.patches1d_itype_veg_str.values[bad_patches_thisT]
- varyCrops_int = this_ds.patches1d_itype_veg.values[bad_patches_thisT]
-
- any_bad_anyCrop = False
- for c in np.unique(varyCrops_int):
- rx_var = f"gs1_{c}"
- varyLons_thisCrop = varyLons[np.where(varyCrops_int == c)]
- varyLats_thisCrop = varyLats[np.where(varyCrops_int == c)]
- theseRxVals = np.diag(
- rx_ds[rx_var].sel(lon=varyLons_thisCrop, lat=varyLats_thisCrop).values
- )
- if len(theseRxVals) != len(varyLats_thisCrop):
- raise RuntimeError(
- f"Expected {len(varyLats_thisCrop)} rx values; got {len(theseRxVals)}"
- )
- if not np.any(theseRxVals != -1):
- continue
- any_bad_anyCrop = True
- break
- if not any_bad_anyCrop:
- continue
-
- # This bit is pretty inefficient, but I'm not going to optimize it until I actually need to use it.
- for i, p in enumerate(bad_patches_thisT):
- thisPatch = varyPatches[i]
- thisLon = varyLons[i]
- thisLat = varyLats[i]
- thisCrop = varyCrops[i]
- thisCrop_int = varyCrops_int[i]
-
- # If prescribed input had missing value (-1), it's fine for it to vary.
- if rx_ds:
- rx_var = f"gs1_{thisCrop_int}"
- if thisLon in rx_ds.lon.values and thisLat in rx_ds.lat.values:
- rx = rx_ds[rx_var].sel(lon=thisLon, lat=thisLat).values
- Nunique = len(np.unique(rx))
- if Nunique == 1:
- found_in_rx[i] = True
- if rx == -1:
- continue
- elif Nunique > 1:
- raise RuntimeError(
- f"How does lon {thisLon} lat {thisLat} {thisCrop} have time-varying {v}?"
- )
- else:
- raise RuntimeError(
- "lon {thisLon} lat {thisLat} {thisCrop} not in rx dataset?"
- )
-
- # Print info (or save to print later)
- any_bad = True
- if verbose:
- thisStr = f" Patch {thisPatch} (lon {thisLon} lat {thisLat}) {thisCrop} ({thisCrop_int})"
- if rx_ds and not found_in_rx[i]:
- thisStr = thisStr.replace("(lon", "* (lon")
- if not np.isnan(t1_vals[p]):
- t1_val_print = int(t1_vals[p])
- else:
- t1_val_print = "NaN"
- if not np.isnan(t_vals[p]):
- t_val_print = int(t_vals[p])
- else:
- t_val_print = "NaN"
- if v == "SDATES":
- strList.append(
- f"{thisStr}: Sowing {t1_yr} jday {t1_val_print}, {t_yr} jday {t_val_print}"
- )
- else:
- strList.append(
- f"{thisStr}: {t1_yr} {v} {t1_val_print}, {t_yr} {v} {t_val_print}"
- )
- else:
- if ok:
- print(f"{emojus} CLM output {v} unexpectedly vary over time:")
- ok = False
- print(f"{v} timestep {t} does not match timestep {t1}")
- break
- if verbose and any_bad:
- print(f"{emojus} CLM output {v} unexpectedly vary over time:")
- strList.sort()
- if rx_ds and np.any(~found_in_rx):
- strList = [
- "*: Not found in prescribed input file (maybe minor lon/lat mismatch)"
- ] + strList
- elif not rx_ds:
- strList = ["(No rx file checked)"] + strList
- print("\n".join(strList))
-
- # Make sure every patch was checked once (or is all-NaN except possibly final season)
- incl_patches = np.sort(incl_patches)
- if not np.array_equal(incl_patches, np.unique(incl_patches)):
- raise RuntimeError("Patch(es) checked more than once!")
- incl_patches = list(incl_patches)
- incl_patches += list(
- np.where(
- np.all(
- np.isnan(
- ra_sp[
- :-1,
- ]
- ),
- axis=0,
- )
- )[0]
- )
- incl_patches = np.sort(incl_patches)
- if not np.array_equal(incl_patches, np.unique(incl_patches)):
- raise RuntimeError("Patch(es) checked but also all-NaN??")
- if not np.array_equal(incl_patches, np.arange(this_ds.dims["patch"])):
- for p in np.arange(this_ds.dims["patch"]):
- if p not in incl_patches:
- break
- raise RuntimeError(
- f"Not all patches checked! E.g., {p}: {this_da.isel(patch=p).values}"
- )
-
- if not any_bad:
- if any_bad_before_checking_rx:
- print(
- f"✅ CLM output {v} do not vary through {this_ds.dims[time_coord]} growing seasons of output (except for patch(es) with missing rx)."
- )
- else:
- print(
- f"✅ CLM output {v} do not vary through {this_ds.dims[time_coord]} growing seasons of output."
- )
-
- if any_bad and throw_error:
- raise RuntimeError("Stopping due to failed check_constant_vars().")
-
- bad_patches = np.unique(bad_patches)
- return [int(p) for p in bad_patches]
-
-
-def check_rx_obeyed(
- vegtype_list, rx_ds, dates_ds, which_ds, output_var, gdd_min=None, verbose=False
-):
- all_ok = 2
- diff_str_list = []
- gdd_tolerance = 1
-
- if "GDDHARV" in output_var and verbose:
- harvest_reason_da = dates_ds["HARVEST_REASON"]
- unique_harvest_reasons = np.unique(
- harvest_reason_da.values[np.where(~np.isnan(harvest_reason_da.values))]
- )
- pct_harv_at_mature = get_pct_harv_at_mature(harvest_reason_da)
- print(
- f"{which_ds} harvest reasons: {unique_harvest_reasons} ({pct_harv_at_mature}% harv at maturity)"
- )
-
- for vegtype_str in vegtype_list:
- thisVeg_patches = np.where(dates_ds.patches1d_itype_veg_str == vegtype_str)[0]
- if thisVeg_patches.size == 0:
- continue
- ds_thisVeg = dates_ds.isel(patch=thisVeg_patches)
- patch_inds_lon_thisVeg = ds_thisVeg.patches1d_ixy.values.astype(int) - 1
- patch_inds_lat_thisVeg = ds_thisVeg.patches1d_jxy.values.astype(int) - 1
- patch_lons_thisVeg = ds_thisVeg.patches1d_lon
- patch_lats_thisVeg = ds_thisVeg.patches1d_lat
-
- vegtype_int = utils.vegtype_str2int(vegtype_str)[0]
- rx_da = rx_ds[f"gs1_{vegtype_int}"]
- rx_array = rx_da.values[patch_inds_lat_thisVeg, patch_inds_lon_thisVeg]
- rx_array = np.expand_dims(rx_array, axis=1)
- sim_array = ds_thisVeg[output_var].values
- sim_array_dims = ds_thisVeg[output_var].dims
-
- # Ignore patches without prescribed value
- with np.errstate(invalid="ignore"):
- rx_array[np.where(rx_array < 0)] = np.nan
-
- # Account for...
- if "GDDHARV" in output_var:
- # ...GDD harvest threshold minimum set in PlantCrop()
- if gdd_min == None:
- gdd_min = default_gdd_min()
- print(
- f"gdd_min not provided when doing check_rx_obeyed() for {output_var}; using default {gdd_min}"
- )
- with np.errstate(invalid="ignore"):
- rx_array[(rx_array >= 0) & (rx_array < gdd_min)] = gdd_min
-
- # ...harvest reason
- # 0: Should never happen in any simulation
- # 1: Harvesting at maturity
- # 2: Harvesting at max season length (mxmat)
- # 3: Crop was incorrectly planted in last time step of Dec. 31
- # 4: Today was supposed to be the planting day, but the previous crop still hasn't been harvested.
- # 5: Harvest the day before the next sowing date this year.
- # 6: Same as #5.
- # 7: Harvest the day before the next sowing date (today is Dec. 31 and the sowing date is Jan. 1)
- harvest_reason_da = ds_thisVeg["HARVEST_REASON"]
- unique_harvest_reasons = np.unique(
- harvest_reason_da.values[np.where(~np.isnan(harvest_reason_da.values))]
- )
- pct_harv_at_mature = get_pct_harv_at_mature(harvest_reason_da)
-
- if np.any(sim_array != rx_array):
- diff_array = sim_array - rx_array
-
- # Allow negative GDDHARV values when harvest occurred because sowing was scheduled for the next day
- if output_var == "GDDHARV_PERHARV":
- diff_array = np.ma.masked_array(
- diff_array,
- mask=(diff_array < 0) & (ds_thisVeg["HARVEST_REASON_PERHARV"].values == 5),
- )
- elif output_var == "GDDHARV":
- with np.errstate(invalid="ignore"):
- diff_lt_0 = diff_array < 0
- harv_reason_5 = ds_thisVeg["HARVEST_REASON"].values == 5
- diff_array = np.ma.masked_array(diff_array, mask=diff_lt_0 & harv_reason_5)
-
- with np.errstate(invalid="ignore"):
- abs_gt_0 = abs(diff_array) > 0
- if np.any(np.abs(diff_array[abs_gt_0]) > 0):
- min_diff, minLon, minLat, minGS, minRx = get_extreme_info(
- diff_array,
- rx_array,
- np.nanmin,
- sim_array_dims,
- dates_ds.gs,
- patch_lons_thisVeg,
- patch_lats_thisVeg,
- )
- max_diff, maxLon, maxLat, maxGS, maxRx = get_extreme_info(
- diff_array,
- rx_array,
- np.nanmax,
- sim_array_dims,
- dates_ds.gs,
- patch_lons_thisVeg,
- patch_lats_thisVeg,
- )
-
- diffs_eg_txt = f"{vegtype_str} ({vegtype_int}): diffs range {min_diff} (lon {minLon}, lat {minLat}, gs {minGS}, rx ~{minRx}) to {max_diff} (lon {maxLon}, lat {maxLat}, gs {maxGS}, rx ~{maxRx})"
- if "GDDHARV" in output_var:
- diffs_eg_txt += f"; harvest reasons: {unique_harvest_reasons} ({pct_harv_at_mature}% harvested at maturity)"
- if "GDDHARV" in output_var and np.nanmax(abs(diff_array)) <= gdd_tolerance:
- if all_ok > 0:
- all_ok = 1
- diff_str_list.append(f" {diffs_eg_txt}")
- else:
- all_ok = 0
- if verbose:
- print(
- f"❌ {which_ds}: Prescribed {output_var} *not* always obeyed. E.g., {diffs_eg_txt}"
- )
- else:
- break
+ this_ds["IRRIGATED_AREA_GRID"].attrs = {
+ "long name": "Irrigated area in gridcell",
+ "units": "m^2",
+ }
- if all_ok == 2:
- print(f"✅ {which_ds}: Prescribed {output_var} always obeyed")
- elif all_ok == 1:
- # print(f"🟨 {which_ds}: Prescribed {output_var} *not* always obeyed, but acceptable:")
- # for x in diff_str_list: print(x)
- print(
- f"🟨 {which_ds}: Prescribed {output_var} *not* always obeyed, but acceptable (diffs <= {gdd_tolerance})"
- )
- elif not verbose:
- print(f"❌ {which_ds}: Prescribed {output_var} *not* always obeyed. E.g., {diffs_eg_txt}")
+ return this_ds
-# Make sure that, e.g., GDDACCUM_PERHARV is always <= HUI_PERHARV
-def check_v0_le_v1(this_ds, vars, msg_txt=" ", both_nan_ok=False, throw_error=False):
- v0 = vars[0]
- v1 = vars[1]
- gdd_lt_hui = this_ds[v0] <= this_ds[v1]
+def check_v0_le_v1(this_ds, var_list, msg_txt=" ", both_nan_ok=False, throw_error=False):
+ """
+ Make sure that, e.g., GDDACCUM_PERHARV is always <= HUI_PERHARV
+ """
+ var0 = var_list[0]
+ var1 = var_list[1]
+ gdd_lt_hui = this_ds[var0] <= this_ds[var1]
if both_nan_ok:
- gdd_lt_hui = gdd_lt_hui | (np.isnan(this_ds[v0]) & np.isnan(this_ds[v1]))
+ gdd_lt_hui = gdd_lt_hui | (np.isnan(this_ds[var0]) & np.isnan(this_ds[var1]))
if np.all(gdd_lt_hui):
- print(f"✅{msg_txt}{v0} always <= {v1}")
+ print(f"✅{msg_txt}{var0} always <= {var1}")
else:
- msg = f"❌{msg_txt}{v0} *not* always <= {v1}"
+ msg = f"❌{msg_txt}{var0} *not* always <= {var1}"
gdd_lt_hui_vals = gdd_lt_hui.values
- p = np.where(~gdd_lt_hui_vals)[0][0]
+ patch_index = np.where(~gdd_lt_hui_vals)[0][0]
msg = (
msg
- + f"\ne.g., patch {p}: {this_ds.patches1d_itype_veg_str.values[p]}, lon {this_ds.patches1d_lon.values[p]} lat {this_ds.patches1d_lat.values[p]}:"
+ + f"\ne.g., patch {patch_index}: {this_ds.patches1d_itype_veg_str.values[patch_index]},"
+ + f" lon {this_ds.patches1d_lon.values[patch_index]} lat "
+ + f"{this_ds.patches1d_lat.values[patch_index]}:"
)
- msg = msg + f"\n{this_ds[v0].values[p,:]}"
- msg = msg + f"\n{this_ds[v1].values[p,:]}"
+ msg = msg + f"\n{this_ds[var0].values[patch_index,:]}"
+ msg = msg + f"\n{this_ds[var1].values[patch_index,:]}"
if throw_error:
print(msg)
else:
raise RuntimeError(msg)
-# Convert time*mxharvests axes to growingseason axis
-def convert_axis_time2gs(this_ds, verbose=False, myVars=None, incl_orig=False):
- # How many non-NaN patch-seasons do we expect to have once we're done organizing things?
- Npatch = this_ds.dims["patch"]
- # Because some patches will be planted in the last year but not complete, we have to ignore any finalyear-planted seasons that do complete.
- Ngs = this_ds.dims["time"] - 1
- expected_valid = Npatch * Ngs
-
- mxharvests = this_ds.dims["mxharvests"]
-
- if verbose:
- print(
- f"Start: discrepancy of {np.sum(~np.isnan(this_ds.HDATES.values)) - expected_valid} patch-seasons"
- )
-
- # Set all non-positive date values to NaN. These are seasons that were never harvested (or never started): "non-seasons."
- if this_ds.HDATES.dims != ("time", "mxharvests", "patch"):
- raise RuntimeError(
- f"This code relies on HDATES dims ('time', 'mxharvests', 'patch'), not {this_ds.HDATES.dims}"
- )
- hdates_ymp = this_ds.HDATES.copy().where(this_ds.HDATES > 0).values
- hdates_pym = np.transpose(hdates_ymp.copy(), (2, 0, 1))
- sdates_ymp = this_ds.SDATES_PERHARV.copy().where(this_ds.SDATES_PERHARV > 0).values
- sdates_pym = np.transpose(sdates_ymp.copy(), (2, 0, 1))
- with np.errstate(invalid="ignore"):
- hdates_pym[hdates_pym <= 0] = np.nan
-
- # Find years where patch was inactive
- inactive_py = np.transpose(
- np.isnan(this_ds.HDATES).all(dim="mxharvests").values
- & np.isnan(this_ds.SDATES_PERHARV).all(dim="mxharvests").values
- )
- # Find seasons that were planted while the patch was inactive
- with np.errstate(invalid="ignore"):
- sown_inactive_py = inactive_py[:, :-1] & (hdates_pym[:, 1:, 0] < sdates_pym[:, 1:, 0])
- sown_inactive_py = np.concatenate((np.full((Npatch, 1), False), sown_inactive_py), axis=1)
-
- # "Ignore harvests from seasons sown (a) before this output began or (b) when the crop was inactive"
- with np.errstate(invalid="ignore"):
- first_season_before_first_year_p = hdates_pym[:, 0, 0] < sdates_pym[:, 0, 0]
- first_season_before_first_year_py = np.full(hdates_pym.shape[:-1], fill_value=False)
- first_season_before_first_year_py[:, 0] = first_season_before_first_year_p
- sown_prerun_or_inactive_py = first_season_before_first_year_py | sown_inactive_py
- sown_prerun_or_inactive_pym = np.concatenate(
- (
- np.expand_dims(sown_prerun_or_inactive_py, axis=2),
- np.full((Npatch, Ngs + 1, mxharvests - 1), False),
- ),
- axis=2,
- )
- where_sown_prerun_or_inactive_pym = np.where(sown_prerun_or_inactive_pym)
- hdates_pym[where_sown_prerun_or_inactive_pym] = np.nan
- sdates_pym[where_sown_prerun_or_inactive_pym] = np.nan
- if verbose:
- print(
- f'After "Ignore harvests from before this output began: discrepancy of {np.sum(~np.isnan(hdates_pym)) - expected_valid} patch-seasons'
- )
-
- # We need to keep some non-seasons---it's possible that "the yearY growing season" never happened (sowing conditions weren't met), but we still need something there so that we can make an array of dimension Npatch*Ngs. We do this by changing those non-seasons from NaN to -Inf before doing the filtering and reshaping, after which we'll convert them back to NaNs.
-
- # "In years with no sowing, pretend the first no-harvest is meaningful, unless that was intentionally ignored above."
- sdates_orig_ymp = this_ds.SDATES.copy().values
- sdates_orig_pym = np.transpose(sdates_orig_ymp.copy(), (2, 0, 1))
- hdates_pym2 = hdates_pym.copy()
- sdates_pym2 = sdates_pym.copy()
- with np.errstate(invalid="ignore"):
- sdates_gt_0 = sdates_orig_pym > 0
- nosow_py = np.all(~sdates_gt_0, axis=2)
- nosow_py_1st = nosow_py & np.isnan(hdates_pym[:, :, 0])
- where_nosow_py_1st = np.where(nosow_py_1st)
- hdates_pym2[where_nosow_py_1st[0], where_nosow_py_1st[1], 0] = -np.inf
- sdates_pym2[where_nosow_py_1st[0], where_nosow_py_1st[1], 0] = -np.inf
- for h in np.arange(mxharvests - 1):
- if h == 0:
- continue
- elif h == 1:
- print("Warning: Untested with mxharvests > 2")
- where_nosow_py = np.where(
- nosow_py
- & ~np.any(np.isnan(hdates_pym[:, :, 0:h]), axis=2)
- & np.isnan(hdates_pym[:, :, h])
- )
- hdates_pym2[where_nosow_py[0], where_nosow_py[1], h + 1] = -np.inf
- sdates_pym2[where_nosow_py[0], where_nosow_py[1], h + 1] = -np.inf
-
- # "In years with sowing that are followed by inactive years, check whether the last sowing was harvested before the patch was deactivated. If not, pretend the LAST [easier to implement!] no-harvest is meaningful."
- sdates_orig_masked_pym = sdates_orig_pym.copy()
- with np.errstate(invalid="ignore"):
- sdates_le_0 = sdates_orig_masked_pym <= 0
- sdates_orig_masked_pym[np.where(sdates_le_0)] = np.nan
- with warnings.catch_warnings():
- warnings.filterwarnings(action="ignore", message="All-NaN slice encountered")
- last_sdate_firstNgs_py = np.nanmax(sdates_orig_masked_pym[:, :-1, :], axis=2)
- last_hdate_firstNgs_py = np.nanmax(hdates_pym2[:, :-1, :], axis=2)
- with np.errstate(invalid="ignore"):
- hdate_lt_sdate = last_hdate_firstNgs_py < last_sdate_firstNgs_py
- last_sowing_not_harvested_sameyear_firstNgs_py = hdate_lt_sdate | np.isnan(
- last_hdate_firstNgs_py
- )
- inactive_lastNgs_py = inactive_py[:, 1:]
- last_sowing_never_harvested_firstNgs_py = (
- last_sowing_not_harvested_sameyear_firstNgs_py & inactive_lastNgs_py
- )
- last_sowing_never_harvested_py = np.concatenate(
- (last_sowing_never_harvested_firstNgs_py, np.full((Npatch, 1), False)), axis=1
- )
- last_sowing_never_harvested_pym = np.concatenate(
- (
- np.full((Npatch, Ngs + 1, mxharvests - 1), False),
- np.expand_dims(last_sowing_never_harvested_py, axis=2),
- ),
- axis=2,
- )
- where_last_sowing_never_harvested_pym = last_sowing_never_harvested_pym
- hdates_pym3 = hdates_pym2.copy()
- sdates_pym3 = sdates_pym2.copy()
- hdates_pym3[where_last_sowing_never_harvested_pym] = -np.inf
- sdates_pym3[where_last_sowing_never_harvested_pym] = -np.inf
-
- # Convert to growingseason axis
- def pym_to_pg(pym, quiet=False):
- pg = np.reshape(pym, (pym.shape[0], -1))
- ok_pg = pg[~np.isnan(pg)]
- if not quiet:
- print(
- f"{ok_pg.size} included; unique N seasons = {np.unique(np.sum(~np.isnan(pg), axis=1))}"
- )
- return pg
-
- hdates_pg = pym_to_pg(hdates_pym3.copy(), quiet=~verbose)
- sdates_pg = pym_to_pg(sdates_pym3.copy(), quiet=True)
- if verbose:
- print(
- f'After "In years with no sowing, pretend the first no-harvest is meaningful: discrepancy of {np.sum(~np.isnan(hdates_pg)) - expected_valid} patch-seasons'
- )
-
- # "Ignore any harvests that were planted in the final year, because some cells will have incomplete growing seasons for the final year."
- with np.errstate(invalid="ignore"):
- hdates_ge_sdates = hdates_pg[:, -mxharvests:] >= sdates_pg[:, -mxharvests:]
- lastyear_complete_season = hdates_ge_sdates | np.isinf(hdates_pg[:, -mxharvests:])
-
- def ignore_lastyear_complete_season(pg, excl, mxharvests):
- tmp_L = pg[:, :-mxharvests]
- tmp_R = pg[:, -mxharvests:]
- tmp_R[np.where(excl)] = np.nan
- pg = np.concatenate((tmp_L, tmp_R), axis=1)
- return pg
-
- hdates_pg2 = ignore_lastyear_complete_season(
- hdates_pg.copy(), lastyear_complete_season, mxharvests
- )
- sdates_pg2 = ignore_lastyear_complete_season(
- sdates_pg.copy(), lastyear_complete_season, mxharvests
- )
- is_valid = ~np.isnan(hdates_pg2)
- is_fake = np.isneginf(hdates_pg2)
- is_fake = np.reshape(is_fake[is_valid], (this_ds.dims["patch"], Ngs))
- discrepancy = np.sum(is_valid) - expected_valid
- unique_Nseasons = np.unique(np.sum(is_valid, axis=1))
- if verbose:
- print(
- f'After "Ignore any harvests that were planted in the final year, because other cells will have incomplete growing seasons for the final year": discrepancy of {discrepancy} patch-seasons'
- )
- if "pandas" in sys.modules:
- bc = np.bincount(np.sum(is_valid, axis=1))
- bc = bc[bc > 0]
- df = pd.DataFrame({"Ngs": unique_Nseasons, "Count": bc})
- print(df)
- else:
- print(f"unique N seasons = {unique_Nseasons}")
- print(" ")
-
- # Create Dataset with time axis as "gs" (growing season) instead of what CLM puts out
- if discrepancy == 0:
- this_ds_gs = set_up_ds_with_gs_axis(this_ds)
- for v in this_ds.data_vars:
- if this_ds[v].dims != ("time", "mxharvests", "patch") or (myVars and v not in myVars):
- continue
-
- # Set invalid values to NaN
- da_yhp = this_ds[v].copy()
- da_yhp = da_yhp.where(~np.isneginf(da_yhp))
-
- # Remove the nans and reshape to patches*growingseasons
- da_pyh = da_yhp.transpose("patch", "time", "mxharvests")
- ar_pg = np.reshape(da_pyh.values, (this_ds.dims["patch"], -1))
- ar_valid_pg = np.reshape(ar_pg[is_valid], (this_ds.dims["patch"], Ngs))
- # Change -infs to nans
- ar_valid_pg[is_fake] = np.nan
- # Save as DataArray to new Dataset, stripping _PERHARV from variable name
- newname = v.replace("_PERHARV", "")
- if newname in this_ds_gs:
- raise RuntimeError(f"{newname} already in dataset!")
- da_pg = xr.DataArray(
- data=ar_valid_pg,
- coords=[this_ds_gs.coords["patch"], this_ds_gs.coords["gs"]],
- name=newname,
- attrs=da_yhp.attrs,
- )
- this_ds_gs[newname] = da_pg
- this_ds_gs[newname].attrs["units"] = this_ds[v].attrs["units"]
- else:
- # Print details about example bad patch(es)
- if min(unique_Nseasons) < Ngs:
- print(f"Too few seasons (min {min(unique_Nseasons)} < {Ngs})")
- p = np.where(np.sum(~np.isnan(hdates_pg2), axis=1) == min(unique_Nseasons))[0][0]
- print_onepatch_wrongNgs(
- p,
- this_ds,
- sdates_ymp,
- hdates_ymp,
- sdates_pym,
- hdates_pym,
- sdates_pym2,
- hdates_pym2,
- sdates_pym3,
- hdates_pym3,
- sdates_pg,
- hdates_pg,
- sdates_pg2,
- hdates_pg2,
- )
- if max(unique_Nseasons) > Ngs:
- print(f"Too many seasons (max {max(unique_Nseasons)} > {Ngs})")
- p = np.where(np.sum(~np.isnan(hdates_pg2), axis=1) == max(unique_Nseasons))[0][0]
- print_onepatch_wrongNgs(
- p,
- this_ds,
- sdates_ymp,
- hdates_ymp,
- sdates_pym,
- hdates_pym,
- sdates_pym2,
- hdates_pym2,
- sdates_pym3,
- hdates_pym3,
- sdates_pg,
- hdates_pg,
- sdates_pg2,
- hdates_pg2,
- )
- raise RuntimeError(
- f"Can't convert time*mxharvests axes to growingseason axis: discrepancy of {discrepancy} patch-seasons"
- )
-
- # Preserve units
- for v1 in this_ds_gs:
- v0 = v1
- if v0 not in this_ds:
- v0 += "_PERHARV"
- if v0 not in this_ds:
- continue
- if "units" in this_ds[v0].attrs:
- this_ds_gs[v1].attrs["units"] = this_ds[v0].attrs["units"]
-
- if incl_orig:
- return this_ds_gs, this_ds
- else:
- return this_ds_gs
-
-
-# Minimum harvest threshold allowed in PlantCrop()
-# Was 50 before cropcal runs 2023-01-28
-def default_gdd_min():
- return 1.0
-
-
-# Get information about extreme gridcells (for debugging)
-def get_extreme_info(diff_array, rx_array, mxn, dims, gs, patches1d_lon, patches1d_lat):
- if mxn == np.min:
- diff_array = np.ma.masked_array(diff_array, mask=(np.abs(diff_array) == 0))
- themxn = mxn(diff_array)
-
- # Find the first patch-gs that has the mxn value
- matching_indices = np.where(diff_array == themxn)
- first_indices = [x[0] for x in matching_indices]
-
- # Get the lon, lat, and growing season of that patch-gs
- p = first_indices[dims.index("patch")]
- thisLon = patches1d_lon.values[p]
- thisLat = patches1d_lat.values[p]
- s = first_indices[dims.index("gs")]
- thisGS = gs.values[s]
-
- # Get the prescribed value for this patch-gs
- thisRx = rx_array[p][0]
-
- return round(themxn, 3), round(thisLon, 3), round(thisLat, 3), thisGS, round(thisRx)
-
-
-# Get growing season lengths from a DataArray of hdate-sdate
def get_gs_len_da(this_da):
+ """
+ Get growing season lengths from a DataArray of hdate-sdate
+ """
tmp = this_da.values
with np.errstate(invalid="ignore"):
tmp_lt_0 = tmp < 0
@@ -844,21 +200,10 @@ def get_gs_len_da(this_da):
return this_da
-def get_pct_harv_at_mature(harvest_reason_da):
- Nharv_at_mature = len(np.where(harvest_reason_da.values == 1)[0])
- with np.errstate(invalid="ignore"):
- harv_reason_gt_0 = harvest_reason_da.values > 0
- Nharv = len(np.where(harv_reason_gt_0)[0])
- if Nharv == 0:
- return np.nan
- pct_harv_at_mature = Nharv_at_mature / Nharv * 100
- pct_harv_at_mature = np.format_float_positional(
- pct_harv_at_mature, precision=2, unique=False, fractional=False, trim="k"
- ) # Round to 2 significant digits
- return pct_harv_at_mature
-
-
def import_max_gs_length(paramfile_dir, my_clm_ver, my_clm_subver):
+ """
+ Import maximum growing season length
+ """
# Get parameter file
pattern = os.path.join(paramfile_dir, f"*{my_clm_ver}_params.{my_clm_subver}.nc")
paramfile = glob.glob(pattern)
@@ -886,8 +231,12 @@ def import_max_gs_length(paramfile_dir, my_clm_ver, my_clm_subver):
return mxmat_dict
-# E.g. import_rx_dates("sdate", sdates_rx_file, dates_ds0_orig)
-def import_rx_dates(var_prefix, date_inFile, dates_ds, set_neg1_to_nan=True):
+def import_rx_dates(var_prefix, date_infile, dates_ds, set_neg1_to_nan=True):
+ """
+ Import prescribed sowing/harvest dates
+
+ E.g. import_rx_dates("sdate", sdates_rx_file, dates_ds0_orig)
+ """
# Get run info:
# Max number of growing seasons per year
if "mxsowings" in dates_ds:
@@ -896,53 +245,112 @@ def import_rx_dates(var_prefix, date_inFile, dates_ds, set_neg1_to_nan=True):
mxsowings = 1
# Which vegetation types were simulated?
- itype_veg_toImport = np.unique(dates_ds.patches1d_itype_veg)
+ itype_veg_to_import = np.unique(dates_ds.patches1d_itype_veg)
- date_varList = []
- for i in itype_veg_toImport:
- for g in np.arange(mxsowings):
- thisVar = f"{var_prefix}{g+1}_{i}"
- date_varList = date_varList + [thisVar]
+ date_varlist = []
+ for i in itype_veg_to_import:
+ for j in np.arange(mxsowings):
+ this_var = f"{var_prefix}{j+1}_{i}"
+ date_varlist = date_varlist + [this_var]
- ds = utils.import_ds(date_inFile, myVars=date_varList)
+ this_ds = import_ds(date_infile, my_vars=date_varlist)
did_warn = False
- for v in ds:
- v_new = v.replace(var_prefix, "gs")
- ds = ds.rename({v: v_new})
+ for var in this_ds:
+ v_new = var.replace(var_prefix, "gs")
+ this_ds = this_ds.rename({var: v_new})
# Set -1 prescribed GDD values to NaN. Only warn the first time.
- if set_neg1_to_nan and var_prefix == "gdd" and v_new != v and np.any(ds[v_new].values < 0):
- if np.any((ds[v_new].values < 0) & (ds[v_new].values != -1)):
- raise RuntimeError(f"Unexpected negative value in {v}")
+ if (
+ set_neg1_to_nan
+ and var_prefix == "gdd"
+ and v_new != var
+ and np.any(this_ds[v_new].values < 0)
+ ):
+ if np.any((this_ds[v_new].values < 0) & (this_ds[v_new].values != -1)):
+ raise RuntimeError(f"Unexpected negative value in {var}")
if not did_warn:
- print(f"Setting -1 rx GDD values to NaN")
+ print("Setting -1 rx GDD values to NaN")
did_warn = True
- ds[v_new] = ds[v_new].where(ds[v_new] != -1)
+ this_ds[v_new] = this_ds[v_new].where(this_ds[v_new] != -1)
+
+ return this_ds
- return ds
+
+def check_no_negative(this_ds_in, varlist_no_negative, which_file, verbose):
+ """
+ In import_output(), check that there are no unexpected negative values.
+ """
+ tiny_neg_ok = 1e-12
+ this_ds = this_ds_in.copy()
+ for var in this_ds:
+ if not any(x in var for x in varlist_no_negative):
+ continue
+ the_min = np.nanmin(this_ds[var].values)
+ if the_min < 0:
+ if np.abs(the_min) <= tiny_neg_ok:
+ if verbose:
+ print(
+ f"Tiny negative value(s) in {var} (abs <= {tiny_neg_ok}) being set to 0"
+ + f" ({which_file})"
+ )
+ else:
+ print(
+ f"WARNING: Unexpected negative value(s) in {var}; minimum {the_min} "
+ + f"({which_file})"
+ )
+ values = this_ds[var].copy().values
+ with np.errstate(invalid="ignore"):
+ do_setto_0 = (values < 0) & (values >= -tiny_neg_ok)
+ values[np.where(do_setto_0)] = 0
+ this_ds[var] = xr.DataArray(
+ values,
+ coords=this_ds[var].coords,
+ dims=this_ds[var].dims,
+ attrs=this_ds[var].attrs,
+ )
+
+ elif verbose:
+ print(f"No negative value(s) in {var}; min {the_min} ({which_file})")
+ return this_ds
+
+
+def check_no_zeros(this_ds, varlist_no_zero, which_file, verbose):
+ """
+ In import_output(), check that there are no unexpected zeros.
+ """
+ for var in this_ds:
+ if not any(x in var for x in varlist_no_zero):
+ continue
+ if np.any(this_ds[var].values == 0):
+ print(f"WARNING: Unexpected zero(s) in {var} ({which_file})")
+ elif verbose:
+ print(f"No zero value(s) in {var} ({which_file})")
def import_output(
filename,
- myVars,
- y1=None,
- yN=None,
- myVegtypes=utils.define_mgdcrop_list(),
+ my_vars,
+ year_1=None,
+ year_n=None,
+ my_vegtypes=utils.define_mgdcrop_list(),
sdates_rx_ds=None,
gdds_rx_ds=None,
verbose=False,
):
+ """
+ Import CLM output
+ """
# Import
- this_ds = utils.import_ds(filename, myVars=myVars, myVegtypes=myVegtypes)
+ this_ds = import_ds(filename, my_vars=my_vars, my_vegtypes=my_vegtypes)
# Trim to years of interest (do not include extra year needed for finishing last growing season)
- if y1 and yN:
- this_ds = check_and_trim_years(y1, yN, this_ds)
+ if year_1 and year_n:
+ this_ds = check_and_trim_years(year_1, year_n, this_ds)
else: # Assume including all growing seasons except last complete one are "of interest"
- y1 = this_ds.time.values[0].year
- yN = this_ds.time.values[-1].year - 2
- this_ds = check_and_trim_years(y1, yN, this_ds)
+ year_1 = this_ds.time.values[0].year
+ year_n = this_ds.time.values[-1].year - 2
+ this_ds = check_and_trim_years(year_1, year_n, this_ds)
# What vegetation types are included?
vegtype_list = [
@@ -954,82 +362,24 @@ def import_output(
all_nan = np.full(this_ds[date_vars[0]].shape, True)
all_nonpos = np.full(this_ds[date_vars[0]].shape, True)
all_pos = np.full(this_ds[date_vars[0]].shape, True)
- for v in date_vars:
- all_nan = all_nan & np.isnan(this_ds[v].values)
+ for var in date_vars:
+ all_nan = all_nan & np.isnan(this_ds[var].values)
with np.errstate(invalid="ignore"):
- all_nonpos = all_nonpos & (this_ds[v].values <= 0)
- all_pos = all_pos & (this_ds[v].values > 0)
+ all_nonpos = all_nonpos & (this_ds[var].values <= 0)
+ all_pos = all_pos & (this_ds[var].values > 0)
if np.any(np.bitwise_not(all_nan | all_nonpos | all_pos)):
raise RuntimeError("Inconsistent missing/present values on mxharvests axis")
- # When doing transient runs, it's somehow possible for crops in newly-active patches to be *already alive*. They even have a sowing date (idop)! This will of course not show up in SDATES, but it does show up in SDATES_PERHARV.
- # I could put the SDATES_PERHARV dates into where they "should" be, but instead I'm just going to invalidate those "seasons."
- #
- # In all but the last calendar year, which patches had no sowing?
- no_sowing_yp = np.all(np.isnan(this_ds.SDATES.values[:-1, :, :]), axis=1)
- # In all but the first calendar year, which harvests' jdays are < their sowings' jdays? (Indicates sowing the previous calendar year.)
- with np.errstate(invalid="ignore"):
- hsdate1_gt_hdate1_yp = (
- this_ds.SDATES_PERHARV.values[1:, 0, :] > this_ds.HDATES.values[1:, 0, :]
- )
- # Where both, we have the problem.
- falsely_alive_yp = no_sowing_yp & hsdate1_gt_hdate1_yp
- if np.any(falsely_alive_yp):
- print(
- f"Warning: {np.sum(falsely_alive_yp)} patch-seasons being ignored: Seemingly sown the year before harvest, but no sowings occurred that year."
- )
- falsely_alive_yp = np.concatenate(
- (np.full((1, this_ds.dims["patch"]), False), falsely_alive_yp), axis=0
- )
- falsely_alive_y1p = np.expand_dims(falsely_alive_yp, axis=1)
- dummy_false_y1p = np.expand_dims(np.full_like(falsely_alive_yp, False), axis=1)
- falsely_alive_yhp = np.concatenate((falsely_alive_y1p, dummy_false_y1p), axis=1)
- for v in this_ds.data_vars:
- if this_ds[v].dims != ("time", "mxharvests", "patch"):
- continue
- this_ds[v] = this_ds[v].where(~falsely_alive_yhp)
-
- def check_no_negative(this_ds_in, varList_no_negative, which_file, verbose=False):
- tiny_negOK = 1e-12
- this_ds = this_ds_in.copy()
- for v in this_ds:
- if not any(x in v for x in varList_no_negative):
- continue
- the_min = np.nanmin(this_ds[v].values)
- if the_min < 0:
- if np.abs(the_min) <= tiny_negOK:
- if verbose:
- print(
- f"Tiny negative value(s) in {v} (abs <= {tiny_negOK}) being set to 0 ({which_file})"
- )
- else:
- print(
- f"WARNING: Unexpected negative value(s) in {v}; minimum {the_min} ({which_file})"
- )
- values = this_ds[v].copy().values
- with np.errstate(invalid="ignore"):
- do_setto_0 = (values < 0) & (values >= -tiny_negOK)
- values[np.where(do_setto_0)] = 0
- this_ds[v] = xr.DataArray(
- values, coords=this_ds[v].coords, dims=this_ds[v].dims, attrs=this_ds[v].attrs
- )
-
- elif verbose:
- print(f"No negative value(s) in {v}; min {the_min} ({which_file})")
- return this_ds
-
- def check_no_zeros(this_ds, varList_no_zero, which_file):
- for v in this_ds:
- if not any(x in v for x in varList_no_zero):
- continue
- if np.any(this_ds[v].values == 0):
- print(f"WARNING: Unexpected zero(s) in {v} ({which_file})")
- elif verbose:
- print(f"No zero value(s) in {v} ({which_file})")
+ # When doing transient runs, it's somehow possible for crops in newly-active patches to be
+ # *already alive*. They even have a sowing date (idop)! This will of course not show up in
+ # SDATES, but it does show up in SDATES_PERHARV.
+ # I could put the SDATES_PERHARV dates into where they "should" be, but instead I'm just going
+ # to invalidate those "seasons."
+ this_ds = handle_zombie_crops(this_ds)
# Check for no zero values where there shouldn't be
- varList_no_zero = ["DATE", "YEAR"]
- check_no_zeros(this_ds, varList_no_zero, "original file")
+ varlist_no_zero = ["DATE", "YEAR"]
+ check_no_zeros(this_ds, varlist_no_zero, "original file", verbose)
# Convert time*mxharvests axes to growingseason axis
this_ds_gs = convert_axis_time2gs(this_ds, verbose=verbose, incl_orig=False)
@@ -1046,21 +396,21 @@ def check_no_zeros(this_ds, varList_no_zero, which_file):
# Get HUI accumulation as fraction of required
this_ds_gs["HUIFRAC"] = this_ds_gs["HUI"] / this_ds_gs["GDDHARV"]
this_ds_gs["HUIFRAC_PERHARV"] = this_ds["HUI_PERHARV"] / this_ds["GDDHARV_PERHARV"]
- for v in ["HUIFRAC", "HUIFRAC_PERHARV"]:
- this_ds_gs[v].attrs["units"] = "Fraction of required"
+ for var in ["HUIFRAC", "HUIFRAC_PERHARV"]:
+ this_ds_gs[var].attrs["units"] = "Fraction of required"
# Avoid tiny negative values
- varList_no_negative = ["GRAIN", "REASON", "GDD", "HUI", "YEAR", "DATE", "GSLEN"]
- this_ds_gs = check_no_negative(this_ds_gs, varList_no_negative, "new file", verbose=verbose)
+ varlist_no_negative = ["GRAIN", "REASON", "GDD", "HUI", "YEAR", "DATE", "GSLEN"]
+ this_ds_gs = check_no_negative(this_ds_gs, varlist_no_negative, "new file", verbose)
# Check for no zero values where there shouldn't be
- varList_no_zero = ["REASON", "DATE"]
- check_no_zeros(this_ds_gs, varList_no_zero, "new file")
+ varlist_no_zero = ["REASON", "DATE"]
+ check_no_zeros(this_ds_gs, varlist_no_zero, "new file", verbose)
# Check that e.g., GDDACCUM <= HUI
- for vars in [["GDDACCUM", "HUI"], ["SYEARS", "HYEARS"]]:
- if all(v in this_ds_gs for v in vars):
- check_v0_le_v1(this_ds_gs, vars, both_nan_ok=True, throw_error=True)
+ for var_list in [["GDDACCUM", "HUI"], ["SYEARS", "HYEARS"]]:
+ if all(v in this_ds_gs for v in var_list):
+ check_v0_le_v1(this_ds_gs, var_list, both_nan_ok=True, throw_error=True)
# Check that prescribed calendars were obeyed
if sdates_rx_ds:
@@ -1071,9 +421,8 @@ def check_no_zeros(this_ds, varList_no_zero, which_file):
gdds_rx_ds,
this_ds,
"this_ds",
- "SDATES",
"GDDHARV",
- gdd_min=default_gdd_min(),
+ gdd_min=DEFAULT_GDD_MIN,
)
# Convert time axis to integer year, saving original as 'cftime'
@@ -1092,175 +441,37 @@ def check_no_zeros(this_ds, varList_no_zero, which_file):
return this_ds_gs
-# Print information about a patch (for debugging)
-def print_onepatch_wrongNgs(
- p,
- this_ds_orig,
- sdates_ymp,
- hdates_ymp,
- sdates_pym,
- hdates_pym,
- sdates_pym2,
- hdates_pym2,
- sdates_pym3,
- hdates_pym3,
- sdates_pg,
- hdates_pg,
- sdates_pg2,
- hdates_pg2,
-):
- try:
- import pandas as pd
- except:
- print("Couldn't import pandas, so not displaying example bad patch ORIGINAL.")
-
- print(
- f"patch {p}: {this_ds_orig.patches1d_itype_veg_str.values[p]}, lon"
- f" {this_ds_orig.patches1d_lon.values[p]} lat {this_ds_orig.patches1d_lat.values[p]}"
- )
-
- print("Original SDATES (per sowing):")
- print(this_ds_orig.SDATES.values[:, :, p])
-
- print("Original HDATES (per harvest):")
- print(this_ds_orig.HDATES.values[:, :, p])
-
- if "pandas" in sys.modules:
-
- def print_pandas_ymp(msg, cols, arrs_tuple):
- print(f"{msg} ({np.sum(~np.isnan(arrs_tuple[0]))})")
- mxharvests = arrs_tuple[0].shape[1]
- arrs_list2 = []
- cols2 = []
- for h in np.arange(mxharvests):
- for i, a in enumerate(arrs_tuple):
- arrs_list2.append(a[:, h])
- cols2.append(cols[i] + str(h))
- arrs_tuple2 = tuple(arrs_list2)
- df = pd.DataFrame(np.stack(arrs_tuple2, axis=1))
- df.columns = cols2
- print(df)
-
- print_pandas_ymp(
- "Original",
- ["sdate", "hdate"],
- (this_ds_orig.SDATES_PERHARV.values[:, :, p], this_ds_orig.HDATES.values[:, :, p]),
- )
-
- print_pandas_ymp("Masked", ["sdate", "hdate"], (sdates_ymp[:, :, p], hdates_ymp[:, :, p]))
-
- print_pandas_ymp(
- 'After "Ignore harvests from before this output began"',
- ["sdate", "hdate"],
- (
- np.transpose(sdates_pym, (1, 2, 0))[:, :, p],
- np.transpose(hdates_pym, (1, 2, 0))[:, :, p],
- ),
- )
-
- print_pandas_ymp(
- 'After "In years with no sowing, pretend the first no-harvest is meaningful"',
- ["sdate", "hdate"],
- (
- np.transpose(sdates_pym2, (1, 2, 0))[:, :, p],
- np.transpose(hdates_pym2, (1, 2, 0))[:, :, p],
- ),
- )
-
- print_pandas_ymp(
- (
- 'After "In years with sowing that are followed by inactive years, check whether the'
- " last sowing was harvested before the patch was deactivated. If not, pretend the"
- ' LAST no-harvest is meaningful."'
- ),
- ["sdate", "hdate"],
- (
- np.transpose(sdates_pym3, (1, 2, 0))[:, :, p],
- np.transpose(hdates_pym3, (1, 2, 0))[:, :, p],
- ),
- )
-
- def print_pandas_pg(msg, cols, arrs_tuple):
- print(f"{msg} ({np.sum(~np.isnan(arrs_tuple[0]))})")
- arrs_list = list(arrs_tuple)
- for i, a in enumerate(arrs_tuple):
- arrs_list[i] = np.reshape(a, (-1))
- arrs_tuple2 = tuple(arrs_list)
- df = pd.DataFrame(np.stack(arrs_tuple2, axis=1))
- df.columns = cols
- print(df)
-
- print_pandas_pg(
- "Same, but converted to gs axis", ["sdate", "hdate"], (sdates_pg[p, :], hdates_pg[p, :])
- )
-
- print_pandas_pg(
- (
- 'After "Ignore any harvests that were planted in the final year, because some cells'
- ' will have incomplete growing seasons for the final year"'
- ),
- ["sdate", "hdate"],
- (sdates_pg2[p, :], hdates_pg2[p, :]),
- )
- else:
-
- def print_nopandas(a1, a2, msg):
- print(msg)
- if a1.ndim == 1:
- # I don't know why these aren't side-by-side!
- print(np.stack((a1, a2), axis=1))
- else:
- print(np.concatenate((a1, a2), axis=1))
-
- print_nopandas(sdates_ymp[:, :, p], hdates_ymp[:, :, p], "Masked:")
-
- print_nopandas(
- np.transpose(sdates_pym, (1, 2, 0))[:, :, p],
- np.transpose(hdates_pym, (1, 2, 0))[:, :, p],
- 'After "Ignore harvests from before this output began"',
- )
-
- print_nopandas(
- np.transpose(sdates_pym2, (1, 2, 0))[:, :, p],
- np.transpose(hdates_pym2, (1, 2, 0))[:, :, p],
- 'After "In years with no sowing, pretend the first no-harvest is meaningful"',
+def handle_zombie_crops(this_ds):
+ """
+ When doing transient runs, it's somehow possible for crops in newly-active patches to be
+ *already alive*. They even have a sowing date (idop)! This will of course not show up in
+ SDATES, but it does show up in SDATES_PERHARV.
+ I could put the SDATES_PERHARV dates into where they "should" be, but instead I'm just going
+ to invalidate those "seasons."
+ """
+ # In all but the last calendar year, which patches had no sowing?
+ no_sowing_yp = np.all(np.isnan(this_ds.SDATES.values[:-1, :, :]), axis=1)
+ # In all but the first calendar year, which harvests' jdays are < their sowings' jdays?
+ # (Indicates sowing the previous calendar year.)
+ with np.errstate(invalid="ignore"):
+ hsdate1_gt_hdate1_yp = (
+ this_ds.SDATES_PERHARV.values[1:, 0, :] > this_ds.HDATES.values[1:, 0, :]
)
-
- print_nopandas(
- np.transpose(sdates_pym3, (1, 2, 0))[:, :, p],
- np.transpose(hdates_pym3, (1, 2, 0))[:, :, p],
- (
- 'After "In years with sowing that are followed by inactive years, check whether the'
- " last sowing was harvested before the patch was deactivated. If not, pretend the"
- ' LAST [easier to implement!] no-harvest is meaningful."'
- ),
+ # Where both, we have the problem.
+ falsely_alive_yp = no_sowing_yp & hsdate1_gt_hdate1_yp
+ if np.any(falsely_alive_yp):
+ print(
+ f"Warning: {np.sum(falsely_alive_yp)} patch-seasons being ignored: Seemingly sown the "
+ + "year before harvest, but no sowings occurred that year."
)
-
- print_nopandas(sdates_pg[p, :], hdates_pg[p, :], "Same, but converted to gs axis")
-
- print_nopandas(
- sdates_pg2[p, :],
- hdates_pg2[p, :],
- (
- 'After "Ignore any harvests that were planted in the final year, because some cells'
- ' will have incomplete growing seasons for the final year"'
- ),
+ falsely_alive_yp = np.concatenate(
+ (np.full((1, this_ds.dims["patch"]), False), falsely_alive_yp), axis=0
)
-
- print("\n\n")
-
-
-# Set up empty Dataset with time axis as "gs" (growing season) instead of what CLM puts out.
-# Includes all the same variables as the input dataset, minus any that had dimensions mxsowings or mxharvests.
-def set_up_ds_with_gs_axis(ds_in):
- # Get the data variables to include in the new dataset
- data_vars = dict()
- for v in ds_in.data_vars:
- if not any([x in ["mxsowings", "mxharvests"] for x in ds_in[v].dims]):
- data_vars[v] = ds_in[v]
- # Set up the new dataset
- gs_years = [t.year - 1 for t in ds_in.time.values[:-1]]
- coords = ds_in.coords
- coords["gs"] = gs_years
- ds_out = xr.Dataset(data_vars=data_vars, coords=coords, attrs=ds_in.attrs)
- return ds_out
+ falsely_alive_y1p = np.expand_dims(falsely_alive_yp, axis=1)
+ dummy_false_y1p = np.expand_dims(np.full_like(falsely_alive_yp, False), axis=1)
+ falsely_alive_yhp = np.concatenate((falsely_alive_y1p, dummy_false_y1p), axis=1)
+ for var in this_ds.data_vars:
+ if this_ds[var].dims != ("time", "mxharvests", "patch"):
+ continue
+ this_ds[var] = this_ds[var].where(~falsely_alive_yhp)
+ return this_ds
diff --git a/python/ctsm/crop_calendars/cropcal_utils.py b/python/ctsm/crop_calendars/cropcal_utils.py
index ba6c0b6e41..00ed2413d2 100644
--- a/python/ctsm/crop_calendars/cropcal_utils.py
+++ b/python/ctsm/crop_calendars/cropcal_utils.py
@@ -1,57 +1,15 @@
-"""utility functions"""
-"""copied from klindsay, https://github.com/klindsay28/CESM2_coup_carb_cycle_JAMES/blob/master/utils.py"""
-
-import re
-import warnings
-import importlib
-
-with warnings.catch_warnings():
- warnings.filterwarnings(action="ignore", category=DeprecationWarning)
- if importlib.find_loader("cf_units") is not None:
- import cf_units as cf
- if importlib.find_loader("cartopy") is not None:
- from cartopy.util import add_cyclic_point
-import cftime
+"""
+utility functions
+copied from klindsay, https://github.com/klindsay28/CESM2_coup_carb_cycle_JAMES/blob/master/utils.py
+"""
import numpy as np
import xarray as xr
-# from xr_ds_ex import xr_ds_ex
-
-
-# generate annual means, weighted by days / month
-def weighted_annual_mean(array, time_in="time", time_out="time"):
- if isinstance(array[time_in].values[0], cftime.datetime):
- month_length = array[time_in].dt.days_in_month
-
- # After https://docs.xarray.dev/en/v0.5.1/examples/monthly-means.html
- group = f"{time_in}.year"
- weights = month_length.groupby(group) / month_length.groupby(group).sum()
- np.testing.assert_allclose(weights.groupby(group).sum().values, 1)
- array = (array * weights).groupby(group).sum(dim=time_in, skipna=True)
- if time_out != "year":
- array = array.rename({"year": time_out})
-
- else:
- mon_day = xr.DataArray(
- np.array([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]), dims=["month"]
- )
- mon_wgt = mon_day / mon_day.sum()
- array = (
- array.rolling({time_in: 12}, center=False) # rolling
- .construct("month") # construct the array
- .isel(
- {time_in: slice(11, None, 12)}
- ) # slice so that the first element is [1..12], second is [13..24]
- .dot(mon_wgt, dims=["month"])
- )
- if time_in != time_out:
- array = array.rename({time_in: time_out})
- return array
-
-
-# List of PFTs used in CLM
def define_pftlist():
+ """
+ Return list of PFTs used in CLM
+ """
pftlist = [
"not_vegetated",
"needleleaf_evergreen_temperate_tree",
@@ -136,12 +94,14 @@ def define_pftlist():
return pftlist
-# Get CLM ivt number corresponding to a given name
def ivt_str2int(ivt_str):
+ """
+ Get CLM ivt number corresponding to a given name
+ """
pftlist = define_pftlist()
if isinstance(ivt_str, str):
ivt_int = pftlist.index(ivt_str)
- elif isinstance(ivt_str, list) or isinstance(ivt_str, np.ndarray):
+ elif isinstance(ivt_str, (list, np.ndarray)):
ivt_int = [ivt_str2int(x) for x in ivt_str]
if isinstance(ivt_str, np.ndarray):
ivt_int = np.array(ivt_int)
@@ -153,12 +113,14 @@ def ivt_str2int(ivt_str):
return ivt_int
-# Get CLM ivt name corresponding to a given number
def ivt_int2str(ivt_int):
+ """
+ Get CLM ivt name corresponding to a given number
+ """
pftlist = define_pftlist()
if np.issubdtype(type(ivt_int), np.integer) or int(ivt_int) == ivt_int:
ivt_str = pftlist[int(ivt_int)]
- elif isinstance(ivt_int, list) or isinstance(ivt_int, np.ndarray):
+ elif isinstance(ivt_int, (list, np.ndarray)):
ivt_str = [ivt_int2str(x) for x in ivt_int]
if isinstance(ivt_int, np.ndarray):
ivt_str = np.array(ivt_str)
@@ -172,23 +134,23 @@ def ivt_int2str(ivt_int):
return ivt_str
-# Does this vegetation type's name match (for a given comparison method) any member of a filtering list?
-"""
-Methods:
+def is_this_vegtype(this_vegtype, this_filter, this_method):
+ """
+ Does this vegetation type's name match (for a given comparison method) any member of a filtering
+ list?
+
+ Methods:
ok_contains: True if any member of this_filter is found in this_vegtype.
notok_contains: True of no member of this_filter is found in this_vegtype.
- ok_exact: True if this_vegtype matches any member of this_filter
+ ok_exact: True if this_vegtype matches any member of this_filter
exactly.
- notok_exact: True if this_vegtype does not match any member of
+ notok_exact: True if this_vegtype does not match any member of
this_filter exactly.
-"""
-
-
-def is_this_vegtype(this_vegtype, this_filter, this_method):
+ """
# Make sure data type of this_vegtype is acceptable
if isinstance(this_vegtype, float) and int(this_vegtype) == this_vegtype:
this_vegtype = int(this_vegtype)
- data_type_ok = lambda x: isinstance(x, str) or isinstance(x, int) or isinstance(x, np.int64)
+ data_type_ok = lambda x: isinstance(x, (int, np.int64, str))
ok_input = True
if not data_type_ok(this_vegtype):
if isinstance(this_vegtype, xr.core.dataarray.DataArray):
@@ -221,43 +183,44 @@ def is_this_vegtype(this_vegtype, this_filter, this_method):
# Perform the comparison
if this_method == "ok_contains":
return any(n in this_vegtype for n in this_filter)
- elif this_method == "notok_contains":
+ if this_method == "notok_contains":
return not any(n in this_vegtype for n in this_filter)
- elif this_method == "ok_exact":
+ if this_method == "ok_exact":
return any(n == this_vegtype for n in this_filter)
- elif this_method == "notok_exact":
+ if this_method == "notok_exact":
return not any(n == this_vegtype for n in this_filter)
- else:
- raise ValueError(f"Unknown comparison method: '{this_method}'")
-
-
-# Get boolean list of whether each vegetation type in list is a managed crop
-"""
- this_vegtypelist: The list of vegetation types whose members you want to
- test.
- this_filter: The list of strings against which you want to compare
- each member of this_vegtypelist.
- this_method: How you want to do the comparison. See is_this_vegtype().
-"""
+ raise ValueError(f"Unknown comparison method: '{this_method}'")
def is_each_vegtype(this_vegtypelist, this_filter, this_method):
+ """
+    Get boolean list of whether each vegetation type in a list matches the given filter
+
+ this_vegtypelist: The list of vegetation types whose members you want to test.
+ this_filter: The list of strings against which you want to compare each member of
+ this_vegtypelist.
+ this_method: How you want to do the comparison. See is_this_vegtype().
+ """
if isinstance(this_vegtypelist, xr.DataArray):
this_vegtypelist = this_vegtypelist.values
return [is_this_vegtype(x, this_filter, this_method) for x in this_vegtypelist]
-# List (strings) of managed crops in CLM.
def define_mgdcrop_list():
+ """
+ List (strings) of managed crops in CLM.
+ """
notcrop_list = ["tree", "grass", "shrub", "unmanaged", "not_vegetated"]
defined_pftlist = define_pftlist()
is_crop = is_each_vegtype(defined_pftlist, notcrop_list, "notok_contains")
return [defined_pftlist[i] for i, x in enumerate(is_crop) if x]
-# Convert list of vegtype strings to integer index equivalents.
def vegtype_str2int(vegtype_str, vegtype_mainlist=None):
+ """
+ Convert list of vegtype strings to integer index equivalents.
+ """
convert_to_ndarray = not isinstance(vegtype_str, np.ndarray)
if convert_to_ndarray:
vegtype_str = np.array(vegtype_str)
@@ -266,222 +229,34 @@ def vegtype_str2int(vegtype_str, vegtype_mainlist=None):
vegtype_mainlist = vegtype_mainlist.vegtype_str.values
elif isinstance(vegtype_mainlist, xr.DataArray):
vegtype_mainlist = vegtype_mainlist.values
- elif vegtype_mainlist == None:
+ elif vegtype_mainlist is None:
vegtype_mainlist = define_pftlist()
if not isinstance(vegtype_mainlist, list) and isinstance(vegtype_mainlist[0], str):
if isinstance(vegtype_mainlist, list):
raise TypeError(
f"Not sure how to handle vegtype_mainlist as list of {type(vegtype_mainlist[0])}"
)
- else:
- raise TypeError(
- f"Not sure how to handle vegtype_mainlist as type {type(vegtype_mainlist[0])}"
- )
+ raise TypeError(
+ f"Not sure how to handle vegtype_mainlist as type {type(vegtype_mainlist[0])}"
+ )
if vegtype_str.shape == ():
indices = np.array([-1])
else:
indices = np.full(len(vegtype_str), -1)
- for v in np.unique(vegtype_str):
- indices[np.where(vegtype_str == v)] = vegtype_mainlist.index(v)
+ for vegtype_str_2 in np.unique(vegtype_str):
+ indices[np.where(vegtype_str == vegtype_str_2)] = vegtype_mainlist.index(vegtype_str_2)
if convert_to_ndarray:
indices = [int(x) for x in indices]
return indices
-# Flexibly subset time(s) and/or vegetation type(s) from an xarray Dataset or DataArray. Keyword arguments like dimension=selection. Selections can be individual values or slice()s. Optimize memory usage by beginning keyword argument list with the selections that will result in the largest reduction of object size. Use dimension "vegtype" to extract patches of designated vegetation type (can be string or integer).
-# Can also do dimension=function---e.g., time=np.mean will take the mean over the time dimension.
-def xr_flexsel(xr_object, patches1d_itype_veg=None, warn_about_seltype_interp=True, **kwargs):
- # Setup
- havewarned = False
- delimiter = "__"
-
- for key, selection in kwargs.items():
- if callable(selection):
- # It would have been really nice to do selection(xr_object, axis=key), but numpy methods and xarray methods disagree on "axis" vs. "dimension." So instead, just do this manually.
- if selection == np.mean:
- try:
- xr_object = xr_object.mean(dim=key)
- except:
- raise ValueError(
- f"Failed to take mean of dimension {key}. Try doing so outside of"
- " xr_flexsel()."
- )
- else:
- raise ValueError(f"xr_flexsel() doesn't recognize function {selection}")
-
- elif key == "vegtype":
- # Convert to list, if needed
- if not isinstance(selection, list):
- selection = [selection]
-
- # Convert to indices, if needed
- if isinstance(selection[0], str):
- selection = vegtype_str2int(selection)
-
- # Get list of boolean(s)
- if isinstance(selection[0], int):
- if isinstance(patches1d_itype_veg, type(None)):
- patches1d_itype_veg = xr_object.patches1d_itype_veg.values
- elif isinstance(patches1d_itype_veg, xr.core.dataarray.DataArray):
- patches1d_itype_veg = patches1d_itype_veg.values
- is_vegtype = is_each_vegtype(patches1d_itype_veg, selection, "ok_exact")
- elif isinstance(selection[0], bool):
- if len(selection) != len(xr_object.patch):
- raise ValueError(
- "If providing boolean 'vegtype' argument to xr_flexsel(), it must be the"
- f" same length as xr_object.patch ({len(selection)} vs."
- f" {len(xr_object.patch)})"
- )
- is_vegtype = selection
- else:
- raise TypeError(f"Not sure how to handle 'vegtype' of type {type(selection[0])}")
- xr_object = xr_object.isel(patch=[i for i, x in enumerate(is_vegtype) if x])
- if "ivt" in xr_object:
- xr_object = xr_object.isel(
- ivt=is_each_vegtype(xr_object.ivt.values, selection, "ok_exact")
- )
-
- else:
- # Parse selection type, if provided
- if delimiter in key:
- key, selection_type = key.split(delimiter)
-
- # Check type of selection
- else:
- is_inefficient = False
- if isinstance(selection, slice):
- slice_members = []
- if selection == slice(0):
- raise ValueError("slice(0) will be empty")
- if selection.start != None:
- slice_members = slice_members + [selection.start]
- if selection.stop != None:
- slice_members = slice_members + [selection.stop]
- if selection.step != None:
- slice_members = slice_members + [selection.step]
- if slice_members == []:
- raise TypeError("slice is all None?")
- this_type = int
- for x in slice_members:
- if x < 0 or not isinstance(x, int):
- this_type = "values"
- break
- elif isinstance(selection, np.ndarray):
- if selection.dtype.kind in np.typecodes["AllInteger"]:
- this_type = int
- else:
- is_inefficient = True
- this_type = None
- for x in selection:
- if x < 0 or x % 1 > 0:
- if isinstance(x, int):
- this_type = "values"
- else:
- this_type = type(x)
- break
- if this_type == None:
- this_type = int
- selection = selection.astype(int)
- else:
- this_type = type(selection)
-
- warn_about_this_seltype_interp = warn_about_seltype_interp
- if this_type == list and isinstance(selection[0], str):
- selection_type = "values"
- warn_about_this_seltype_interp = False
- elif this_type == int:
- selection_type = "indices"
- else:
- selection_type = "values"
-
- if warn_about_this_seltype_interp:
- # Suggest suppressing selection type interpretation warnings
- if not havewarned:
- print(
- "xr_flexsel(): Suppress all 'selection type interpretation' messages by"
- " specifying warn_about_seltype_interp=False"
- )
- havewarned = True
- if is_inefficient:
- extra = " This will also improve efficiency for large selections."
- else:
- extra = ""
- print(
- f"xr_flexsel(): Selecting {key} as {selection_type} because selection was"
- f" interpreted as {this_type}. If not correct, specify selection type"
- " ('indices' or 'values') in keyword like"
- f" '{key}{delimiter}SELECTIONTYPE=...' instead of '{key}=...'.{extra}"
- )
-
- # Trim along relevant 1d axes
- if isinstance(xr_object, xr.Dataset) and key in ["lat", "lon"]:
- if selection_type == "indices":
- inclCoords = xr_object[key].values[selection]
- elif selection_type == "values":
- if isinstance(selection, slice):
- inclCoords = xr_object.sel({key: selection}, drop=False)[key].values
- else:
- inclCoords = selection
- else:
- raise TypeError(f"selection_type {selection_type} not recognized")
- if key == "lat":
- thisXY = "jxy"
- elif key == "lon":
- thisXY = "ixy"
- else:
- raise KeyError(
- f"Key '{key}' not recognized: What 1d_ suffix should I use for variable"
- " name?"
- )
- pattern = re.compile(f"1d_{thisXY}")
- matches = [x for x in list(xr_object.keys()) if pattern.search(x) != None]
- for thisVar in matches:
- if len(xr_object[thisVar].dims) != 1:
- raise RuntimeError(
- f"Expected {thisVar} to have 1 dimension, but it has"
- f" {len(xr_object[thisVar].dims)}: {xr_object[thisVar].dims}"
- )
- thisVar_dim = xr_object[thisVar].dims[0]
- # print(f"Variable {thisVar} has dimension {thisVar_dim}")
- thisVar_coords = xr_object[key].values[
- xr_object[thisVar].values.astype(int) - 1
- ]
- # print(f"{thisVar_dim} size before: {xr_object.sizes[thisVar_dim]}")
- ok_ind = []
- new_1d_thisXY = []
- for i, x in enumerate(thisVar_coords):
- if x in inclCoords:
- ok_ind = ok_ind + [i]
- new_1d_thisXY = new_1d_thisXY + [(inclCoords == x).nonzero()[0] + 1]
- xr_object = xr_object.isel({thisVar_dim: ok_ind})
- new_1d_thisXY = np.array(new_1d_thisXY).squeeze()
- xr_object[thisVar].values = new_1d_thisXY
- # print(f"{thisVar_dim} size after: {xr_object.sizes[thisVar_dim]}")
-
- # Perform selection
- if selection_type == "indices":
- # Have to select like this instead of with index directly because otherwise assign_coords() will throw an error. Not sure why.
- if isinstance(selection, int):
- # Single integer? Turn it into a slice.
- selection = slice(selection, selection + 1)
- elif (
- isinstance(selection, np.ndarray)
- and not selection.dtype.kind in np.typecodes["AllInteger"]
- ):
- selection = selection.astype(int)
- xr_object = xr_object.isel({key: selection})
- elif selection_type == "values":
- xr_object = xr_object.sel({key: selection})
- else:
- raise TypeError(f"selection_type {selection_type} not recognized")
-
- return xr_object
-
-
-# Get PFT of each patch, in both integer and string forms.
def get_patch_ivts(this_ds, this_pftlist):
- # First, get all the integer values; should be time*pft or pft*time. We will eventually just take the first timestep.
+ """
+ Get PFT of each patch, in both integer and string forms.
+ """
+ # First, get all the integer values; should be time*pft or pft*time. We will eventually just
+ # take the first timestep.
vegtype_int = this_ds.patches1d_itype_veg
vegtype_int.values = vegtype_int.values.astype(int)
@@ -492,379 +267,63 @@ def get_patch_ivts(this_ds, this_pftlist):
return {"int": vegtype_int, "str": vegtype_str, "all_str": this_pftlist}
-# Convert a list of strings with vegetation type names into a DataArray. Used to add vegetation type info in import_ds().
def get_vegtype_str_da(vegtype_str):
+ """
+ Convert a list of strings with vegetation type names into a DataArray.
+ """
nvt = len(vegtype_str)
- thisName = "vegtype_str"
vegtype_str_da = xr.DataArray(
- vegtype_str, coords={"ivt": np.arange(0, nvt)}, dims=["ivt"], name=thisName
+ vegtype_str, coords={"ivt": np.arange(0, nvt)}, dims=["ivt"], name="vegtype_str"
)
return vegtype_str_da
-# Function to drop unwanted variables in preprocessing of open_mfdataset(), making sure to NOT drop any unspecified variables that will be useful in gridding. Also adds vegetation type info in the form of a DataArray of strings.
-# Also renames "pft" dimension (and all like-named variables, e.g., pft1d_itype_veg_str) to be named like "patch". This can later be reversed, for compatibility with other code, using patch2pft().
-def mfdataset_preproc(ds, vars_to_import, vegtypes_to_import, timeSlice):
- # Rename "pft" dimension and variables to "patch", if needed
- if "pft" in ds.dims:
- pattern = re.compile("pft.*1d")
- matches = [x for x in list(ds.keys()) if pattern.search(x) != None]
- pft2patch_dict = {"pft": "patch"}
- for m in matches:
- pft2patch_dict[m] = m.replace("pft", "patch").replace("patchs", "patches")
- ds = ds.rename(pft2patch_dict)
-
- derived_vars = []
- if vars_to_import != None:
- # Split vars_to_import into variables that are vs. aren't already in ds
- derived_vars = [v for v in vars_to_import if v not in ds]
- present_vars = [v for v in vars_to_import if v in ds]
- vars_to_import = present_vars
-
- # Get list of dimensions present in variables in vars_to_import.
- dimList = []
- for thisVar in vars_to_import:
- # list(set(x)) returns a list of the unique items in x
- dimList = list(set(dimList + list(ds.variables[thisVar].dims)))
-
- # Get any _1d variables that are associated with those dimensions. These will be useful in gridding. Also, if any dimension is "pft", set up to rename it and all like-named variables to "patch"
- onedVars = []
- for thisDim in dimList:
- pattern = re.compile(f"{thisDim}.*1d")
- matches = [x for x in list(ds.keys()) if pattern.search(x) != None]
- onedVars = list(set(onedVars + matches))
-
- # Add dimensions and _1d variables to vars_to_import
- vars_to_import = list(set(vars_to_import + list(ds.dims) + onedVars))
-
- # Add any _bounds variables
- bounds_vars = []
- for v in vars_to_import:
- bounds_var = v + "_bounds"
- if bounds_var in ds:
- bounds_vars = bounds_vars + [bounds_var]
- vars_to_import = vars_to_import + bounds_vars
-
- # Get list of variables to drop
- varlist = list(ds.variables)
- vars_to_drop = list(np.setdiff1d(varlist, vars_to_import))
-
- # Drop them
- ds = ds.drop_vars(vars_to_drop)
-
- # Add vegetation type info
- if "patches1d_itype_veg" in list(ds):
- this_pftlist = define_pftlist()
- get_patch_ivts(
- ds, this_pftlist
- ) # Includes check of whether vegtype changes over time anywhere
- vegtype_da = get_vegtype_str_da(this_pftlist)
- patches1d_itype_veg_str = vegtype_da.values[
- ds.isel(time=0).patches1d_itype_veg.values.astype(int)
- ]
- npatch = len(patches1d_itype_veg_str)
- patches1d_itype_veg_str = xr.DataArray(
- patches1d_itype_veg_str,
- coords={"patch": np.arange(0, npatch)},
- dims=["patch"],
- name="patches1d_itype_veg_str",
- )
- ds = xr.merge([ds, vegtype_da, patches1d_itype_veg_str])
-
- # Restrict to veg. types of interest, if any
- if vegtypes_to_import != None:
- ds = xr_flexsel(ds, vegtype=vegtypes_to_import)
-
- # Restrict to time slice, if any
- if timeSlice:
- ds = safer_timeslice(ds, timeSlice)
-
- # Finish import
- ds = xr.decode_cf(ds, decode_times=True)
-
- # Compute derived variables
- for v in derived_vars:
- if v == "HYEARS" and "HDATES" in ds and ds.HDATES.dims == ("time", "mxharvests", "patch"):
- yearList = np.array([np.float32(x.year - 1) for x in ds.time.values])
- hyears = ds["HDATES"].copy()
- hyears.values = np.tile(
- np.expand_dims(yearList, (1, 2)), (1, ds.dims["mxharvests"], ds.dims["patch"])
- )
- with np.errstate(invalid="ignore"):
- is_le_zero = ~np.isnan(ds.HDATES.values) & (ds.HDATES.values <= 0)
- hyears.values[is_le_zero] = ds.HDATES.values[is_le_zero]
- hyears.values[np.isnan(ds.HDATES.values)] = np.nan
- hyears.attrs["long_name"] = "DERIVED: actual crop harvest years"
- hyears.attrs["units"] = "year"
- ds["HYEARS"] = hyears
-
- return ds
-
-
-# Import a dataset that can be spread over multiple files, only including specified variables and/or vegetation types and/or timesteps, concatenating by time. DOES actually read the dataset into memory, but only AFTER dropping unwanted variables and/or vegetation types.
-def import_ds(
- filelist,
- myVars=None,
- myVegtypes=None,
- timeSlice=None,
- myVars_missing_ok=[],
- only_active_patches=False,
- rename_lsmlatlon=False,
- chunks=None,
-):
- # Convert myVegtypes here, if needed, to avoid repeating the process each time you read a file in xr.open_mfdataset().
- if myVegtypes is not None:
- if not isinstance(myVegtypes, list):
- myVegtypes = [myVegtypes]
- if isinstance(myVegtypes[0], str):
- myVegtypes = vegtype_str2int(myVegtypes)
-
- # Same for these variables.
- if myVars != None:
- if not isinstance(myVars, list):
- myVars = [myVars]
- if myVars_missing_ok:
- if not isinstance(myVars_missing_ok, list):
- myVars_missing_ok = [myVars_missing_ok]
-
- # Make sure lists are actually lists
- if not isinstance(filelist, list):
- filelist = [filelist]
- if not isinstance(myVars_missing_ok, list):
- myVars_missing_ok = [myVars_missing_ok]
-
- # Remove files from list if they don't contain requested timesteps.
- # timeSlice should be in the format slice(start,end[,step]). start or end can be None to be unbounded on one side. Note that the standard slice() documentation suggests that only elements through end-1 will be selected, but that seems not to be the case in the xarray implementation.
- if timeSlice:
- new_filelist = []
- for file in sorted(filelist):
- filetime = xr.open_dataset(file).time
- filetime_sel = safer_timeslice(filetime, timeSlice)
- include_this_file = filetime_sel.size
- if include_this_file:
- new_filelist.append(file)
-
- # If you found some matching files, but then you find one that doesn't, stop going through the list.
- elif new_filelist:
- break
- if not new_filelist:
- raise RuntimeError(f"No files found in timeSlice {timeSlice}")
- filelist = new_filelist
-
- # The xarray open_mfdataset() "preprocess" argument requires a function that takes exactly one variable (an xarray.Dataset object). Wrapping mfdataset_preproc() in this lambda function allows this. Could also just allow mfdataset_preproc() to access myVars and myVegtypes directly, but that's bad practice as it could lead to scoping issues.
- mfdataset_preproc_closure = lambda ds: mfdataset_preproc(ds, myVars, myVegtypes, timeSlice)
-
- # Import
- if isinstance(filelist, list) and len(filelist) == 1:
- filelist = filelist[0]
- if isinstance(filelist, list):
- with warnings.catch_warnings():
- warnings.filterwarnings(action="ignore", category=DeprecationWarning)
- if importlib.find_loader("dask") is None:
- raise ModuleNotFoundError(
- "You have asked xarray to import a list of files as a single Dataset using"
- " open_mfdataset(), but this requires dask, which is not available.\nFile"
- f" list: {filelist}"
- )
- this_ds = xr.open_mfdataset(
- sorted(filelist),
- data_vars="minimal",
- preprocess=mfdataset_preproc_closure,
- compat="override",
- coords="all",
- concat_dim="time",
- combine="nested",
- chunks=chunks,
- )
- elif isinstance(filelist, str):
- this_ds = xr.open_dataset(filelist, chunks=chunks)
- this_ds = mfdataset_preproc(this_ds, myVars, myVegtypes, timeSlice)
- this_ds = this_ds.compute()
-
- # Include only active patches (or whatever)
- if only_active_patches:
- is_active = this_ds.patches1d_active.values
- p_active = np.where(is_active)[0]
- this_ds_active = this_ds.isel(patch=p_active)
-
- # Warn and/or error about variables that couldn't be imported or derived
- if myVars:
- missing_vars = [v for v in myVars if v not in this_ds]
- ok_missing_vars = [v for v in missing_vars if v in myVars_missing_ok]
- bad_missing_vars = [v for v in missing_vars if v not in myVars_missing_ok]
- if ok_missing_vars:
- print(
- "Could not import some variables; either not present or not deriveable:"
- f" {ok_missing_vars}"
- )
- if bad_missing_vars:
- raise RuntimeError(
- "Could not import some variables; either not present or not deriveable:"
- f" {bad_missing_vars}"
- )
-
- if rename_lsmlatlon:
- if "lsmlat" in this_ds.dims:
- this_ds = this_ds.rename({"lsmlat": "lat"})
- if "lsmlon" in this_ds.dims:
- this_ds = this_ds.rename({"lsmlon": "lon"})
-
- return this_ds
-
-
-# Return a DataArray, with defined coordinates, for a given variable in a dataset.
-def get_thisVar_da(thisVar, this_ds):
- # Make DataArray for this variable
- thisvar_da = np.array(this_ds.variables[thisVar])
- theseDims = this_ds.variables[thisVar].dims
- thisvar_da = xr.DataArray(thisvar_da, dims=theseDims)
-
- # Define coordinates of this variable's DataArray
- dimsDict = dict()
- for thisDim in theseDims:
- dimsDict[thisDim] = this_ds[thisDim]
- thisvar_da = thisvar_da.assign_coords(dimsDict)
- thisvar_da.attrs = this_ds[thisVar].attrs
-
- return thisvar_da
-
-
-# Make a geographically gridded DataArray (with dimensions time, vegetation type [as string], lat, lon) of one variable within a Dataset. Optional keyword arguments will be passed to xr_flexsel() to select single steps or slices along the specified ax(ie)s.
-#
-# fillValue: Default None means grid will be filled with NaN, unless the variable in question already has a fillValue, in which case that will be used.
-def grid_one_variable(this_ds, thisVar, fillValue=None, **kwargs):
- # Get this Dataset's values for selection(s), if provided
- this_ds = xr_flexsel(this_ds, **kwargs)
-
- # Get DataArrays needed for gridding
- thisvar_da = get_thisVar_da(thisVar, this_ds)
- vt_da = None
- if "patch" in thisvar_da.dims:
- spatial_unit = "patch"
- xy_1d_prefix = "patches"
- if "patches1d_itype_veg" in this_ds:
- vt_da = get_thisVar_da("patches1d_itype_veg", this_ds)
- elif "gridcell" in thisvar_da.dims:
- spatial_unit = "gridcell"
- xy_1d_prefix = "grid"
- else:
- raise RuntimeError(
- f"What variables to use for _ixy and _jxy of variable with dims {thisvar_da.dims}?"
- )
- ixy_da = get_thisVar_da(xy_1d_prefix + "1d_ixy", this_ds)
- jxy_da = get_thisVar_da(xy_1d_prefix + "1d_jxy", this_ds)
-
- if not fillValue and "_FillValue" in thisvar_da.attrs:
- fillValue = thisvar_da.attrs["_FillValue"]
-
- # Renumber vt_da to work as indices on new ivt dimension, if needed.
- ### Ensures that the unique set of vt_da values begins with 1 and
- ### contains no missing steps.
- if "ivt" in this_ds and vt_da is not None:
- vt_da.values = np.array([np.where(this_ds.ivt.values == x)[0][0] for x in vt_da.values])
-
- # Get new dimension list
- new_dims = list(thisvar_da.dims)
- ### Remove "[spatial_unit]".
- if spatial_unit in new_dims:
- new_dims.remove(spatial_unit)
- # Add "ivt_str" (vegetation type, as string). This needs to go at the end, to avoid a possible situation where you wind up with multiple Ellipsis members of fill_indices.
- if "ivt" in this_ds and spatial_unit == "patch":
- new_dims.append("ivt_str")
- ### Add lat and lon to end of list
- new_dims = new_dims + ["lat", "lon"]
-
- # Set up empty array
- n_list = []
- for dim in new_dims:
- if dim == "ivt_str":
- n = this_ds.sizes["ivt"]
- elif dim in thisvar_da.coords:
- n = thisvar_da.sizes[dim]
- else:
- n = this_ds.sizes[dim]
- n_list = n_list + [n]
- thisvar_gridded = np.empty(n_list)
- if fillValue:
- thisvar_gridded[:] = fillValue
- else:
- thisvar_gridded[:] = np.NaN
-
- # Fill with this variable
- fill_indices = []
- for dim in new_dims:
- if dim == "lat":
- fill_indices.append(jxy_da.values.astype(int) - 1)
- elif dim == "lon":
- fill_indices.append(ixy_da.values.astype(int) - 1)
- elif dim == "ivt_str":
- fill_indices.append(vt_da)
- elif not fill_indices:
- # I.e., if fill_indices is empty. Could also do "elif len(fill_indices)==0".
- fill_indices.append(Ellipsis)
- try:
- thisvar_gridded[tuple(fill_indices[: len(fill_indices)])] = thisvar_da.values
- except:
- thisvar_gridded[tuple(fill_indices[: len(fill_indices)])] = thisvar_da.values.transpose()
- if not np.any(np.bitwise_not(np.isnan(thisvar_gridded))):
- if np.all(np.isnan(thisvar_da.values)):
- print("Warning: This DataArray (and thus map) is all NaN")
- else:
- raise RuntimeError("thisvar_gridded was not filled!")
-
- # Assign coordinates, attributes and name
- thisvar_gridded = xr.DataArray(thisvar_gridded, dims=tuple(new_dims), attrs=thisvar_da.attrs)
- for dim in new_dims:
- if dim == "ivt_str":
- values = this_ds.vegtype_str.values
- elif dim in thisvar_da.coords:
- values = thisvar_da[dim]
- else:
- values = this_ds[dim].values
- thisvar_gridded = thisvar_gridded.assign_coords({dim: values})
- thisvar_gridded.name = thisVar
-
- # Add FillValue attribute
- if fillValue:
- thisvar_gridded.attrs["_FillValue"] = fillValue
-
- return thisvar_gridded
-
-
-# ctsm_pylib can't handle time slicing like Dataset.sel(time=slice("1998-01-01", "2005-12-31")) for some reason. This function tries to fall back to slicing by integers. It should work with both Datasets and DataArrays.
-def safer_timeslice(ds, timeSlice, timeVar="time"):
+def safer_timeslice(ds_in, time_slice, time_var="time"):
+ """
+ ctsm_pylib can't handle time slicing like Dataset.sel(time=slice("1998-01-01", "2005-12-31"))
+ for some reason. This function tries to fall back to slicing by integers. It should work with
+ both Datasets and DataArrays.
+ """
try:
- ds = ds.sel({timeVar: timeSlice})
- except:
+ ds_in = ds_in.sel({time_var: time_slice})
+ except: # pylint: disable=bare-except
# If the issue might have been slicing using strings, try to fall back to integer slicing
- if (
- isinstance(timeSlice.start, str)
- and isinstance(timeSlice.stop, str)
- and len(timeSlice.start.split("-")) == 3
- and timeSlice.start.split("-")[1:] == ["01", "01"]
- and len(timeSlice.stop.split("-")) == 3
+ can_try_integer_slicing = (
+ isinstance(time_slice.start, str)
+ and isinstance(time_slice.stop, str)
+ and len(time_slice.start.split("-")) == 3
+ and time_slice.start.split("-")[1:] == ["01", "01"]
+ and len(time_slice.stop.split("-")) == 3
and (
- timeSlice.stop.split("-")[1:] == ["12", "31"]
- or timeSlice.stop.split("-")[1:] == ["01", "01"]
+ time_slice.stop.split("-")[1:] == ["12", "31"]
+ or time_slice.stop.split("-")[1:] == ["01", "01"]
)
- ):
- fileyears = np.array([x.year for x in ds.time.values])
+ )
+ if can_try_integer_slicing:
+ fileyears = np.array([x.year for x in ds_in.time.values])
if len(np.unique(fileyears)) != len(fileyears):
print("Could not fall back to integer slicing of years: Time axis not annual")
raise
- yStart = int(timeSlice.start.split("-")[0])
- yStop = int(timeSlice.stop.split("-")[0])
- where_in_timeSlice = np.where((fileyears >= yStart) & (fileyears <= yStop))[0]
- ds = ds.isel({timeVar: where_in_timeSlice})
+ y_start = int(time_slice.start.split("-")[0])
+ y_stop = int(time_slice.stop.split("-")[0])
+ where_in_timeslice = np.where((fileyears >= y_start) & (fileyears <= y_stop))[0]
+ ds_in = ds_in.isel({time_var: where_in_timeslice})
else:
- print(f"Could not fall back to integer slicing for timeSlice {timeSlice}")
+ print(f"Could not fall back to integer slicing for time_slice {time_slice}")
raise
- return ds
+ return ds_in
-# Convert a longitude axis that's -180 to 180 around the international date line to one that's 0 to 360 around the prime meridian. If you pass in a Dataset or DataArray, the "lon" coordinates will be changed. Otherwise, it assumes you're passing in numeric data.
def lon_idl2pm(lons_in, fail_silently=False):
+ """
+ Convert a longitude axis that's -180 to 180 around the international date line to one that's 0
+ to 360 around the prime meridian.
+
+ - If you pass in a Dataset or DataArray, the "lon" coordinates will be changed. Otherwise, it
+ assumes you're passing in numeric data.
+ """
+
def check_ok(tmp, fail_silently):
msg = ""
@@ -875,10 +334,9 @@ def check_ok(tmp, fail_silently):
if msg == "":
return True
- elif fail_silently:
+ if fail_silently:
return False
- else:
- raise ValueError(msg)
+ raise ValueError(msg)
def do_it(tmp):
tmp = tmp + 360
@@ -909,14 +367,19 @@ def do_it(tmp):
return lons_out
-# Helper function to check that a list is strictly increasing
-def is_strictly_increasing(L):
- # https://stackoverflow.com/a/4983359/2965321
- return all(x < y for x, y in zip(L, L[1:]))
+def is_strictly_increasing(this_list):
+ """
+ Helper function to check that a list is strictly increasing
+
+ https://stackoverflow.com/a/4983359/2965321
+ """
+ return all(x < y for x, y in zip(this_list, this_list[1:]))
-# Ensure that longitude axis coordinates are monotonically increasing
def make_lon_increasing(xr_obj):
+ """
+ Ensure that longitude axis coordinates are monotonically increasing
+ """
if not "lon" in xr_obj.dims:
return xr_obj
diff --git a/python/ctsm/crop_calendars/generate_gdds.py b/python/ctsm/crop_calendars/generate_gdds.py
index 16e3e130da..156ebfb20e 100644
--- a/python/ctsm/crop_calendars/generate_gdds.py
+++ b/python/ctsm/crop_calendars/generate_gdds.py
@@ -1,32 +1,29 @@
-paramfile_dir = "/glade/campaign/cesm/cesmdata/cseg/inputdata/lnd/clm2/paramdata"
-
-# Import other shared functions
+"""
+Generate maturity requirements (GDD) from outputs of a GDD-generating run
+"""
import os
-import inspect
import sys
+import pickle
+import datetime as dt
+import argparse
+import logging
+import numpy as np
+import xarray as xr
# Import the CTSM Python utilities.
-# sys.path.insert() is necessary for RXCROPMATURITY to work. The fact that it's calling this script in the RUN phase seems to require the python/ directory to be manually added to path.
+# sys.path.insert() is necessary for RXCROPMATURITY to work. The fact that it's calling this script
+# in the RUN phase seems to require the python/ directory to be manually added to path.
_CTSM_PYTHON = os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir, os.pardir, "python"
)
sys.path.insert(1, _CTSM_PYTHON)
-import ctsm.crop_calendars.cropcal_module as cc
-import ctsm.crop_calendars.generate_gdds_functions as gddfn
-
-# Import everything else
-import os
-import sys
-import numpy as np
-import xarray as xr
-import pickle
-import datetime as dt
-import argparse
-import logging
+import ctsm.crop_calendars.cropcal_module as cc # pylint: disable=wrong-import-position
+import ctsm.crop_calendars.generate_gdds_functions as gddfn # pylint: disable=wrong-import-position
-# Info re: PFT parameter set
-my_clm_ver = 51
-my_clm_subver = "c211112"
+# Global constants
+PARAMFILE_DIR = "/glade/campaign/cesm/cesmdata/cseg/inputdata/lnd/clm2/paramdata"
+MY_CLM_VER = 51
+MY_CLM_SUBVER = "c211112"
def main(
@@ -47,6 +44,7 @@ def main(
skip_crops=None,
logger=None,
):
+ # pylint: disable=missing-function-docstring,too-many-statements
# Directories to save output files and figures
if not output_dir:
if only_make_figs:
@@ -73,11 +71,14 @@ def main(
# Disable plotting if any plotting module is unavailable
if save_figs:
try:
+ # pylint: disable=import-outside-toplevel,unused-import,import-error
import cartopy
import matplotlib
- except:
+ except ModuleNotFoundError as exc:
if only_make_figs:
- raise RuntimeError("only_make_figs True but not all plotting modules are available")
+ raise RuntimeError(
+ "only_make_figs True but not all plotting modules are available"
+ ) from exc
gddfn.log(logger, "Not all plotting modules are available; disabling save_figs")
save_figs = False
@@ -95,19 +96,21 @@ def main(
##########################
if not only_make_figs:
- # Keep 1 extra year to avoid incomplete final growing season for crops harvested after Dec. 31.
- y1_import_str = f"{first_season+1}-01-01"
- yN_import_str = f"{last_season+2}-01-01"
+ # Keep 1 extra year to avoid incomplete final growing season for crops
+ # harvested after Dec. 31.
+ yr_1_import_str = f"{first_season+1}-01-01"
+ yr_n_import_str = f"{last_season+2}-01-01"
gddfn.log(
logger,
- f"Importing netCDF time steps {y1_import_str} through {yN_import_str} (years are +1 because of CTSM output naming)",
+ f"Importing netCDF time steps {yr_1_import_str} through {yr_n_import_str} "
+ + "(years are +1 because of CTSM output naming)",
)
pickle_file = os.path.join(output_dir, f"{first_season}-{last_season}.pickle")
h2_ds_file = os.path.join(output_dir, f"{first_season}-{last_season}.h2_ds.nc")
if os.path.exists(pickle_file):
- with open(pickle_file, "rb") as f:
+ with open(pickle_file, "rb") as file:
(
first_season,
last_season,
@@ -115,14 +118,14 @@ def main(
gddaccum_yp_list,
gddharv_yp_list,
skip_patches_for_isel_nan_lastyear,
- lastYear_active_patch_indices_list,
+ lastyear_active_patch_indices_list,
incorrectly_daily,
save_figs,
incl_vegtypes_str,
incl_patches1d_itype_veg,
mxsowings,
skip_crops,
- ) = pickle.load(f)
+ ) = pickle.load(file)
print(f"Will resume import at {pickle_year+1}")
h2_ds = None
else:
@@ -132,17 +135,17 @@ def main(
gddaccum_yp_list = []
gddharv_yp_list = []
incl_vegtypes_str = None
- lastYear_active_patch_indices_list = None
+ lastyear_active_patch_indices_list = None
sdates_rx = sdates_file
hdates_rx = hdates_file
if not unlimited_season_length:
- mxmats = cc.import_max_gs_length(paramfile_dir, my_clm_ver, my_clm_subver)
+ mxmats = cc.import_max_gs_length(PARAMFILE_DIR, MY_CLM_VER, MY_CLM_SUBVER)
else:
mxmats = None
- for y, thisYear in enumerate(np.arange(first_season + 1, last_season + 3)):
- if thisYear <= pickle_year:
+ for yr_index, this_yr in enumerate(np.arange(first_season + 1, last_season + 3)):
+ if this_yr <= pickle_year:
continue
(
@@ -152,7 +155,7 @@ def main(
gddaccum_yp_list,
gddharv_yp_list,
skip_patches_for_isel_nan_lastyear,
- lastYear_active_patch_indices_list,
+ lastyear_active_patch_indices_list,
incorrectly_daily,
incl_vegtypes_str,
incl_patches1d_itype_veg,
@@ -160,14 +163,14 @@ def main(
) = gddfn.import_and_process_1yr(
first_season,
last_season,
- y,
- thisYear,
+ yr_index,
+ this_yr,
sdates_rx,
hdates_rx,
gddaccum_yp_list,
gddharv_yp_list,
skip_patches_for_isel_nan_lastyear,
- lastYear_active_patch_indices_list,
+ lastyear_active_patch_indices_list,
incorrectly_daily,
input_dir,
incl_vegtypes_str,
@@ -179,16 +182,16 @@ def main(
)
gddfn.log(logger, f" Saving pickle file ({pickle_file})...")
- with open(pickle_file, "wb") as f:
+ with open(pickle_file, "wb") as file:
pickle.dump(
[
first_season,
last_season,
- thisYear,
+ this_yr,
gddaccum_yp_list,
gddharv_yp_list,
skip_patches_for_isel_nan_lastyear,
- lastYear_active_patch_indices_list,
+ lastyear_active_patch_indices_list,
incorrectly_daily,
save_figs,
incl_vegtypes_str,
@@ -196,7 +199,7 @@ def main(
mxsowings,
skip_crops,
],
- f,
+ file,
protocol=-1,
)
@@ -248,35 +251,35 @@ def main(
]
dummy_vars = []
dummy_longnames = []
- for v, thisVar in enumerate(all_vars):
- if thisVar not in gdd_maps_ds:
- dummy_vars.append(thisVar)
- dummy_longnames.append(all_longnames[v])
+ for var_index, this_var in enumerate(all_vars):
+ if this_var not in gdd_maps_ds:
+ dummy_vars.append(this_var)
+ dummy_longnames.append(all_longnames[var_index])
- def make_dummy(thisCrop_gridded, addend):
- dummy_gridded = thisCrop_gridded
+ def make_dummy(this_crop_gridded, addend):
+ dummy_gridded = this_crop_gridded
dummy_gridded.values = dummy_gridded.values * 0 + addend
return dummy_gridded
- for v in gdd_maps_ds:
- thisCrop_gridded = gdd_maps_ds[v].copy()
+ for var in gdd_maps_ds:
+ this_crop_gridded = gdd_maps_ds[var].copy()
break
- dummy_gridded = make_dummy(thisCrop_gridded, -1)
+ dummy_gridded = make_dummy(this_crop_gridded, -1)
- for v, thisVar in enumerate(dummy_vars):
- if thisVar in gdd_maps_ds:
+ for var_index, this_var in enumerate(dummy_vars):
+ if this_var in gdd_maps_ds:
gddfn.error(
- logger, f"{thisVar} is already in gdd_maps_ds. Why overwrite it with dummy?"
+ logger, f"{this_var} is already in gdd_maps_ds. Why overwrite it with dummy?"
)
- dummy_gridded.name = thisVar
- dummy_gridded.attrs["long_name"] = dummy_longnames[v]
- gdd_maps_ds[thisVar] = dummy_gridded
+ dummy_gridded.name = this_var
+ dummy_gridded.attrs["long_name"] = dummy_longnames[var_index]
+ gdd_maps_ds[this_var] = dummy_gridded
# Add lon/lat attributes
- def add_lonlat_attrs(ds):
- ds.lon.attrs = {"long_name": "coordinate_longitude", "units": "degrees_east"}
- ds.lat.attrs = {"long_name": "coordinate_latitude", "units": "degrees_north"}
- return ds
+ def add_lonlat_attrs(this_ds):
+ this_ds.lon.attrs = {"long_name": "coordinate_longitude", "units": "degrees_east"}
+ this_ds.lat.attrs = {"long_name": "coordinate_latitude", "units": "degrees_north"}
+ return this_ds
gdd_maps_ds = add_lonlat_attrs(gdd_maps_ds)
gddharv_maps_ds = add_lonlat_attrs(gddharv_maps_ds)
@@ -297,14 +300,17 @@ def add_lonlat_attrs(ds):
def save_gdds(sdates_file, hdates_file, outfile, gdd_maps_ds, sdates_rx):
# Set up output file from template (i.e., prescribed sowing dates).
template_ds = xr.open_dataset(sdates_file, decode_times=True)
- for v in template_ds:
- if "sdate" in v:
- template_ds = template_ds.drop(v)
+ for var in template_ds:
+ if "sdate" in var:
+ template_ds = template_ds.drop(var)
template_ds.to_netcdf(path=outfile, format="NETCDF3_CLASSIC")
template_ds.close()
# Add global attributes
- comment = f"Derived from CLM run plus crop calendar input files {os.path.basename(sdates_file) and {os.path.basename(hdates_file)}}."
+ comment = (
+ "Derived from CLM run plus crop calendar input files "
+ + f"{os.path.basename(sdates_file) and {os.path.basename(hdates_file)}}."
+ )
gdd_maps_ds.attrs = {
"author": "Sam Rabin (sam.rabin@gmail.com)",
"comment": comment,
@@ -384,7 +390,11 @@ def add_attrs_to_map_ds(
parser.add_argument(
"-i",
"--input-dir",
- help="Directory where run outputs can be found (and where outputs will go). If --only-make-figs, this is the directory with the preprocessed files (e.g., *.pickle file).",
+ help=(
+ "Directory where run outputs can be found (and where outputs will go). If "
+ + "--only-make-figs, this is the directory with the preprocessed files (e.g., *.pickle "
+ + "file)."
+ ),
required=True,
)
parser.add_argument(
@@ -464,7 +474,6 @@ def add_attrs_to_map_ds(
args = parser.parse_args(sys.argv[1:])
for k, v in sorted(vars(args).items()):
print(f"{k}: {v}")
- save_figs = not args.dont_save_figs
# Call main()
main(
@@ -474,7 +483,7 @@ def add_attrs_to_map_ds(
sdates_file=args.sdates_file,
hdates_file=args.hdates_file,
output_dir=args.output_dir,
- save_figs=save_figs,
+ save_figs=not args.dont_save_figs,
only_make_figs=args.only_make_figs,
run1_name=args.run1_name,
run2_name=args.run2_name,
@@ -484,9 +493,3 @@ def add_attrs_to_map_ds(
unlimited_season_length=args.unlimited_season_length,
skip_crops=args.skip_crops,
)
-
-# main(input_dir="/Users/Shared/CESM_runs/tests_10x15_20230329_gddgen/202303301820",
-# sdates_file="/Users/Shared/CESM_work/crop_dates_mostrice/sdates_ggcmi_crop_calendar_phase3_v1.01_nninterp-f10_f10_mg37.2000-2000.20230330_165301.nc",
-# hdates_file="/Users/Shared/CESM_work/crop_dates_mostrice/hdates_ggcmi_crop_calendar_phase3_v1.01_nninterp-f10_f10_mg37.2000-2000.20230330_165301.nc",
-# first_season=1997, last_season=2003,
-# save_figs=False)
diff --git a/python/ctsm/crop_calendars/generate_gdds_functions.py b/python/ctsm/crop_calendars/generate_gdds_functions.py
index cb05f1920d..8af2fdc049 100644
--- a/python/ctsm/crop_calendars/generate_gdds_functions.py
+++ b/python/ctsm/crop_calendars/generate_gdds_functions.py
@@ -1,85 +1,102 @@
-import numpy as np
-import xarray as xr
+"""
+Functions to support generate_gdds.py
+"""
+# pylint: disable=too-many-lines,too-many-statements
import warnings
import os
import glob
import datetime as dt
from importlib import util as importlib_util
+import numpy as np
+import xarray as xr
-# Import the CTSM Python utilities.
-# sys.path.insert() is necessary for RXCROPMATURITY to work. The fact that it's calling this script in the RUN phase seems to require the python/ directory to be manually added to path.
-_CTSM_PYTHON = os.path.join(
- os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir, os.pardir, "python"
-)
-import sys
-
-sys.path.insert(1, _CTSM_PYTHON)
import ctsm.crop_calendars.cropcal_utils as utils
import ctsm.crop_calendars.cropcal_module as cc
+from ctsm.crop_calendars.xr_flexsel import xr_flexsel
+from ctsm.crop_calendars.grid_one_variable import grid_one_variable
+from ctsm.crop_calendars.import_ds import import_ds
-can_plot = True
+CAN_PLOT = True
try:
+ # pylint: disable=wildcard-import,unused-wildcard-import
+ # pylint: disable=import-error
from ctsm.crop_calendars.cropcal_figs_module import *
from matplotlib.transforms import Bbox
warnings.filterwarnings(
"ignore",
- message="__len__ for multi-part geometries is deprecated and will be removed in Shapely 2.0. Check the length of the `geoms` property instead to get the number of parts of a multi-part geometry.",
+ message=(
+ "__len__ for multi-part geometries is deprecated and will be removed in Shapely "
+ + "2.0. Check the length of the `geoms` property instead to get the number of "
+ + "parts of a multi-part geometry."
+ ),
)
warnings.filterwarnings(
"ignore",
- message="Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.",
+ message=(
+ "Iteration over multi-part geometries is deprecated and will be removed in Shapely "
+ + "2.0. Use the `geoms` property to access the constituent parts of a multi-part "
+ + "geometry."
+ ),
)
print("Will (attempt to) produce harvest requirement map figure files.")
-except:
+except ModuleNotFoundError:
print("Will NOT produce harvest requirement map figure files.")
- can_plot = False
+ CAN_PLOT = False
-# Functions to simultaneously print to console and to log file
def log(logger, string):
+ """
+ Simultaneously print INFO messages to console and to log file
+ """
print(string)
logger.info(string)
def error(logger, string):
+ """
+ Simultaneously print ERROR messages to console and to log file
+ """
logger.error(string)
raise RuntimeError(string)
def check_sdates(dates_ds, sdates_rx, logger, verbose=False):
+ """
+ Checking that input and output sdates match
+ """
log(logger, " Checking that input and output sdates match...")
- sdates_grid = utils.grid_one_variable(dates_ds, "SDATES")
+ sdates_grid = grid_one_variable(dates_ds, "SDATES")
all_ok = True
any_found = False
vegtypes_skipped = []
vegtypes_included = []
- for i, vt_str in enumerate(dates_ds.vegtype_str.values):
+ for i, vegtype_str in enumerate(dates_ds.vegtype_str.values):
# Input
- vt = dates_ds.ivt.values[i]
- thisVar = f"gs1_{vt}"
- if thisVar not in sdates_rx:
- vegtypes_skipped = vegtypes_skipped + [vt_str]
+ vegtype_int = dates_ds.ivt.values[i]
+ this_var = f"gs1_{vegtype_int}"
+ if this_var not in sdates_rx:
+ vegtypes_skipped = vegtypes_skipped + [vegtype_str]
# log(logger, f" {vt_str} ({vt}) SKIPPED...")
continue
- vegtypes_included = vegtypes_included + [vt_str]
+ vegtypes_included = vegtypes_included + [vegtype_str]
any_found = True
if verbose:
- log(logger, f" {vt_str} ({vt})...")
- in_map = sdates_rx[thisVar].squeeze(drop=True)
+ log(logger, f" {vegtype_str} ({vegtype_int})...")
+ in_map = sdates_rx[this_var].squeeze(drop=True)
# Output
- out_map = sdates_grid.sel(ivt_str=vt_str).squeeze(drop=True)
+ out_map = sdates_grid.sel(ivt_str=vegtype_str).squeeze(drop=True)
# Check for differences
diff_map = out_map - in_map
diff_map_notnan = diff_map.values[np.invert(np.isnan(diff_map.values))]
if np.any(diff_map_notnan):
- log(logger, f"Difference(s) found in {vt_str}")
+ log(logger, f"Difference(s) found in {vegtype_str}")
here = np.where(diff_map_notnan)
log(logger, "in:")
in_map_notnan = in_map.values[np.invert(np.isnan(diff_map.values))]
@@ -91,7 +108,7 @@ def check_sdates(dates_ds, sdates_rx, logger, verbose=False):
log(logger, diff_map_notnan[here][0:4])
all_ok = False
- if not (any_found):
+ if not any_found:
error(logger, "No matching variables found in sdates_rx!")
# Sanity checks for included vegetation types
@@ -102,7 +119,8 @@ def check_sdates(dates_ds, sdates_rx, logger, verbose=False):
elif vegtypes_skipped_weird:
log(
logger,
- f"\nWarning: Some crop types had output rainfed patches but no irrigated patches: {vegtypes_skipped_weird}",
+ "\nWarning: Some crop types had output rainfed patches but no irrigated patches: "
+ + f"{vegtypes_skipped_weird}",
)
if all_ok:
@@ -111,34 +129,42 @@ def check_sdates(dates_ds, sdates_rx, logger, verbose=False):
error(logger, " ❌ Input and output sdates differ.")
-def import_rx_dates(s_or_h, date_inFile, incl_patches1d_itype_veg, mxsowings, logger):
- if isinstance(date_inFile, xr.Dataset):
- return date_inFile
- elif not isinstance(date_inFile, str):
+def import_rx_dates(s_or_h, date_infile, incl_patches1d_itype_veg, mxsowings, logger):
+ """
+ Import prescribed sowing or harvest dates
+ """
+ if isinstance(date_infile, xr.Dataset):
+ return date_infile
+ if not isinstance(date_infile, str):
error(
logger,
- f"Importing {s_or_h}dates_rx: Expected date_inFile to be str or DataArray, not {type(date_inFile)}",
+ f"Importing {s_or_h}dates_rx: Expected date_infile to be str or DataArray,"
+ + f"not {type(date_infile)}",
)
# Which vegetation types were simulated?
- itype_veg_toImport = np.unique(incl_patches1d_itype_veg)
+ itype_veg_to_import = np.unique(incl_patches1d_itype_veg)
- date_varList = []
- for i in itype_veg_toImport:
- for g in np.arange(mxsowings):
- thisVar = f"{s_or_h}date{g+1}_{i}"
- date_varList = date_varList + [thisVar]
+ date_var_list = []
+ for i in itype_veg_to_import:
+ for n_sowing in np.arange(mxsowings):
+ this_var = f"{s_or_h}date{n_sowing+1}_{i}"
+ date_var_list = date_var_list + [this_var]
- ds = utils.import_ds(date_inFile, myVars=date_varList)
+ this_ds = import_ds(date_infile, my_vars=date_var_list)
- for v in ds:
- ds = ds.rename({v: v.replace(f"{s_or_h}date", "gs")})
+ for var in this_ds:
+ this_ds = this_ds.rename({var: var.replace(f"{s_or_h}date", "gs")})
- return ds
+ return this_ds
-def thisCrop_map_to_patches(lon_points, lat_points, map_ds, vegtype_int):
- # xarray pointwise indexing; see https://xarray.pydata.org/en/stable/user-guide/indexing.html#more-advanced-indexing
+def this_crop_map_to_patches(lon_points, lat_points, map_ds, vegtype_int):
+ """
+ Given a map, get a vector of patches
+ """
+ # xarray pointwise indexing;
+ # see https://xarray.pydata.org/en/stable/user-guide/indexing.html#more-advanced-indexing
return (
map_ds[f"gs1_{vegtype_int}"]
.sel(lon=xr.DataArray(lon_points, dims="patch"), lat=xr.DataArray(lat_points, dims="patch"))
@@ -146,8 +172,10 @@ def thisCrop_map_to_patches(lon_points, lat_points, map_ds, vegtype_int):
)
-# Get and grid mean GDDs in GGCMI growing season
def yp_list_to_ds(yp_list, daily_ds, incl_vegtypes_str, dates_rx, longname_prefix, logger):
+ """
+ Get and grid mean GDDs in GGCMI growing season
+ """
# Get means
warnings.filterwarnings(
"ignore", message="Mean of empty slice"
@@ -160,44 +188,45 @@ def yp_list_to_ds(yp_list, daily_ds, incl_vegtypes_str, dates_rx, longname_prefi
# Grid
ds_out = xr.Dataset()
- for c, ra in enumerate(p_list):
- if isinstance(ra, type(None)):
+ for this_crop_int, data in enumerate(p_list):
+ if isinstance(data, type(None)):
continue
- thisCrop_str = incl_vegtypes_str[c]
- log(logger, f" {thisCrop_str}...")
- newVar = f"gdd1_{utils.ivt_str2int(thisCrop_str)}"
- ds = daily_ds.isel(
- patch=np.where(daily_ds.patches1d_itype_veg_str.values == thisCrop_str)[0]
+ this_crop_str = incl_vegtypes_str[this_crop_int]
+ log(logger, f" {this_crop_str}...")
+ new_var = f"gdd1_{utils.ivt_str2int(this_crop_str)}"
+ this_ds = daily_ds.isel(
+ patch=np.where(daily_ds.patches1d_itype_veg_str.values == this_crop_str)[0]
)
- template_da = ds.patches1d_itype_veg_str
- da = xr.DataArray(
- data=ra,
+ template_da = this_ds.patches1d_itype_veg_str
+ this_da = xr.DataArray(
+ data=data,
coords=template_da.coords,
- attrs={"units": "GDD", "long_name": f"{longname_prefix}{thisCrop_str}"},
+ attrs={"units": "GDD", "long_name": f"{longname_prefix}{this_crop_str}"},
)
# Grid this crop
- ds["tmp"] = da
- da_gridded = utils.grid_one_variable(ds, "tmp", vegtype=thisCrop_str).squeeze(drop=True)
+ this_ds["tmp"] = this_da
+ da_gridded = grid_one_variable(this_ds, "tmp", vegtype=this_crop_str)
+ da_gridded = da_gridded.squeeze(drop=True)
# Add singleton time dimension and save to output Dataset
da_gridded = da_gridded.expand_dims(time=dates_rx.time)
- ds_out[newVar] = da_gridded
+ ds_out[new_var] = da_gridded
return ds_out
def import_and_process_1yr(
- y1,
- yN,
- y,
- thisYear,
+ year_1,
+ year_n,
+ year_index,
+ this_year,
sdates_rx,
hdates_rx,
gddaccum_yp_list,
gddharv_yp_list,
- skip_patches_for_isel_nan_lastyear,
- lastYear_active_patch_indices_list,
+ skip_patches_for_isel_nan_last_year,
+ last_year_active_patch_indices_list,
incorrectly_daily,
indir,
incl_vegtypes_str_in,
@@ -207,8 +236,11 @@ def import_and_process_1yr(
skip_crops,
logger,
):
+ """
+ Import one year of CLM output data for GDD generation
+ """
save_figs = True
- log(logger, f"netCDF year {thisYear}...")
+ log(logger, f"netCDF year {this_year}...")
log(logger, dt.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
# Without dask, this can take a LONG time at resolutions finer than 2-deg
@@ -233,11 +265,11 @@ def import_and_process_1yr(
crops_to_read = utils.define_mgdcrop_list()
print(h1_filelist)
- dates_ds = utils.import_ds(
+ dates_ds = import_ds(
h1_filelist,
- myVars=["SDATES", "HDATES"],
- myVegtypes=crops_to_read,
- timeSlice=slice(f"{thisYear}-01-01", f"{thisYear}-12-31"),
+ my_vars=["SDATES", "HDATES"],
+ my_vegtypes=crops_to_read,
+ time_slice=slice(f"{this_year}-01-01", f"{this_year}-12-31"),
chunks=chunks,
)
@@ -261,8 +293,8 @@ def import_and_process_1yr(
np.sum(~np.isnan(dates_ds.HDATES.values), axis=dates_ds.HDATES.dims.index("mxharvests"))
== 0
)
- N_unmatched_nans = np.sum(sdates_all_nan != hdates_all_nan)
- if N_unmatched_nans > 0:
+ n_unmatched_nans = np.sum(sdates_all_nan != hdates_all_nan)
+ if n_unmatched_nans > 0:
error(logger, "Output SDATE and HDATE NaN masks do not match.")
if np.sum(~np.isnan(dates_ds.SDATES.values)) == 0:
error(logger, "All SDATES are NaN!")
@@ -270,15 +302,15 @@ def import_and_process_1yr(
# Just work with non-NaN patches for now
skip_patches_for_isel_nan = np.where(sdates_all_nan)[0]
incl_patches_for_isel_nan = np.where(~sdates_all_nan)[0]
- different_nan_mask = y > 0 and not np.array_equal(
- skip_patches_for_isel_nan_lastyear, skip_patches_for_isel_nan
+ different_nan_mask = year_index > 0 and not np.array_equal(
+ skip_patches_for_isel_nan_last_year, skip_patches_for_isel_nan
)
if different_nan_mask:
log(logger, " Different NaN mask than last year")
incl_thisyr_but_nan_lastyr = [
dates_ds.patch.values[p]
for p in incl_patches_for_isel_nan
- if p in skip_patches_for_isel_nan_lastyear
+ if p in skip_patches_for_isel_nan_last_year
]
else:
incl_thisyr_but_nan_lastyr = []
@@ -286,14 +318,15 @@ def import_and_process_1yr(
if skipping_patches_for_isel_nan:
log(
logger,
- f" Ignoring {len(skip_patches_for_isel_nan)} patches with all-NaN sowing and harvest dates.",
+ f" Ignoring {len(skip_patches_for_isel_nan)} patches with all-NaN sowing and "
+ + "harvest dates.",
)
dates_incl_ds = dates_ds.isel(patch=incl_patches_for_isel_nan)
else:
dates_incl_ds = dates_ds
incl_patches1d_itype_veg = dates_incl_ds.patches1d_itype_veg
- if y == 0:
+ if year_index == 0:
incl_vegtypes_str = [c for c in dates_incl_ds.vegtype_str.values if c not in skip_crops]
else:
incl_vegtypes_str = incl_vegtypes_str_in
@@ -304,13 +337,15 @@ def import_and_process_1yr(
if incl_vegtypes_str != list(dates_incl_ds.vegtype_str.values):
error(
logger,
- f"Included veg types differ. Previously {incl_vegtypes_str}, now {dates_incl_ds.vegtype_str.values}",
+ f"Included veg types differ. Previously {incl_vegtypes_str}, "
+ + f"now {dates_incl_ds.vegtype_str.values}",
)
if np.sum(~np.isnan(dates_incl_ds.SDATES.values)) == 0:
error(logger, "All SDATES are NaN after ignoring those patches!")
- # Some patches can have -1 sowing date?? Hopefully just an artifact of me incorrectly saving SDATES/HDATES daily.
+ # Some patches can have -1 sowing date?? Hopefully just an artifact of me incorrectly saving
+ # SDATES/HDATES daily.
mxsowings = dates_ds.dims["mxsowings"]
mxsowings_dim = dates_ds.SDATES.dims.index("mxsowings")
skip_patches_for_isel_sdatelt1 = np.where(dates_incl_ds.SDATES.values < 1)[1]
@@ -322,7 +357,8 @@ def import_and_process_1yr(
if incorrectly_daily and list(unique_hdates) == [364]:
log(
logger,
- f" ❗ {len(skip_patches_for_isel_sdatelt1)} patches have SDATE < 1, but this might have just been because of incorrectly daily outputs. Setting them to 365.",
+ f" ❗ {len(skip_patches_for_isel_sdatelt1)} patches have SDATE < 1, but this"
+ + "might have just been because of incorrectly daily outputs. Setting them to 365.",
)
new_sdates_ar = dates_incl_ds.SDATES.values
if mxsowings_dim != 0:
@@ -336,13 +372,16 @@ def import_and_process_1yr(
else:
error(
logger,
- f"{len(skip_patches_for_isel_sdatelt1)} patches have SDATE < 1. Unique affected hdates: {unique_hdates}",
+ f"{len(skip_patches_for_isel_sdatelt1)} patches have SDATE < 1. "
+ + f"Unique affected hdates: {unique_hdates}",
)
- # Some patches can have -1 harvest date?? Hopefully just an artifact of me incorrectly saving SDATES/HDATES daily. Can also happen if patch wasn't active last year
+ # Some patches can have -1 harvest date?? Hopefully just an artifact of me incorrectly saving
+ # SDATES/HDATES daily. Can also happen if patch wasn't active last year
mxharvests = dates_ds.dims["mxharvests"]
mxharvests_dim = dates_ds.HDATES.dims.index("mxharvests")
- # If a patch was inactive last year but was either (a) harvested the last time it was active or (b) was never active, it will have -1 as its harvest date this year. Such instances are okay.
+ # If a patch was inactive last year but was either (a) harvested the last time it was active or
+ # (b) was never active, it will have -1 as its harvest date this year. Such instances are okay.
hdates_thisyr = dates_incl_ds.HDATES.isel(mxharvests=0)
skip_patches_for_isel_hdatelt1 = np.where(hdates_thisyr.values < 1)[0]
skipping_patches_for_isel_hdatelt1 = len(skip_patches_for_isel_hdatelt1) > 0
@@ -352,7 +391,6 @@ def import_and_process_1yr(
patch=incl_thisyr_but_nan_lastyr
)
if np.any(hdates_thisyr_where_nan_lastyr < 1):
- # patches_to_fix = hdates_thisyr_where_nan_lastyr.isel(patch=np.where(hdates_thisyr_where_nan_lastyr < 1)[0]).patch.values
new_hdates = dates_incl_ds.HDATES.values
if mxharvests_dim != 0:
error(logger, "Code this up")
@@ -360,7 +398,10 @@ def import_and_process_1yr(
here = [patch_list.index(x) for x in incl_thisyr_but_nan_lastyr]
log(
logger,
- f" ❗ {len(here)} patches have harvest date -1 because they weren't active last year (and were either never active or were harvested when last active). Ignoring, but you should have done a run with patches always active if they are ever active in the real LU timeseries.",
+ f" ❗ {len(here)} patches have harvest date -1 because they weren't active last"
+ + "year (and were either never active or were harvested when last active). "
+ + "Ignoring, but you should have done a run with patches always active if they are "
+ + "ever active in the real LU timeseries.",
)
new_hdates[0, here] = sdates_thisyr_where_nan_lastyr.values - 1
dates_incl_ds["HDATES"] = xr.DataArray(
@@ -382,7 +423,9 @@ def import_and_process_1yr(
if incorrectly_daily and list(unique_sdates) == [1]:
log(
logger,
- f" ❗ {len(skip_patches_for_isel_hdatelt1)} patches have HDATE < 1??? Seems like this might have just been because of incorrectly daily outputs; setting them to 365.",
+ f" ❗ {len(skip_patches_for_isel_hdatelt1)} patches have HDATE < 1??? Seems like "
+ + "this might have just been because of incorrectly daily outputs; setting them to "
+ + "365.",
)
new_hdates_ar = dates_incl_ds.HDATES.values
if mxharvests_dim != 0:
@@ -396,18 +439,21 @@ def import_and_process_1yr(
else:
error(
logger,
- f"{len(skip_patches_for_isel_hdatelt1)} patches have HDATE < 1. Possible causes:\n * Not using constant crop areas (e.g., flanduse_timeseries from make_lu_for_gddgen.py)\n * Not skipping the first 2 years of output\nUnique affected sdates: {unique_sdates}",
+ f"{len(skip_patches_for_isel_hdatelt1)} patches have HDATE < 1. Possible causes:\n"
+ + "* Not using constant crop areas (e.g., flanduse_timeseries from "
+ + "make_lu_for_gddgen.py)\n * Not skipping the first 2 years of output\n"
+ + f"Unique affected sdates: {unique_sdates}",
)
# Make sure there was only one harvest per year
- N_extra_harv = np.sum(
+ n_extra_harv = np.sum(
np.nanmax(
dates_incl_ds.HDATES.isel(mxharvests=slice(1, mxharvests)).values, axis=mxharvests_dim
)
>= 1
)
- if N_extra_harv > 0:
- error(logger, f"{N_extra_harv} patches have >1 harvest.")
+ if n_extra_harv > 0:
+ error(logger, f"{n_extra_harv} patches have >1 harvest.")
# Make sure harvest happened the day before sowing
sdates_clm = dates_incl_ds.SDATES.values.squeeze()
@@ -432,13 +478,13 @@ def import_and_process_1yr(
if mxmats and (imported_sdates or imported_hdates):
print(" Limiting growing season length...")
hdates_rx = hdates_rx_orig.copy()
- for v in hdates_rx_orig:
- if v == "time_bounds":
+ for var in hdates_rx_orig:
+ if var == "time_bounds":
continue
# Get max growing season length
vegtype_int = int(
- v.split("_")[1]
+ var.split("_")[1]
) # netCDF variable name v should be something like gs1_17
vegtype_str = utils.ivt_int2str(vegtype_int)
if vegtype_str == "soybean":
@@ -452,42 +498,46 @@ def import_and_process_1yr(
continue
# Get "prescribed" growing season length
- gs_len_rx_da = get_gs_len_da(hdates_rx_orig[v] - sdates_rx[v])
+ gs_len_rx_da = get_gs_len_da(hdates_rx_orig[var] - sdates_rx[var])
not_ok = gs_len_rx_da.values > mxmat
if not np.any(not_ok):
print(f" Not limiting {vegtype_str}: No rx season > {mxmat} days")
continue
- hdates_limited = hdates_rx_orig[v].copy().values
- hdates_limited[np.where(not_ok)] = sdates_rx[v].values[np.where(not_ok)] + mxmat
+ hdates_limited = hdates_rx_orig[var].copy().values
+ hdates_limited[np.where(not_ok)] = sdates_rx[var].values[np.where(not_ok)] + mxmat
hdates_limited[np.where(hdates_limited > 365)] -= 365
if np.any(hdates_limited < 1):
raise RuntimeError("Limited hdates < 1")
- elif np.any(hdates_limited > 365):
+ if np.any(hdates_limited > 365):
raise RuntimeError("Limited hdates > 365")
- hdates_rx[v] = xr.DataArray(
- data=hdates_limited, coords=hdates_rx_orig[v].coords, attrs=hdates_rx_orig[v].attrs
+ hdates_rx[var] = xr.DataArray(
+ data=hdates_limited,
+ coords=hdates_rx_orig[var].coords,
+ attrs=hdates_rx_orig[var].attrs,
)
print(
- f" Limited {vegtype_str} growing season length to {mxmat}. Longest was {int(np.max(gs_len_rx_da.values))}, now {int(np.max(get_gs_len_da(hdates_rx[v] - sdates_rx[v]).values))}."
+ f" Limited {vegtype_str} growing season length to {mxmat}. Longest was "
+ + f"{int(np.max(gs_len_rx_da.values))}, now "
+ + f"{int(np.max(get_gs_len_da(hdates_rx[var] - sdates_rx[var]).values))}."
)
else:
hdates_rx = hdates_rx_orig
- log(logger, f" Importing accumulated GDDs...")
+ log(logger, " Importing accumulated GDDs...")
clm_gdd_var = "GDDACCUM"
- myVars = [clm_gdd_var, "GDDHARV"]
- pattern = os.path.join(indir, f"*h2.{thisYear-1}-01-01*.nc")
+ my_vars = [clm_gdd_var, "GDDHARV"]
+ pattern = os.path.join(indir, f"*h2.{this_year-1}-01-01*.nc")
h2_files = glob.glob(pattern)
if not h2_files:
- pattern = os.path.join(indir, f"*h2.{thisYear-1}-01-01*.nc.base")
+ pattern = os.path.join(indir, f"*h2.{this_year-1}-01-01*.nc.base")
h2_files = glob.glob(pattern)
if not h2_files:
- error(logger, f"No files found matching pattern '*h2.{thisYear-1}-01-01*.nc(.base)'")
- h2_ds = utils.import_ds(
+ error(logger, f"No files found matching pattern '*h2.{this_year-1}-01-01*.nc(.base)'")
+ h2_ds = import_ds(
h2_files,
- myVars=myVars,
- myVegtypes=crops_to_read,
+ my_vars=my_vars,
+ my_vegtypes=crops_to_read,
chunks=chunks,
)
@@ -503,181 +553,209 @@ def import_and_process_1yr(
error(logger, f"All {clm_gdd_var} values are zero!")
# Get standard datetime axis for outputs
- Nyears = yN - y1 + 1
+ n_years = year_n - year_1 + 1
if len(gddaccum_yp_list) == 0:
- lastYear_active_patch_indices_list = [None for vegtype_str in incl_vegtypes_str]
+ last_year_active_patch_indices_list = [None for vegtype_str in incl_vegtypes_str]
gddaccum_yp_list = [None for vegtype_str in incl_vegtypes_str]
if save_figs:
gddharv_yp_list = [None for vegtype_str in incl_vegtypes_str]
incl_vegtype_indices = []
- for v, vegtype_str in enumerate(incl_vegtypes_str):
+ for var, vegtype_str in enumerate(incl_vegtypes_str):
if vegtype_str in skip_crops:
log(logger, f" SKIPPING {vegtype_str}")
continue
vegtype_int = utils.vegtype_str2int(vegtype_str)[0]
- thisCrop_full_patchlist = list(utils.xr_flexsel(h2_ds, vegtype=vegtype_str).patch.values)
+ this_crop_full_patchlist = list(xr_flexsel(h2_ds, vegtype=vegtype_str).patch.values)
# Get time series for each patch of this type
- thisCrop_ds = utils.xr_flexsel(h2_incl_ds, vegtype=vegtype_str)
- thisCrop_gddaccum_da = thisCrop_ds[clm_gdd_var]
+ this_crop_ds = xr_flexsel(h2_incl_ds, vegtype=vegtype_str)
+ this_crop_gddaccum_da = this_crop_ds[clm_gdd_var]
if save_figs:
- thisCrop_gddharv_da = thisCrop_ds["GDDHARV"]
- if not thisCrop_gddaccum_da.size:
+ this_crop_gddharv_da = this_crop_ds["GDDHARV"]
+ if not this_crop_gddaccum_da.size:
continue
log(logger, f" {vegtype_str}...")
- incl_vegtype_indices = incl_vegtype_indices + [v]
+ incl_vegtype_indices = incl_vegtype_indices + [var]
# Get prescribed harvest dates for these patches
- lon_points = thisCrop_ds.patches1d_lon.values
- lat_points = thisCrop_ds.patches1d_lat.values
- thisCrop_hdates_rx = thisCrop_map_to_patches(lon_points, lat_points, hdates_rx, vegtype_int)
+ lon_points = this_crop_ds.patches1d_lon.values
+ lat_points = this_crop_ds.patches1d_lat.values
+ this_crop_hdates_rx = this_crop_map_to_patches(
+ lon_points, lat_points, hdates_rx, vegtype_int
+ )
- if isinstance(gddaccum_yp_list[v], type(None)):
- gddaccum_yp_list[v] = np.full((Nyears + 1, len(thisCrop_full_patchlist)), np.nan)
+ if isinstance(gddaccum_yp_list[var], type(None)):
+ gddaccum_yp_list[var] = np.full((n_years + 1, len(this_crop_full_patchlist)), np.nan)
if save_figs:
- gddharv_yp_list[v] = np.full((Nyears + 1, len(thisCrop_full_patchlist)), np.nan)
+ gddharv_yp_list[var] = np.full((n_years + 1, len(this_crop_full_patchlist)), np.nan)
# Get the accumulated GDDs at each prescribed harvest date
- gddaccum_atharv_p = np.full(thisCrop_hdates_rx.shape, np.nan)
+ gddaccum_atharv_p = np.full(this_crop_hdates_rx.shape, np.nan)
if save_figs:
- gddharv_atharv_p = np.full(thisCrop_hdates_rx.shape, np.nan)
- unique_rx_hdates = np.unique(thisCrop_hdates_rx.values)
+ gddharv_atharv_p = np.full(this_crop_hdates_rx.shape, np.nan)
+ unique_rx_hdates = np.unique(this_crop_hdates_rx.values)
# Build an indexing tuple
patches = []
i_patches = []
i_times = []
- for i, hdate in enumerate(unique_rx_hdates):
- here = np.where(thisCrop_hdates_rx.values == hdate)[0]
- patches += list(thisCrop_gddaccum_da.patch.values[here])
+ for hdate in unique_rx_hdates:
+ here = np.where(this_crop_hdates_rx.values == hdate)[0]
+ patches += list(this_crop_gddaccum_da.patch.values[here])
i_patches += list(here)
i_times += list(np.full((len(here),), int(hdate - 1)))
# Sort back to correct order
if not np.all(
- thisCrop_gddaccum_da.patch.values[:-1] <= thisCrop_gddaccum_da.patch.values[1:]
+ this_crop_gddaccum_da.patch.values[:-1] <= this_crop_gddaccum_da.patch.values[1:]
):
error(logger, "This code depends on DataArray patch list being sorted.")
sortorder = np.argsort(patches)
i_patches = list(np.array(i_patches)[np.array(sortorder)])
i_times = list(np.array(i_times)[np.array(sortorder)])
# Select using the indexing tuple
- gddaccum_atharv_p = thisCrop_gddaccum_da.values[(i_times, i_patches)]
+ gddaccum_atharv_p = this_crop_gddaccum_da.values[(i_times, i_patches)]
if save_figs:
- gddharv_atharv_p = thisCrop_gddharv_da.values[(i_times, i_patches)]
+ gddharv_atharv_p = this_crop_gddharv_da.values[(i_times, i_patches)]
if np.any(np.isnan(gddaccum_atharv_p)):
log(
logger,
- f" ❗ {np.sum(np.isnan(gddaccum_atharv_p))}/{len(gddaccum_atharv_p)} NaN after extracting GDDs accumulated at harvest",
+ f" ❗ {np.sum(np.isnan(gddaccum_atharv_p))}/{len(gddaccum_atharv_p)} "
+ + "NaN after extracting GDDs accumulated at harvest",
)
if save_figs and np.any(np.isnan(gddharv_atharv_p)):
log(
logger,
- f" ❗ {np.sum(np.isnan(gddharv_atharv_p))}/{len(gddharv_atharv_p)} NaN after extracting GDDHARV",
+ f" ❗ {np.sum(np.isnan(gddharv_atharv_p))}/{len(gddharv_atharv_p)} "
+ + "NaN after extracting GDDHARV",
)
# Assign these to growing seasons based on whether gs crossed new year
- thisYear_active_patch_indices = [
- thisCrop_full_patchlist.index(x) for x in thisCrop_ds.patch.values
+ this_year_active_patch_indices = [
+ this_crop_full_patchlist.index(x) for x in this_crop_ds.patch.values
]
- thisCrop_sdates_rx = thisCrop_map_to_patches(lon_points, lat_points, sdates_rx, vegtype_int)
- where_gs_thisyr = np.where(thisCrop_sdates_rx < thisCrop_hdates_rx)[0]
- tmp_gddaccum = np.full(thisCrop_sdates_rx.shape, np.nan)
+ this_crop_sdates_rx = this_crop_map_to_patches(
+ lon_points, lat_points, sdates_rx, vegtype_int
+ )
+ where_gs_thisyr = np.where(this_crop_sdates_rx < this_crop_hdates_rx)[0]
+ tmp_gddaccum = np.full(this_crop_sdates_rx.shape, np.nan)
tmp_gddaccum[where_gs_thisyr] = gddaccum_atharv_p[where_gs_thisyr]
if save_figs:
tmp_gddharv = np.full(tmp_gddaccum.shape, np.nan)
tmp_gddharv[where_gs_thisyr] = gddharv_atharv_p[where_gs_thisyr]
- if y > 0:
- lastYear_active_patch_indices = lastYear_active_patch_indices_list[v]
- where_gs_lastyr = np.where(thisCrop_sdates_rx > thisCrop_hdates_rx)[0]
- active_thisYear_where_gs_lastyr_indices = [
- thisYear_active_patch_indices[x] for x in where_gs_lastyr
+ if year_index > 0:
+ last_year_active_patch_indices = last_year_active_patch_indices_list[var]
+ where_gs_lastyr = np.where(this_crop_sdates_rx > this_crop_hdates_rx)[0]
+ active_this_year_where_gs_lastyr_indices = [
+ this_year_active_patch_indices[x] for x in where_gs_lastyr
]
- if not np.array_equal(lastYear_active_patch_indices, thisYear_active_patch_indices):
+ if not np.array_equal(last_year_active_patch_indices, this_year_active_patch_indices):
if incorrectly_daily:
log(
logger,
- " ❗ This year's active patch indices differ from last year's. Allowing because this might just be an artifact of incorrectly daily outputs, BUT RESULTS MUST NOT BE TRUSTED.",
+ " ❗ This year's active patch indices differ from last year's. "
+ + "Allowing because this might just be an artifact of incorrectly daily "
+ + "outputs, BUT RESULTS MUST NOT BE TRUSTED.",
)
else:
error(logger, "This year's active patch indices differ from last year's.")
# Make sure we're not about to overwrite any existing values.
if np.any(
- ~np.isnan(gddaccum_yp_list[v][y - 1, active_thisYear_where_gs_lastyr_indices])
+ ~np.isnan(
+ gddaccum_yp_list[var][year_index - 1, active_this_year_where_gs_lastyr_indices]
+ )
):
if incorrectly_daily:
log(
logger,
- " ❗ Unexpected non-NaN for last season's GDD accumulation. Allowing because this might just be an artifact of incorrectly daily outputs, BUT RESULTS MUST NOT BE TRUSTED.",
+ " ❗ Unexpected non-NaN for last season's GDD accumulation. "
+ + "Allowing because this might just be an artifact of incorrectly daily "
+ + "outputs, BUT RESULTS MUST NOT BE TRUSTED.",
)
else:
error(logger, "Unexpected non-NaN for last season's GDD accumulation")
if save_figs and np.any(
- ~np.isnan(gddharv_yp_list[v][y - 1, active_thisYear_where_gs_lastyr_indices])
+ ~np.isnan(
+ gddharv_yp_list[var][year_index - 1, active_this_year_where_gs_lastyr_indices]
+ )
):
if incorrectly_daily:
log(
logger,
- " ❗ Unexpected non-NaN for last season's GDDHARV. Allowing because this might just be an artifact of incorrectly daily outputs, BUT RESULTS MUST NOT BE TRUSTED.",
+ " ❗ Unexpected non-NaN for last season's GDDHARV. Allowing "
+ + "because this might just be an artifact of incorrectly daily outputs, "
+ + "BUT RESULTS MUST NOT BE TRUSTED.",
)
else:
error(logger, "Unexpected non-NaN for last season's GDDHARV")
# Fill.
- gddaccum_yp_list[v][y - 1, active_thisYear_where_gs_lastyr_indices] = gddaccum_atharv_p[
- where_gs_lastyr
- ]
+ gddaccum_yp_list[var][
+ year_index - 1, active_this_year_where_gs_lastyr_indices
+ ] = gddaccum_atharv_p[where_gs_lastyr]
if save_figs:
- gddharv_yp_list[v][
- y - 1, active_thisYear_where_gs_lastyr_indices
+ gddharv_yp_list[var][
+ year_index - 1, active_this_year_where_gs_lastyr_indices
] = gddharv_atharv_p[where_gs_lastyr]
# Last year's season should be filled out now; make sure.
if np.any(
- np.isnan(gddaccum_yp_list[v][y - 1, active_thisYear_where_gs_lastyr_indices])
+ np.isnan(
+ gddaccum_yp_list[var][year_index - 1, active_this_year_where_gs_lastyr_indices]
+ )
):
if incorrectly_daily:
log(
logger,
- " ❗ Unexpected NaN for last season's GDD accumulation. Allowing because this might just be an artifact of incorrectly daily outputs, BUT RESULTS MUST NOT BE TRUSTED.",
+ " ❗ Unexpected NaN for last season's GDD accumulation. Allowing "
+ + "because this might just be an artifact of incorrectly daily outputs, "
+ + "BUT RESULTS MUST NOT BE TRUSTED.",
)
else:
error(logger, "Unexpected NaN for last season's GDD accumulation.")
if save_figs and np.any(
- np.isnan(gddharv_yp_list[v][y - 1, active_thisYear_where_gs_lastyr_indices])
+ np.isnan(
+ gddharv_yp_list[var][year_index - 1, active_this_year_where_gs_lastyr_indices]
+ )
):
if incorrectly_daily:
log(
logger,
- " ❗ Unexpected NaN for last season's GDDHARV. Allowing because this might just be an artifact of incorrectly daily outputs, BUT RESULTS MUST NOT BE TRUSTED.",
+ " ❗ Unexpected NaN for last season's GDDHARV. Allowing because "
+ + "this might just be an artifact of incorrectly daily outputs, BUT "
+ + "RESULTS MUST NOT BE TRUSTED.",
)
else:
error(logger, "Unexpected NaN for last season's GDDHARV.")
- gddaccum_yp_list[v][y, thisYear_active_patch_indices] = tmp_gddaccum
+ gddaccum_yp_list[var][year_index, this_year_active_patch_indices] = tmp_gddaccum
if save_figs:
- gddharv_yp_list[v][y, thisYear_active_patch_indices] = tmp_gddharv
+ gddharv_yp_list[var][year_index, this_year_active_patch_indices] = tmp_gddharv
- # Make sure that NaN masks are the same for this year's sdates and 'filled-out' GDDs from last year
- if y > 0:
+ # Make sure that NaN masks are the same for this year's sdates and 'filled-out' GDDs from
+ # last year
+ if year_index > 0:
nanmask_output_sdates = np.isnan(
dates_ds.SDATES.isel(
mxsowings=0, patch=np.where(dates_ds.patches1d_itype_veg_str == vegtype_str)[0]
).values
)
- nanmask_output_gdds_lastyr = np.isnan(gddaccum_yp_list[v][y - 1, :])
+ nanmask_output_gdds_lastyr = np.isnan(gddaccum_yp_list[var][year_index - 1, :])
if not np.array_equal(nanmask_output_gdds_lastyr, nanmask_output_sdates):
if incorrectly_daily:
log(
logger,
- " ❗ NaN masks differ between this year's sdates and 'filled-out' GDDs from last year. Allowing because this might just be an artifact of incorrectly daily outputs, BUT RESULTS MUST NOT BE TRUSTED.",
+ " ❗ NaN masks differ between this year's sdates and 'filled-out' "
+ + "GDDs from last year. Allowing because this might just be an artifact of "
+ + "incorrectly daily outputs, BUT RESULTS MUST NOT BE TRUSTED.",
)
else:
error(
logger,
- "NaN masks differ between this year's sdates and 'filled-out' GDDs from last year",
+ "NaN masks differ between this year's sdates and 'filled-out' GDDs from "
+ + "last year",
)
- lastYear_active_patch_indices_list[v] = thisYear_active_patch_indices
+ last_year_active_patch_indices_list[var] = this_year_active_patch_indices
- skip_patches_for_isel_nan_lastyear = skip_patches_for_isel_nan
+ skip_patches_for_isel_nan_last_year = skip_patches_for_isel_nan
# Could save space by only saving variables needed for gridding
log(logger, " Saving h2_ds...")
@@ -689,8 +767,8 @@ def import_and_process_1yr(
hdates_rx,
gddaccum_yp_list,
gddharv_yp_list,
- skip_patches_for_isel_nan_lastyear,
- lastYear_active_patch_indices_list,
+ skip_patches_for_isel_nan_last_year,
+ last_year_active_patch_indices_list,
incorrectly_daily,
incl_vegtypes_str,
incl_patches1d_itype_veg,
@@ -698,35 +776,37 @@ def import_and_process_1yr(
)
-def get_multicrop_maps(ds, theseVars, crop_fracs_yx, dummy_fill, gdd_units):
+def get_multicrop_maps(this_ds, these_vars, crop_fracs_yx, dummy_fill, gdd_units):
+ # pylint: disable=missing-function-docstring
# Get GDDs for these crops
- da_eachCFT = xr.concat((ds[x] for i, x in enumerate(theseVars)), dim="cft")
- if "time" in ds.dims:
- da_eachCFT = da_eachCFT.isel(time=0, drop=True)
- da_eachCFT = da_eachCFT.where(da_eachCFT != dummy_fill)
- da_eachCFT.attrs["units"] = gdd_units
+ da_each_cft = xr.concat((this_ds[x] for i, x in enumerate(these_vars)), dim="cft")
+ if "time" in this_ds.dims:
+ da_each_cft = da_each_cft.isel(time=0, drop=True)
+ da_each_cft = da_each_cft.where(da_each_cft != dummy_fill)
+ da_each_cft.attrs["units"] = gdd_units
# What are the maximum differences seen between different crop types?
- if len(theseVars) > 1:
- maxDiff = np.nanmax(da_eachCFT.max(dim="cft") - da_eachCFT.min(dim="cft"))
- if maxDiff > 0:
- print(f" Max difference among crop types: {np.round(maxDiff)}")
+ if len(these_vars) > 1:
+ max_diff = np.nanmax(da_each_cft.max(dim="cft") - da_each_cft.min(dim="cft"))
+ if max_diff > 0:
+ print(f" Max difference among crop types: {np.round(max_diff)}")
if crop_fracs_yx is None:
- return da_eachCFT.isel(cft=0, drop=True)
+ return da_each_cft.isel(cft=0, drop=True)
# Warn if GDD is NaN anywhere that there is area
- da_eachCFT["cft"] = crop_fracs_yx["cft"]
- gddNaN_areaPos = np.isnan(da_eachCFT) & (crop_fracs_yx > 0)
- if np.any(gddNaN_areaPos):
- total_bad_croparea = np.nansum(crop_fracs_yx.where(gddNaN_areaPos).values)
+ da_each_cft["cft"] = crop_fracs_yx["cft"]
+ gdd_nan_area_pos = np.isnan(da_each_cft) & (crop_fracs_yx > 0)
+ if np.any(gdd_nan_area_pos):
+ total_bad_croparea = np.nansum(crop_fracs_yx.where(gdd_nan_area_pos).values)
total_croparea = np.nansum(crop_fracs_yx.values)
print(
- f" GDD reqt NaN but area positive ({np.round(total_bad_croparea/total_croparea*100, 1)}% of this crop's area)"
+ " GDD reqt NaN but area positive "
+ + f"({np.round(total_bad_croparea/total_croparea*100, 1)}% of this crop's area)"
)
# Get areas and weights, masking cell-crops with NaN GDDs
- crop_fracs_yx = crop_fracs_yx.where(~np.isnan(da_eachCFT))
+ crop_fracs_yx = crop_fracs_yx.where(~np.isnan(da_each_cft))
crop_area_yx = crop_fracs_yx.sum(dim="cft")
weights_yx = crop_fracs_yx / crop_area_yx
weights_sum_gt0 = weights_yx.sum(dim="cft").where(weights_yx > 0)
@@ -734,45 +814,48 @@ def get_multicrop_maps(ds, theseVars, crop_fracs_yx, dummy_fill, gdd_units):
assert np.isclose(np.nanmax(weights_sum_gt0.values), 1.0)
# Mask GDDs and weights where there is no area
- da_eachCFT = da_eachCFT.where(crop_fracs_yx > 0)
- if len(theseVars) == 1:
- return da_eachCFT.isel(cft=0, drop=True)
+ da_each_cft = da_each_cft.where(crop_fracs_yx > 0)
+ if len(these_vars) == 1:
+ return da_each_cft.isel(cft=0, drop=True)
weights_yx = weights_yx.where(crop_fracs_yx > 0)
weights_sum = weights_yx.sum(dim="cft").where(crop_area_yx > 0)
assert np.isclose(np.nanmin(weights_sum.values), 1.0)
assert np.isclose(np.nanmax(weights_sum.values), 1.0)
# Ensure grid match between GDDs and weights
- if not np.array_equal(da_eachCFT["lon"].values, weights_yx["lon"].values):
+ if not np.array_equal(da_each_cft["lon"].values, weights_yx["lon"].values):
raise RuntimeError("lon mismatch")
- if not np.array_equal(da_eachCFT["lat"].values, weights_yx["lat"].values):
+ if not np.array_equal(da_each_cft["lat"].values, weights_yx["lat"].values):
raise RuntimeError("lat mismatch")
# Get area-weighted mean GDD requirements for all crops
- da = (da_eachCFT * weights_yx).sum(dim="cft")
- da.attrs["units"] = gdd_units
- da = da.where(crop_area_yx > 0)
+ this_da = (da_each_cft * weights_yx).sum(dim="cft")
+ this_da.attrs["units"] = gdd_units
+ this_da = this_da.where(crop_area_yx > 0)
# Ensure that weighted mean is between each cell's min and max
- whereBad = (da < da_eachCFT.min(dim="cft")) | (da > da_eachCFT.max(dim="cft"))
- if np.any(whereBad):
- where_belowMin = da.where(da < da_eachCFT.min(dim="cft"))
- worst_belowMin = np.min((da_eachCFT.min(dim="cft") - where_belowMin).values)
- where_aboveMax = da.where(da > da_eachCFT.max(dim="cft"))
- worst_aboveMax = np.max((where_aboveMax - da_eachCFT.max(dim="cft")).values)
- worst = max(worst_belowMin, worst_aboveMax)
+ where_bad = (this_da < da_each_cft.min(dim="cft")) | (this_da > da_each_cft.max(dim="cft"))
+ if np.any(where_bad):
+ where_below_min = this_da.where(this_da < da_each_cft.min(dim="cft"))
+ worst_below_min = np.min((da_each_cft.min(dim="cft") - where_below_min).values)
+ where_above_max = this_da.where(this_da > da_each_cft.max(dim="cft"))
+ worst_above_max = np.max((where_above_max - da_each_cft.max(dim="cft")).values)
+ worst = max(worst_below_min, worst_above_max)
tol = 1e-12
if worst > 1e-12:
raise RuntimeError(
f"Some value is outside expected range by {worst} (exceeds tolerance {tol})"
)
- return da
+ return this_da
-if can_plot:
+if CAN_PLOT:
def get_bounds_ncolors(gdd_spacing, diff_map_yx):
+ """
+ Get information about color bar
+ """
vmax = np.floor(np.nanmax(diff_map_yx.values) / gdd_spacing) * gdd_spacing
vmin = -vmax
epsilon = np.nextafter(0, 1)
@@ -781,11 +864,11 @@ def get_bounds_ncolors(gdd_spacing, diff_map_yx):
bounds.remove(0)
bounds[bounds.index(-gdd_spacing)] /= 2
bounds[bounds.index(gdd_spacing)] /= 2
- Ncolors = len(bounds) + 1
- return vmax, bounds, Ncolors
+ n_colors = len(bounds) + 1
+ return vmax, bounds, n_colors
- def make_map(
- ax,
+ def make_gengdd_map(
+ this_axis,
this_map,
this_title,
vmax,
@@ -798,11 +881,14 @@ def make_map(
cbar_ticks=None,
vmin=None,
):
+ """
+ Make maps
+ """
if bounds:
if not cmap:
raise RuntimeError("Calling make_map() with bounds requires cmap to be specified")
norm = mcolors.BoundaryNorm(bounds, cmap.N, extend=extend)
- im1 = ax.pcolormesh(
+ im1 = this_axis.pcolormesh(
this_map.lon.values,
this_map.lat.values,
this_map,
@@ -817,11 +903,11 @@ def make_map(
if vmin is not None:
raise RuntimeError("Do not specify vmin in this call of make_map()")
vmin = -vmax
- Ncolors = vmax / gdd_spacing
- if Ncolors % 2 == 0:
- Ncolors += 1
+ n_colors = vmax / gdd_spacing
+ if n_colors % 2 == 0:
+ n_colors += 1
if not cmap:
- cmap = cm.get_cmap(cropcal_colors["div_other_nonnorm"], Ncolors)
+ cmap = cm.get_cmap(cropcal_colors["div_other_nonnorm"], n_colors)
if np.any(this_map.values > vmax) and np.any(this_map.values < vmin):
extend = "both"
@@ -838,15 +924,15 @@ def make_map(
else:
vmin = np.floor(vmin / 500) * 500
vmax = np.floor(vmax / 500) * 500
- Ncolors = int(vmax / 500)
+ n_colors = int(vmax / 500)
if not cmap:
- cmap = cm.get_cmap(cropcal_colors["seq_other"], Ncolors + 1)
+ cmap = cm.get_cmap(cropcal_colors["seq_other"], n_colors + 1)
extend = "max"
extend_color = cmap.colors[-1]
- cmap = mcolors.ListedColormap(cmap.colors[:Ncolors])
+ cmap = mcolors.ListedColormap(cmap.colors[:n_colors])
cmap.set_over(extend_color)
- im1 = ax.pcolormesh(
+ im1 = this_axis.pcolormesh(
this_map.lon.values,
this_map.lat.values,
this_map,
@@ -856,9 +942,9 @@ def make_map(
cmap=cmap,
)
- ax.set_extent([-180, 180, -63, 90], crs=ccrs.PlateCarree())
- ax.coastlines(linewidth=0.3)
- ax.set_title(this_title, fontsize=fontsize_titles, fontweight="bold", y=0.96)
+ this_axis.set_extent([-180, 180, -63, 90], crs=ccrs.PlateCarree())
+ this_axis.coastlines(linewidth=0.3)
+ this_axis.set_title(this_title, fontsize=fontsize_titles, fontweight="bold", y=0.96)
cbar = plt.colorbar(
im1,
orientation="horizontal",
@@ -876,24 +962,30 @@ def make_map(
ticks = np.arange(-60, 91, bin_width)
ticklabels = [str(x) for x in ticks]
- for i, x in enumerate(ticks):
- if x % 2:
+ for i, tick in enumerate(ticks):
+ if tick % 2:
ticklabels[i] = ""
plt.yticks(np.arange(-60, 91, 15), labels=ticklabels, fontsize=fontsize_ticklabels)
plt.axis("off")
- def get_non_nans(in_da, fillValue):
- in_da = in_da.where(in_da != fillValue)
+ def get_non_nans(in_da, fill_value):
+ """
+ Get non-NaN, non-fill values of a DataArray
+ """
+ in_da = in_da.where(in_da != fill_value)
return in_da.values[~np.isnan(in_da.values)]
- def set_boxplot_props(bp, color, linewidth):
+ def set_boxplot_props(bpl, color, linewidth):
+ """
+ Set boxplot properties
+ """
linewidth = 1.5
- plt.setp(bp["boxes"], color=color, linewidth=linewidth)
- plt.setp(bp["whiskers"], color=color, linewidth=linewidth)
- plt.setp(bp["caps"], color=color, linewidth=linewidth)
- plt.setp(bp["medians"], color=color, linewidth=linewidth)
+ plt.setp(bpl["boxes"], color=color, linewidth=linewidth)
+ plt.setp(bpl["whiskers"], color=color, linewidth=linewidth)
+ plt.setp(bpl["caps"], color=color, linewidth=linewidth)
+ plt.setp(bpl["medians"], color=color, linewidth=linewidth)
plt.setp(
- bp["fliers"],
+ bpl["fliers"],
markeredgecolor=color,
markersize=6,
linewidth=linewidth,
@@ -901,16 +993,19 @@ def set_boxplot_props(bp, color, linewidth):
)
def make_plot(data, offset, linewidth):
+ """
+ Make boxplot
+ """
offset = 0.4 * offset
bpl = plt.boxplot(
data,
positions=np.array(range(len(data))) * 2.0 + offset,
widths=0.6,
- boxprops=dict(linewidth=linewidth),
- whiskerprops=dict(linewidth=linewidth),
- capprops=dict(linewidth=linewidth),
- medianprops=dict(linewidth=linewidth),
- flierprops=dict(markeredgewidth=0.5),
+ boxprops={"linewidth": linewidth},
+ whiskerprops={"linewidth": linewidth},
+ capprops={"linewidth": linewidth},
+ medianprops={"linewidth": linewidth},
+ flierprops={"markeredgewidth": 0.5},
)
return bpl
@@ -921,26 +1016,31 @@ def make_figures(
run1_name,
run2_name,
logger,
- thisDir=None,
+ this_dir=None,
gdd_maps_ds=None,
gddharv_maps_ds=None,
outdir_figs=None,
linewidth=1.5,
):
+ """
+ Make map-and-boxplot figures
+ """
if not gdd_maps_ds:
- if not thisDir:
+ if not this_dir:
error(
logger,
- "If not providing gdd_maps_ds, you must provide thisDir (location of gdd_maps.nc)",
+ "If not providing gdd_maps_ds, you must provide thisDir (location of "
+ + "gdd_maps.nc)",
)
- gdd_maps_ds = xr.open_dataset(thisDir + "gdd_maps.nc")
+ gdd_maps_ds = xr.open_dataset(this_dir + "gdd_maps.nc")
if not gddharv_maps_ds:
- if not thisDir:
+ if not this_dir:
error(
logger,
- "If not providing gddharv_maps_ds, you must provide thisDir (location of gddharv_maps.nc)",
+ "If not providing gddharv_maps_ds, you must provide thisDir (location of "
+ + "gddharv_maps.nc)",
)
- gddharv_maps_ds = xr.open_dataset(thisDir + "gdd_maps.nc")
+ gddharv_maps_ds = xr.open_dataset(this_dir + "gdd_maps.nc")
# Get info
incl_vegtypes_str = gdd_maps_ds.attrs["incl_vegtypes_str"]
@@ -952,19 +1052,19 @@ def make_figures(
if not outdir_figs:
outdir_figs = gdd_maps_ds.attrs["outdir_figs"]
try:
- y1 = gdd_maps_ds.attrs["y1"]
- yN = gdd_maps_ds.attrs["yN"]
+ year_1 = gdd_maps_ds.attrs["y1"]
+ year_n = gdd_maps_ds.attrs["yN"]
# Backwards compatibility with a bug (fixed 2023-01-03)
- except:
- y1 = gdd_maps_ds.attrs["first_season"]
- yN = gdd_maps_ds.attrs["last_season"]
+ except KeyError:
+ year_1 = gdd_maps_ds.attrs["first_season"]
+ year_n = gdd_maps_ds.attrs["last_season"]
# Import LU data, if doing so
if land_use_file:
- y1_lu = y1 if first_land_use_year == None else first_land_use_year
- yN_lu = yN if last_land_use_year == None else last_land_use_year
- lu_ds = cc.open_lu_ds(land_use_file, y1_lu, yN_lu, gdd_maps_ds, ungrid=False)
- lu_years_text = f" (masked by {y1_lu}-{yN_lu} area)"
- lu_years_file = f"_mask{y1_lu}-{yN_lu}"
+ year_1_lu = year_1 if first_land_use_year is None else first_land_use_year
+ year_n_lu = year_n if last_land_use_year is None else last_land_use_year
+ lu_ds = cc.open_lu_ds(land_use_file, year_1_lu, year_n_lu, gdd_maps_ds, ungrid=False)
+ lu_years_text = f" (masked by {year_1_lu}-{year_n_lu} area)"
+ lu_years_file = f"_mask{year_1_lu}-{year_n_lu}"
else:
lu_ds = None
lu_years_text = ""
@@ -980,11 +1080,11 @@ def make_figures(
fontsize_axislabels = 12
fontsize_ticklabels = 12
- Nbins = len(lat_bin_edges) - 1
+ n_bins = len(lat_bin_edges) - 1
bin_names = ["All"]
- for b in np.arange(Nbins):
- lower = lat_bin_edges[b]
- upper = lat_bin_edges[b + 1]
+ for this_bin in np.arange(n_bins):
+ lower = lat_bin_edges[this_bin]
+ upper = lat_bin_edges[this_bin + 1]
bin_names.append(f"{lower}–{upper}")
color_old = cropcal_colors_cases(run1_name)
@@ -996,13 +1096,13 @@ def make_figures(
gdd_units = "GDD (°C • day)"
# Maps
- ny = 3
- nx = 1
+ nplot_y = 3
+ nplot_x = 1
log(logger, "Making before/after maps...")
vegtype_list = incl_vegtypes_str
if land_use_file:
vegtype_list += ["Corn", "Cotton", "Rice", "Soybean", "Sugarcane", "Wheat"]
- for v, vegtype_str in enumerate(vegtype_list):
+ for vegtype_str in vegtype_list:
print(f"{vegtype_str}...")
# Get component types
@@ -1025,12 +1125,12 @@ def make_figures(
else:
crop_fracs_yx = None
- theseVars = [f"gdd1_{x}" for x in vegtypes_int]
+ these_vars = [f"gdd1_{x}" for x in vegtypes_int]
gddharv_map_yx = get_multicrop_maps(
- gddharv_maps_ds, theseVars, crop_fracs_yx, dummy_fill, gdd_units
+ gddharv_maps_ds, these_vars, crop_fracs_yx, dummy_fill, gdd_units
)
gdd_map_yx = get_multicrop_maps(
- gdd_maps_ds, theseVars, crop_fracs_yx, dummy_fill, gdd_units
+ gdd_maps_ds, these_vars, crop_fracs_yx, dummy_fill, gdd_units
)
# Get figure title
@@ -1048,25 +1148,25 @@ def make_figures(
# Set up figure and first subplot
if layout == "3x1":
fig = plt.figure(figsize=(7.5, 14))
- ax = fig.add_subplot(ny, nx, 1, projection=ccrs.PlateCarree())
+ this_axis = fig.add_subplot(nplot_y, nplot_x, 1, projection=ccrs.PlateCarree())
elif layout == "2x2":
fig = plt.figure(figsize=(12, 6))
spec = fig.add_gridspec(nrows=2, ncols=2, width_ratios=[0.4, 0.6])
- ax = fig.add_subplot(spec[0, 0], projection=ccrs.PlateCarree())
+ this_axis = fig.add_subplot(spec[0, 0], projection=ccrs.PlateCarree())
elif layout == "3x2":
fig = plt.figure(figsize=(14, 9))
spec = fig.add_gridspec(nrows=3, ncols=2, width_ratios=[0.5, 0.5], wspace=0.2)
- ax = fig.add_subplot(spec[0, 0], projection=ccrs.PlateCarree())
+ this_axis = fig.add_subplot(spec[0, 0], projection=ccrs.PlateCarree())
else:
error(logger, f"layout {layout} not recognized")
- thisMin = int(np.round(np.nanmin(gddharv_map_yx)))
- thisMax = int(np.round(np.nanmax(gddharv_map_yx)))
- thisTitle = f"{run1_name} (range {thisMin}–{thisMax})"
- make_map(
- ax,
+ this_min = int(np.round(np.nanmin(gddharv_map_yx)))
+ this_max = int(np.round(np.nanmax(gddharv_map_yx)))
+ this_title = f"{run1_name} (range {this_min}–{this_max})"
+ make_gengdd_map(
+ this_axis,
gddharv_map_yx,
- thisTitle,
+ this_title,
vmax,
bin_width,
fontsize_ticklabels,
@@ -1075,18 +1175,18 @@ def make_figures(
)
if layout == "3x1":
- ax = fig.add_subplot(ny, nx, 2, projection=ccrs.PlateCarree())
+ this_axis = fig.add_subplot(nplot_y, nplot_x, 2, projection=ccrs.PlateCarree())
elif layout in ["2x2", "3x2"]:
- ax = fig.add_subplot(spec[1, 0], projection=ccrs.PlateCarree())
+ this_axis = fig.add_subplot(spec[1, 0], projection=ccrs.PlateCarree())
else:
error(logger, f"layout {layout} not recognized")
- thisMin = int(np.round(np.nanmin(gdd_map_yx)))
- thisMax = int(np.round(np.nanmax(gdd_map_yx)))
- thisTitle = f"{run2_name} (range {thisMin}–{thisMax})"
- make_map(
- ax,
+ this_min = int(np.round(np.nanmin(gdd_map_yx)))
+ this_max = int(np.round(np.nanmax(gdd_map_yx)))
+ this_title = f"{run2_name} (range {this_min}–{this_max})"
+ make_gengdd_map(
+ this_axis,
gdd_map_yx,
- thisTitle,
+ this_title,
vmax,
bin_width,
fontsize_ticklabels,
@@ -1096,22 +1196,22 @@ def make_figures(
# Difference
if layout == "3x2":
- ax = fig.add_subplot(spec[2, 0], projection=ccrs.PlateCarree())
- thisMin = int(np.round(np.nanmin(gdd_map_yx)))
- thisMax = int(np.round(np.nanmax(gdd_map_yx)))
- thisTitle = f"{run2_name} minus {run1_name}"
+ this_axis = fig.add_subplot(spec[2, 0], projection=ccrs.PlateCarree())
+ this_min = int(np.round(np.nanmin(gdd_map_yx)))
+ this_max = int(np.round(np.nanmax(gdd_map_yx)))
+ this_title = f"{run2_name} minus {run1_name}"
diff_map_yx = gdd_map_yx - gddharv_map_yx
diff_map_yx.attrs["units"] = gdd_units
gdd_spacing = 500
- vmax, bounds, Ncolors = get_bounds_ncolors(gdd_spacing, diff_map_yx)
- if Ncolors < 9:
+ vmax, bounds, n_colors = get_bounds_ncolors(gdd_spacing, diff_map_yx)
+ if n_colors < 9:
gdd_spacing = 250
- vmax, bounds, Ncolors = get_bounds_ncolors(gdd_spacing, diff_map_yx)
+ vmax, bounds, n_colors = get_bounds_ncolors(gdd_spacing, diff_map_yx)
- cmap = cm.get_cmap(cropcal_colors["div_other_nonnorm"], Ncolors)
+ cmap = cm.get_cmap(cropcal_colors["div_other_nonnorm"], n_colors)
cbar_ticks = []
- include_0bin_ticks = Ncolors <= 13
+ include_0bin_ticks = n_colors <= 13
if vmax <= 3000:
tick_spacing = gdd_spacing * 2
elif vmax <= 5000:
@@ -1119,17 +1219,19 @@ def make_figures(
else:
tick_spacing = 2000
previous = -np.inf
- for x in bounds:
- if (not include_0bin_ticks) and (x > 0) and (previous < 0):
+ for bound in bounds:
+ if (not include_0bin_ticks) and (previous < 0 < bound):
cbar_ticks.append(0)
- if x % tick_spacing == 0 or (include_0bin_ticks and abs(x) == gdd_spacing / 2):
- cbar_ticks.append(x)
- previous = x
-
- make_map(
- ax,
+ if bound % tick_spacing == 0 or (
+ include_0bin_ticks and abs(bound) == gdd_spacing / 2
+ ):
+ cbar_ticks.append(bound)
+ previous = bound
+
+ make_gengdd_map(
+ this_axis,
diff_map_yx,
- thisTitle,
+ this_title,
vmax,
bin_width,
fontsize_ticklabels,
@@ -1148,25 +1250,25 @@ def make_figures(
lat_abs = np.abs(gdd_map_yx.lat.values)
gdd_bybin_old = [gddharv_vector]
gdd_bybin_new = [gdd_vector]
- for b in np.arange(Nbins):
- lower = lat_bin_edges[b]
- upper = lat_bin_edges[b + 1]
+ for this_bin in np.arange(n_bins):
+ lower = lat_bin_edges[this_bin]
+ upper = lat_bin_edges[this_bin + 1]
lat_inds = np.where((lat_abs >= lower) & (lat_abs < upper))[0]
- gdd_vector_thisBin = get_non_nans(gdd_map_yx[lat_inds, :], dummy_fill)
- gddharv_vector_thisBin = get_non_nans(gddharv_map_yx[lat_inds, :], dummy_fill)
- gdd_bybin_old.append(gddharv_vector_thisBin)
- gdd_bybin_new.append(gdd_vector_thisBin)
+ this_bin_gdd_vector = get_non_nans(gdd_map_yx[lat_inds, :], dummy_fill)
+ this_bin_gddharv_vector = get_non_nans(gddharv_map_yx[lat_inds, :], dummy_fill)
+ gdd_bybin_old.append(this_bin_gddharv_vector)
+ gdd_bybin_new.append(this_bin_gdd_vector)
if layout == "3x1":
- ax = fig.add_subplot(ny, nx, 3)
+ this_axis = fig.add_subplot(nplot_y, nplot_x, 3)
elif layout in ["2x2", "3x2"]:
- ax = fig.add_subplot(spec[:, 1])
+ this_axis = fig.add_subplot(spec[:, 1])
else:
error(logger, f"layout {layout} not recognized")
# Shift bottom of plot up to make room for legend
- ax_pos = ax.get_position()
- ax.set_position(Bbox.from_extents(ax_pos.x0, 0.19, ax_pos.x1, ax_pos.y1))
+ ax_pos = this_axis.get_position()
+ this_axis.set_position(Bbox.from_extents(ax_pos.x0, 0.19, ax_pos.x1, ax_pos.y1))
# Define legend position
legend_bbox_to_anchor = (0, -0.15, 1, 0.2)
@@ -1188,13 +1290,13 @@ def make_figures(
plt.xticks(range(0, len(bin_names) * 2, 2), bin_names, fontsize=fontsize_ticklabels)
plt.yticks(fontsize=fontsize_ticklabels)
- ax.spines["right"].set_visible(False)
- ax.spines["top"].set_visible(False)
+ this_axis.spines["right"].set_visible(False)
+ this_axis.spines["top"].set_visible(False)
plt.xlabel("Latitude zone (absolute value)", fontsize=fontsize_axislabels)
plt.ylabel(gdd_units, fontsize=fontsize_axislabels)
- ax.yaxis.set_label_coords(-0.11, 0.5)
- plt.title(f"Zonal changes", fontsize=fontsize_titles, fontweight="bold")
+ this_axis.yaxis.set_label_coords(-0.11, 0.5)
+ plt.title("Zonal changes", fontsize=fontsize_titles, fontweight="bold")
plt.suptitle(
f"Maturity requirements: {vegtype_str_title}" + lu_years_text,
@@ -1205,10 +1307,13 @@ def make_figures(
if vegtype_str in incl_vegtypes_str:
outfile = os.path.join(
- outdir_figs, f"{theseVars[0]}_{vegtype_str}_gs{y1}-{yN}{lu_years_file}.png"
+ outdir_figs,
+ f"{these_vars[0]}_{vegtype_str}_gs{year_1}-{year_n}{lu_years_file}.png",
)
else:
- outfile = os.path.join(outdir_figs, f"{vegtype_str}_gs{y1}-{yN}{lu_years_file}.png")
+ outfile = os.path.join(
+ outdir_figs, f"{vegtype_str}_gs{year_1}-{year_n}{lu_years_file}.png"
+ )
plt.savefig(outfile, dpi=300, transparent=False, facecolor="white", bbox_inches="tight")
plt.close()
diff --git a/python/ctsm/crop_calendars/grid_one_variable.py b/python/ctsm/crop_calendars/grid_one_variable.py
new file mode 100644
index 0000000000..cb5d330032
--- /dev/null
+++ b/python/ctsm/crop_calendars/grid_one_variable.py
@@ -0,0 +1,179 @@
+"""
+Make a geographically gridded DataArray (with dimensions time, vegetation type [as string], lat,
+lon) of one variable within a Dataset.
+
+- Optional keyword arguments will be passed to xr_flexsel() to select single steps or slices
+ along the specified ax(ie)s.
+- fill_value: Default None means grid will be filled with NaN, unless the variable in question
+ already has a _FillValue, in which case that will be used.
+"""
+import numpy as np
+import xarray as xr
+from ctsm.crop_calendars.xr_flexsel import xr_flexsel
+
+
+def get_thisvar_da(var, this_ds):
+ """
+ Return a DataArray, with defined coordinates, for a given variable in a dataset.
+ """
+ # Make DataArray for this variable
+ thisvar_da = np.array(this_ds.variables[var])
+ these_dims = this_ds.variables[var].dims
+ thisvar_da = xr.DataArray(thisvar_da, dims=these_dims)
+
+ # Define coordinates of this variable's DataArray
+ dims_dict = dict()
+ for dim in these_dims:
+ dims_dict[dim] = this_ds[dim]
+ thisvar_da = thisvar_da.assign_coords(dims_dict)
+ thisvar_da.attrs = this_ds[var].attrs
+
+ return thisvar_da
+
+
+def convert_to_da(this_ds, var, fill_value, thisvar_da, new_dims, thisvar_gridded):
+ """
+ Convert Numpy array to DataArray with coordinates, attributes and name
+ """
+ thisvar_gridded = xr.DataArray(thisvar_gridded, dims=tuple(new_dims), attrs=thisvar_da.attrs)
+ for dim in new_dims:
+ if dim == "ivt_str":
+ values = this_ds.vegtype_str.values
+ elif dim in thisvar_da.coords:
+ values = thisvar_da[dim]
+ else:
+ values = this_ds[dim].values
+ thisvar_gridded = thisvar_gridded.assign_coords({dim: values})
+ thisvar_gridded.name = var
+
+ # Add FillValue attribute
+ if fill_value:
+ thisvar_gridded.attrs["_FillValue"] = fill_value
+ return thisvar_gridded
+
+
+def grid_the_data(thisvar_da, vt_da, ixy_da, jxy_da, new_dims, thisvar_gridded):
+ """
+ Fill lat-lon array with previously-ungridded data
+ """
+ fill_indices = []
+ for dim in new_dims:
+ if dim == "lat":
+ fill_indices.append(jxy_da.values.astype(int) - 1)
+ elif dim == "lon":
+ fill_indices.append(ixy_da.values.astype(int) - 1)
+ elif dim == "ivt_str":
+ fill_indices.append(vt_da)
+ elif not fill_indices:
+ # I.e., if fill_indices is empty. Could also do "elif len(fill_indices)==0".
+ fill_indices.append(Ellipsis)
+ try:
+ thisvar_gridded[tuple(fill_indices[: len(fill_indices)])] = thisvar_da.values
+ except: # pylint: disable=bare-except
+ thisvar_gridded[tuple(fill_indices[: len(fill_indices)])] = thisvar_da.values.transpose()
+ if not np.any(np.bitwise_not(np.isnan(thisvar_gridded))):
+ if np.all(np.isnan(thisvar_da.values)):
+ print("Warning: This DataArray (and thus map) is all NaN")
+ else:
+ raise RuntimeError("thisvar_gridded was not filled!")
+
+
+def create_filled_array(this_ds, fill_value, thisvar_da, new_dims):
+ """
+ Create a Numpy array to be filled with gridded data
+ """
+ dim_size_list = []
+ for dim in new_dims:
+ if dim == "ivt_str":
+ dim_size = this_ds.sizes["ivt"]
+ elif dim in thisvar_da.coords:
+ dim_size = thisvar_da.sizes[dim]
+ else:
+ dim_size = this_ds.sizes[dim]
+ dim_size_list = dim_size_list + [dim_size]
+ thisvar_gridded = np.empty(dim_size_list)
+ if fill_value:
+ thisvar_gridded[:] = fill_value
+ else:
+ thisvar_gridded[:] = np.NaN
+ return thisvar_gridded
+
+
+def get_ixy_jxy_das(this_ds, var):
+ """
+ Get DataArrays needed for gridding
+ """
+ thisvar_da = get_thisvar_da(var, this_ds)
+ vt_da = None
+ if "patch" in thisvar_da.dims:
+ spatial_unit = "patch"
+ xy_1d_prefix = "patches"
+ if "patches1d_itype_veg" in this_ds:
+ vt_da = get_thisvar_da("patches1d_itype_veg", this_ds)
+ elif "gridcell" in thisvar_da.dims:
+ spatial_unit = "gridcell"
+ xy_1d_prefix = "grid"
+ else:
+ raise RuntimeError(
+ f"What variables to use for _ixy and _jxy of variable with dims {thisvar_da.dims}?"
+ )
+ ixy_da = get_thisvar_da(xy_1d_prefix + "1d_ixy", this_ds)
+ jxy_da = get_thisvar_da(xy_1d_prefix + "1d_jxy", this_ds)
+ return thisvar_da, vt_da, spatial_unit, ixy_da, jxy_da
+
+
+def get_new_dim_list(this_ds, thisvar_da, spatial_unit):
+ """
+ Get new dimension list
+ """
+ new_dims = list(thisvar_da.dims)
+ ### Remove "[spatial_unit]".
+ if spatial_unit in new_dims:
+ new_dims.remove(spatial_unit)
+ # Add "ivt_str" (vegetation type, as string). This needs to go at the end, to avoid a possible
+ # situation where you wind up with multiple Ellipsis members of fill_indices.
+ if "ivt" in this_ds and spatial_unit == "patch":
+ new_dims.append("ivt_str")
+ ### Add lat and lon to end of list
+ new_dims = new_dims + ["lat", "lon"]
+ return new_dims
+
+
+def grid_one_variable(this_ds, var, fill_value=None, **kwargs):
+ """
+ Make a geographically gridded DataArray (with dimensions time, vegetation type [as string], lat,
+ lon) of one variable within a Dataset.
+
+ - Optional keyword arguments will be passed to xr_flexsel() to select single steps or slices
+ along the specified ax(ie)s.
+ - fill_value: Default None means grid will be filled with NaN, unless the variable in question
+ already has a _FillValue, in which case that will be used.
+ """
+ # Get this Dataset's values for selection(s), if provided
+ this_ds = xr_flexsel(this_ds, **kwargs)
+
+ # Get DataArrays needed for gridding
+ thisvar_da, vt_da, spatial_unit, ixy_da, jxy_da = get_ixy_jxy_das(this_ds, var)
+
+ if not fill_value and "_FillValue" in thisvar_da.attrs:
+ fill_value = thisvar_da.attrs["_FillValue"]
+
+ # Renumber vt_da to work as indices on new ivt dimension, if needed.
+ ### Ensures that the unique set of vt_da values begins with 1 and
+ ### contains no missing steps.
+ if "ivt" in this_ds and vt_da is not None:
+ vt_da.values = np.array([np.where(this_ds.ivt.values == x)[0][0] for x in vt_da.values])
+
+ # Get new dimension list
+ new_dims = get_new_dim_list(this_ds, thisvar_da, spatial_unit)
+
+ # Create a Numpy array to be filled with gridded data
+ thisvar_gridded = create_filled_array(this_ds, fill_value, thisvar_da, new_dims)
+
+ # Fill lat-lon array with previously-ungridded data
+ grid_the_data(thisvar_da, vt_da, ixy_da, jxy_da, new_dims, thisvar_gridded)
+
+ # Convert Numpy array to DataArray with coordinates, attributes and name
+ thisvar_gridded = convert_to_da(this_ds, var, fill_value, thisvar_da, new_dims, thisvar_gridded)
+
+ return thisvar_gridded
diff --git a/python/ctsm/crop_calendars/import_ds.py b/python/ctsm/crop_calendars/import_ds.py
new file mode 100644
index 0000000000..77a22b626b
--- /dev/null
+++ b/python/ctsm/crop_calendars/import_ds.py
@@ -0,0 +1,267 @@
+"""
+Import a dataset that can be spread over multiple files, only including specified variables
+and/or vegetation types and/or timesteps, concatenating by time.
+
+- DOES actually read the dataset into memory, but only AFTER dropping unwanted variables and/or
+ vegetation types.
+"""
+import re
+import warnings
+from importlib.util import find_spec
+import numpy as np
+import xarray as xr
+import ctsm.crop_calendars.cropcal_utils as utils
+from ctsm.crop_calendars.xr_flexsel import xr_flexsel
+
+
+def compute_derived_vars(ds_in, var):
+ """
+ Compute derived variables
+ """
+ if (
+ var == "HYEARS"
+ and "HDATES" in ds_in
+ and ds_in.HDATES.dims == ("time", "mxharvests", "patch")
+ ):
+ year_list = np.array([np.float32(x.year - 1) for x in ds_in.time.values])
+ hyears = ds_in["HDATES"].copy()
+ hyears.values = np.tile(
+ np.expand_dims(year_list, (1, 2)),
+ (1, ds_in.dims["mxharvests"], ds_in.dims["patch"]),
+ )
+ with np.errstate(invalid="ignore"):
+ is_le_zero = ~np.isnan(ds_in.HDATES.values) & (ds_in.HDATES.values <= 0)
+ hyears.values[is_le_zero] = ds_in.HDATES.values[is_le_zero]
+ hyears.values[np.isnan(ds_in.HDATES.values)] = np.nan
+ hyears.attrs["long_name"] = "DERIVED: actual crop harvest years"
+ hyears.attrs["units"] = "year"
+ ds_in["HYEARS"] = hyears
+ else:
+ raise RuntimeError(f"Unable to compute derived variable {var}")
+ return ds_in
+
+
+def mfdataset_preproc(ds_in, vars_to_import, vegtypes_to_import, time_slice):
+ """
+ Function to drop unwanted variables in preprocessing of open_mfdataset().
+
+ - Makes sure to NOT drop any unspecified variables that will be useful in gridding.
+ - Also adds vegetation type info in the form of a DataArray of strings.
+ - Also renames "pft" dimension (and all like-named variables, e.g., pft1d_itype_veg_str) to be
+ named like "patch". This can later be reversed, for compatibility with other code, using
+ patch2pft().
+ """
+ # Rename "pft" dimension and variables to "patch", if needed
+ if "pft" in ds_in.dims:
+ pattern = re.compile("pft.*1d")
+ matches = [x for x in list(ds_in.keys()) if pattern.search(x) is not None]
+ pft2patch_dict = {"pft": "patch"}
+ for match in matches:
+ pft2patch_dict[match] = match.replace("pft", "patch").replace("patchs", "patches")
+ ds_in = ds_in.rename(pft2patch_dict)
+
+ derived_vars = []
+ if vars_to_import is not None:
+ # Split vars_to_import into variables that are vs. aren't already in ds
+ derived_vars = [v for v in vars_to_import if v not in ds_in]
+ present_vars = [v for v in vars_to_import if v in ds_in]
+ vars_to_import = present_vars
+
+ # Get list of dimensions present in variables in vars_to_import.
+ dim_list = []
+ for var in vars_to_import:
+ # list(set(x)) returns a list of the unique items in x
+ dim_list = list(set(dim_list + list(ds_in.variables[var].dims)))
+
+ # Get any _1d variables that are associated with those dimensions. These will be useful in
+ # gridding. Also, if any dimension is "pft", set up to rename it and all like-named
+ # variables to "patch"
+ oned_vars = []
+ for dim in dim_list:
+ pattern = re.compile(f"{dim}.*1d")
+ matches = [x for x in list(ds_in.keys()) if pattern.search(x) is not None]
+ oned_vars = list(set(oned_vars + matches))
+
+ # Add dimensions and _1d variables to vars_to_import
+ vars_to_import = list(set(vars_to_import + list(ds_in.dims) + oned_vars))
+
+ # Add any _bounds variables
+ bounds_vars = []
+ for var in vars_to_import:
+ bounds_var = var + "_bounds"
+ if bounds_var in ds_in:
+ bounds_vars = bounds_vars + [bounds_var]
+ vars_to_import = vars_to_import + bounds_vars
+
+ # Get list of variables to drop
+ varlist = list(ds_in.variables)
+ vars_to_drop = list(np.setdiff1d(varlist, vars_to_import))
+
+ # Drop them
+ ds_in = ds_in.drop_vars(vars_to_drop)
+
+ # Add vegetation type info
+ if "patches1d_itype_veg" in list(ds_in):
+ this_pftlist = utils.define_pftlist()
+ utils.get_patch_ivts(
+ ds_in, this_pftlist
+ ) # Includes check of whether vegtype changes over time anywhere
+ vegtype_da = utils.get_vegtype_str_da(this_pftlist)
+ patches1d_itype_veg_str = vegtype_da.values[
+ ds_in.isel(time=0).patches1d_itype_veg.values.astype(int)
+ ]
+ npatch = len(patches1d_itype_veg_str)
+ patches1d_itype_veg_str = xr.DataArray(
+ patches1d_itype_veg_str,
+ coords={"patch": np.arange(0, npatch)},
+ dims=["patch"],
+ name="patches1d_itype_veg_str",
+ )
+ ds_in = xr.merge([ds_in, vegtype_da, patches1d_itype_veg_str])
+
+ # Restrict to veg. types of interest, if any
+ if vegtypes_to_import is not None:
+ ds_in = xr_flexsel(ds_in, vegtype=vegtypes_to_import)
+
+ # Restrict to time slice, if any
+ if time_slice:
+ ds_in = utils.safer_timeslice(ds_in, time_slice)
+
+ # Finish import
+ ds_in = xr.decode_cf(ds_in, decode_times=True)
+
+ # Compute derived variables
+ for var in derived_vars:
+ ds_in = compute_derived_vars(ds_in, var)
+
+ return ds_in
+
+
+def process_inputs(filelist, my_vars, my_vegtypes, my_vars_missing_ok):
+ """
+ Process inputs to import_ds()
+ """
+ if my_vars_missing_ok is None:
+ my_vars_missing_ok = []
+ # Convert my_vegtypes here, if needed, to avoid repeating the process each time you read a file
+ # in xr.open_mfdataset().
+ if my_vegtypes is not None:
+ if not isinstance(my_vegtypes, list):
+ my_vegtypes = [my_vegtypes]
+ if isinstance(my_vegtypes[0], str):
+ my_vegtypes = utils.vegtype_str2int(my_vegtypes)
+
+ # Same for these variables.
+ if my_vars is not None:
+ if not isinstance(my_vars, list):
+ my_vars = [my_vars]
+ if my_vars_missing_ok:
+ if not isinstance(my_vars_missing_ok, list):
+ my_vars_missing_ok = [my_vars_missing_ok]
+
+ # Make sure lists are actually lists
+ if not isinstance(filelist, list):
+ filelist = [filelist]
+ if not isinstance(my_vars_missing_ok, list):
+ my_vars_missing_ok = [my_vars_missing_ok]
+ return filelist, my_vars, my_vegtypes, my_vars_missing_ok
+
+
+def import_ds(
+ filelist,
+ my_vars=None,
+ my_vegtypes=None,
+ time_slice=None,
+ my_vars_missing_ok=None,
+ rename_lsmlatlon=False,
+ chunks=None,
+):
+ """
+ Import a dataset that can be spread over multiple files, only including specified variables
+ and/or vegetation types and/or timesteps, concatenating by time.
+
+ - DOES actually read the dataset into memory, but only AFTER dropping unwanted variables and/or
+ vegetation types.
+ """
+ filelist, my_vars, my_vegtypes, my_vars_missing_ok = process_inputs(
+ filelist, my_vars, my_vegtypes, my_vars_missing_ok
+ )
+
+ # Remove files from list if they don't contain requested timesteps.
+ # time_slice should be in the format slice(start,end[,step]). start or end can be None to be
+ # unbounded on one side. Note that the standard slice() documentation suggests that only
+ # elements through end-1 will be selected, but that seems not to be the case in the xarray
+ # implementation.
+ if time_slice:
+ new_filelist = []
+ for file in sorted(filelist):
+ filetime = xr.open_dataset(file).time
+ filetime_sel = utils.safer_timeslice(filetime, time_slice)
+ include_this_file = filetime_sel.size
+ if include_this_file:
+ new_filelist.append(file)
+
+ # If you found some matching files, but then you find one that doesn't, stop going
+ # through the list.
+ elif new_filelist:
+ break
+ if not new_filelist:
+ raise RuntimeError(f"No files found in time_slice {time_slice}")
+ filelist = new_filelist
+
+ # The xarray open_mfdataset() "preprocess" argument requires a function that takes exactly one
+ # variable (an xarray.Dataset object). Wrapping mfdataset_preproc() in this lambda function
+ # allows this. Could also just allow mfdataset_preproc() to access my_vars and my_vegtypes
+ # directly, but that's bad practice as it could lead to scoping issues.
+ mfdataset_preproc_closure = lambda ds: mfdataset_preproc(ds, my_vars, my_vegtypes, time_slice)
+
+ # Import
+ if isinstance(filelist, list) and len(filelist) == 1:
+ filelist = filelist[0]
+ if isinstance(filelist, list):
+ with warnings.catch_warnings():
+ warnings.filterwarnings(action="ignore", category=DeprecationWarning)
+ if find_spec("dask") is None:
+ raise ModuleNotFoundError(
+ "You have asked xarray to import a list of files as a single Dataset using"
+ " open_mfdataset(), but this requires dask, which is not available.\nFile"
+ f" list: {filelist}"
+ )
+ this_ds = xr.open_mfdataset(
+ sorted(filelist),
+ data_vars="minimal",
+ preprocess=mfdataset_preproc_closure,
+ compat="override",
+ coords="all",
+ concat_dim="time",
+ combine="nested",
+ chunks=chunks,
+ )
+ elif isinstance(filelist, str):
+ this_ds = xr.open_dataset(filelist, chunks=chunks)
+ this_ds = mfdataset_preproc(this_ds, my_vars, my_vegtypes, time_slice)
+ this_ds = this_ds.compute()
+
+ # Warn and/or error about variables that couldn't be imported or derived
+ if my_vars:
+ missing_vars = [v for v in my_vars if v not in this_ds]
+ ok_missing_vars = [v for v in missing_vars if v in my_vars_missing_ok]
+ bad_missing_vars = [v for v in missing_vars if v not in my_vars_missing_ok]
+ if ok_missing_vars:
+ print(
+ "Could not import some variables; either not present or not deriveable:"
+ f" {ok_missing_vars}"
+ )
+ if bad_missing_vars:
+ raise RuntimeError(
+ "Could not import some variables; either not present or not deriveable:"
+ f" {bad_missing_vars}"
+ )
+
+ if rename_lsmlatlon:
+ if "lsmlat" in this_ds.dims:
+ this_ds = this_ds.rename({"lsmlat": "lat"})
+ if "lsmlon" in this_ds.dims:
+ this_ds = this_ds.rename({"lsmlon": "lon"})
+
+ return this_ds
diff --git a/python/ctsm/crop_calendars/process_ggcmi_shdates.py b/python/ctsm/crop_calendars/process_ggcmi_shdates.py
index 835f91cb22..cada2b421b 100644
--- a/python/ctsm/crop_calendars/process_ggcmi_shdates.py
+++ b/python/ctsm/crop_calendars/process_ggcmi_shdates.py
@@ -1,16 +1,21 @@
-import numpy as np
-import xarray as xr
-import os
-import datetime as dt
-import cftime
+"""
+Convert GGCMI crop calendar files for use in CTSM
+"""
+
import sys
import argparse
import logging
+import os
+import datetime as dt
+import numpy as np
+import xarray as xr
+import cftime
# -- add python/ctsm to path (needed if we want to run process_ggcmi_shdates stand-alone)
_CTSM_PYTHON = os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
sys.path.insert(1, _CTSM_PYTHON)
+# pylint: disable=wrong-import-position
from ctsm import ctsm_logging
import ctsm.crop_calendars.cropcal_utils as utils
import ctsm.crop_calendars.regrid_ggcmi_shdates as regrid
@@ -18,19 +23,28 @@
logger = logging.getLogger(__name__)
-def get_cft(y):
- return cftime.DatetimeNoLeap(y, 1, 1, 0, 0, 0, 0, has_year_zero=True)
+def get_cft(year):
+ """
+ Given a year, return the cftime.DatetimeNoLeap of Jan. 1 at 00:00.
+ """
+ return cftime.DatetimeNoLeap(year, 1, 1, 0, 0, 0, 0, has_year_zero=True)
-def get_dayssince_jan1y1(y1, y):
- cft_y1 = get_cft(y1)
- cft_y = get_cft(y)
+def get_dayssince_jan1y1(year1, year):
+ """
+ Get the number of days since Jan. 1 of year1
+ """
+ cft_y1 = get_cft(year1)
+ cft_y = get_cft(year)
time_delta = cft_y - cft_y1
time_delta_secs = time_delta.total_seconds()
return time_delta_secs / (60 * 60 * 24)
def main():
+ """
+ main() function for calling process_ggcmi_shdates.py from command line.
+ """
ctsm_logging.setup_logging_pre_config()
args = process_ggcmi_shdates_args()
process_ggcmi_shdates(
@@ -40,7 +54,6 @@ def main():
args.file_specifier,
args.first_year,
args.last_year,
- args.verbose,
args.ggcmi_author,
args.regrid_resolution,
args.regrid_template_file,
@@ -50,8 +63,14 @@ def main():
def process_ggcmi_shdates_args():
+ """
+ Set up and parse input arguments for working with GGCMI crop calendar files
+ """
parser = argparse.ArgumentParser(
- description="Converts raw sowing and harvest date files provided by GGCMI into a format that CLM can read, optionally at a target resolution."
+ description=(
+ "Converts raw sowing and harvest date files provided by GGCMI into "
+ + "a format that CLM can read, optionally at a target resolution."
+ )
)
# Required
@@ -72,7 +91,10 @@ def process_ggcmi_shdates_args():
parser.add_argument(
"-a",
"--author",
- help="String to be saved in author_thisfile attribute of output files. E.g., 'Author Name (authorname@ucar.edu)'",
+ help=(
+ "String to be saved in author_thisfile attribute of output files. "
+ + "E.g., 'Author Name (authorname@ucar.edu)'"
+ ),
type=str,
required=True,
)
@@ -80,21 +102,30 @@ def process_ggcmi_shdates_args():
# Optional
parser.add_argument(
"--file-specifier",
- help="String following CROP_IRR_ in input filenames. E.g., mai_ir_FILESPECIFIER.nc4. Will also be saved to output filenames.",
+ help=(
+ "String following CROP_IRR_ in input filenames. E.g., mai_ir_FILESPECIFIER.nc4. "
+ + "Will also be saved to output filenames."
+ ),
type=str,
default="ggcmi_crop_calendar_phase3_v1.01",
)
parser.add_argument(
"-y1",
"--first-year",
- help="First year in output files. Must be present in template file, unless it's the same as the last year.",
+ help=(
+ "First year in output files. Must be present in template file, "
+ + "unless it's the same as the last year."
+ ),
type=int,
default=2000,
)
parser.add_argument(
"-yN",
"--last-year",
- help="Last year in output files. Must be present in template file, unless it's the same as the first year.",
+ help=(
+ "Last year in output files. Must be present in template file, "
+ + "unless it's the same as the first year."
+ ),
type=int,
default=2000,
)
@@ -117,53 +148,19 @@ def process_ggcmi_shdates_args():
return args
-def process_ggcmi_shdates(
- input_directory,
- output_directory,
- author,
- file_specifier,
- first_year,
- last_year,
- verbose,
- ggcmi_author,
- regrid_resolution,
- regrid_template_file,
- regrid_extension,
- crop_list,
-):
-
- input_directory = os.path.realpath(input_directory)
- output_directory = os.path.realpath(output_directory)
-
- ############################################################
- ### Regrid original GGCMI files to target CLM resolution ###
- ############################################################
-
- regridded_ggcmi_files_dir = os.path.join(
- output_directory, f"regridded_ggcmi_files-{regrid_resolution}"
- )
+def setup_crop_dict():
+ """
+ Associate CLM crop names with (1) their integer counterpart and (2) their GGCMI counterpart.
- regrid.regrid_ggcmi_shdates(
- regrid_resolution,
- regrid_template_file,
- input_directory,
- regridded_ggcmi_files_dir,
- regrid_extension,
- crop_list,
- )
+ Some notes:
+ - As "CLMname: {clm_num, thiscrop_ggcmi}"
+ - CLM names and numbers taken from commit 3dcbc7499a57904750a994672fc36b4221b9def5
+ - Using one global GGCMI value for both temperate and tropical versions of corn and soybean.
+ - There is no GGCMI equivalent of CLM's winter barley and rye. Using winter wheat instead.
+ - Using GGCMI "pea" for CLM pulses, as suggested by GGCMI phase 3 protocol.
+ - Only using GGCMI "ri1" for rice; ignoring "ri2".
+ """
- ###########################
- ### Define dictionaries ###
- ###########################
-
- # First, we associate CLM crop names with (1) their integer counterpart and (2) their GGCMI counterpart.
- # Some notes:
- # - As "CLMname: {clm_num, thiscrop_ggcmi}"
- # - CLM names and numbers taken from commit `3dcbc7499a57904750a994672fc36b4221b9def5`
- # - Using one global GGCMI value for both temperate and tropical versions of corn and soybean.
- # - There is no GGCMI equivalent of CLM's winter barley and rye. Using winter wheat instead.
- # - Using GGCMI `pea` for CLM pulses, as suggested by GGCMI phase 3 protocol.
- # - Only using GGCMI `ri1` for rice; ignoring `ri2`.
def set_crop_dict(thisnum, thisname):
return {"clm_num": thisnum, "thiscrop_ggcmi": thisname}
@@ -234,8 +231,16 @@ def set_crop_dict(thisnum, thisname):
"c3_irrigated": set_crop_dict(16, None),
}
- # Next, we associate CLM variable names with their GGCMI counterparts. We also save a placeholder for output file paths associated with each variable.
- # As CLMname: {GGCMIname, output_file}
+ return crop_dict
+
+
+def setup_var_dict():
+ """
+ Associate CLM variable names with their GGCMI counterparts.
+ - We also save a placeholder for output file paths associated with each variable.
+ - As CLMname: {GGCMIname, output_file}
+ """
+
def set_var_dict(name_ggcmi, outfile):
return {"name_ggcmi": name_ggcmi, "outfile": outfile}
@@ -243,23 +248,178 @@ def set_var_dict(name_ggcmi, outfile):
"sdate": set_var_dict("planting_day", ""),
"hdate": set_var_dict("maturity_day", ""),
}
+ return variable_dict
+
+
+def set_var_attrs(thisvar_da, thiscrop_clm, thiscrop_ggcmi, varname_ggcmi, new_fillvalue):
+ """
+ Set output variable attributes
+ """
+
+ longname = thisvar_da.attrs["long_name"]
+ longname = longname.replace("rainfed", thiscrop_clm).replace("irrigated", thiscrop_clm)
+ thisvar_da.attrs["long_name"] = longname
+
+ if thiscrop_ggcmi is None:
+ thisvar_da.attrs["crop_name_clm"] = "none"
+ thisvar_da.attrs["crop_name_ggcmi"] = "none"
+ else:
+ thisvar_da.attrs["crop_name_clm"] = thiscrop_clm
+ thisvar_da.attrs["crop_name_ggcmi"] = thiscrop_ggcmi
+
+ thisvar_da.attrs["short_name_ggcmi"] = varname_ggcmi
+ thisvar_da.attrs["units"] = "day of year"
+ thisvar_da.encoding["_FillValue"] = new_fillvalue
+
+ # scale_factor and add_offset are required by I/O library for short data
+ # From https://www.unidata.ucar.edu/software/netcdf/workshops/2010/bestpractices/Packing.html:
+ # unpacked_value = packed_value * scale_factor + add_offset
+ thisvar_da.attrs["scale_factor"] = np.int16(1)
+ thisvar_da.attrs["add_offset"] = np.int16(0)
+ return thisvar_da
+
+
+def fill_convert_int(thisvar_ds, thiscrop_ggcmi, varname_ggcmi, new_fillvalue):
+ """
+ Ensure fill value and real data are correct format
+ """
+ dummyvalue = -1
+ thisvar_ds.variables[varname_ggcmi].encoding["_FillValue"] = new_fillvalue
+ if thiscrop_ggcmi is None:
+ thisvar_ds.variables[varname_ggcmi].values.fill(dummyvalue)
+ else:
+ thisvar_ds.variables[varname_ggcmi].values[
+ np.isnan(thisvar_ds.variables[varname_ggcmi].values)
+ ] = new_fillvalue
+ thisvar_ds.variables[varname_ggcmi].values = thisvar_ds.variables[
+ varname_ggcmi
+ ].values.astype("int16")
+
+ return thisvar_ds
+
+
+def add_time_dim(thisvar_ds, template_ds, varname_ggcmi, varname_clm):
+ """
+ Add time dimension (https://stackoverflow.com/a/62862440)
+ - Repeats original map for every timestep
+ - Probably not necessary to use this method, since I only end up extracting thisvar_ds.values
+ anyway---I could probably use some numpy method instead.
+ """
+
+ thisvar_ds = thisvar_ds.expand_dims(time=template_ds.time)
+ thisvar_da_tmp = thisvar_ds[varname_ggcmi]
+ thisvar_da = xr.DataArray(
+ data=thisvar_da_tmp.values.astype("int16"),
+ attrs=thisvar_da_tmp.attrs,
+ coords=thisvar_da_tmp.coords,
+ name=varname_clm,
+ )
+
+ return thisvar_da
+
+
+def create_output_files(
+ regrid_resolution,
+ variable_dict,
+ output_directory,
+ file_specifier,
+ first_year,
+ last_year,
+ template_ds,
+):
+ """
+ Create output files, one for each variable
+ """
+    datetime_string = dt.datetime.now().strftime("%Y%m%d_%H%M%S")
+ nninterp_suffix = "nninterp-" + regrid_resolution
+ for var in variable_dict:
+ basename = (
+ f"{var}s_{file_specifier}_{nninterp_suffix}."
+ + f"{first_year}-{last_year}.{datetime_string}.nc"
+ )
+ outfile = os.path.join(output_directory, basename)
+ variable_dict[var]["outfile"] = outfile
+ template_ds.to_netcdf(
+ path=variable_dict[var]["outfile"],
+ format="NETCDF3_CLASSIC",
+ )
+
+ return nninterp_suffix
+
+
+def strip_dataset(cropcal_ds, varname_ggcmi):
+ """
+ Remove all variables except one from Dataset
+ """
+ droplist = []
+ for i in list(cropcal_ds.keys()):
+ if i != varname_ggcmi:
+ droplist.append(i)
+ thisvar_ds = cropcal_ds.drop(droplist)
+ return thisvar_ds
+
+
+def process_ggcmi_shdates(
+ input_directory,
+ output_directory,
+ author,
+ file_specifier,
+ first_year,
+ last_year,
+ ggcmi_author,
+ regrid_resolution,
+ regrid_template_file,
+ regrid_extension,
+ crop_list,
+):
+ """
+ Convert GGCMI crop calendar files for use in CTSM
+ """
+
+ input_directory = os.path.realpath(input_directory)
+ output_directory = os.path.realpath(output_directory)
+
+ ############################################################
+ ### Regrid original GGCMI files to target CLM resolution ###
+ ############################################################
+
+ regridded_ggcmi_files_dir = os.path.join(
+ output_directory, f"regridded_ggcmi_files-{regrid_resolution}"
+ )
+
+ regrid.regrid_ggcmi_shdates(
+ regrid_resolution,
+ regrid_template_file,
+ input_directory,
+ regridded_ggcmi_files_dir,
+ regrid_extension,
+ crop_list,
+ )
+
+ # Set up dictionaries used in remapping crops and variables between GGCMI and CLM
+ crop_dict = setup_crop_dict()
+ variable_dict = setup_var_dict()
################################
### Instantiate output files ###
################################
# Global attributes for output files
+ comment = (
+ "Day of year is 1-indexed (i.e., Jan. 1 = 1). "
+ + "Filled using cdo -remapnn,$original -setmisstonn"
+ )
out_attrs = {
"title": "GGCMI crop calendar for Phase 3, v1.01",
"author_thisfile": author,
"author_original": ggcmi_author,
- "comment": "Day of year is 1-indexed (i.e., Jan. 1 = 1). Filled using cdo -remapnn,$original -setmisstonn",
+ "comment": comment,
"created": dt.datetime.now().replace(microsecond=0).astimezone().isoformat(),
}
# Create template dataset
time_array = np.array(
- [get_dayssince_jan1y1(first_year, y) for y in np.arange(first_year, last_year + 1)]
+ [get_dayssince_jan1y1(first_year, year) for year in np.arange(first_year, last_year + 1)]
)
time_coord = xr.IndexVariable(
"time",
@@ -273,18 +433,15 @@ def set_var_dict(name_ggcmi, outfile):
template_ds = xr.Dataset(coords={"time": time_coord}, attrs=out_attrs)
# Create output files
- datetime_string = dt.datetime.now().strftime("%Y%m%d_%H%M%S")
- nninterp_suffix = "nninterp-" + regrid_resolution
- for v in variable_dict:
- outfile = os.path.join(
- output_directory,
- f"{v}s_{file_specifier}_{nninterp_suffix}.{first_year}-{last_year}.{datetime_string}.nc",
- )
- variable_dict[v]["outfile"] = outfile
- template_ds.to_netcdf(
- path=variable_dict[v]["outfile"],
- format="NETCDF3_CLASSIC",
- )
+ nninterp_suffix = create_output_files(
+ regrid_resolution,
+ variable_dict,
+ output_directory,
+ file_specifier,
+ first_year,
+ last_year,
+ template_ds,
+ )
#########################
### Process all crops ###
@@ -293,7 +450,7 @@ def set_var_dict(name_ggcmi, outfile):
for thiscrop_clm in crop_dict:
# Which crop are we on?
- c = list(crop_dict.keys()).index(thiscrop_clm) + 1
+ crop_int = list(crop_dict.keys()).index(thiscrop_clm) + 1
# Get information about this crop
this_dict = crop_dict[thiscrop_clm]
@@ -306,18 +463,24 @@ def set_var_dict(name_ggcmi, outfile):
# If no corresponding GGCMI crop, skip opening dataset.
# Will use previous cropcal_ds as a template.
- if thiscrop_ggcmi == None:
- if c == 1:
+ if thiscrop_ggcmi is None:
+ if crop_int == 1:
raise ValueError(f"First crop ({thiscrop_clm}) must have a GGCMI type")
logger.info(
- "Filling %s with dummy data (%d of %d)..." % (str(thiscrop_clm), c, len(crop_dict))
+ "Filling %s with dummy data (%d of %d)...",
+ str(thiscrop_clm),
+ crop_int,
+ len(crop_dict),
)
# Otherwise, import crop calendar file
else:
logger.info(
- "Importing %s -> %s (%d of %d)..."
- % (str(thiscrop_ggcmi), str(thiscrop_clm), c, len(crop_dict))
+ "Importing %s -> %s (%d of %d)...",
+ str(thiscrop_ggcmi),
+ str(thiscrop_clm),
+ crop_int,
+ len(crop_dict),
)
file_ggcmi = os.path.join(
@@ -326,7 +489,7 @@ def set_var_dict(name_ggcmi, outfile):
)
if not os.path.exists(file_ggcmi):
logger.warning(
- f"Skipping {thiscrop_ggcmi} because input file not found: {file_ggcmi}"
+ "Skipping %s because input file not found: %s", thiscrop_ggcmi, file_ggcmi
)
continue
cropcal_ds = xr.open_dataset(file_ggcmi)
@@ -338,7 +501,7 @@ def set_var_dict(name_ggcmi, outfile):
for thisvar_clm in variable_dict:
# Get GGCMI netCDF info
varname_ggcmi = variable_dict[thisvar_clm]["name_ggcmi"]
- logger.info(" Processing %s..." % varname_ggcmi)
+ logger.info(" Processing %s...", varname_ggcmi)
# Get CLM netCDF info
varname_clm = thisvar_clm + "1_" + str(thiscrop_int)
@@ -347,69 +510,21 @@ def set_var_dict(name_ggcmi, outfile):
raise Exception("Output file not found: " + file_clm)
# Strip dataset to just this variable
- droplist = []
- for i in list(cropcal_ds.keys()):
- if i != varname_ggcmi:
- droplist.append(i)
- thisvar_ds = cropcal_ds.drop(droplist)
- thisvar_ds = thisvar_ds.load()
+            thisvar_ds = strip_dataset(cropcal_ds, varname_ggcmi).load()
# Convert to integer
new_fillvalue = -1
- dummyvalue = -1
- thisvar_ds.variables[varname_ggcmi].encoding["_FillValue"] = new_fillvalue
- if thiscrop_ggcmi == None:
- thisvar_ds.variables[varname_ggcmi].values.fill(dummyvalue)
- else:
- thisvar_ds.variables[varname_ggcmi].values[
- np.isnan(thisvar_ds.variables[varname_ggcmi].values)
- ] = new_fillvalue
- thisvar_ds.variables[varname_ggcmi].values = thisvar_ds.variables[
- varname_ggcmi
- ].values.astype("int16")
+ thisvar_ds = fill_convert_int(thisvar_ds, thiscrop_ggcmi, varname_ggcmi, new_fillvalue)
# Add time dimension (https://stackoverflow.com/a/62862440)
- # (Repeats original map for every timestep)
- # Probably not necessary to use this method, since I only end up extracting thisvar_ds.values anyway---I could probably use some numpy method instead.
- thisvar_ds = thisvar_ds.expand_dims(time=template_ds.time)
- thisvar_da_tmp = thisvar_ds[varname_ggcmi]
- thisvar_da = xr.DataArray(
- data=thisvar_da_tmp.values.astype("int16"),
- attrs=thisvar_da_tmp.attrs,
- coords=thisvar_da_tmp.coords,
- name=varname_clm,
- )
-
- # Edit/add variable attributes etc.
- longname = thisvar_da.attrs["long_name"]
- longname = longname.replace("rainfed", thiscrop_clm).replace("irrigated", thiscrop_clm)
-
- def set_var_attrs(
- thisvar_da, longname, thiscrop_clm, thiscrop_ggcmi, varname_ggcmi, new_fillvalue
- ):
- thisvar_da.attrs["long_name"] = longname
- if thiscrop_ggcmi == None:
- thisvar_da.attrs["crop_name_clm"] = "none"
- thisvar_da.attrs["crop_name_ggcmi"] = "none"
- else:
- thisvar_da.attrs["crop_name_clm"] = thiscrop_clm
- thisvar_da.attrs["crop_name_ggcmi"] = thiscrop_ggcmi
- thisvar_da.attrs["short_name_ggcmi"] = varname_ggcmi
- thisvar_da.attrs["units"] = "day of year"
- thisvar_da.encoding["_FillValue"] = new_fillvalue
- # scale_factor and add_offset are required by I/O library for short data
- # From https://www.unidata.ucar.edu/software/netcdf/workshops/2010/bestpractices/Packing.html:
- # unpacked_value = packed_value * scale_factor + add_offset
- thisvar_da.attrs["scale_factor"] = np.int16(1)
- thisvar_da.attrs["add_offset"] = np.int16(0)
- return thisvar_da
+ thisvar_da = add_time_dim(thisvar_ds, template_ds, varname_ggcmi, varname_clm)
thisvar_da = set_var_attrs(
- thisvar_da, longname, thiscrop_clm, thiscrop_ggcmi, varname_ggcmi, new_fillvalue
+ thisvar_da, thiscrop_clm, thiscrop_ggcmi, varname_ggcmi, new_fillvalue
)
# Save
- logger.info(" Saving %s..." % varname_ggcmi)
+ logger.info(" Saving %s...", varname_ggcmi)
thisvar_da.to_netcdf(file_clm, mode="a", format="NETCDF3_CLASSIC")
cropcal_ds.close()
diff --git a/python/ctsm/crop_calendars/regrid_ggcmi_shdates.py b/python/ctsm/crop_calendars/regrid_ggcmi_shdates.py
index 911b2f93a1..b1988aa8b5 100644
--- a/python/ctsm/crop_calendars/regrid_ggcmi_shdates.py
+++ b/python/ctsm/crop_calendars/regrid_ggcmi_shdates.py
@@ -1,19 +1,25 @@
+"""
+Regrid GGCMI sowing and harvest date files
+"""
from subprocess import run
import os
import glob
import argparse
import sys
+import logging
import xarray as xr
import numpy as np
-import logging
# -- add python/ctsm to path (needed if we want to run regrid_ggcmi_shdates stand-alone)
_CTSM_PYTHON = os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
sys.path.insert(1, _CTSM_PYTHON)
-from ctsm.utils import abort
-from ctsm.ctsm_pylib_dependent_utils import import_coord_1d, import_coord_2d
-from ctsm import ctsm_logging
+from ctsm.utils import abort # pylint: disable=wrong-import-position
+from ctsm.ctsm_pylib_dependent_utils import ( # pylint: disable=wrong-import-position
+ import_coord_1d,
+ import_coord_2d,
+)
+from ctsm import ctsm_logging # pylint: disable=wrong-import-position
logger = logging.getLogger(__name__)
@@ -37,18 +43,26 @@ def main():
def run_and_check(cmd):
+ """
+ Run a given shell command and check its result
+ """
result = run(
cmd,
shell=True,
capture_output=True,
text=True,
+ check=False,
)
if result.returncode != 0:
abort(f"Trouble running `{result.args}` in shell:\n{result.stdout}\n{result.stderr}")
-# Functionized because these are shared by process_ggcmi_shdates
def define_arguments(parser):
+ """
+ Set up arguments shared between regrid_ggcmi_shdates and process_ggcmi_shdates
+
+ Functionized because these are shared by process_ggcmi_shdates
+ """
# Required
parser.add_argument(
"-rr",
@@ -60,7 +74,11 @@ def define_arguments(parser):
parser.add_argument(
"-rt",
"--regrid-template-file",
- help="Template netCDF file to be used in regridding of inputs. This can be a CLM output file (i.e., something with 1-d lat and lon variables) or a CLM surface dataset (i.e., something with 2-d LATIXY and LONGXY variables).",
+ help=(
+ "Template netCDF file to be used in regridding of inputs. This can be a CLM output "
+ + "file (i.e., something with 1-d lat and lon variables) or a CLM surface dataset "
+ + "(i.e., something with 2-d LATIXY and LONGXY variables)."
+ ),
type=str,
required=True,
)
@@ -75,7 +93,10 @@ def define_arguments(parser):
parser.add_argument(
"-c",
"--crop-list",
- help="List of GGCMI crops to process; e.g., '--crop-list mai_rf,mai_ir'. If not provided, will process all GGCMI crops.",
+ help=(
+ "List of GGCMI crops to process; e.g., '--crop-list mai_rf,mai_ir'. If not provided, "
+ + "will process all GGCMI crops."
+ ),
default=None,
)
return parser
@@ -89,7 +110,10 @@ def regrid_ggcmi_shdates(
regrid_extension,
crop_list,
):
- logger.info(f"Regridding GGCMI crop calendars to {regrid_resolution}:")
+ """
+ Regrid GGCMI sowing and harvest date files
+ """
+ logger.info("Regridding GGCMI crop calendars to %s:", regrid_resolution)
# Ensure we can call necessary shell script(s)
for cmd in ["module load cdo; cdo"]:
@@ -113,31 +137,7 @@ def regrid_ggcmi_shdates(
regrid_extension = "." + regrid_extension
# Import and format latitude
- if "lat" in template_ds_in:
- lat, Nlat = import_coord_1d(template_ds_in, "lat")
- elif "LATIXY" in template_ds_in:
- lat, Nlat = import_coord_2d(template_ds_in, "lat", "LATIXY")
- lat.attrs["axis"] = "Y"
- else:
- abort("No latitude variable found in regrid template file")
-
- # Flip latitude, if needed
- if lat.values[0] < lat.values[1]:
- lat = lat.reindex(lat=list(reversed(lat["lat"])))
-
- # Import and format longitude
- if "lon" in template_ds_in:
- lon, Nlon = import_coord_1d(template_ds_in, "lon")
- elif "LONGXY" in template_ds_in:
- lon, Nlon = import_coord_2d(template_ds_in, "lon", "LONGXY")
- lon.attrs["axis"] = "Y"
- else:
- abort("No longitude variable found in regrid template file")
- template_da_out = xr.DataArray(
- data=np.full((Nlat, Nlon), 0.0),
- dims={"lat": lat, "lon": lon},
- name="area",
- )
+ lat, lon, template_da_out = get_template_da_out(template_ds_in)
# Save template Dataset for use by cdo
template_ds_out = xr.Dataset(
@@ -156,43 +156,75 @@ def regrid_ggcmi_shdates(
if len(input_files) == 0:
abort(f"No files found matching {os.path.join(os.getcwd(), pattern)}")
input_files.sort()
- for f in input_files:
- this_crop = f[0:6]
+ for file in input_files:
+ this_crop = file[0:6]
if crop_list is not None and this_crop not in crop_list:
continue
- logger.info(" " + this_crop)
- f2 = os.path.join(regrid_output_directory, f)
- f3 = f2.replace(regrid_extension, f"_nninterp-{regrid_resolution}{regrid_extension}")
-
- if os.path.exists(f3):
- os.remove(f3)
-
- # Sometimes cdo fails for no apparent reason. In testing this never happened more than 3x in a row.
+ logger.info(" %s", this_crop)
+ file_2 = os.path.join(regrid_output_directory, file)
+ file_3 = file_2.replace(
+ regrid_extension, f"_nninterp-{regrid_resolution}{regrid_extension}"
+ )
+
+ if os.path.exists(file_3):
+ os.remove(file_3)
+
+ # Sometimes cdo fails for no apparent reason. In testing this never happened more than 3x
+ # in a row.
+ cdo_cmd = (
+ f"module load cdo; cdo -L -remapnn,'{templatefile}' "
+ + f"-setmisstonn '{file}' '{file_3}'"
+ )
try:
- run_and_check(
- f"module load cdo; cdo -L -remapnn,'{templatefile}' -setmisstonn '{f}' '{f3}'"
- )
- except:
+ run_and_check(cdo_cmd)
+ except: # pylint: disable=bare-except
try:
- run_and_check(
- f"module load cdo; cdo -L -remapnn,'{templatefile}' -setmisstonn '{f}' '{f3}'"
- )
- except:
+ run_and_check(cdo_cmd)
+ except: # pylint: disable=bare-except
try:
- run_and_check(
- f"module load cdo; cdo -L -remapnn,'{templatefile}' -setmisstonn '{f}' '{f3}'"
- )
- except:
- run_and_check(
- f"module load cdo; cdo -L -remapnn,'{templatefile}' -setmisstonn '{f}' '{f3}'"
- )
+ run_and_check(cdo_cmd)
+ except: # pylint: disable=bare-except
+ run_and_check(cdo_cmd)
# Delete template file, which is no longer needed
os.remove(templatefile)
os.chdir(previous_dir)
+def get_template_da_out(template_ds_in):
+ """
+ Get template output DataArray from input Dataset
+ """
+ if "lat" in template_ds_in:
+ lat, n_lat = import_coord_1d(template_ds_in, "lat")
+ elif "LATIXY" in template_ds_in:
+ lat, n_lat = import_coord_2d(template_ds_in, "lat", "LATIXY")
+ lat.attrs["axis"] = "Y"
+ else:
+ abort("No latitude variable found in regrid template file")
+
+ # Flip latitude, if needed
+ if lat.values[0] < lat.values[1]:
+ lat = lat.reindex(lat=list(reversed(lat["lat"])))
+
+ # Import and format longitude
+ if "lon" in template_ds_in:
+ lon, n_lon = import_coord_1d(template_ds_in, "lon")
+ elif "LONGXY" in template_ds_in:
+ lon, n_lon = import_coord_2d(template_ds_in, "lon", "LONGXY")
+ lon.attrs["axis"] = "Y"
+ else:
+ abort("No longitude variable found in regrid template file")
+ template_da_out = xr.DataArray(
+ data=np.full((n_lat, n_lon), 0.0),
+ dims={"lat": lat, "lon": lon},
+ name="area",
+ )
+
+ return lat, lon, template_da_out
+
+
def regrid_ggcmi_shdates_arg_process():
"""Process input arguments
@@ -204,7 +236,7 @@ def regrid_ggcmi_shdates_arg_process():
ctsm_logging.setup_logging_pre_config()
parser = argparse.ArgumentParser(
- description="Regrids raw sowing and harvest date files provided by GGCMI to a target CLM resolution."
+ description=("Regrid raw sowing/harvest date files from GGCMI to a target CLM resolution."),
)
# Define arguments
diff --git a/python/ctsm/crop_calendars/xr_flexsel.py b/python/ctsm/crop_calendars/xr_flexsel.py
new file mode 100644
index 0000000000..d51d925985
--- /dev/null
+++ b/python/ctsm/crop_calendars/xr_flexsel.py
@@ -0,0 +1,263 @@
+"""
+Flexibly subset time(s) and/or vegetation type(s) from an xarray Dataset or DataArray.
+"""
+import re
+import numpy as np
+import xarray as xr
+
+from ctsm.crop_calendars.cropcal_utils import vegtype_str2int, is_each_vegtype
+
+
+def xr_flexsel(xr_object, patches1d_itype_veg=None, warn_about_seltype_interp=True, **kwargs):
+ """
+ Flexibly subset time(s) and/or vegetation type(s) from an xarray Dataset or DataArray.
+
+ - Keyword arguments like dimension=selection.
+ - Selections can be individual values or slice()s.
+ - Optimize memory usage by beginning keyword argument list with the selections that will result
+ in the largest reduction of object size.
+ - Use dimension "vegtype" to extract patches of designated vegetation type (can be string or
+ integer).
+ - Can also do dimension=function---e.g., time=np.mean will take the mean over the time
+ dimension.
+ """
+ # Setup
+ havewarned = False
+ delimiter = "__"
+
+ for key, selection in kwargs.items():
+ if callable(selection):
+ xr_object = handle_callable(xr_object, key, selection)
+
+ elif key == "vegtype":
+ xr_object = handle_vegtype(xr_object, patches1d_itype_veg, selection)
+
+ else:
+ # Parse selection type, if provided
+ if delimiter in key:
+ key, selection_type = key.split(delimiter)
+
+ # Check type of selection
+ else:
+ is_inefficient = False
+ if isinstance(selection, slice):
+ this_type = set_type_from_slice(selection)
+ elif isinstance(selection, np.ndarray):
+ selection, is_inefficient, this_type = set_type_from_ndarray(selection)
+ else:
+ this_type = type(selection)
+
+ warn_about_this_seltype_interp = warn_about_seltype_interp
+ if this_type == list and isinstance(selection[0], str):
+ selection_type = "values"
+ warn_about_this_seltype_interp = False
+ elif this_type == int:
+ selection_type = "indices"
+ else:
+ selection_type = "values"
+
+ if warn_about_this_seltype_interp:
+ do_warn_about_seltype_interp(
+ havewarned, delimiter, key, selection_type, is_inefficient, this_type
+ )
+
+ # Trim along relevant 1d axes
+ if isinstance(xr_object, xr.Dataset) and key in ["lat", "lon"]:
+ xr_object = trim_along_relevant_1d_axes(xr_object, selection, selection_type, key)
+
+ # Perform selection
+ xr_object = perform_selection(xr_object, key, selection, selection_type)
+
+ return xr_object
+
+
+def perform_selection(xr_object, key, selection, selection_type):
+ """
+ Perform selection
+ """
+ if selection_type == "indices":
+ # Have to select like this instead of with index directly because otherwise assign_coords()
+ # will throw an error. Not sure why.
+ if isinstance(selection, int):
+ # Single integer? Turn it into a slice.
+ selection = slice(selection, selection + 1)
+ elif (
+ isinstance(selection, np.ndarray)
+ and not selection.dtype.kind in np.typecodes["AllInteger"]
+ ):
+ selection = selection.astype(int)
+ xr_object = xr_object.isel({key: selection})
+ elif selection_type == "values":
+ xr_object = xr_object.sel({key: selection})
+ else:
+ raise TypeError(f"selection_type {selection_type} not recognized")
+ return xr_object
+
+
+def trim_along_relevant_1d_axes(xr_object, selection, selection_type, key):
+ """
+ Trim along relevant 1d axes
+ """
+ if selection_type == "indices":
+ incl_coords = xr_object[key].values[selection]
+ elif selection_type == "values":
+ if isinstance(selection, slice):
+ incl_coords = xr_object.sel({key: selection}, drop=False)[key].values
+ else:
+ incl_coords = selection
+ else:
+ raise TypeError(f"selection_type {selection_type} not recognized")
+ if key == "lat":
+ this_xy = "jxy"
+ elif key == "lon":
+ this_xy = "ixy"
+ else:
+ raise KeyError(
+ f"Key '{key}' not recognized: What 1d_ suffix should I use for variable name?"
+ )
+ pattern = re.compile(f"1d_{this_xy}")
+ matches = [x for x in list(xr_object.keys()) if pattern.search(x) is not None]
+ for var in matches:
+ if len(xr_object[var].dims) != 1:
+ raise RuntimeError(
+ f"Expected {var} to have 1 dimension, but it has"
+ f" {len(xr_object[var].dims)}: {xr_object[var].dims}"
+ )
+ dim = xr_object[var].dims[0]
+ # print(f"Variable {var} has dimension {dim}")
+ coords = xr_object[key].values[xr_object[var].values.astype(int) - 1]
+ # print(f"{dim} size before: {xr_object.sizes[dim]}")
+ ok_ind = []
+ new_1d_this_xy = []
+ for i, member in enumerate(coords):
+ if member in incl_coords:
+ ok_ind = ok_ind + [i]
+ new_1d_this_xy = new_1d_this_xy + [(incl_coords == member).nonzero()[0] + 1]
+ xr_object = xr_object.isel({dim: ok_ind})
+ new_1d_this_xy = np.array(new_1d_this_xy).squeeze()
+ xr_object[var].values = new_1d_this_xy
+ # print(f"{dim} size after: {xr_object.sizes[dim]}")
+ return xr_object
+
+
+def do_warn_about_seltype_interp(
+ havewarned, delimiter, key, selection_type, is_inefficient, this_type
+):
+ """
+ Suggest suppressing selection type interpretation warnings
+ """
+ if not havewarned:
+ print(
+ "xr_flexsel(): Suppress all 'selection type interpretation' messages by specifying"
+ + " warn_about_seltype_interp=False"
+ )
+ havewarned = True
+ if is_inefficient:
+ extra = " This will also improve efficiency for large selections."
+ else:
+ extra = ""
+ print(
+ f"xr_flexsel(): Selecting {key} as {selection_type} because selection was"
+ f" interpreted as {this_type}. If not correct, specify selection type"
+ " ('indices' or 'values') in keyword like"
+ f" '{key}{delimiter}SELECTIONTYPE=...' instead of '{key}=...'.{extra}"
+ )
+
+
+def set_type_from_ndarray(selection):
+ """
+ Sets selection type if given a Numpy array
+ """
+ if selection.dtype.kind in np.typecodes["AllInteger"]:
+ this_type = int
+ else:
+ is_inefficient = True
+ this_type = None
+ for member in selection:
+ if member < 0 or member % 1 > 0:
+ if isinstance(member, int):
+ this_type = "values"
+ else:
+ this_type = type(member)
+ break
+ if this_type is None:
+ this_type = int
+ selection = selection.astype(int)
+ return selection, is_inefficient, this_type
+
+
+def set_type_from_slice(selection):
+ """
+ Sets selection type if given a slice
+ """
+ slice_members = []
+ if selection == slice(0):
+ raise ValueError("slice(0) will be empty")
+ if selection.start is not None:
+ slice_members = slice_members + [selection.start]
+ if selection.stop is not None:
+ slice_members = slice_members + [selection.stop]
+ if selection.step is not None:
+ slice_members = slice_members + [selection.step]
+ if not slice_members:
+ raise TypeError("slice is all None?")
+ this_type = int
+ for member in slice_members:
+ if member < 0 or not isinstance(member, int):
+ this_type = "values"
+ break
+ return this_type
+
+
+def handle_vegtype(xr_object, patches1d_itype_veg, selection):
+ """
+ Handle selection "vegtype"
+ """
+ # Convert to list, if needed
+ if not isinstance(selection, list):
+ selection = [selection]
+
+ # Convert to indices, if needed
+ if isinstance(selection[0], str):
+ selection = vegtype_str2int(selection)
+
+ # Get list of boolean(s)
+ if isinstance(selection[0], int):
+ if isinstance(patches1d_itype_veg, type(None)):
+ patches1d_itype_veg = xr_object.patches1d_itype_veg.values
+ elif isinstance(patches1d_itype_veg, xr.core.dataarray.DataArray):
+ patches1d_itype_veg = patches1d_itype_veg.values
+ is_vegtype = is_each_vegtype(patches1d_itype_veg, selection, "ok_exact")
+ elif isinstance(selection[0], bool):
+ if len(selection) != len(xr_object.patch):
+ raise ValueError(
+ "If providing boolean 'vegtype' argument to xr_flexsel(), it must be the"
+ f" same length as xr_object.patch ({len(selection)} vs."
+ f" {len(xr_object.patch)})"
+ )
+ is_vegtype = selection
+ else:
+ raise TypeError(f"Not sure how to handle 'vegtype' of type {type(selection[0])}")
+ xr_object = xr_object.isel(patch=[i for i, x in enumerate(is_vegtype) if x])
+ if "ivt" in xr_object:
+ xr_object = xr_object.isel(ivt=is_each_vegtype(xr_object.ivt.values, selection, "ok_exact"))
+
+ return xr_object
+
+
+def handle_callable(xr_object, key, selection):
+ """
+ Handle selection that's a callable
+ """
+ # It would have been really nice to do selection(xr_object, axis=key), but numpy methods and
+ # xarray methods disagree on "axis" vs. "dimension." So instead, just do this manually.
+ if selection == np.mean: # pylint: disable=comparison-with-callable
+ try:
+ xr_object = xr_object.mean(dim=key)
+ except: # pylint: disable=raise-missing-from
+ raise ValueError(
+ f"Failed to take mean of dimension {key}. Try doing so outside of xr_flexsel()."
+ )
+ else:
+ raise ValueError(f"xr_flexsel() doesn't recognize function {selection}")
+ return xr_object
diff --git a/python/ctsm/ctsm_pylib_dependent_utils.py b/python/ctsm/ctsm_pylib_dependent_utils.py
index 13ccf7a969..59ca15155b 100644
--- a/python/ctsm/ctsm_pylib_dependent_utils.py
+++ b/python/ctsm/ctsm_pylib_dependent_utils.py
@@ -1,49 +1,64 @@
-from ctsm.utils import abort
+"""
+Utilities that are dependent on non-standard modules (i.e., require ctsm_pylib).
+"""
+
import numpy as np
+from ctsm.utils import abort
-def import_coord_1d(ds, coordName):
+def import_coord_1d(data_set, coord_name):
"""Import 1-d coordinate variable
Args:
- ds (xarray Dataset): Dataset whose coordinate you want to import.
- coordName (str): Name of coordinate to import
+ data_set (xarray Dataset): Dataset whose coordinate you want to import.
+ coord_name (str): Name of coordinate to import
Returns:
xarray DataArray: DataArray corresponding to the requested coordinate.
"""
- da = ds[coordName]
- if len(da.dims) != 1:
- abort(f"Expected 1 dimension for {coordName}; found {len(da.dims)}: {da.dims}")
- return da, len(da)
+ data_array = data_set[coord_name]
+ if len(data_array.dims) != 1:
+ abort(
+ f"Expected 1 dimension for {coord_name}; "
+ + f"found {len(data_array.dims)}: {data_array.dims}"
+ )
+ return data_array, len(data_array)
-def import_coord_2d(ds, coordName, varName):
- """Import 2-d latitude or longitude variable from a CESM history file (e.g., name LATIXY or LONGXY) and return it as a 1-d DataArray that can be used as a coordinate for writing CESM input files
+def import_coord_2d(data_set, coord_name, var_name):
+ """
+ Import 2-d latitude or longitude variable from a CESM history file (e.g., name LATIXY
+ or LONGXY) and return it as a 1-d DataArray that can be used as a coordinate for writing
+ CESM input files
Args:
- ds (xarray Dataset): Dataset whose coordinate you want to import.
- coordName (str): Name of coordinate to import
- varName (str): Name of variable with dimension coordName
+ data_set (xarray Dataset): Dataset whose coordinate you want to import.
+ coord_name (str): Name of coordinate to import
+ var_name (str): Name of variable with dimension coord_name
Returns:
xarray DataArray: 1-d variable that can be used as a coordinate for writing CESM input files
int: Length of that variable
"""
- da = ds[varName]
- thisDim = [x for x in da.dims if coordName in x]
- if len(thisDim) != 1:
- abort(f"Expected 1 dimension name containing {coordName}; found {len(thisDim)}: {thisDim}")
- thisDim = thisDim[0]
- otherDim = [x for x in da.dims if coordName not in x]
- if len(otherDim) != 1:
+ data_array = data_set[var_name]
+ this_dim = [x for x in data_array.dims if coord_name in x]
+ if len(this_dim) != 1:
+ abort(
+ f"Expected 1 dimension name containing {coord_name}; "
+ + f"found {len(this_dim)}: {this_dim}"
+ )
+ this_dim = this_dim[0]
+ other_dim = [x for x in data_array.dims if coord_name not in x]
+ if len(other_dim) != 1:
abort(
- f"Expected 1 dimension name not containing {coordName}; found {len(otherDim)}: {otherDim}"
+ f"Expected 1 dimension name not containing {coord_name}; "
+ + f"found {len(other_dim)}: {other_dim}"
)
- otherDim = otherDim[0]
- da = da.astype(np.float32)
- da = da.isel({otherDim: [0]}).squeeze().rename({thisDim: coordName}).rename(coordName)
- da = da.assign_coords({coordName: da.values})
- da.attrs["long_name"] = "coordinate " + da.attrs["long_name"]
- da.attrs["units"] = da.attrs["units"].replace(" ", "_")
- return da, len(da)
+ other_dim = other_dim[0]
+ data_array = data_array.astype(np.float32)
+ data_array = data_array.isel({other_dim: [0]}).squeeze()
+ data_array = data_array.rename({this_dim: coord_name}).rename(coord_name)
+ data_array = data_array.assign_coords({coord_name: data_array.values})
+ data_array.attrs["long_name"] = "coordinate " + data_array.attrs["long_name"]
+ data_array.attrs["units"] = data_array.attrs["units"].replace(" ", "_")
+ return data_array, len(data_array)
diff --git a/python/ctsm/site_and_regional/modify_singlept_site_neon.py b/python/ctsm/site_and_regional/modify_singlept_site_neon.py
index ae1318e2f8..e69a8ab834 100755
--- a/python/ctsm/site_and_regional/modify_singlept_site_neon.py
+++ b/python/ctsm/site_and_regional/modify_singlept_site_neon.py
@@ -54,6 +54,9 @@
myname = getuser()
+# Seconds to wait before requests.get() times out
+TIMEOUT = 60
+
# -- valid neon sites
valid_neon_sites = glob.glob(
@@ -176,7 +179,7 @@ def get_neon(neon_dir, site_name):
+ site_name
+ "_surfaceData.csv"
)
- response = requests.get(url)
+ response = requests.get(url, timeout=TIMEOUT)
with open(neon_file, "wb") as a_file:
a_file.write(response.content)
@@ -430,7 +433,7 @@ def download_file(url, fname):
file name to save the downloaded file.
"""
try:
- response = requests.get(url)
+ response = requests.get(url, timeout=TIMEOUT)
with open(fname, "wb") as a_file:
a_file.write(response.content)
@@ -443,7 +446,7 @@ def download_file(url, fname):
except Exception as err:
print("The server could not fulfill the request.")
print("Something went wrong in downloading", fname)
- print("Error code:", err.code)
+ raise err
def fill_interpolate(f_2, var, method):
@@ -472,6 +475,129 @@ def fill_interpolate(f_2, var, method):
print("=====================================")
+def print_neon_data_soil_structure(obs_bot, soil_bot, bin_index):
+ """
+ Print info about NEON data soil structure
+ """
+ print("================================")
+ print(" Neon data soil structure: ")
+ print("================================")
+
+ print("------------", "ground", "------------")
+ for i, this_obs_bot in enumerate(obs_bot):
+ print("layer", i)
+ print("-------------", "{0:.2f}".format(this_obs_bot), "-------------")
+
+ print("================================")
+ print("Surface data soil structure: ")
+ print("================================")
+
+ print("------------", "ground", "------------")
+ for this_bin in range(len(bin_index)):
+ print("layer", this_bin)
+ print("-------------", "{0:.2f}".format(soil_bot[this_bin]), "-------------")
+
+
+def print_soil_quality(
+ inorganic, bin_index, soil_lev, layer_depth, carbon_tot, estimated_oc, bulk_den, f_2
+):
+ """
+ Prints information about soil quality
+ """
+ print("~~~~~~~~~~~~~~~~~~~~~~~~")
+ print("inorganic:")
+ print("~~~~~~~~~~~~~~~~~~~~~~~~")
+ print(inorganic)
+ print("~~~~~~~~~~~~~~~~~~~~~~~~")
+
+ print("bin_index : ", bin_index[soil_lev])
+ print("layer_depth : ", layer_depth)
+ print("carbon_tot : ", carbon_tot)
+ print("estimated_oc : ", estimated_oc)
+ print("bulk_den : ", bulk_den)
+ print("organic :", f_2["ORGANIC"][soil_lev].values)
+ print("--------------------------")
+
+
+def update_agri_site_info(site_name, f_2):
+ """
+ Updates agricultural sites
+ """
+ ag_sites = ["KONA", "STER"]
+ if site_name not in ag_sites:
+ return f_2
+
+ print("Updating PCT_NATVEG")
+ print("Original : ", f_2.PCT_NATVEG.values)
+ f_2.PCT_NATVEG.values = [[0.0]]
+ print("Updated : ", f_2.PCT_NATVEG.values)
+
+ print("Updating PCT_CROP")
+ print("Original : ", f_2.PCT_CROP.values)
+ f_2.PCT_CROP.values = [[100.0]]
+ print("Updated : ", f_2.PCT_CROP.values)
+
+ print("Updating PCT_NAT_PFT")
+ print(f_2.PCT_NAT_PFT.values[0])
+ print(f_2.PCT_NAT_PFT[0].values)
+
+ return f_2
+
+
+def update_fields_with_neon(f_1, d_f, bin_index):
+ """
+ update fields with neon
+ """
+ f_2 = f_1
+ soil_levels = f_2["PCT_CLAY"].size
+ for soil_lev in range(soil_levels):
+ print("--------------------------")
+ print("soil_lev:", soil_lev)
+ print(d_f["clayTotal"][bin_index[soil_lev]])
+ f_2["PCT_CLAY"][soil_lev] = d_f["clayTotal"][bin_index[soil_lev]]
+ f_2["PCT_SAND"][soil_lev] = d_f["sandTotal"][bin_index[soil_lev]]
+
+ bulk_den = d_f["bulkDensExclCoarseFrag"][bin_index[soil_lev]]
+ carbon_tot = d_f["carbonTot"][bin_index[soil_lev]]
+ estimated_oc = d_f["estimatedOC"][bin_index[soil_lev]]
+
+ # -- estimated_oc in neon data is rounded to the nearest integer.
+ # -- Check to make sure the rounded oc is not higher than carbon_tot.
+ # -- Use carbon_tot if estimated_oc is bigger than carbon_tot.
+
+ estimated_oc = min(estimated_oc, carbon_tot)
+
+ layer_depth = (
+ d_f["biogeoBottomDepth"][bin_index[soil_lev]]
+ - d_f["biogeoTopDepth"][bin_index[soil_lev]]
+ )
+
+ # f_2["ORGANIC"][soil_lev] = estimated_oc * bulk_den / 0.58
+
+ # -- after adding caco3 by NEON:
+ # -- if caco3 exists:
+ # -- inorganic = caco3/100.0869*12.0107
+ # -- organic = carbon_tot - inorganic
+ # -- else:
+ # -- organic = estimated_oc * bulk_den /0.58
+
+ caco3 = d_f["caco3Conc"][bin_index[soil_lev]]
+ inorganic = caco3 / 100.0869 * 12.0107
+ print("inorganic:", inorganic)
+
+ if not np.isnan(inorganic):
+ actual_oc = carbon_tot - inorganic
+ else:
+ actual_oc = estimated_oc
+
+ f_2["ORGANIC"][soil_lev] = actual_oc * bulk_den / 0.58
+
+ print_soil_quality(
+ inorganic, bin_index, soil_lev, layer_depth, carbon_tot, estimated_oc, bulk_den, f_2
+ )
+ return f_2
+
+
def main():
"""modify_singlept_site_neon main function"""
args = get_parser().parse_args()
@@ -532,88 +658,10 @@ def main():
bins = d_f["biogeoTopDepth"] / 100
bin_index = np.digitize(soil_mid, bins) - 1
- """
- print ("================================")
- print (" Neon data soil structure: ")
- print ("================================")
-
- print ("------------","ground","------------")
- for i in range(len(obs_bot)):
- print ("layer",i)
- print ("-------------",
- "{0:.2f}".format(obs_bot[i]),
- "-------------")
-
- print ("================================")
- print ("Surface data soil structure: ")
- print ("================================")
-
- print ("------------","ground","------------")
- for b in range(len(bin_index)):
- print ("layer",b)
- print ("-------------",
- "{0:.2f}".format(soil_bot[b]),
- "-------------")
- """
+ print_neon_data_soil_structure(obs_bot, soil_bot, bin_index)
# -- update fields with neon
- f_2 = f_1
- soil_levels = f_2["PCT_CLAY"].size
- for soil_lev in range(soil_levels):
- print("--------------------------")
- print("soil_lev:", soil_lev)
- print(d_f["clayTotal"][bin_index[soil_lev]])
- f_2["PCT_CLAY"][soil_lev] = d_f["clayTotal"][bin_index[soil_lev]]
- f_2["PCT_SAND"][soil_lev] = d_f["sandTotal"][bin_index[soil_lev]]
-
- bulk_den = d_f["bulkDensExclCoarseFrag"][bin_index[soil_lev]]
- carbon_tot = d_f["carbonTot"][bin_index[soil_lev]]
- estimated_oc = d_f["estimatedOC"][bin_index[soil_lev]]
-
- # -- estimated_oc in neon data is rounded to the nearest integer.
- # -- Check to make sure the rounded oc is not higher than carbon_tot.
- # -- Use carbon_tot if estimated_oc is bigger than carbon_tot.
-
- estimated_oc = min(estimated_oc, carbon_tot)
-
- layer_depth = (
- d_f["biogeoBottomDepth"][bin_index[soil_lev]]
- - d_f["biogeoTopDepth"][bin_index[soil_lev]]
- )
-
- # f_2["ORGANIC"][soil_lev] = estimated_oc * bulk_den / 0.58
-
- # -- after adding caco3 by NEON:
- # -- if caco3 exists:
- # -- inorganic = caco3/100.0869*12.0107
- # -- organic = carbon_tot - inorganic
- # -- else:
- # -- organic = estimated_oc * bulk_den /0.58
-
- caco3 = d_f["caco3Conc"][bin_index[soil_lev]]
- inorganic = caco3 / 100.0869 * 12.0107
- print("inorganic:", inorganic)
-
- if not np.isnan(inorganic):
- actual_oc = carbon_tot - inorganic
- else:
- actual_oc = estimated_oc
-
- f_2["ORGANIC"][soil_lev] = actual_oc * bulk_den / 0.58
-
- print("~~~~~~~~~~~~~~~~~~~~~~~~")
- print("inorganic:")
- print("~~~~~~~~~~~~~~~~~~~~~~~~")
- print(inorganic)
- print("~~~~~~~~~~~~~~~~~~~~~~~~")
-
- print("bin_index : ", bin_index[soil_lev])
- print("layer_depth : ", layer_depth)
- print("carbon_tot : ", carbon_tot)
- print("estimated_oc : ", estimated_oc)
- print("bulk_den : ", bulk_den)
- print("organic :", f_2["ORGANIC"][soil_lev].values)
- print("--------------------------")
+ f_2 = update_fields_with_neon(f_1, d_f, bin_index)
# -- Interpolate missing values
method = "linear"
@@ -633,22 +681,8 @@ def main():
sort_print_soil_layers(obs_bot, soil_bot)
- # -- updates for ag sites : KONA and STER
- ag_sites = ["KONA", "STER"]
- if site_name in ag_sites:
- print("Updating PCT_NATVEG")
- print("Original : ", f_2.PCT_NATVEG.values)
- f_2.PCT_NATVEG.values = [[0.0]]
- print("Updated : ", f_2.PCT_NATVEG.values)
-
- print("Updating PCT_CROP")
- print("Original : ", f_2.PCT_CROP.values)
- f_2.PCT_CROP.values = [[100.0]]
- print("Updated : ", f_2.PCT_CROP.values)
-
- print("Updating PCT_NAT_PFT")
- print(f_2.PCT_NAT_PFT.values[0])
- print(f_2.PCT_NAT_PFT[0].values)
+ # -- updates for ag sites
+ update_agri_site_info(site_name, f_2)
out_dir = args.out_dir
diff --git a/python/ctsm/test/test_sys_regrid_ggcmi_shdates.py b/python/ctsm/test/test_sys_regrid_ggcmi_shdates.py
index 7521ef09a5..6c2e230481 100755
--- a/python/ctsm/test/test_sys_regrid_ggcmi_shdates.py
+++ b/python/ctsm/test/test_sys_regrid_ggcmi_shdates.py
@@ -5,7 +5,6 @@
"""
import os
-import re
import unittest
import tempfile
@@ -18,8 +17,7 @@
# -- add python/ctsm to path (needed if we want to run test stand-alone)
_CTSM_PYTHON = os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
sys.path.insert(1, _CTSM_PYTHON)
-
-
+# pylint: disable=wrong-import-position
from ctsm.path_utils import path_to_ctsm_root
from ctsm import unit_testing
from ctsm.crop_calendars.regrid_ggcmi_shdates import regrid_ggcmi_shdates
@@ -78,6 +76,9 @@ def tearDown(self):
shutil.rmtree(self._tempdir, ignore_errors=True)
def test_regrid_ggcmi_shdates(self):
+ """
+ Tests regrid_ggcmi_shdates
+ """
# Call script
sys.argv = self._function_call_list
diff --git a/python/ctsm/test/test_unit_modify_singlept_site_neon.py b/python/ctsm/test/test_unit_modify_singlept_site_neon.py
index ecd96357b3..3a9d7d424c 100755
--- a/python/ctsm/test/test_unit_modify_singlept_site_neon.py
+++ b/python/ctsm/test/test_unit_modify_singlept_site_neon.py
@@ -17,7 +17,7 @@
# -- add python/ctsm to path (needed if we want to run the test stand-alone)
_CTSM_PYTHON = os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
sys.path.insert(1, _CTSM_PYTHON)
-
+# pylint: disable=wrong-import-position
from ctsm.path_utils import path_to_ctsm_root
# pylint: disable=wrong-import-position
diff --git a/python/ctsm/test/test_unit_run_sys_tests.py b/python/ctsm/test/test_unit_run_sys_tests.py
index 65ec1df5a5..98a9d54674 100755
--- a/python/ctsm/test/test_unit_run_sys_tests.py
+++ b/python/ctsm/test/test_unit_run_sys_tests.py
@@ -271,7 +271,7 @@ def test_withDryRun_nothingDone(self):
def test_getTestmodList_suite(self):
"""Ensure that _get_testmod_list() works correctly with suite-style input"""
- input = [
+ testmod_list_input = [
"clm/default",
"clm/default",
"clm/crop",
@@ -283,12 +283,12 @@ def test_getTestmodList_suite(self):
"clm-crop",
"clm-cropMonthlyOutput",
]
- output = _get_testmod_list(input, unique=False)
+ output = _get_testmod_list(testmod_list_input, unique=False)
self.assertEqual(output, target)
def test_getTestmodList_suite_unique(self):
"""Ensure that _get_testmod_list() works correctly with unique=True"""
- input = [
+ testmod_list_input = [
"clm/default",
"clm/default",
"clm/crop",
@@ -300,24 +300,29 @@ def test_getTestmodList_suite_unique(self):
"clm-cropMonthlyOutput",
]
- output = _get_testmod_list(input, unique=True)
+ output = _get_testmod_list(testmod_list_input, unique=True)
self.assertEqual(output, target)
def test_getTestmodList_testname(self):
"""Ensure that _get_testmod_list() works correctly with full test name(s) specified"""
- input = [
+ testmod_list_input = [
"ERS_D_Ld15.f45_f45_mg37.I2000Clm50FatesRs.izumi_nag.clm-crop",
"ERS_D_Ld15.f45_f45_mg37.I2000Clm50FatesRs.izumi_nag.clm-default",
]
target = ["clm-crop", "clm-default"]
- output = _get_testmod_list(input)
+ output = _get_testmod_list(testmod_list_input)
self.assertEqual(output, target)
def test_getTestmodList_twomods(self):
- """Ensure that _get_testmod_list() works correctly with full test name(s) specified and two mods in one test"""
- input = ["ERS_D_Ld15.f45_f45_mg37.I2000Clm50FatesRs.izumi_nag.clm-default--clm-crop"]
+ """
+ Ensure that _get_testmod_list() works correctly with full test name(s) specified and two
+ mods in one test
+ """
+ testmod_list_input = [
+ "ERS_D_Ld15.f45_f45_mg37.I2000Clm50FatesRs.izumi_nag.clm-default--clm-crop"
+ ]
target = ["clm-default", "clm-crop"]
- output = _get_testmod_list(input)
+ output = _get_testmod_list(testmod_list_input)
self.assertEqual(output, target)
diff --git a/python/ctsm/test/test_unit_utils_import_coord.py b/python/ctsm/test/test_unit_utils_import_coord.py
index b7ec8f90ec..6e339a913f 100755
--- a/python/ctsm/test/test_unit_utils_import_coord.py
+++ b/python/ctsm/test/test_unit_utils_import_coord.py
@@ -16,7 +16,7 @@
# -- add python/ctsm to path (needed if we want to run test stand-alone)
_CTSM_PYTHON = os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
sys.path.insert(1, _CTSM_PYTHON)
-
+# pylint: disable=wrong-import-position
from ctsm import unit_testing
from ctsm.path_utils import path_to_ctsm_root
from ctsm.ctsm_pylib_dependent_utils import import_coord_1d, import_coord_2d
@@ -33,7 +33,9 @@
# Allow all the instance attributes that we need
# pylint: disable=too-many-instance-attributes
class TestUtilsImportCoord(unittest.TestCase):
- # Tests the importcoord* subroutines from utils.py
+ """
+ Tests the importcoord* subroutines from utils.py
+ """
def setUp(self):
"""Setup for trying out the methods"""
@@ -56,13 +58,19 @@ def tearDown(self):
shutil.rmtree(self._tempdir, ignore_errors=True)
def test_importcoord1d(self):
+ """
+ Tests importing a 1-d lat/lon variable
+ """
ds = xr.open_dataset(self._1d_lonlat_file)
- lat, Nlat = import_coord_1d(ds, "lat")
- np.testing.assert_equal(Nlat, 360)
+ lat, n_lat = import_coord_1d(ds, "lat")
+ np.testing.assert_equal(n_lat, 360)
np.testing.assert_array_equal(lat.values[:4], [89.75, 89.25, 88.75, 88.25])
np.testing.assert_array_equal(lat.values[-4:], [-88.25, -88.75, -89.25, -89.75])
def test_importcoord1d_attrs(self):
+ """
+ Tests attributes of an imported 1-d lat/lon variable
+ """
ds = xr.open_dataset(self._1d_lonlat_file)
lat, _ = import_coord_1d(ds, "lat")
# Unlike import_coord_2d, import_coord_1d doesn't rename the long name.
@@ -73,20 +81,29 @@ def test_importcoord1d_attrs(self):
self.assertDictEqual(lat.attrs, expected_attributes)
def test_importcoord1d_too_many_dims(self):
+ """
+ Tests that 1d-importing function errors when given a 2d variable to import
+ """
ds = xr.open_dataset(self._2d_lonlat_file)
- with self.assertRaisesRegex(
+ with self.assertRaises(
SystemExit,
- "Expected 1 dimension for LATIXY; found 2: \('lsmlat', 'lsmlon'\)",
+ msg="Expected 1 dimension for LATIXY; found 2: ('lsmlat', 'lsmlon')",
):
import_coord_1d(ds, "LATIXY")
def test_importcoord2d(self):
+ """
+ Tests importing a 2-d lat/lon variable
+ """
ds = xr.open_dataset(self._2d_lonlat_file)
lat, _ = import_coord_2d(ds, "lat", "LATIXY")
expected_values = np.array([-13.9, -11.7, -9.5, -7.3, -5.1]).astype(np.float32)
np.testing.assert_array_equal(lat.values, expected_values)
def test_importcoord2d_attrs(self):
+ """
+ Tests attributes of an imported 2-d lat/lon variable
+ """
ds = xr.open_dataset(self._2d_lonlat_file)
lat, _ = import_coord_2d(ds, "lat", "LATIXY")
expected_attributes = {
@@ -96,25 +113,34 @@ def test_importcoord2d_attrs(self):
self.assertDictEqual(lat.attrs, expected_attributes)
def test_importcoord2d_rename_dim(self):
+ """
+ Tests renaming of an imported 2-d lat/lon variable
+ """
ds = xr.open_dataset(self._2d_lonlat_file)
lat, _ = import_coord_2d(ds, "lat", "LATIXY")
self.assertTupleEqual(lat.dims, ("lat",))
def test_importcoord2d_no_dim_contains_coordName(self):
+ """
+ Tests that 2d-importing function errors when given a nonexistent dim name
+ """
ds = xr.open_dataset(self._2d_lonlat_file)
ds = ds.rename({"lsmlat": "abc"})
- with self.assertRaisesRegex(
+ with self.assertRaises(
SystemExit,
- "ERROR: Expected 1 dimension name containing lat; found 0: \[\]",
+ msg="ERROR: Expected 1 dimension name containing lat; found 0: []",
):
import_coord_2d(ds, "lat", "LATIXY")
def test_importcoord2d_1_dim_containing(self):
+ """
+ Tests that 2d-importing function errors when given an ambiguous dim name
+ """
ds = xr.open_dataset(self._2d_lonlat_file)
ds = ds.rename({"lsmlon": "lsmlat2"})
- with self.assertRaisesRegex(
+ with self.assertRaises(
SystemExit,
- "Expected 1 dimension name containing lat; found 2: \['lsmlat', 'lsmlat2'\]",
+ msg="Expected 1 dimension name containing lat; found 2: ['lsmlat', 'lsmlat2']",
):
import_coord_2d(ds, "lat", "LATIXY")
diff --git a/src/biogeochem/DryDepVelocity.F90 b/src/biogeochem/DryDepVelocity.F90
index f5968c9aa8..f6a3b857da 100644
--- a/src/biogeochem/DryDepVelocity.F90
+++ b/src/biogeochem/DryDepVelocity.F90
@@ -284,13 +284,13 @@ subroutine depvel_compute( bounds, &
if ( n_drydep == 0 ) return
- associate( &
- forc_solad => atm2lnd_inst%forc_solad_grc , & ! Input: [real(r8) (:,:) ] direct beam radiation (visible only)
+ associate( &
+ forc_solai => atm2lnd_inst%forc_solai_grc , & ! Input: [real(r8) (:,:) ] diffuse radiation (visible only)
+ forc_solad => atm2lnd_inst%forc_solad_downscaled_col, & ! Input: [real(r8) (:,:) ] direct beam radiation (visible only)
forc_t => atm2lnd_inst%forc_t_downscaled_col , & ! Input: [real(r8) (:) ] downscaled atmospheric temperature (Kelvin)
forc_q => wateratm2lndbulk_inst%forc_q_downscaled_col , & ! Input: [real(r8) (:) ] downscaled atmospheric specific humidity (kg/kg)
forc_pbot => atm2lnd_inst%forc_pbot_downscaled_col , & ! Input: [real(r8) (:) ] downscaled surface pressure (Pa)
forc_rain => wateratm2lndbulk_inst%forc_rain_downscaled_col , & ! Input: [real(r8) (:) ] downscaled rain rate [mm/s]
-
h2osoi_vol => waterstatebulk_inst%h2osoi_vol_col , & ! Input: [real(r8) (:,:) ] volumetric soil water (0<=h2osoi_vol<=watsat)
snow_depth => waterdiagnosticbulk_inst%snow_depth_col , & ! Input: [real(r8) (:) ] snow height (m)
@@ -324,7 +324,7 @@ subroutine depvel_compute( bounds, &
spec_hum = forc_q(c)
rain = forc_rain(c)
sfc_temp = forc_t(c)
- solar_flux = forc_solad(g,1)
+ solar_flux = forc_solad(c,1)
lat = grc%latdeg(g)
lon = grc%londeg(g)
clmveg = patch%itype(pi)
diff --git a/src/biogeochem/VOCEmissionMod.F90 b/src/biogeochem/VOCEmissionMod.F90
index f1865af3b7..a4bd9dc4d2 100644
--- a/src/biogeochem/VOCEmissionMod.F90
+++ b/src/biogeochem/VOCEmissionMod.F90
@@ -485,7 +485,7 @@ subroutine VOCEmission (bounds, num_soilp, filter_soilp, &
!h2osoi_vol => waterstate_inst%h2osoi_vol_col , & ! Input: [real(r8) (:,:) ] volumetric soil water (m3/m3)
!h2osoi_ice => waterstate_inst%h2osoi_ice_col , & ! Input: [real(r8) (:,:) ] ice soil content (kg/m3)
- forc_solad => atm2lnd_inst%forc_solad_grc , & ! Input: [real(r8) (:,:) ] direct beam radiation (visible only)
+ forc_solad => atm2lnd_inst%forc_solad_downscaled_col, & ! Input: [real(r8) (:,:) ] direct beam radiation (visible only)
forc_solai => atm2lnd_inst%forc_solai_grc , & ! Input: [real(r8) (:,:) ] diffuse radiation (visible only)
forc_pbot => atm2lnd_inst%forc_pbot_downscaled_col , & ! Input: [real(r8) (:) ] downscaled atmospheric pressure (Pa)
forc_pco2 => atm2lnd_inst%forc_pco2_grc , & ! Input: [real(r8) (:) ] partial pressure co2 (Pa)
@@ -557,7 +557,7 @@ subroutine VOCEmission (bounds, num_soilp, filter_soilp, &
! Calculate PAR: multiply w/m2 by 4.6 to get umol/m2/s for par (added 8/14/02)
!------------------------
! SUN:
- par_sun = (forc_solad(g,1) + fsun(p) * forc_solai(g,1)) * 4.6_r8
+ par_sun = (forc_solad(c,1) + fsun(p) * forc_solai(g,1)) * 4.6_r8
par24_sun = (forc_solad24(p) + fsun24(p) * forc_solai24(p)) * 4.6_r8
par240_sun = (forc_solad240(p) + fsun240(p) * forc_solai240(p)) * 4.6_r8
diff --git a/src/biogeophys/BalanceCheckMod.F90 b/src/biogeophys/BalanceCheckMod.F90
index ff72bcb307..b3efe6e525 100644
--- a/src/biogeophys/BalanceCheckMod.F90
+++ b/src/biogeophys/BalanceCheckMod.F90
@@ -35,6 +35,7 @@ module BalanceCheckMod
use landunit_varcon , only : istdlak, istsoil,istcrop,istwet,istice
use column_varcon , only : icol_roof, icol_sunwall, icol_shadewall
use column_varcon , only : icol_road_perv, icol_road_imperv
+ use clm_varctl , only : use_hillslope_routing
!
! !PUBLIC TYPES:
implicit none
@@ -215,6 +216,7 @@ subroutine WaterGridcellBalanceSingle(bounds, &
!
! !USES:
use subgridAveMod, only: c2g
+ use LandunitType , only : lun
!
! !ARGUMENTS:
type(bounds_type) , intent(in) :: bounds
@@ -231,8 +233,8 @@ subroutine WaterGridcellBalanceSingle(bounds, &
character(len=5) , intent(in) :: flag ! specifies begwb or endwb
!
! !LOCAL VARIABLES:
- integer :: g ! indices
- integer :: begc, endc, begg, endg ! bounds
+ integer :: g, l ! indices
+ integer :: begc, endc, begl, endl, begg, endg ! bounds
real(r8) :: wb_col(bounds%begc:bounds%endc) ! temporary column-level water mass
real(r8) :: wb_grc(bounds%begg:bounds%endg) ! temporary grid cell-level water mass
real(r8) :: qflx_liq_dynbal_left_to_dribble(bounds%begg:bounds%endg) ! grc liq dynamic land cover change conversion runoff flux
@@ -250,6 +252,8 @@ subroutine WaterGridcellBalanceSingle(bounds, &
begc = bounds%begc
endc = bounds%endc
+ begl = bounds%begl
+ endl = bounds%endl
begg = bounds%begg
endg = bounds%endg
@@ -266,6 +270,15 @@ subroutine WaterGridcellBalanceSingle(bounds, &
call c2g(bounds, wb_col(begc:endc), wb_grc(begg:endg), &
c2l_scale_type='urbanf', l2g_scale_type='unity')
+ ! add landunit level state variable, convert from (m3) to (kg m-2)
+ if (use_hillslope_routing) then
+ do l = begl, endl
+ g = lun%gridcell(l)
+ wb_grc(g) = wb_grc(g) + waterstate_inst%stream_water_volume_lun(l) &
+ *1e3_r8/(grc%area(g)*1.e6_r8)
+ enddo
+ endif
+
! Call the beginning or ending version of the subroutine according
! to flag value
if (flag == 'begwb') then
@@ -500,8 +513,9 @@ subroutine BalanceCheck( bounds, &
!-----------------------------------------------------------------------
associate( &
- forc_solad => atm2lnd_inst%forc_solad_grc , & ! Input: [real(r8) (:,:) ] direct beam radiation (vis=forc_sols , nir=forc_soll )
- forc_solai => atm2lnd_inst%forc_solai_grc , & ! Input: [real(r8) (:,:) ] diffuse radiation (vis=forc_solsd, nir=forc_solld)
+ forc_solad_col => atm2lnd_inst%forc_solad_downscaled_col , & ! Input: [real(r8) (:,:) ] direct beam radiation (vis=forc_sols , nir=forc_soll )
+ forc_solad => atm2lnd_inst%forc_solad_not_downscaled_grc , & ! Input: [real(r8) (:,:) ] direct beam radiation (vis=forc_sols , nir=forc_soll )
+ forc_solai => atm2lnd_inst%forc_solai_grc , & ! Input: [real(r8) (:,:) ] diffuse radiation (vis=forc_solsd, nir=forc_solld)
forc_rain => wateratm2lnd_inst%forc_rain_downscaled_col , & ! Input: [real(r8) (:) ] column level rain rate [mm/s]
forc_rain_grc => wateratm2lnd_inst%forc_rain_not_downscaled_grc, & ! Input: [real(r8) (:) ] grid cell-level rain rate [mm/s]
forc_snow => wateratm2lnd_inst%forc_snow_downscaled_col , & ! Input: [real(r8) (:) ] column level snow rate [mm/s]
@@ -546,6 +560,7 @@ subroutine BalanceCheck( bounds, &
qflx_qrgwl_grc => waterlnd2atm_inst%qflx_rofliq_qgwl_grc , & ! Input: [real(r8) (:) ] grid cell-level qflx_surf at glaciers, wetlands, lakes
qflx_drain_col => waterflux_inst%qflx_drain_col , & ! Input: [real(r8) (:) ] column level sub-surface runoff (mm H2O /s)
qflx_drain_grc => waterlnd2atm_inst%qflx_rofliq_qsub_grc , & ! Input: [real(r8) (:) ] grid cell-level drainage (mm H20 /s)
+ qflx_streamflow_grc => waterlnd2atm_inst%qflx_rofliq_stream_grc, & ! Input: [real(r8) (:) ] streamflow [mm H2O/s]
qflx_ice_runoff_col => waterlnd2atm_inst%qflx_ice_runoff_col , & ! Input: [real(r8) (:) ] column level solid runoff from snow capping and from excess ice in soil (mm H2O /s)
qflx_ice_runoff_grc => waterlnd2atm_inst%qflx_rofice_grc , & ! Input: [real(r8) (:) ] grid cell-level solid runoff from snow capping and from excess ice in soil (mm H2O /s)
qflx_sl_top_soil => waterflux_inst%qflx_sl_top_soil_col , & ! Input: [real(r8) (:) ] liquid water + ice from layer above soil to top soil layer or sent to qflx_qrgwl (mm H2O/s)
@@ -725,6 +740,15 @@ subroutine BalanceCheck( bounds, &
- qflx_snwcp_discarded_ice_grc(g)) * dtime
end do
+ ! add landunit level flux variable, convert from (m3/s) to (kg m-2 s-1)
+ if (use_hillslope_routing) then
+ ! output water flux from streamflow (+)
+ do g = bounds%begg, bounds%endg
+ errh2o_grc(g) = errh2o_grc(g) &
+ + qflx_streamflow_grc(g) * dtime
+ enddo
+ endif
+
errh2o_max_val = maxval(abs(errh2o_grc(bounds%begg:bounds%endg)))
! BUG(rgk, 2021-04-13, ESCOMP/CTSM#1314) Temporarily bypassing gridcell-level check with use_fates_planthydro until issue 1314 is resolved
@@ -883,8 +907,8 @@ subroutine BalanceCheck( bounds, &
! level because of interactions between columns and since a separate check is done
! in the urban radiation module
if (.not. lun%urbpoi(l)) then
- errsol(p) = fsa(p) + fsr(p) &
- - (forc_solad(g,1) + forc_solad(g,2) + forc_solai(g,1) + forc_solai(g,2))
+ errsol(p) = fsa(p) + fsr(p) &
+ - (forc_solad_col(c,1) + forc_solad_col(c,2) + forc_solai(g,1) + forc_solai(g,2))
else
errsol(p) = spval
end if
diff --git a/src/biogeophys/CMakeLists.txt b/src/biogeophys/CMakeLists.txt
index 3cf5e0eaf0..2ffc346670 100644
--- a/src/biogeophys/CMakeLists.txt
+++ b/src/biogeophys/CMakeLists.txt
@@ -8,6 +8,7 @@ list(APPEND clm_sources
CanopyStateType.F90
EnergyFluxType.F90
GlacierSurfaceMassBalanceMod.F90
+ HillslopeHydrologyUtilsMod.F90
HumanIndexMod.F90
InfiltrationExcessRunoffMod.F90
IrrigationMod.F90
diff --git a/src/biogeophys/CanopyFluxesMod.F90 b/src/biogeophys/CanopyFluxesMod.F90
index f152e761eb..58334a70c0 100644
--- a/src/biogeophys/CanopyFluxesMod.F90
+++ b/src/biogeophys/CanopyFluxesMod.F90
@@ -1605,7 +1605,8 @@ subroutine CanopyFluxes(bounds, num_exposedvegp, filter_exposedvegp,
if (t_veg(p) > tfrz ) then ! above freezing, update accumulation in liqcan
if ((qflx_evap_veg(p)-qflx_tran_veg(p))*dtime > liqcan(p)) then ! all liq evap
! In this case, all liqcan will evap. Take remainder from snocan
- snocan(p)=snocan(p)+liqcan(p)+(qflx_tran_veg(p)-qflx_evap_veg(p))*dtime
+ snocan(p) = max(0._r8, &
+ snocan(p) + liqcan(p) + (qflx_tran_veg(p) - qflx_evap_veg(p)) * dtime)
end if
liqcan(p) = max(0._r8,liqcan(p)+(qflx_tran_veg(p)-qflx_evap_veg(p))*dtime)
diff --git a/src/biogeophys/HillslopeHydrologyMod.F90 b/src/biogeophys/HillslopeHydrologyMod.F90
new file mode 100644
index 0000000000..b2866df679
--- /dev/null
+++ b/src/biogeophys/HillslopeHydrologyMod.F90
@@ -0,0 +1,1148 @@
+module HillslopeHydrologyMod
+
+ !-----------------------------------------------------------------------
+ ! !DESCRIPTION:
+ ! Read geomorphological parameters for hillslope columns
+ !
+ ! !USES:
+#include "shr_assert.h"
+ use shr_kind_mod , only : r8 => shr_kind_r8
+ use shr_log_mod , only : errMsg => shr_log_errMsg
+ use spmdMod , only : masterproc, iam
+ use abortutils , only : endrun
+ use clm_varctl , only : iulog
+ use clm_varctl , only : use_hillslope_routing
+ use decompMod , only : bounds_type
+ use clm_varcon , only : rpi
+ use HillslopeHydrologyUtilsMod, only : HillslopeSoilThicknessProfile_linear
+
+ ! !PUBLIC TYPES:
+ implicit none
+
+ private
+ save
+
+ ! !PUBLIC MEMBER FUNCTIONS:
+ public hillslope_properties_init
+ public InitHillslope
+ public SetHillslopeSoilThickness
+ public HillslopeSoilThicknessProfile
+ public HillslopeSetLowlandUplandPfts
+ public HillslopeDominantLowlandPft
+ public HillslopePftFromFile
+ public HillslopeStreamOutflow
+ public HillslopeUpdateStreamWater
+
+ integer, public :: pft_distribution_method ! Method for distributing pfts across hillslope columns
+ integer, public :: soil_profile_method ! Method for varying soil thickness across hillslope columns
+
+ ! Streamflow methods
+ integer, public, parameter :: streamflow_manning = 0
+ ! Pft distribution methods
+ integer, public, parameter :: pft_standard = 0
+ integer, public, parameter :: pft_from_file = 1
+ integer, public, parameter :: pft_uniform_dominant_pft = 2
+ integer, public, parameter :: pft_lowland_dominant_pft = 3
+ integer, public, parameter :: pft_lowland_upland = 4
+
+ ! PRIVATE
+ character(len=*), parameter, private :: sourcefile = &
+ __FILE__
+ integer, private, parameter :: soil_profile_uniform = 0
+ integer, private, parameter :: soil_profile_from_file = 1
+ integer, private, parameter :: soil_profile_set_lowland_upland = 2
+ integer, private, parameter :: soil_profile_linear = 3
+
+ !-----------------------------------------------------------------------
+
+contains
+
+ !-----------------------------------------------------------------------
+ subroutine hillslope_properties_init(NLFilename)
+ !
+ ! DESCRIPTION
+ ! read in hillslope hydrology veg/soil properties namelist variables
+ !
+ ! !USES:
+ use abortutils , only : endrun
+ use fileutils , only : getavu, relavu
+ use spmdMod , only : mpicom, masterproc
+ use shr_mpi_mod , only : shr_mpi_bcast
+ use clm_varctl , only : iulog
+ use clm_nlUtilsMod , only : find_nlgroup_name
+
+ ! !ARGUMENTS:
+ implicit none
+ character(len=*), intent(in) :: NLFilename ! Namelist filename
+ !----------------------------------------------------------------------
+ integer :: nu_nml ! unit for namelist file
+ integer :: nml_error ! namelist i/o error flag
+ character(len=*), parameter :: nmlname = 'hillslope_properties_inparm'
+ character(*), parameter :: subName = "('read_hillslope_properties_namelist')"
+ ! Default values for namelist
+ character(len=50) :: hillslope_pft_distribution_method = 'Standard' ! pft distribution method string
+ character(len=50) :: hillslope_soil_profile_method = 'Uniform' ! soil thickness distribution method string
+ !-----------------------------------------------------------------------
+
+! MUST agree with name in namelist and read statement
+ namelist /hillslope_properties_inparm/ &
+ hillslope_pft_distribution_method, &
+ hillslope_soil_profile_method
+
+ ! Read hillslope hydrology namelist
+ if (masterproc) then
+ nu_nml = getavu()
+ open( nu_nml, file=trim(NLFilename), status='old', iostat=nml_error )
+ call find_nlgroup_name(nu_nml, 'hillslope_properties_inparm', status=nml_error)
+ if (nml_error == 0) then
+ read(nu_nml, nml=hillslope_properties_inparm,iostat=nml_error)
+ if (nml_error /= 0) then
+ call endrun(subname // ':: ERROR reading hillslope properties namelist')
+ end if
+ else
+ call endrun(subname // ':: ERROR reading hillslope properties namelist')
+ end if
+ close(nu_nml)
+ call relavu( nu_nml )
+
+ if ( trim(hillslope_pft_distribution_method) == 'Standard' ) then
+ pft_distribution_method = pft_standard
+ else if ( trim(hillslope_pft_distribution_method) == 'FromFile' ) then
+ pft_distribution_method = pft_from_file
+ else if ( trim(hillslope_pft_distribution_method) == 'DominantPftUniform') then
+ pft_distribution_method = pft_uniform_dominant_pft
+ else if ( trim(hillslope_pft_distribution_method) == 'DominantPftLowland') then
+ pft_distribution_method = pft_lowland_dominant_pft
+ else if ( trim(hillslope_pft_distribution_method) == 'PftLowlandUpland') then
+ pft_distribution_method = pft_lowland_upland
+ else
+ call endrun(msg="ERROR bad value for hillslope_pft_distribution_method in "//nmlname//" namelist"//errmsg(sourcefile, __LINE__))
+ end if
+
+ if ( trim(hillslope_soil_profile_method) == 'Uniform' ) then
+ soil_profile_method = soil_profile_uniform
+ else if ( trim(hillslope_soil_profile_method) == 'FromFile' ) then
+ soil_profile_method = soil_profile_from_file
+ else if ( trim(hillslope_soil_profile_method) == 'SetLowlandUpland' ) then
+ soil_profile_method = soil_profile_set_lowland_upland
+ else if ( trim(hillslope_soil_profile_method) == 'Linear') then
+ soil_profile_method = soil_profile_linear
+ else
+ call endrun(msg="ERROR bad value for hillslope_soil_profile_method in "//nmlname//" namelist"//errmsg(sourcefile, __LINE__))
+ end if
+
+ end if
+
+ call shr_mpi_bcast(pft_distribution_method, mpicom)
+ call shr_mpi_bcast(soil_profile_method, mpicom)
+
+ if (masterproc) then
+
+ write(iulog,*) ' '
+ write(iulog,*) 'hillslope_properties settings:'
+ write(iulog,*) ' hillslope_pft_distribution_method = ',hillslope_pft_distribution_method
+ write(iulog,*) ' hillslope_soil_profile_method = ',hillslope_soil_profile_method
+
+ end if
+
+ end subroutine hillslope_properties_init
+
+ !-----------------------------------------------------------------------
+ subroutine check_aquifer_layer()
+ !
+ ! !DESCRIPTION:
+ ! Check whether use_hillslope and use_aquifer_layer are both set
+ ! The use of use_hillslope is implied by the call to this function
+ ! in InitHillslope, but explicitly compare here for clarity.
+ !
+ ! !USES:
+ use clm_varctl , only : use_hillslope
+ use SoilWaterMovementMod , only : use_aquifer_layer
+ if (use_hillslope .and. use_aquifer_layer()) then
+ write(iulog,*) ' ERROR: use_hillslope and use_aquifer_layer may not be used simultaneously'
+ call endrun(msg=' ERROR: use_hillslope and use_aquifer_layer cannot both be set to true' // &
+ errMsg(sourcefile, __LINE__))
+ end if
+
+ end subroutine check_aquifer_layer
+
+ !-----------------------------------------------------------------------
+
+ subroutine InitHillslope(bounds,fsurdat)
+ !
+ ! !DESCRIPTION:
+ ! Initialize hillslope geomorphology from input dataset
+ !
+ ! !USES:
+ use LandunitType , only : lun
+ use GridcellType , only : grc
+ use ColumnType , only : col
+ use clm_varctl , only : nhillslope, max_columns_hillslope
+ use spmdMod , only : masterproc
+ use fileutils , only : getfil
+ use clm_varcon , only : spval, ispval, grlnd
+ use landunit_varcon , only : istsoil
+ use subgridWeightsMod , only : compute_higher_order_weights
+ use ncdio_pio
+ !
+ ! !ARGUMENTS:
+ type(bounds_type), intent(in) :: bounds
+ character(len=*) , intent(in) :: fsurdat ! surface data file name
+ integer, pointer :: ihillslope_in(:,:) ! read in - integer
+ integer, pointer :: ncolumns_hillslope_in(:) ! read in number of columns
+ integer, allocatable :: ncolumns_hillslope(:) ! number of hillslope columns
+ integer, allocatable :: hill_ndx(:,:) ! hillslope index
+ integer, allocatable :: col_ndx(:,:) ! column index
+ integer, allocatable :: col_dndx(:,:) ! downhill column index
+ integer, allocatable :: hill_pftndx(:,:) ! hillslope pft index []
+ integer, allocatable :: col_pftndx(:) ! hillslope column pft index []
+ real(r8), pointer :: fhillslope_in(:,:) ! read in - float
+ real(r8), allocatable :: pct_hillslope(:,:) ! percent of landunit occupied by hillslope
+ real(r8), allocatable :: hill_slope(:,:) ! hillslope slope [m/m]
+ real(r8), allocatable :: hill_aspect(:,:) ! hillslope azimuth [radians]
+ real(r8), allocatable :: hill_area(:,:) ! hillslope area [m2]
+ real(r8), allocatable :: hill_dist(:,:) ! hillslope length [m]
+ real(r8), allocatable :: hill_width(:,:) ! hillslope width [m]
+ real(r8), allocatable :: hill_elev(:,:) ! hillslope height [m]
+ real(r8), allocatable :: hill_bedrock(:,:) ! hillslope bedrock depth [m]
+ real(r8), pointer :: fstream_in(:) ! read in - 1D - float
+
+ type(file_desc_t) :: ncid ! netcdf id
+ logical :: readvar ! check whether variable on file
+ character(len=256) :: locfn ! local filename
+ integer :: ierr ! error code
+ integer :: c, l, g, i, j, ci, nh ! indices
+
+ real(r8) :: ncol_per_hillslope(nhillslope) ! number of columns per hillslope
+ real(r8) :: hillslope_area(nhillslope) ! area of hillslope
+ real(r8) :: nhill_per_landunit(nhillslope) ! total number of each representative hillslope per landunit
+
+ character(len=*), parameter :: subname = 'InitHillslope'
+
+ !-----------------------------------------------------------------------
+
+ ! consistency check
+ call check_aquifer_layer()
+
+ ! Open surface dataset to read in data below
+
+ call getfil (fsurdat, locfn, 0)
+ call ncd_pio_openfile (ncid, locfn, 0)
+
+ allocate( &
+ ncolumns_hillslope(bounds%begl:bounds%endl), &
+ pct_hillslope(bounds%begl:bounds%endl,nhillslope), &
+ hill_ndx (bounds%begl:bounds%endl,max_columns_hillslope), &
+ col_ndx (bounds%begl:bounds%endl,max_columns_hillslope), &
+ col_dndx (bounds%begl:bounds%endl,max_columns_hillslope), &
+ hill_slope (bounds%begl:bounds%endl,max_columns_hillslope), &
+ hill_aspect (bounds%begl:bounds%endl,max_columns_hillslope), &
+ hill_area (bounds%begl:bounds%endl,max_columns_hillslope), &
+ hill_dist (bounds%begl:bounds%endl,max_columns_hillslope), &
+ hill_width (bounds%begl:bounds%endl,max_columns_hillslope), &
+ hill_elev (bounds%begl:bounds%endl,max_columns_hillslope), &
+ col_pftndx (bounds%begc:bounds%endc), &
+ stat=ierr)
+
+ allocate(ncolumns_hillslope_in(bounds%begg:bounds%endg))
+
+ call ncd_io(ncid=ncid, varname='nhillcolumns', flag='read', data=ncolumns_hillslope_in, dim1name=grlnd, readvar=readvar)
+ if (masterproc .and. .not. readvar) then
+ call endrun( 'ERROR:: nhillcolumns not found on surface data set.'//errmsg(sourcefile, __LINE__) )
+ end if
+ do l = bounds%begl,bounds%endl
+ g = lun%gridcell(l)
+ ncolumns_hillslope(l) = ncolumns_hillslope_in(g)
+ ! vegetated landunits having nonzero hillslope columns and nonzero weight
+ if (lun%wtgcell(l) > 0._r8 .and. lun%itype(l) == istsoil .and. ncolumns_hillslope_in(g) > 0) then
+ do c = lun%coli(l), lun%colf(l)
+ col%is_hillslope_column(c) = .true.
+ enddo
+ end if
+ enddo
+ deallocate(ncolumns_hillslope_in)
+
+ allocate(fhillslope_in(bounds%begg:bounds%endg,nhillslope))
+
+ call ncd_io(ncid=ncid, varname='pct_hillslope', flag='read', data=fhillslope_in, dim1name=grlnd, readvar=readvar)
+ if (masterproc .and. .not. readvar) then
+ call endrun( 'ERROR:: pct_hillslope not found on surface data set.'//errmsg(sourcefile, __LINE__) )
+ end if
+ do l = bounds%begl,bounds%endl
+ g = lun%gridcell(l)
+ pct_hillslope(l,:) = fhillslope_in(g,:)
+ enddo
+ deallocate(fhillslope_in)
+
+ allocate(ihillslope_in(bounds%begg:bounds%endg,max_columns_hillslope))
+
+ call ncd_io(ncid=ncid, varname='hillslope_index', flag='read', data=ihillslope_in, dim1name=grlnd, readvar=readvar)
+ if (masterproc .and. .not. readvar) then
+ call endrun( 'ERROR:: hillslope_index not found on surface data set.'//errmsg(sourcefile, __LINE__) )
+ end if
+ do l = bounds%begl,bounds%endl
+ g = lun%gridcell(l)
+ hill_ndx(l,:) = ihillslope_in(g,:)
+ enddo
+
+ call ncd_io(ncid=ncid, varname='column_index', flag='read', data=ihillslope_in, dim1name=grlnd, readvar=readvar)
+ if (masterproc .and. .not. readvar) then
+ call endrun( 'ERROR:: column_index not found on surface data set.'//errmsg(sourcefile, __LINE__) )
+ end if
+ do l = bounds%begl,bounds%endl
+ g = lun%gridcell(l)
+ col_ndx(l,:) = ihillslope_in(g,:)
+ enddo
+
+ call ncd_io(ncid=ncid, varname='downhill_column_index', flag='read', data=ihillslope_in, dim1name=grlnd, readvar=readvar)
+ if (masterproc .and. .not. readvar) then
+ call endrun( 'ERROR:: downhill_column_index not found on surface data set.'//errmsg(sourcefile, __LINE__) )
+ end if
+ do l = bounds%begl,bounds%endl
+ g = lun%gridcell(l)
+ col_dndx(l,:) = ihillslope_in(g,:)
+ enddo
+ deallocate(ihillslope_in)
+
+ allocate(fhillslope_in(bounds%begg:bounds%endg,max_columns_hillslope))
+ call ncd_io(ncid=ncid, varname='hillslope_slope', flag='read', data=fhillslope_in, dim1name=grlnd, readvar=readvar)
+ if (masterproc .and. .not. readvar) then
+ call endrun( 'ERROR:: hillslope_slope not found on surface data set.'//errmsg(sourcefile, __LINE__) )
+ end if
+
+ do l = bounds%begl,bounds%endl
+ g = lun%gridcell(l)
+ hill_slope(l,:) = fhillslope_in(g,:)
+ enddo
+
+ call ncd_io(ncid=ncid, varname='hillslope_aspect', flag='read', data=fhillslope_in, dim1name=grlnd, readvar=readvar)
+ if (masterproc .and. .not. readvar) then
+ call endrun( 'ERROR:: hillslope_aspect not found on surface data set.'//errmsg(sourcefile, __LINE__) )
+ end if
+
+ do l = bounds%begl,bounds%endl
+ g = lun%gridcell(l)
+ hill_aspect(l,:) = fhillslope_in(g,:)
+ enddo
+
+ call ncd_io(ncid=ncid, varname='hillslope_area', flag='read', data=fhillslope_in, dim1name=grlnd, readvar=readvar)
+ if (masterproc .and. .not. readvar) then
+ call endrun( 'ERROR:: hillslope_area not found on surface data set.'//errmsg(sourcefile, __LINE__) )
+ end if
+ do l = bounds%begl,bounds%endl
+ g = lun%gridcell(l)
+ hill_area(l,:) = fhillslope_in(g,:)
+ enddo
+ call ncd_io(ncid=ncid, varname='hillslope_distance', flag='read', data=fhillslope_in, dim1name=grlnd, readvar=readvar)
+ if (masterproc .and. .not. readvar) then
+ call endrun( 'ERROR:: hillslope_distance not found on surface data set.'//errmsg(sourcefile, __LINE__) )
+ end if
+
+ do l = bounds%begl,bounds%endl
+ g = lun%gridcell(l)
+ hill_dist(l,:) = fhillslope_in(g,:)
+ enddo
+
+ call ncd_io(ncid=ncid, varname='hillslope_width', flag='read', data=fhillslope_in, dim1name=grlnd, readvar=readvar)
+ if (masterproc .and. .not. readvar) then
+ call endrun( 'ERROR:: hillslope_width not found on surface data set.'//errmsg(sourcefile, __LINE__) )
+ end if
+ do l = bounds%begl,bounds%endl
+ g = lun%gridcell(l)
+ hill_width(l,:) = fhillslope_in(g,:)
+ enddo
+
+ call ncd_io(ncid=ncid, varname='hillslope_elevation', flag='read', data=fhillslope_in, dim1name=grlnd, readvar=readvar)
+ if (masterproc .and. .not. readvar) then
+ call endrun( 'ERROR:: hillslope_elevation not found on surface data set.'//errmsg(sourcefile, __LINE__) )
+ end if
+ do l = bounds%begl,bounds%endl
+ g = lun%gridcell(l)
+ hill_elev(l,:) = fhillslope_in(g,:)
+ enddo
+
+ deallocate(fhillslope_in)
+
+ allocate(ihillslope_in(bounds%begg:bounds%endg,max_columns_hillslope))
+ call ncd_io(ncid=ncid, varname='hillslope_pftndx', flag='read', data=ihillslope_in, dim1name=grlnd, readvar=readvar)
+ if (readvar) then
+ allocate(hill_pftndx (bounds%begl:bounds%endl,max_columns_hillslope), stat=ierr)
+ do l = bounds%begl,bounds%endl
+ g = lun%gridcell(l)
+ hill_pftndx(l,:) = ihillslope_in(g,:)
+ enddo
+ end if
+
+ deallocate(ihillslope_in)
+
+ if (use_hillslope_routing) then
+ allocate(fstream_in(bounds%begg:bounds%endg))
+
+ call ncd_io(ncid=ncid, varname='hillslope_stream_depth', flag='read', data=fstream_in, dim1name=grlnd, readvar=readvar)
+ if (masterproc .and. .not. readvar) then
+ call endrun( 'ERROR:: hillslope_stream_depth not found on surface data set.'//errmsg(sourcefile, __LINE__) )
+ end if
+ do l = bounds%begl,bounds%endl
+ g = lun%gridcell(l)
+ lun%stream_channel_depth(l) = fstream_in(g)
+ enddo
+
+ call ncd_io(ncid=ncid, varname='hillslope_stream_width', flag='read', data=fstream_in, dim1name=grlnd, readvar=readvar)
+ if (masterproc .and. .not. readvar) then
+ call endrun( 'ERROR:: hillslope_stream_width not found on surface data set.'//errmsg(sourcefile, __LINE__) )
+ end if
+ do l = bounds%begl,bounds%endl
+ g = lun%gridcell(l)
+ lun%stream_channel_width(l) = fstream_in(g)
+ enddo
+
+ call ncd_io(ncid=ncid, varname='hillslope_stream_slope', flag='read', data=fstream_in, dim1name=grlnd, readvar=readvar)
+ if (masterproc .and. .not. readvar) then
+ call endrun( 'ERROR:: hillslope_stream_slope not found on surface data set.'//errmsg(sourcefile, __LINE__) )
+ end if
+ do l = bounds%begl,bounds%endl
+ g = lun%gridcell(l)
+ lun%stream_channel_slope(l) = fstream_in(g)
+ enddo
+
+ deallocate(fstream_in)
+ end if
+
+ ! Set hillslope hydrology column level variables
+ ! This needs to match how columns set up in subgridMod
+ do l = bounds%begl,bounds%endl
+ g = lun%gridcell(l)
+ if (lun%itype(l) == istsoil) then
+
+ ! map external column index to internal column index
+ do c = lun%coli(l), lun%colf(l)
+ ! ci should span [1:nhillcolumns(l)]
+ ci = c-lun%coli(l)+1
+
+ if (col_dndx(l,ci) <= -999) then
+ ! lowermost column of hillslope has no downstream neighbor
+ col%cold(c) = ispval
+ else
+ ! relative separation should be the same
+ col%cold(c) = c + (col_dndx(l,ci) - col_ndx(l,ci))
+ end if
+ enddo
+
+ do c = lun%coli(l), lun%colf(l)
+ ci = c-lun%coli(l)+1
+
+ col%hillslope_ndx(c) = hill_ndx(l,ci)
+
+ ! Find uphill neighbors (this may not actually be useful...)
+ col%colu(c) = ispval
+ do i = lun%coli(l), lun%colf(l)
+ if (c == col%cold(i)) then
+ col%colu(c) = i
+ end if
+ enddo
+
+ ! distance of lower edge of column from hillslope bottom
+ col%hill_distance(c) = hill_dist(l,ci)
+ ! width of lower edge of column
+ col%hill_width(c) = hill_width(l,ci)
+ ! mean elevation of column relative to gridcell mean elevation
+ col%hill_elev(c) = hill_elev(l,ci)
+ ! mean along-hill slope of column
+ col%hill_slope(c) = hill_slope(l,ci)
+ ! area of column
+ col%hill_area(c) = hill_area(l,ci)
+ ! azimuth of column
+ col%hill_aspect(c) = hill_aspect(l,ci)
+ ! pft index of column
+ if ( allocated(hill_pftndx) ) then
+ col_pftndx(c) = hill_pftndx(l,ci)
+ end if
+
+ enddo
+
+ ! Calculate total hillslope area on landunit and
+ ! number of columns in each hillslope
+ ncol_per_hillslope(:)= 0._r8
+ hillslope_area(:) = 0._r8
+ do c = lun%coli(l), lun%colf(l)
+ nh = col%hillslope_ndx(c)
+ if (nh > 0) then
+ ncol_per_hillslope(nh) = ncol_per_hillslope(nh) + 1
+ hillslope_area(nh) = hillslope_area(nh) + col%hill_area(c)
+ end if
+ enddo
+
+ if (use_hillslope_routing) then
+
+ ! Total area occupied by each hillslope (m2) is
+ ! grc%area(g)*1.e6*lun%wtgcell(l)*pct_hillslope(l,nh)*0.01
+ ! Number of representative hillslopes per landunit
+ ! is the total area divided by individual area
+ ! include factor of 0.5 because a channel is shared by ~2 hillslopes
+
+ lun%stream_channel_number(l) = 0._r8
+ do nh = 1, nhillslope
+ if (hillslope_area(nh) > 0._r8) then
+ nhill_per_landunit(nh) = grc%area(g)*1.e6_r8*lun%wtgcell(l) &
+ *pct_hillslope(l,nh)*0.01/hillslope_area(nh)
+
+ lun%stream_channel_number(l) = lun%stream_channel_number(l) &
+ + 0.5_r8 * nhill_per_landunit(nh)
+ end if
+ enddo
+
+ ! Calculate stream channel length
+ ! Total length of stream banks is individual widths
+ ! times number of hillslopes per landunit
+ ! include factor of 0.5 because a channel is shared by ~2 hillslopes
+ lun%stream_channel_length(l) = 0._r8
+ do c = lun%coli(l), lun%colf(l)
+ if (col%cold(c) == ispval) then
+ lun%stream_channel_length(l) = lun%stream_channel_length(l) &
+ + col%hill_width(c) * 0.5_r8 * nhill_per_landunit(col%hillslope_ndx(c))
+ end if
+ enddo
+ end if
+
+ ! if missing hillslope information on surface dataset,
+ ! call endrun
+ if (ncolumns_hillslope(l) > 0 .and. sum(hillslope_area) == 0._r8 .and. masterproc) then
+ write(iulog,*) 'Problem with input data: nhillcolumns is non-zero, but hillslope area is zero'
+ write(iulog,*) 'Check surface data for gridcell at (lon/lat): ', grc%londeg(g),grc%latdeg(g)
+ call endrun( 'ERROR:: sum of hillslope areas is zero.'//errmsg(sourcefile, __LINE__) )
+ end if
+
+ ! Recalculate column weights using input areas
+ ! The higher order weights will be updated in a subsequent reweight_wrapup call
+ do c = lun%coli(l), lun%colf(l)
+ nh = col%hillslope_ndx(c)
+ if (col%is_hillslope_column(c)) then
+ col%wtlunit(c) = (col%hill_area(c)/hillslope_area(nh)) &
+ * (pct_hillslope(l,nh)*0.01_r8)
+ end if
+ enddo
+ end if
+ enddo ! end of landunit loop
+
+ deallocate(ncolumns_hillslope,pct_hillslope,hill_ndx,col_ndx,col_dndx, &
+ hill_slope,hill_area,hill_dist, &
+ hill_width,hill_elev,hill_aspect)
+
+ ! Modify pft distributions
+ ! this may require modifying subgridMod/natveg_patch_exists
+ ! to ensure patch exists in every gridcell
+ if (pft_distribution_method == pft_from_file) then
+ call HillslopePftFromFile(bounds,col_pftndx)
+ else if (pft_distribution_method == pft_lowland_dominant_pft) then
+ ! Specify different pfts for uplands / lowlands
+ call HillslopeDominantLowlandPft(bounds)
+ else if (pft_distribution_method == pft_lowland_upland) then
+ ! example usage:
+ ! upland_ivt = 13 ! c3 non-arctic grass
+ ! lowland_ivt = 7 ! broadleaf deciduous tree
+ call HillslopeSetLowlandUplandPfts(bounds,lowland_ivt=7,upland_ivt=13)
+ else if (masterproc .and. .not. (pft_distribution_method == pft_standard .or. pft_distribution_method ==pft_uniform_dominant_pft)) then
+ call endrun( 'ERROR:: unrecognized hillslope_pft_distribution_method'//errmsg(sourcefile, __LINE__) )
+ end if
+
+ if ( allocated(hill_pftndx) ) then
+ deallocate(hill_pftndx)
+ deallocate(col_pftndx)
+ end if
+
+ ! Update higher order weights and check that weights sum to 1
+ call compute_higher_order_weights(bounds)
+
+ call ncd_pio_closefile(ncid)
+
+ end subroutine InitHillslope
+
+ !-----------------------------------------------------------------------
+
+ subroutine SetHillslopeSoilThickness(bounds,fsurdat,soil_depth_lowland_in,soil_depth_upland_in)
+ !
+ ! !DESCRIPTION:
+ ! Set hillslope column nbedrock values.
+ ! For soil_profile_from_file, per-column bedrock depths are read from
+ ! the surface dataset and converted to a soil layer index; for the
+ ! lowland/upland and linear methods the work is delegated to
+ ! HillslopeSoilThicknessProfile.
+ !
+ ! !USES:
+ use LandunitType , only : lun
+ use GridcellType , only : grc
+ use ColumnType , only : col
+ use clm_varctl , only : nhillslope, max_columns_hillslope
+ use clm_varcon , only : zmin_bedrock, zisoi
+ use clm_varpar , only : nlevsoi
+ use spmdMod , only : masterproc
+ use fileutils , only : getfil
+ use clm_varcon , only : spval, ispval, grlnd
+ use ncdio_pio
+ !
+ ! !ARGUMENTS:
+ type(bounds_type), intent(in) :: bounds
+ character(len=*) , intent(in) :: fsurdat ! surface data file name
+ real(r8), intent(in), optional :: soil_depth_lowland_in ! lowland soil depth (m)
+ real(r8), intent(in), optional :: soil_depth_upland_in ! upland soil depth (m)
+ !
+ ! !LOCAL VARIABLES:
+ real(r8), pointer :: fhillslope_in(:,:) ! bedrock depth read from file (m)
+ type(file_desc_t) :: ncid ! netcdf id
+ logical :: readvar ! check whether variable on file
+ character(len=256) :: locfn ! local filename
+ integer :: c, l, g, j, ci ! indices
+ real(r8) :: soil_depth_lowland ! resolved lowland depth (m)
+ real(r8) :: soil_depth_upland ! resolved upland depth (m)
+ ! _r8 kind suffix keeps the defaults in double precision
+ real(r8), parameter :: soil_depth_lowland_default = 8.0_r8
+ real(r8), parameter :: soil_depth_upland_default = 8.0_r8
+ character(len=*), parameter :: subname = 'SetHillslopeSoilThickness'
+
+ !-----------------------------------------------------------------------
+
+ if (soil_profile_method==soil_profile_from_file) then
+
+ ! Open surface dataset to read in data below
+ call getfil (fsurdat, locfn, 0)
+ call ncd_pio_openfile (ncid, locfn, 0)
+
+ allocate(fhillslope_in(bounds%begg:bounds%endg,max_columns_hillslope))
+ call ncd_io(ncid=ncid, varname='hillslope_bedrock_depth', flag='read', data=fhillslope_in, dim1name=grlnd, readvar=readvar)
+ if (masterproc .and. .not. readvar) then
+ call endrun( 'ERROR:: soil_profile_method = "FromFile", but hillslope_bedrock not found on surface data set.'//errmsg(sourcefile, __LINE__) )
+ end if
+ do l = bounds%begl,bounds%endl
+ g = lun%gridcell(l)
+ do c = lun%coli(l), lun%colf(l)
+ if (col%is_hillslope_column(c) .and. col%active(c)) then
+ ! position of the column within its landunit selects the dataset entry
+ ci = c-lun%coli(l)+1
+ do j = 1,nlevsoi
+ if (zisoi(j-1) > zmin_bedrock) then
+ ! nbedrock is the layer whose upper interface is above the
+ ! bedrock depth and whose lower interface is at or below it
+ if (zisoi(j-1) < fhillslope_in(g,ci) &
+ .and. zisoi(j) >= fhillslope_in(g,ci)) then
+ col%nbedrock(c) = j
+ exit ! at most one layer can satisfy this condition
+ end if
+ end if
+ enddo
+ end if
+ enddo
+ enddo
+ deallocate(fhillslope_in)
+ call ncd_pio_closefile(ncid)
+
+ else if (soil_profile_method==soil_profile_set_lowland_upland &
+ .or. soil_profile_method==soil_profile_linear) then
+
+ ! Fall back to defaults when the optional depths are not supplied
+ if (present(soil_depth_lowland_in)) then
+ soil_depth_lowland = soil_depth_lowland_in
+ else
+ soil_depth_lowland = soil_depth_lowland_default
+ end if
+
+ if (present(soil_depth_upland_in)) then
+ soil_depth_upland = soil_depth_upland_in
+ else
+ soil_depth_upland = soil_depth_upland_default
+ end if
+
+ ! Modify hillslope soil thickness profile
+ call HillslopeSoilThicknessProfile(bounds,&
+ soil_profile_method=soil_profile_method,&
+ soil_depth_lowland_in=soil_depth_lowland,&
+ soil_depth_upland_in=soil_depth_upland)
+
+ else if (soil_profile_method /= soil_profile_uniform .and. masterproc) then
+ call endrun( msg=' ERROR: unrecognized hillslope_soil_profile_method'//errMsg(sourcefile, __LINE__))
+
+ end if
+
+ end subroutine SetHillslopeSoilThickness
+
+ !-----------------------------------------------------------------------
+ subroutine HillslopeSoilThicknessProfile(bounds,&
+ soil_profile_method,soil_depth_lowland_in,soil_depth_upland_in)
+ !
+ ! !DESCRIPTION:
+ ! Modify soil thickness across hillslope by changing col%nbedrock.
+ ! Lowland and upland columns may be given separate fixed depths
+ ! (soil_profile_set_lowland_upland), or depth may vary linearly with
+ ! hillslope distance (soil_profile_linear, delegated to
+ ! HillslopeSoilThicknessProfile_linear).
+ !
+ ! !USES:
+ use ColumnType , only : col
+ use clm_varcon , only : zmin_bedrock, zisoi, ispval
+ use clm_varpar , only : nlevsoi
+ use spmdMod , only : masterproc
+ !
+ ! !ARGUMENTS:
+ type(bounds_type), intent(in) :: bounds
+ integer, intent(in) :: soil_profile_method ! one of the soil_profile_* method constants
+ real(r8), intent(in), optional :: soil_depth_lowland_in ! lowland soil depth (m)
+ real(r8), intent(in), optional :: soil_depth_upland_in ! upland soil depth (m)
+ !
+ ! !LOCAL VARIABLES:
+ integer :: c, j ! indices
+ real(r8) :: soil_depth_lowland ! resolved lowland depth (m)
+ real(r8) :: soil_depth_upland ! resolved upland depth (m)
+ ! _r8 kind suffix keeps the defaults in double precision
+ real(r8), parameter :: soil_depth_lowland_default = 8.0_r8
+ real(r8), parameter :: soil_depth_upland_default = 8.0_r8
+
+ character(len=*), parameter :: subname = 'HillslopeSoilThicknessProfile'
+
+ !-----------------------------------------------------------------------
+
+ ! Fall back to defaults when the optional depths are not supplied
+ if (present(soil_depth_lowland_in)) then
+ soil_depth_lowland = soil_depth_lowland_in
+ else
+ soil_depth_lowland = soil_depth_lowland_default
+ end if
+
+ if (present(soil_depth_upland_in)) then
+ soil_depth_upland = soil_depth_upland_in
+ else
+ soil_depth_upland = soil_depth_upland_default
+ end if
+
+ ! Specify lowland/upland soil thicknesses separately
+ if (soil_profile_method == soil_profile_set_lowland_upland) then
+ do c = bounds%begc,bounds%endc
+ if (col%is_hillslope_column(c) .and. col%active(c)) then
+ ! col%cold /= ispval marks an upland column (a column lies below it)
+ if (col%cold(c) /= ispval) then
+ do j = 1,nlevsoi
+ if (zisoi(j-1) > zmin_bedrock) then
+ if (zisoi(j-1) < soil_depth_upland .and. zisoi(j) >= soil_depth_upland) then
+ col%nbedrock(c) = j
+ exit ! at most one layer can satisfy this condition
+ end if
+ end if
+ enddo
+ else
+ do j = 1,nlevsoi
+ if (zisoi(j-1) > zmin_bedrock) then
+ if (zisoi(j-1) < soil_depth_lowland .and. zisoi(j) >= soil_depth_lowland) then
+ col%nbedrock(c) = j
+ exit ! at most one layer can satisfy this condition
+ end if
+ end if
+ enddo
+ end if
+ end if
+ end do
+ ! Linear soil thickness profile
+ else if (soil_profile_method == soil_profile_linear) then
+ call HillslopeSoilThicknessProfile_linear(bounds, soil_depth_lowland, soil_depth_upland)
+ else if (masterproc) then
+ call endrun( 'ERROR:: invalid soil_profile_method.'//errmsg(sourcefile, __LINE__) )
+ end if
+
+ end subroutine HillslopeSoilThicknessProfile
+
+ !------------------------------------------------------------------------
+ subroutine HillslopeSetLowlandUplandPfts(bounds,lowland_ivt,upland_ivt)
+ !
+ ! !DESCRIPTION:
+ ! Overwrite the patch type of every hillslope column with a single
+ ! prescribed pft: lowland columns receive lowland_ivt, all other
+ ! hillslope columns receive upland_ivt.
+ ! Assumes each column has a single pft.
+ ! In preparation for this reassignment of patch type, only the
+ ! first patch was given a non-zero weight in surfrd_hillslope
+ !
+ ! !USES
+ use LandunitType , only : lun
+ use ColumnType , only : col
+ use clm_varcon , only : ispval
+ use clm_varpar , only : natpft_lb
+ use PatchType , only : patch
+ !
+ ! !ARGUMENTS:
+ type(bounds_type), intent(in) :: bounds
+ integer, intent(in) :: upland_ivt
+ integer, intent(in) :: lowland_ivt
+ !
+ ! !LOCAL VARIABLES:
+ integer :: p, c ! indices
+ integer :: new_ivt ! pft type assigned to the current column
+ integer :: n_patches ! patches encountered on the current column
+
+ !------------------------------------------------------------------------
+
+ do c = bounds%begc, bounds%endc
+ if (.not. col%is_hillslope_column(c)) cycle
+
+ ! A column with no column below it (cold == ispval) is a lowland
+ if (col%cold(c) == ispval) then
+ new_ivt = lowland_ivt
+ else
+ new_ivt = upland_ivt
+ end if
+
+ n_patches = 0
+ do p = col%patchi(c), col%patchf(c)
+ patch%itype(p) = new_ivt
+ ! update mxy as is done in initSubgridMod.add_patch
+ patch%mxy(p) = patch%itype(p) + (1 - natpft_lb)
+ n_patches = n_patches + 1
+ enddo
+
+ if ((n_patches /= 1) .and. masterproc) then
+ call endrun( 'ERROR:: number of patches per hillslope column not equal to 1'//errmsg(sourcefile, __LINE__) )
+ end if
+ enddo
+
+ end subroutine HillslopeSetLowlandUplandPfts
+
+ !------------------------------------------------------------------------
+ subroutine HillslopeDominantLowlandPft(bounds)
+ !
+ ! !DESCRIPTION:
+ ! Reassign patch weights of each column based on each gridcell's
+ ! two most dominant pfts on the input dataset.
+ ! HillslopeTwoLargestPftIndices is called in surfrd_hillslope to
+ ! prepare the patch weights for this routine.
+ ! Assumes each column has a single pft.
+ ! Use largest weight for lowland, 2nd largest weight for uplands
+ !
+ ! !USES
+ use LandunitType , only : lun
+ use ColumnType , only : col
+ use decompMod , only : get_clump_bounds, get_proc_clumps
+ use clm_varcon , only : ispval
+ use PatchType , only : patch
+ use pftconMod , only : pftcon, ndllf_evr_tmp_tree, nc3_nonarctic_grass, nc4_grass
+ use array_utils , only : find_k_max_indices
+ !
+ ! !ARGUMENTS:
+ type(bounds_type), intent(in) :: bounds
+ !
+ ! !LOCAL VARIABLES:
+ integer :: p,c ! indices
+ integer :: plow, phigh ! global patch indices chosen for lowland / upland
+ integer :: max_index(1) ! scratch result for the single-patch case
+ integer, allocatable :: max_indices(:) ! largest weight pft indices
+ real(r8) :: sum_wtcol, sum_wtlun, sum_wtgrc ! column/landunit/gridcell weight totals, conserved across the reassignment
+
+ !------------------------------------------------------------------------
+
+ allocate(max_indices(2))
+ do c = bounds%begc,bounds%endc
+ if (col%is_hillslope_column(c)) then
+
+ ! if only one pft exists, find dominant pft index and set 2nd index to the same value
+
+ if (size(patch%wtcol(col%patchi(c):col%patchf(c))) == 1) then
+ call find_k_max_indices(patch%wtcol(col%patchi(c):col%patchf(c)),1,1,max_index)
+ ! shift from slice-relative index to global patch index
+ max_indices(1) = max_index(1) + (col%patchi(c) - 1)
+ max_indices(2) = max_indices(1)
+ else
+ call find_k_max_indices(patch%wtcol(col%patchi(c):col%patchf(c)),1,2,max_indices)
+ max_indices = max_indices + (col%patchi(c) - 1)
+ end if
+
+ ! Save the column's total weights, then zero every patch; the totals
+ ! are handed to exactly one surviving patch below, so the column,
+ ! landunit, and gridcell weight sums are conserved.
+ sum_wtcol = sum(patch%wtcol(col%patchi(c):col%patchf(c)))
+ sum_wtlun = sum(patch%wtlunit(col%patchi(c):col%patchf(c)))
+ sum_wtgrc = sum(patch%wtgcell(col%patchi(c):col%patchf(c)))
+
+ patch%wtcol(col%patchi(c):col%patchf(c)) = 0._r8
+ patch%wtlunit(col%patchi(c):col%patchf(c)) = 0._r8
+ patch%wtgcell(col%patchi(c):col%patchf(c)) = 0._r8
+
+ ! Put the highest stature vegetation on the lowland column
+ ! non-tree and tree ; place tree on lowland
+ ! grass and shrub ; place shrub on lowland
+ ! bare soil and vegetation; place vegetation on lowland
+ if ((.not. pftcon%is_tree(patch%itype(max_indices(1))) .and. pftcon%is_tree(patch%itype(max_indices(2)))) &
+ .or. (pftcon%is_grass(patch%itype(max_indices(1))) .and. pftcon%is_shrub(patch%itype(max_indices(2)))) &
+ .or. (patch%itype(max_indices(1)) == 0)) then
+ plow = max_indices(2)
+ phigh = max_indices(1)
+ else
+ ! default: the most dominant pft goes to the lowland
+ plow = max_indices(1)
+ phigh = max_indices(2)
+ end if
+
+ ! Special cases (subjective)
+
+ ! if NET/BDT assign BDT to lowland
+ if ((patch%itype(max_indices(1)) == ndllf_evr_tmp_tree) .and. pftcon%is_tree(patch%itype(max_indices(2)))) then
+ plow = max_indices(2)
+ phigh = max_indices(1)
+ end if
+ ! if C3/C4 assign C4 to lowland
+ if ((patch%itype(max_indices(1)) == nc4_grass) .and. (patch%itype(max_indices(2)) == nc3_nonarctic_grass)) then
+ plow = max_indices(1)
+ phigh = max_indices(2)
+ end if
+ if ((patch%itype(max_indices(1)) == nc3_nonarctic_grass) .and. (patch%itype(max_indices(2)) == nc4_grass)) then
+ plow = max_indices(2)
+ phigh = max_indices(1)
+ end if
+
+ ! Give the whole column weight to the patch matching the column's
+ ! position on the hillslope (cold == ispval marks the lowland column)
+ if (col%cold(c) == ispval) then
+ ! lowland column
+ patch%wtcol(plow) = sum_wtcol
+ patch%wtlunit(plow) = sum_wtlun
+ patch%wtgcell(plow) = sum_wtgrc
+ else
+ ! upland columns
+ patch%wtcol(phigh) = sum_wtcol
+ patch%wtlunit(phigh) = sum_wtlun
+ patch%wtgcell(phigh) = sum_wtgrc
+ end if
+ end if
+ enddo ! end loop c
+ deallocate(max_indices)
+
+ end subroutine HillslopeDominantLowlandPft
+
+ !------------------------------------------------------------------------
+ subroutine HillslopePftFromFile(bounds,col_pftndx)
+ !
+ ! !DESCRIPTION:
+ ! Overwrite the patch type of every hillslope column with the pft
+ ! index read from the surface data file.
+ ! Assumes one patch per hillslope column.
+ ! In preparation for this reassignment of patch type, only the
+ ! first patch was given a non-zero weight in surfrd_hillslope.
+ !
+ ! !USES
+ use ColumnType , only : col
+ use PatchType , only : patch
+ use clm_varpar , only : natpft_lb
+ !
+ ! !ARGUMENTS:
+ type(bounds_type), intent(in) :: bounds
+ integer, intent(in) :: col_pftndx(:) ! per-column pft index from the surface dataset
+ !
+ ! !LOCAL VARIABLES:
+ integer :: p, c ! indices
+ integer :: n_patches ! patches encountered on the current column
+
+ !------------------------------------------------------------------------
+
+ do c = bounds%begc, bounds%endc
+ if (.not. col%is_hillslope_column(c)) cycle
+
+ ! Each hillslope column is expected to hold exactly one patch
+ n_patches = 0
+ do p = col%patchi(c), col%patchf(c)
+ patch%itype(p) = col_pftndx(c)
+ ! update mxy as is done in initSubgridMod.add_patch
+ patch%mxy(p) = patch%itype(p) + (1 - natpft_lb)
+ n_patches = n_patches + 1
+ enddo
+
+ if ((n_patches /= 1) .and. masterproc) then
+ call endrun( 'ERROR:: number of patches per hillslope column not equal to 1'//errmsg(sourcefile, __LINE__) )
+ end if
+ enddo
+
+ end subroutine HillslopePftFromFile
+
+ !-----------------------------------------------------------------------
+ subroutine HillslopeStreamOutflow(bounds, &
+ waterstatebulk_inst, waterfluxbulk_inst,streamflow_method)
+ !
+ ! !DESCRIPTION:
+ ! Calculate discharge from stream channel.
+ ! Currently only streamflow_manning is supported: flow velocity is
+ ! computed from Manning's equation for a rectangular channel, and the
+ ! resulting volumetric flow is limited to the water available in the
+ ! channel over one time step.
+ !
+ ! !USES:
+ use LandunitType , only : lun
+ use GridcellType , only : grc
+ use ColumnType , only : col
+ use WaterFluxBulkType , only : waterfluxbulk_type
+ use WaterStateBulkType , only : waterstatebulk_type
+ use spmdMod , only : masterproc
+ use clm_varcon , only : spval, ispval, grlnd
+ use landunit_varcon , only : istsoil
+ use ncdio_pio
+ use clm_time_manager , only : get_step_size_real
+ !
+ ! !ARGUMENTS:
+ type(bounds_type), intent(in) :: bounds
+ integer, intent(in) :: streamflow_method ! method for computing discharge (streamflow_manning)
+ type(waterstatebulk_type), intent(inout) :: waterstatebulk_inst
+ type(waterfluxbulk_type), intent(inout) :: waterfluxbulk_inst
+
+ integer :: c, l, g, i, j
+ integer :: nstep
+ real(r8) :: dtime ! land model time step (sec)
+ real(r8) :: cross_sectional_area ! cross sectional area of stream water (m2)
+ real(r8) :: stream_depth ! depth of stream water (m)
+ real(r8) :: hydraulic_radius ! cross sectional area divided by wetted perimeter (m)
+ real(r8) :: flow_velocity ! flow velocity (m/s)
+ real(r8) :: overbank_area ! area of water above bankfull (m2)
+ real(r8), parameter :: manning_roughness = 0.03_r8 ! manning roughness
+ real(r8), parameter :: manning_exponent = 0.667_r8 ! manning exponent (approximates 2/3)
+
+ integer, parameter :: overbank_method = 1 ! method to treat overbank stream storage; 1 = increase dynamic slope, 2 = increase flow area cross section, 3 = remove instantaneously
+ logical :: active_stream
+ character(len=*), parameter :: subname = 'HillslopeStreamOutflow'
+
+ !-----------------------------------------------------------------------
+ associate( &
+ stream_water_volume => waterstatebulk_inst%stream_water_volume_lun , & ! Input: [real(r8) (:) ] stream water volume (m3)
+ volumetric_streamflow => waterfluxbulk_inst%volumetric_streamflow_lun & ! Input: [real(r8) (:) ] stream water discharge (m3/s)
+ )
+
+ ! Get time step
+ dtime = get_step_size_real()
+
+ do l = bounds%begl,bounds%endl
+ ! zero discharge for every landunit; only active vegetated landunits
+ ! with an initialized channel get a non-zero value below
+ volumetric_streamflow(l) = 0._r8
+
+ ! Check for vegetated landunits having initialized stream channel properties
+ active_stream = .false.
+ if (lun%itype(l) == istsoil .and. &
+ lun%stream_channel_length(l) > 0._r8 .and. &
+ lun%stream_channel_width(l) > 0._r8) then
+ active_stream = .true.
+ end if
+
+ if (lun%active(l) .and. active_stream) then
+ ! Streamflow calculated from Manning equation
+ if (streamflow_method == streamflow_manning) then
+ cross_sectional_area = stream_water_volume(l) &
+ /lun%stream_channel_length(l)
+ stream_depth = cross_sectional_area &
+ /lun%stream_channel_width(l)
+ ! rectangular channel: wetted perimeter = width + 2*depth
+ hydraulic_radius = cross_sectional_area &
+ /(lun%stream_channel_width(l) + 2*stream_depth)
+
+ if (hydraulic_radius <= 0._r8) then
+ ! empty channel; no flow
+ volumetric_streamflow(l) = 0._r8
+ else
+ ! Manning's equation: v = R^(2/3) * sqrt(slope) / n
+ flow_velocity = (hydraulic_radius)**manning_exponent &
+ * sqrt(lun%stream_channel_slope(l)) &
+ / manning_roughness
+ ! overbank flow
+ if (stream_depth > lun%stream_channel_depth(l)) then
+ if (overbank_method == 1) then
+ ! try increasing dynamic slope
+ volumetric_streamflow(l) = cross_sectional_area * flow_velocity &
+ *(stream_depth/lun%stream_channel_depth(l))
+ else if (overbank_method == 2) then
+ ! try increasing flow area cross section
+ ! NOTE(review): 30._r8 looks like an assumed floodplain width
+ ! multiplier (in channel widths) -- TODO confirm
+ overbank_area = (stream_depth -lun%stream_channel_depth(l)) * 30._r8 * lun%stream_channel_width(l)
+ volumetric_streamflow(l) = (cross_sectional_area + overbank_area) * flow_velocity
+ else if (overbank_method == 3) then
+ ! try removing all overbank flow instantly
+ volumetric_streamflow(l) = cross_sectional_area * flow_velocity &
+ + (stream_depth-lun%stream_channel_depth(l)) &
+ *lun%stream_channel_width(l)*lun%stream_channel_length(l)/dtime
+ else
+ call endrun( 'ERROR:: invalid overbank_method.'//errmsg(sourcefile, __LINE__) )
+ end if
+
+ else
+ ! within-bank flow
+ volumetric_streamflow(l) = cross_sectional_area * flow_velocity
+ end if
+
+ ! scale streamflow by number of channel reaches
+ volumetric_streamflow(l) = volumetric_streamflow(l) * lun%stream_channel_number(l)
+
+ ! keep discharge non-negative and limited to the water available this step
+ volumetric_streamflow(l) = max(0._r8,min(volumetric_streamflow(l),stream_water_volume(l)/dtime))
+ end if
+ else
+ call endrun( 'ERROR:: invalid streamflow_method'//errmsg(sourcefile, __LINE__) )
+ end if
+ end if ! end of istsoil
+ enddo ! end of loop over landunits
+
+ end associate
+
+ end subroutine HillslopeStreamOutflow
+
+ !-----------------------------------------------------------------------
+ subroutine HillslopeUpdateStreamWater(bounds, waterstatebulk_inst, &
+ waterfluxbulk_inst,waterdiagnosticbulk_inst)
+ !
+ ! !DESCRIPTION:
+ ! Update stream channel water volume: add surface runoff and drainage
+ ! from the hillslope columns, remove the discharge computed by
+ ! HillslopeStreamOutflow, and diagnose stream water depth.
+ !
+ ! !USES:
+ use LandunitType , only : lun
+ use GridcellType , only : grc
+ use ColumnType , only : col
+ use WaterFluxBulkType , only : waterfluxbulk_type
+ use WaterStateBulkType , only : waterstatebulk_type
+ use WaterDiagnosticBulkType , only : waterdiagnosticbulk_type
+ use spmdMod , only : masterproc
+ use clm_varcon , only : spval, ispval, grlnd
+ use landunit_varcon , only : istsoil
+ use clm_time_manager, only : get_step_size_real
+ !
+ ! !ARGUMENTS:
+ type(bounds_type), intent(in) :: bounds
+ type(waterstatebulk_type), intent(inout) :: waterstatebulk_inst
+ type(waterfluxbulk_type), intent(inout) :: waterfluxbulk_inst
+ type(waterdiagnosticbulk_type), intent(inout) :: waterdiagnosticbulk_inst
+
+ integer :: c, l, g, i, j
+ real(r8) :: qflx_surf_vol ! volumetric surface runoff (m3/s)
+ real(r8) :: qflx_drain_perched_vol ! volumetric perched saturated drainage (m3/s)
+ real(r8) :: qflx_drain_vol ! volumetric saturated drainage (m3/s)
+ real(r8) :: dtime ! land model time step (sec)
+ logical :: active_stream
+
+ character(len=*), parameter :: subname = 'HillslopeUpdateStreamWater'
+
+ !-----------------------------------------------------------------------
+ associate( &
+ stream_water_volume => waterstatebulk_inst%stream_water_volume_lun, & ! Input/Output: [real(r8) (:) ] stream water volume (m3)
+ volumetric_streamflow => waterfluxbulk_inst%volumetric_streamflow_lun,& ! Input: [real(r8) (:) ] stream water discharge (m3/s)
+ qflx_drain => waterfluxbulk_inst%qflx_drain_col, & ! Input: [real(r8) (:) ] column level sub-surface runoff (mm H2O /s)
+ qflx_drain_perched => waterfluxbulk_inst%qflx_drain_perched_col, & ! Input: [real(r8) (:) ] column level sub-surface runoff (mm H2O /s)
+ qflx_surf => waterfluxbulk_inst%qflx_surf_col, & ! Input: [real(r8) (:) ] total surface runoff (mm H2O /s)
+ stream_water_depth => waterdiagnosticbulk_inst%stream_water_depth_lun & ! Output: [real(r8) (:) ] stream water depth (m)
+ )
+
+ ! Get time step
+ dtime = get_step_size_real()
+
+ do l = bounds%begl,bounds%endl
+
+ ! Check for vegetated landunits having initialized stream channel properties
+ active_stream = .false.
+ if (lun%itype(l) == istsoil .and. &
+ lun%stream_channel_length(l) > 0._r8 .and. &
+ lun%stream_channel_width(l) > 0._r8) then
+ active_stream = .true.
+ end if
+
+ if (lun%active(l) .and. active_stream) then
+ g = lun%gridcell(l)
+ ! the drainage terms are 'net' quantities, so summing over
+ ! all columns in a hillslope is equivalent to the outflow
+ ! from the lowland column
+ do c = lun%coli(l), lun%colf(l)
+ if (col%is_hillslope_column(c) .and. col%active(c)) then
+ ! convert mm H2O/s to m3/s: 1.e-3 m/mm times the column's share
+ ! of gridcell area (grc%area in km2, 1.e6 m2/km2, wtgcell fraction)
+ qflx_surf_vol = qflx_surf(c)*1.e-3_r8 &
+ *(grc%area(g)*1.e6_r8*col%wtgcell(c))
+ qflx_drain_perched_vol = qflx_drain_perched(c)*1.e-3_r8 &
+ *(grc%area(g)*1.e6_r8*col%wtgcell(c))
+ qflx_drain_vol = qflx_drain(c)*1.e-3_r8 &
+ *(grc%area(g)*1.e6_r8*col%wtgcell(c))
+
+ stream_water_volume(l) = stream_water_volume(l) &
+ + (qflx_drain_perched_vol &
+ + qflx_drain_vol + qflx_surf_vol) * dtime
+ end if
+ enddo
+ ! remove the discharge computed by HillslopeStreamOutflow
+ stream_water_volume(l) = stream_water_volume(l) &
+ - volumetric_streamflow(l) * dtime
+
+ ! account for negative drainage (via searchforwater in soilhydrology)
+ ! if the volume went negative, reduce the discharge by the deficit
+ ! and reset the stored volume to zero
+ if (stream_water_volume(l) < 0._r8) then
+ volumetric_streamflow(l) = volumetric_streamflow(l) + stream_water_volume(l)/dtime
+ stream_water_volume(l) = 0._r8
+ end if
+
+ ! diagnose depth assuming a rectangular channel
+ stream_water_depth(l) = stream_water_volume(l) &
+ /lun%stream_channel_length(l) &
+ /lun%stream_channel_width(l)
+
+ end if
+ enddo
+
+ end associate
+
+ end subroutine HillslopeUpdateStreamWater
+
+end module HillslopeHydrologyMod
diff --git a/src/biogeophys/HillslopeHydrologyUtilsMod.F90 b/src/biogeophys/HillslopeHydrologyUtilsMod.F90
new file mode 100644
index 0000000000..299971055c
--- /dev/null
+++ b/src/biogeophys/HillslopeHydrologyUtilsMod.F90
@@ -0,0 +1,85 @@
+module HillslopeHydrologyUtilsMod
+
+ !-----------------------------------------------------------------------
+ ! !DESCRIPTION:
+ ! Utilities used in HillslopeHydrologyMod
+ !
+ ! !USES:
+#include "shr_assert.h"
+ use decompMod , only : bounds_type
+ use shr_kind_mod , only : r8 => shr_kind_r8
+ use shr_log_mod , only : errMsg => shr_log_errMsg
+ use spmdMod , only : masterproc, iam
+ use abortutils , only : endrun
+ use clm_varctl , only : iulog
+
+ ! !PUBLIC TYPES:
+ implicit none
+
+ private
+ save
+
+ ! Hillslope-length differences at or below this threshold are treated as
+ ! zero to avoid dividing by a near-zero distance. The _r8 kind suffix
+ ! keeps the constant in double precision (a plain 1e-6 literal is
+ ! default real).
+ real(r8), parameter :: toosmall_distance_default = 1.e-6_r8
+
+ ! !PUBLIC MEMBER FUNCTIONS:
+ public HillslopeSoilThicknessProfile_linear
+
+contains
+
+ !------------------------------------------------------------------------
+ subroutine HillslopeSoilThicknessProfile_linear(bounds, soil_depth_lowland, soil_depth_upland, toosmall_distance_in)
+ !
+ ! !DESCRIPTION:
+ ! Modify soil thickness across hillslope by changing
+ ! nbedrock according to the "Linear" method: target soil depth varies
+ ! linearly with hillslope distance, from soil_depth_upland at the
+ ! most distant column to soil_depth_lowland at the nearest.
+ !
+ ! !USES:
+ use LandunitType , only : lun
+ use ColumnType , only : col
+ use clm_varpar , only : nlevsoi
+ use clm_varcon , only : zisoi
+ !
+ ! !ARGUMENTS:
+ type(bounds_type), intent(in) :: bounds
+ real(r8), intent(in) :: soil_depth_lowland, soil_depth_upland ! soil depths (m)
+ real(r8), intent(in), optional :: toosmall_distance_in ! override for the distance threshold (m)
+ !
+ ! !LOCAL VARIABLES
+ real(r8) :: min_hill_dist, max_hill_dist ! hillslope distance extrema over the landunit's columns (m)
+ real(r8) :: toosmall_distance ! resolved distance threshold (m)
+ real(r8) :: soil_depth_col ! target soil depth for the current column (m)
+ real(r8) :: m, b ! slope and intercept of the linear depth profile
+ integer :: c, j, l ! indices
+
+ if (present(toosmall_distance_in)) then
+ toosmall_distance = toosmall_distance_in
+ else
+ toosmall_distance = toosmall_distance_default
+ end if
+
+ do l = bounds%begl,bounds%endl
+ min_hill_dist = minval(col%hill_distance(lun%coli(l):lun%colf(l)))
+ max_hill_dist = maxval(col%hill_distance(lun%coli(l):lun%colf(l)))
+
+ ! guard against a degenerate (near-zero) distance range
+ if (abs(max_hill_dist - min_hill_dist) > toosmall_distance) then
+ m = (soil_depth_lowland - soil_depth_upland)/ &
+ (max_hill_dist - min_hill_dist)
+ else
+ m = 0._r8
+ end if
+ b = soil_depth_upland
+
+ do c = lun%coli(l), lun%colf(l)
+ if (col%is_hillslope_column(c) .and. col%active(c)) then
+ soil_depth_col = m*(max_hill_dist - col%hill_distance(c)) + b
+ ! nbedrock is the layer whose interfaces bracket the target depth
+ do j = 1,nlevsoi
+ if ((zisoi(j-1) < soil_depth_col) .and. (zisoi(j) >= soil_depth_col)) then
+ col%nbedrock(c) = j
+ exit
+ end if
+ enddo
+ end if
+ enddo
+ enddo
+ end subroutine HillslopeSoilThicknessProfile_linear
+end module HillslopeHydrologyUtilsMod
\ No newline at end of file
diff --git a/src/biogeophys/HydrologyDrainageMod.F90 b/src/biogeophys/HydrologyDrainageMod.F90
index 31ffc817a0..ce5b78e3ff 100644
--- a/src/biogeophys/HydrologyDrainageMod.F90
+++ b/src/biogeophys/HydrologyDrainageMod.F90
@@ -40,7 +40,7 @@ subroutine HydrologyDrainage(bounds, &
num_hydrologyc, filter_hydrologyc, &
num_urbanc, filter_urbanc, &
num_do_smb_c, filter_do_smb_c, &
- atm2lnd_inst, glc2lnd_inst, temperature_inst, &
+ glc2lnd_inst, temperature_inst, &
soilhydrology_inst, soilstate_inst, waterstatebulk_inst, &
waterdiagnosticbulk_inst, waterbalancebulk_inst, waterfluxbulk_inst, &
wateratm2lndbulk_inst, glacier_smb_inst)
@@ -52,11 +52,12 @@ subroutine HydrologyDrainage(bounds, &
use landunit_varcon , only : istwet, istsoil, istice, istcrop
use column_varcon , only : icol_roof, icol_road_imperv, icol_road_perv, icol_sunwall, icol_shadewall
use clm_varcon , only : denh2o, denice
- use clm_varctl , only : use_vichydro
+ use clm_varctl , only : use_vichydro, use_hillslope, use_hillslope_routing
use clm_varpar , only : nlevgrnd, nlevurb
use clm_time_manager , only : get_step_size_real, get_nstep
- use SoilHydrologyMod , only : CLMVICMap, Drainage, PerchedLateralFlow, LateralFlowPowerLaw
+ use SoilHydrologyMod , only : CLMVICMap, Drainage, PerchedLateralFlow, SubsurfaceLateralFlow
use SoilWaterMovementMod , only : use_aquifer_layer
+ use HillslopeHydrologyMod, only : streamflow_manning, HillslopeStreamOutflow, HillslopeUpdateStreamWater
!
! !ARGUMENTS:
type(bounds_type) , intent(in) :: bounds
@@ -66,18 +67,18 @@ subroutine HydrologyDrainage(bounds, &
integer , intent(in) :: filter_hydrologyc(:) ! column filter for soil points
integer , intent(in) :: num_urbanc ! number of column urban points in column filter
integer , intent(in) :: filter_urbanc(:) ! column filter for urban points
- integer , intent(in) :: num_do_smb_c ! number of columns in which SMB is calculated, in column filter
- integer , intent(in) :: filter_do_smb_c(:) ! column filter for bare landwhere SMB is calculated
- type(atm2lnd_type) , intent(in) :: atm2lnd_inst
+ integer , intent(in) :: num_do_smb_c ! number of bareland columns in which SMB is calculated, in column filter
+ integer , intent(in) :: filter_do_smb_c(:) ! column filter for bare land SMB columns
+
type(glc2lnd_type) , intent(in) :: glc2lnd_inst
type(temperature_type) , intent(in) :: temperature_inst
type(soilhydrology_type) , intent(inout) :: soilhydrology_inst
type(soilstate_type) , intent(inout) :: soilstate_inst
type(waterstatebulk_type) , intent(inout) :: waterstatebulk_inst
type(waterdiagnosticbulk_type) , intent(inout) :: waterdiagnosticbulk_inst
- type(waterbalance_type) , intent(inout) :: waterbalancebulk_inst
+ type(waterbalance_type) , intent(inout) :: waterbalancebulk_inst
type(waterfluxbulk_type) , intent(inout) :: waterfluxbulk_inst
- type(wateratm2lndbulk_type) , intent(inout) :: wateratm2lndbulk_inst
+ type(wateratm2lndbulk_type) , intent(inout) :: wateratm2lndbulk_inst
type(glacier_smb_type) , intent(in) :: glacier_smb_inst
!
! !LOCAL VARIABLES:
@@ -112,6 +113,7 @@ subroutine HydrologyDrainage(bounds, &
qflx_surf => waterfluxbulk_inst%qflx_surf_col , & ! surface runoff (mm H2O /s)
qflx_infl => waterfluxbulk_inst%qflx_infl_col , & ! infiltration (mm H2O /s)
qflx_qrgwl => waterfluxbulk_inst%qflx_qrgwl_col , & ! qflx_surf at glaciers, wetlands, lakes
+ qflx_latflow_out => waterfluxbulk_inst%qflx_latflow_out_col , & ! lateral subsurface flow
qflx_runoff => waterfluxbulk_inst%qflx_runoff_col , & ! total runoff (qflx_drain+qflx_surf+qflx_qrgwl) (mm H2O /s)
qflx_runoff_u => waterfluxbulk_inst%qflx_runoff_u_col , & ! Urban total runoff (qflx_drain+qflx_surf) (mm H2O /s)
qflx_runoff_r => waterfluxbulk_inst%qflx_runoff_r_col , & ! Rural total runoff (qflx_drain+qflx_surf+qflx_qrgwl) (mm H2O /s)
@@ -135,16 +137,26 @@ subroutine HydrologyDrainage(bounds, &
else
call PerchedLateralFlow(bounds, num_hydrologyc, filter_hydrologyc, &
- num_urbanc, filter_urbanc,&
- soilhydrology_inst, soilstate_inst, &
- waterstatebulk_inst, waterfluxbulk_inst)
-
+ soilhydrology_inst, soilstate_inst, &
+ waterstatebulk_inst, waterfluxbulk_inst, &
+ wateratm2lndbulk_inst)
+ call SubsurfaceLateralFlow(bounds, &
+ num_hydrologyc, filter_hydrologyc, &
+ num_urbanc, filter_urbanc,&
+ soilhydrology_inst, soilstate_inst, &
+ waterstatebulk_inst, waterfluxbulk_inst, &
+ wateratm2lndbulk_inst)
+
+ if (use_hillslope_routing) then
+ call HillslopeStreamOutflow(bounds,&
+ waterstatebulk_inst, waterfluxbulk_inst, &
+ streamflow_method=streamflow_manning)
+
+ call HillslopeUpdateStreamWater(bounds, &
+ waterstatebulk_inst, waterfluxbulk_inst, &
+ waterdiagnosticbulk_inst)
+ endif
- call LateralFlowPowerLaw(bounds, num_hydrologyc, filter_hydrologyc, &
- num_urbanc, filter_urbanc,&
- soilhydrology_inst, soilstate_inst, &
- waterstatebulk_inst, waterfluxbulk_inst)
-
endif
do j = 1, nlevgrnd
@@ -182,6 +194,7 @@ subroutine HydrologyDrainage(bounds, &
if (lun%itype(l)==istwet .or. lun%itype(l)==istice) then
+ qflx_latflow_out(c) = 0._r8
qflx_drain(c) = 0._r8
qflx_drain_perched(c) = 0._r8
qflx_surf(c) = 0._r8
diff --git a/src/biogeophys/SaturatedExcessRunoffMod.F90 b/src/biogeophys/SaturatedExcessRunoffMod.F90
index 309d251460..5643a95394 100644
--- a/src/biogeophys/SaturatedExcessRunoffMod.F90
+++ b/src/biogeophys/SaturatedExcessRunoffMod.F90
@@ -233,10 +233,8 @@ subroutine SaturatedExcessRunoff (this, bounds, num_hydrologyc, filter_hydrology
qflx_sat_excess_surf => waterfluxbulk_inst%qflx_sat_excess_surf_col, & ! Output: [real(r8) (:) ] surface runoff due to saturated surface (mm H2O /s)
qflx_floodc => waterfluxbulk_inst%qflx_floodc_col , & ! Input: [real(r8) (:) ] column flux of flood water from RTM
- qflx_rain_plus_snomelt => waterfluxbulk_inst%qflx_rain_plus_snomelt_col , & ! Input: [real(r8) (:) ] rain plus snow melt falling on the soil (mm/s)
+ qflx_rain_plus_snomelt => waterfluxbulk_inst%qflx_rain_plus_snomelt_col & ! Input: [real(r8) (:) ] rain plus snow melt falling on the soil (mm/s)
- origflag => soilhydrology_inst%origflag , & ! Input: logical
- fracice => soilhydrology_inst%fracice_col & ! Input: [real(r8) (:,:) ] fractional impermeability (-)
)
! ------------------------------------------------------------------------
@@ -275,29 +273,14 @@ subroutine SaturatedExcessRunoff (this, bounds, num_hydrologyc, filter_hydrology
! qflx_rain_plus_snomelt in control
! ------------------------------------------------------------------------
- if (origflag == 1) then
- if (this%fsat_method == FSAT_METHOD_VIC) then
- ! NOTE(wjs, 2017-07-12) I'm not sure if it's the VIC fsat method per se that
- ! is incompatible with origflag, or some other aspect of VIC: The original
- ! check was for origflag == 1 and use_vichydro, which also appears in error
- ! checks elsewhere.
- call endrun(msg="VICHYDRO is not available for origflag=1"//errmsg(sourcefile, __LINE__))
- end if
- do fc = 1, num_hydrologyc
- c = filter_hydrologyc(fc)
- fcov(c) = (1._r8 - fracice(c,1)) * fsat(c) + fracice(c,1)
- qflx_sat_excess_surf(c) = fcov(c) * qflx_rain_plus_snomelt(c)
- end do
- else
- do fc = 1, num_hydrologyc
- c = filter_hydrologyc(fc)
- ! only send fast runoff directly to streams
- qflx_sat_excess_surf(c) = fsat(c) * qflx_rain_plus_snomelt(c)
-
- ! Set fcov just to have it on the history file
- fcov(c) = fsat(c)
- end do
- end if
+ do fc = 1, num_hydrologyc
+ c = filter_hydrologyc(fc)
+ ! only send fast runoff directly to streams
+ qflx_sat_excess_surf(c) = fsat(c) * qflx_rain_plus_snomelt(c)
+
+ ! Set fcov just to have it on the history file
+ fcov(c) = fsat(c)
+ end do
! ------------------------------------------------------------------------
! For urban columns, send flood water flux to runoff
diff --git a/src/biogeophys/SoilHydrologyMod.F90 b/src/biogeophys/SoilHydrologyMod.F90
index 4bc6a784de..5a4aa50f6e 100644
--- a/src/biogeophys/SoilHydrologyMod.F90
+++ b/src/biogeophys/SoilHydrologyMod.F90
@@ -10,6 +10,7 @@ module SoilHydrologyMod
use abortutils , only : endrun
use decompMod , only : bounds_type, subgrid_level_column
use clm_varctl , only : iulog, use_vichydro
+ use clm_varcon , only : ispval
use clm_varcon , only : denh2o, denice, rpi
use clm_varcon , only : pondmx_urban
use clm_varpar , only : nlevsoi, nlevgrnd, nlayer, nlayert
@@ -31,7 +32,8 @@ module SoilHydrologyMod
use TemperatureType , only : temperature_type
use LandunitType , only : lun
use ColumnType , only : column_type, col
- use PatchType , only : patch
+ use PatchType , only : patch
+
!
! !PUBLIC TYPES:
implicit none
@@ -51,7 +53,7 @@ module SoilHydrologyMod
public :: PerchedWaterTable ! Calculate perched water table
public :: PerchedLateralFlow ! Calculate lateral flow from perched saturated zone
public :: ThetaBasedWaterTable ! Calculate water table from soil moisture state
- public :: LateralFlowPowerLaw ! Calculate lateral flow based on power law drainage function
+ public :: SubsurfaceLateralFlow ! Calculate subsurface lateral flow from saturated zone
public :: RenewCondensation ! Misc. corrections
public :: CalcIrrigWithdrawals ! Calculate irrigation withdrawals from groundwater by layer
public :: WithdrawGroundwaterIrrigation ! Remove groundwater irrigation from unconfined and confined aquifers
@@ -63,17 +65,112 @@ module SoilHydrologyMod
real(r8) :: perched_baseflow_scalar ! Scalar multiplier for perched base flow rate (kg/m2/s)
real(r8) :: e_ice ! Soil ice impedance factor (unitless)
end type params_type
- type(params_type), private :: params_inst
+ type(params_type), public :: params_inst
!-----------------------------------------------------------------------
real(r8), private :: baseflow_scalar = 1.e-2_r8
real(r8), parameter :: tolerance = 1.e-12_r8 ! tolerance for checking whether sublimation is greater than ice in top soil layer
+ integer, private :: head_gradient_method ! Method for calculating hillslope saturated head gradient
+ integer, private :: transmissivity_method ! Method for calculating transmissivity of hillslope columns
+
+ ! Head gradient methods
+ integer, parameter, private :: kinematic = 0
+ integer, parameter, private :: darcy = 1
+ ! Transmissivity methods
+ integer, parameter, private :: uniform_transmissivity = 0
+ integer, parameter, private :: layersum = 1
+
character(len=*), parameter, private :: sourcefile = &
__FILE__
contains
+ !-----------------------------------------------------------------------
+ subroutine hillslope_hydrology_ReadNML(NLFilename)
+ !
+ ! DESCRIPTION
+ ! read in hillslope hydrology namelist variables related to
+ ! subsurface lateral flow
+ !
+ ! !USES:
+ use abortutils , only : endrun
+ use fileutils , only : getavu, relavu
+ use spmdMod , only : mpicom, masterproc
+ use shr_mpi_mod , only : shr_mpi_bcast
+ use clm_varctl , only : iulog
+ use clm_nlUtilsMod , only : find_nlgroup_name
+
+ ! !ARGUMENTS:
+ implicit none
+ character(len=*), intent(in) :: NLFilename ! Namelist filename
+ !--------------------------------------------------------------------
+ integer :: nu_nml ! unit for namelist file
+ integer :: nml_error ! namelist i/o error flag
+ character(len=*), parameter :: nmlname = 'hillslope_hydrology_inparm'
+ character(*), parameter :: subName = "('hillslope_hydrology_ReadNML')"
+ character(len=50) :: hillslope_head_gradient_method = 'Darcy' ! head gradient method string
+ character(len=50) :: hillslope_transmissivity_method = 'LayerSum' ! transmissivity method string
+ !-----------------------------------------------------------------------
+
+! MUST agree with name in namelist and read statement
+ namelist /hillslope_hydrology_inparm/ &
+ hillslope_head_gradient_method, &
+ hillslope_transmissivity_method
+
+ ! Default values for namelist
+ head_gradient_method = darcy
+ transmissivity_method = layersum
+
+ ! Read hillslope hydrology namelist
+ if (masterproc) then
+ nu_nml = getavu()
+ open( nu_nml, file=trim(NLFilename), status='old', iostat=nml_error )
+ call find_nlgroup_name(nu_nml, 'hillslope_hydrology_inparm', status=nml_error)
+ if (nml_error == 0) then
+ read(nu_nml, nml=hillslope_hydrology_inparm,iostat=nml_error)
+ if (nml_error /= 0) then
+ call endrun(subname // ':: ERROR reading hillslope hydrology namelist')
+ end if
+ else
+ call endrun(subname // ':: ERROR reading hillslope hydrology namelist')
+ end if
+ close(nu_nml)
+ call relavu( nu_nml )
+
+ ! Convert namelist strings to numerical values
+ if ( trim(hillslope_head_gradient_method) == 'Kinematic' ) then
+ head_gradient_method = kinematic
+ else if ( trim(hillslope_head_gradient_method) == 'Darcy' ) then
+ head_gradient_method = darcy
+ else
+         call endrun(msg="ERROR bad value for hillslope_head_gradient_method in "//nmlname//" namelist"//errmsg(sourcefile, __LINE__))
+ end if
+
+ if ( trim(hillslope_transmissivity_method) == 'Uniform' ) then
+ transmissivity_method = uniform_transmissivity
+ else if ( trim(hillslope_transmissivity_method) == 'LayerSum') then
+ transmissivity_method = layersum
+ else
+         call endrun(msg="ERROR bad value for hillslope_transmissivity_method in "//nmlname//" namelist"//errmsg(sourcefile, __LINE__))
+ end if
+
+ endif
+
+ call shr_mpi_bcast(head_gradient_method, mpicom)
+ call shr_mpi_bcast(transmissivity_method, mpicom)
+
+ if (masterproc) then
+
+ write(iulog,*) ' '
+ write(iulog,*) 'hillslope_hydrology lateral flow settings:'
+ write(iulog,*) ' hillslope_head_gradient_method = ',hillslope_head_gradient_method
+ write(iulog,*) ' hillslope_transmissivity_method = ',hillslope_transmissivity_method
+
+ endif
+
+ end subroutine hillslope_hydrology_ReadNML
+
!-----------------------------------------------------------------------
subroutine readParams( ncid )
!
@@ -157,6 +254,8 @@ subroutine soilHydReadNML( NLFilename )
end subroutine soilhydReadNML
+
+
!-----------------------------------------------------------------------
subroutine SetSoilWaterFractions(bounds, num_hydrologyc, filter_hydrologyc, &
soilhydrology_inst, soilstate_inst, waterstatebulk_inst)
@@ -193,10 +292,7 @@ subroutine SetSoilWaterFractions(bounds, num_hydrologyc, filter_hydrologyc, &
h2osoi_liq => waterstatebulk_inst%h2osoi_liq_col , & ! Input: [real(r8) (:,:) ] liquid water (kg/m2)
h2osoi_ice => waterstatebulk_inst%h2osoi_ice_col , & ! Input: [real(r8) (:,:) ] ice water (kg/m2)
excess_ice => waterstatebulk_inst%excess_ice_col , & ! Input: [real(r8) (:,:) ] excess ice (kg/m2)
-
- origflag => soilhydrology_inst%origflag , & ! Input: logical
- icefrac => soilhydrology_inst%icefrac_col , & ! Output: [real(r8) (:,:) ]
- fracice => soilhydrology_inst%fracice_col & ! Output: [real(r8) (:,:) ] fractional impermeability (-)
+ icefrac => soilhydrology_inst%icefrac_col & ! Output: [real(r8) (:,:) ]
)
do j = 1,nlevsoi
@@ -210,15 +306,6 @@ subroutine SetSoilWaterFractions(bounds, num_hydrologyc, filter_hydrologyc, &
eff_porosity(c,j) = max(0.01_r8,watsat(c,j)-vol_ice(c,j))
icefrac(c,j) = min(1._r8,vol_ice(c,j)/watsat(c,j))
- ! fracice is only used in code with origflag == 1. For this calculation, we use
- ! the version of icefrac that was used in this original hydrology code.
- if (h2osoi_ice(c,j) == 0._r8) then
- ! Avoid possible divide by zero (in case h2osoi_liq(c,j) is also 0)
- icefrac_orig = 0._r8
- else
- icefrac_orig = min(1._r8,h2osoi_ice(c,j)/(h2osoi_ice(c,j)+h2osoi_liq(c,j)))
- end if
- fracice(c,j) = max(0._r8,exp(-3._r8*(1._r8-icefrac_orig))- exp(-3._r8))/(1.0_r8-exp(-3._r8))
end do
end do
@@ -601,7 +688,6 @@ subroutine WaterTable(bounds, num_hydrologyc, filter_hydrologyc, &
real(r8) :: xs(bounds%begc:bounds%endc) ! water needed to bring soil moisture to watmin (mm)
real(r8) :: dzmm(bounds%begc:bounds%endc,1:nlevsoi) ! layer thickness (mm)
integer :: jwt(bounds%begc:bounds%endc) ! index of the soil layer right above the water table (-)
- real(r8) :: rsub_bot(bounds%begc:bounds%endc) ! subsurface runoff - bottom drainage (mm/s)
real(r8) :: rsub_top(bounds%begc:bounds%endc) ! subsurface runoff - topographic control (mm/s)
real(r8) :: xsi(bounds%begc:bounds%endc) ! excess soil water above saturation at layer i (mm)
real(r8) :: rous ! aquifer yield (-)
@@ -610,7 +696,6 @@ subroutine WaterTable(bounds, num_hydrologyc, filter_hydrologyc, &
real(r8) :: s_node ! soil wetness (-)
real(r8) :: dzsum ! summation of dzmm of layers below water table (mm)
real(r8) :: icefracsum ! summation of icefrac*dzmm of layers below water table (-)
- real(r8) :: fracice_rsub(bounds%begc:bounds%endc) ! fractional impermeability of soil layers (-)
real(r8) :: ka ! hydraulic conductivity of the aquifer (mm/s)
real(r8) :: available_h2osoi_liq ! available soil liquid water in a layer
real(r8) :: imped
@@ -657,7 +742,6 @@ subroutine WaterTable(bounds, num_hydrologyc, filter_hydrologyc, &
frost_table => soilhydrology_inst%frost_table_col , & ! Output: [real(r8) (:) ] frost table depth (m)
wa => waterstatebulk_inst%wa_col , & ! Output: [real(r8) (:) ] water in the unconfined aquifer (mm)
qcharge => soilhydrology_inst%qcharge_col , & ! Input: [real(r8) (:) ] aquifer recharge rate (mm/s)
- origflag => soilhydrology_inst%origflag , & ! Input: logical
qflx_drain => waterfluxbulk_inst%qflx_drain_col , & ! Output: [real(r8) (:) ] sub-surface runoff (mm H2O /s)
qflx_drain_perched => waterfluxbulk_inst%qflx_drain_perched_col , & ! Output: [real(r8) (:) ] perched wt sub-surface runoff (mm H2O /s)
@@ -792,8 +876,7 @@ subroutine WaterTable(bounds, num_hydrologyc, filter_hydrologyc, &
!=================== water table above frost table =============================
! if water table is above frost table, do not use topmodel baseflow formulation
- if (zwt(c) < frost_table(c) .and. t_soisno(c,k_frz) <= tfrz &
- .and. origflag == 0) then
+ if (zwt(c) < frost_table(c) .and. t_soisno(c,k_frz) <= tfrz) then
else
!=================== water table below frost table =============================
!-- compute possible perched water table *and* groundwater table afterwards
@@ -865,7 +948,6 @@ subroutine Drainage(bounds, num_hydrologyc, filter_hydrologyc, num_urbanc, filte
real(r8) :: xs(bounds%begc:bounds%endc) ! water needed to bring soil moisture to watmin (mm)
real(r8) :: dzmm(bounds%begc:bounds%endc,1:nlevsoi) ! layer thickness (mm)
integer :: jwt(bounds%begc:bounds%endc) ! index of the soil layer right above the water table (-)
- real(r8) :: rsub_bot(bounds%begc:bounds%endc) ! subsurface runoff - bottom drainage (mm/s)
real(r8) :: rsub_top(bounds%begc:bounds%endc) ! subsurface runoff - topographic control (mm/s)
real(r8) :: fff(bounds%begc:bounds%endc) ! decay factor (m-1)
real(r8) :: xsi(bounds%begc:bounds%endc) ! excess soil water above saturation at layer i (mm)
@@ -880,7 +962,6 @@ subroutine Drainage(bounds, num_hydrologyc, filter_hydrologyc, num_urbanc, filte
real(r8) :: s_node ! soil wetness (-)
real(r8) :: dzsum ! summation of dzmm of layers below water table (mm)
real(r8) :: icefracsum ! summation of icefrac*dzmm of layers below water table (-)
- real(r8) :: fracice_rsub(bounds%begc:bounds%endc) ! fractional impermeability of soil layers (-)
real(r8) :: ka ! hydraulic conductivity of the aquifer (mm/s)
real(r8) :: dza ! fff*(zwt-z(jwt)) (-)
real(r8) :: available_h2osoi_liq ! available soil liquid water in a layer
@@ -943,7 +1024,6 @@ subroutine Drainage(bounds, num_hydrologyc, filter_hydrologyc, num_urbanc, filte
wa => waterstatebulk_inst%wa_col , & ! Input: [real(r8) (:) ] water in the unconfined aquifer (mm)
ice => soilhydrology_inst%ice_col , & ! Input: [real(r8) (:,:) ] soil layer moisture (mm)
qcharge => soilhydrology_inst%qcharge_col , & ! Input: [real(r8) (:) ] aquifer recharge rate (mm/s)
- origflag => soilhydrology_inst%origflag , & ! Input: logical
h2osfcflag => soilhydrology_inst%h2osfcflag , & ! Input: integer
qflx_snwcp_liq => waterfluxbulk_inst%qflx_snwcp_liq_col , & ! Output: [real(r8) (:) ] excess liquid h2o due to snow capping (outgoing) (mm H2O /s) [+]
@@ -981,11 +1061,8 @@ subroutine Drainage(bounds, num_hydrologyc, filter_hydrologyc, num_urbanc, filte
do fc = 1, num_hydrologyc
c = filter_hydrologyc(fc)
qflx_drain(c) = 0._r8
- rsub_bot(c) = 0._r8
qflx_rsub_sat(c) = 0._r8
rsub_top(c) = 0._r8
- fracice_rsub(c) = 0._r8
-
end do
! The layer index of the first unsaturated layer, i.e., the layer right above
@@ -1039,8 +1116,7 @@ subroutine Drainage(bounds, num_hydrologyc, filter_hydrologyc, num_urbanc, filte
!=================== water table above frost table =============================
! if water table is above frost table, do not use topmodel baseflow formulation
- if (zwt(c) < frost_table(c) .and. t_soisno(c,k_frz) <= tfrz &
- .and. origflag == 0) then
+ if (zwt(c) < frost_table(c) .and. t_soisno(c,k_frz) <= tfrz) then
! compute drainage from perched saturated region
wtsub = 0._r8
q_perch = 0._r8
@@ -1130,9 +1206,6 @@ subroutine Drainage(bounds, num_hydrologyc, filter_hydrologyc, num_urbanc, filte
qflx_drain_perched(c) = q_perch_max * q_perch &
*(frost_table(c) - zwt_perched(c))
- ! no perched water table drainage if using original formulation
- if(origflag == 1) qflx_drain_perched(c) = 0._r8
-
! remove drainage from perched saturated layers
rsub_top_tot = - qflx_drain_perched(c) * dtime
do k = k_perch+1, k_frz
@@ -1168,25 +1241,15 @@ subroutine Drainage(bounds, num_hydrologyc, filter_hydrologyc, num_urbanc, filte
icefracsum = icefracsum + icefrac(c,j) * dzmm(c,j)
end do
! add ice impedance factor to baseflow
- if(origflag == 1) then
- if (use_vichydro) then
- call endrun(msg="VICHYDRO is not available for origflag=1"//errmsg(sourcefile, __LINE__))
- else
- fracice_rsub(c) = max(0._r8,exp(-3._r8*(1._r8-(icefracsum/dzsum))) &
- - exp(-3._r8))/(1.0_r8-exp(-3._r8))
- imped=(1._r8 - fracice_rsub(c))
- rsub_top_max = 5.5e-3_r8
- end if
+ if (use_vichydro) then
+ imped=10._r8**(-params_inst%e_ice*min(1.0_r8,ice(c,nlayer)/max_moist(c,nlayer)))
+ dsmax_tmp(c) = Dsmax(c) * dtime/ secspday !mm/day->mm/dtime
+ rsub_top_max = dsmax_tmp(c)
else
- if (use_vichydro) then
- imped=10._r8**(-params_inst%e_ice*min(1.0_r8,ice(c,nlayer)/max_moist(c,nlayer)))
- dsmax_tmp(c) = Dsmax(c) * dtime/ secspday !mm/day->mm/dtime
- rsub_top_max = dsmax_tmp(c)
- else
- imped=10._r8**(-params_inst%e_ice*(icefracsum/dzsum))
- rsub_top_max = 10._r8 * sin((rpi/180.) * col%topo_slope(c))
- end if
- endif
+ imped=10._r8**(-params_inst%e_ice*(icefracsum/dzsum))
+ rsub_top_max = 10._r8 * sin((rpi/180.) * col%topo_slope(c))
+ end if
+
if (use_vichydro) then
! ARNO model for the bottom soil layer (based on bottom soil layer
! moisture from previous time step
@@ -1525,7 +1588,7 @@ subroutine PerchedWaterTable(bounds, num_hydrologyc, filter_hydrologyc, &
! Calculate watertable, considering aquifer recharge but no drainage.
!
! !USES:
- use clm_varcon , only : pondmx, tfrz, watmin,denice,denh2o
+ use clm_varcon , only : tfrz, denice, denh2o
use column_varcon , only : icol_roof, icol_road_imperv
!
! !ARGUMENTS:
@@ -1537,19 +1600,15 @@ subroutine PerchedWaterTable(bounds, num_hydrologyc, filter_hydrologyc, &
type(soilhydrology_type) , intent(inout) :: soilhydrology_inst
type(soilstate_type) , intent(in) :: soilstate_inst
type(temperature_type) , intent(in) :: temperature_inst
- type(waterstatebulk_type) , intent(inout) :: waterstatebulk_inst
- type(waterfluxbulk_type) , intent(inout) :: waterfluxbulk_inst
+ type(waterstatebulk_type), intent(inout) :: waterstatebulk_inst
+ type(waterfluxbulk_type) , intent(inout) :: waterfluxbulk_inst
!
! !LOCAL VARIABLES:
- integer :: c,j,fc,i ! indices
- real(r8) :: s_y
- integer :: k,k_frz,k_perch,k_zwt
- real(r8) :: sat_lev
- real(r8) :: s1
- real(r8) :: s2
- real(r8) :: m
- real(r8) :: b
- integer :: sat_flag
+ integer :: c,j,fc,i ! indices
+ integer :: k,k_frz,k_perch,k_zwt ! indices
+ real(r8) :: s1, s2 ! temporary moisture values
+ real(r8) :: m, b ! slope and intercept
+    real(r8), parameter :: sat_lev = 0.9_r8 ! saturation value used to identify saturated layers
!-----------------------------------------------------------------------
associate( &
@@ -1564,8 +1623,7 @@ subroutine PerchedWaterTable(bounds, num_hydrologyc, filter_hydrologyc, &
watsat => soilstate_inst%watsat_col , & ! Input: [real(r8) (:,:) ] volumetric soil water at saturation (porosity)
zwt => soilhydrology_inst%zwt_col , & ! Output: [real(r8) (:) ] water table depth (m)
zwt_perched => soilhydrology_inst%zwt_perched_col , & ! Output: [real(r8) (:) ] perched water table depth (m)
- frost_table => soilhydrology_inst%frost_table_col , & ! Output: [real(r8) (:) ] frost table depth (m)
- origflag => soilhydrology_inst%origflag & ! Input: logical
+ frost_table => soilhydrology_inst%frost_table_col & ! Output: [real(r8) (:) ] frost table depth (m)
)
! calculate perched water table location
@@ -1594,16 +1652,13 @@ subroutine PerchedWaterTable(bounds, num_hydrologyc, filter_hydrologyc, &
!======= water table above frost table ===================
! if water table is above frost table, do nothing
- if (zwt(c) < frost_table(c) .and. t_soisno(c,k_frz) <= tfrz &
- .and. origflag == 0) then
+ if (zwt(c) < frost_table(c) .and. t_soisno(c,k_frz) <= tfrz) then
else if (k_frz > 1) then
!========== water table below frost table ============
! locate perched water table from bottom up starting at
! frost table sat_lev is an arbitrary saturation level
! used to determine perched water table
- sat_lev = 0.9
-
k_perch = 1
do k=k_frz,1,-1
h2osoi_vol(c,k) = h2osoi_liq(c,k)/(dz(c,k)*denh2o) &
@@ -1619,7 +1674,7 @@ subroutine PerchedWaterTable(bounds, num_hydrologyc, filter_hydrologyc, &
! and only compute perched water table if frozen
if (t_soisno(c,k_frz) > tfrz) k_perch=k_frz
- ! if perched water table exists
+ ! if perched water table exists above frost table,
! interpolate between k_perch and k_perch+1 to find
! perched water table height
if (k_frz > k_perch) then
@@ -1635,8 +1690,7 @@ subroutine PerchedWaterTable(bounds, num_hydrologyc, filter_hydrologyc, &
b=z(c,k_perch+1)-m*s2
zwt_perched(c)=max(0._r8,m*sat_lev+b)
endif
-
- endif !k_frz > k_perch
+ endif
endif
end do
@@ -1644,48 +1698,53 @@ subroutine PerchedWaterTable(bounds, num_hydrologyc, filter_hydrologyc, &
end subroutine PerchedWaterTable
-!#4
+!#4
!-----------------------------------------------------------------------
- subroutine PerchedLateralFlow(bounds, num_hydrologyc, filter_hydrologyc, &
- num_urbanc, filter_urbanc, soilhydrology_inst, soilstate_inst, &
- waterstatebulk_inst, waterfluxbulk_inst)
+ subroutine PerchedLateralFlow(bounds, num_hydrologyc, &
+ filter_hydrologyc, soilhydrology_inst, soilstate_inst, &
+ waterstatebulk_inst, waterfluxbulk_inst, wateratm2lndbulk_inst)
!
! !DESCRIPTION:
! Calculate subsurface drainage from perched saturated zone
!
! !USES:
use clm_varcon , only : pondmx, tfrz, watmin,rpi, secspday, nlvic
- use column_varcon , only : icol_roof, icol_road_imperv, icol_road_perv
+ use LandunitType , only : lun
+ use landunit_varcon , only : istsoil
+ use clm_varctl , only : use_hillslope_routing
!
! !ARGUMENTS:
- type(bounds_type) , intent(in) :: bounds
- integer , intent(in) :: num_hydrologyc ! number of column soil points in column filter
- integer , intent(in) :: num_urbanc ! number of column urban points in column filter
- integer , intent(in) :: filter_urbanc(:) ! column filter for urban points
- integer , intent(in) :: filter_hydrologyc(:) ! column filter for soil points
- type(soilstate_type) , intent(in) :: soilstate_inst
- type(soilhydrology_type) , intent(inout) :: soilhydrology_inst
- type(waterstatebulk_type) , intent(inout) :: waterstatebulk_inst
- type(waterfluxbulk_type) , intent(inout) :: waterfluxbulk_inst
+ type(bounds_type) , intent(in) :: bounds
+ integer , intent(in) :: num_hydrologyc ! number of column soil points in column filter
+ integer , intent(in) :: filter_hydrologyc(:) ! column filter for soil points
+ type(soilstate_type) , intent(in) :: soilstate_inst
+ type(soilhydrology_type) , intent(inout) :: soilhydrology_inst
+ type(waterstatebulk_type) , intent(inout) :: waterstatebulk_inst
+ type(waterfluxbulk_type) , intent(inout) :: waterfluxbulk_inst
+ type(wateratm2lndbulk_type), intent(in) :: wateratm2lndbulk_inst
!
! !LOCAL VARIABLES:
- character(len=32) :: subname = 'PerchedLateralFlow' ! subroutine name
- integer :: c,j,fc,i ! indices
- real(r8) :: dtime ! land model time step (sec)
- real(r8) :: wtsub ! summation of hk*dzmm for layers below water table (mm**2/s)
- real(r8) :: h2osoi_vol
- real(r8) :: drainage_tot
- real(r8) :: drainage_layer
- real(r8) :: s_y
- integer :: k
- integer :: k_frost(bounds%begc:bounds%endc)
- integer :: k_perch(bounds%begc:bounds%endc)
- real(r8) :: sat_lev
- real(r8) :: s1, s2, m, b
- real(r8) :: q_perch
- real(r8) :: q_perch_max
- !-----------------------------------------------------------------------
+    character(len=32) :: subname = 'PerchedLateralFlow' ! subroutine name
+ integer :: c,fc,k,l,g ! indices
+ real(r8) :: dtime ! land model time step (sec)
+ real(r8) :: drainage_tot ! total amount of drainage to be removed from the column (mm/s)
+ real(r8) :: drainage_layer ! amount of drainage to be removed from current layer (mm/s)
+ real(r8) :: s_y ! specific yield (unitless)
+ integer :: k_frost(bounds%begc:bounds%endc) ! indices identifying frost table layer
+ integer :: k_perch(bounds%begc:bounds%endc) ! indices identifying perched water table layer
+ real(r8) :: wtsub ! temporary variable
+ real(r8) :: q_perch ! transmissivity (mm2/s)
+ real(r8) :: q_perch_max ! baseflow coefficient
+ real(r8) :: stream_water_depth ! depth of water in stream channel (m)
+ real(r8) :: stream_channel_depth ! depth of stream channel (m)
+
+ real(r8) :: transmis ! transmissivity (m2/s)
+ real(r8) :: head_gradient ! head gradient (m/m)
+ real(r8), parameter :: k_anisotropic = 1._r8 ! anisotropy factor
+ integer :: c0, c_src, c_dst ! indices
+ real(r8) :: qflx_drain_perched_vol(bounds%begc:bounds%endc) ! volumetric lateral subsurface flow through active layer [m3/s]
+ real(r8) :: qflx_drain_perched_out(bounds%begc:bounds%endc) ! lateral subsurface flow through active layer [mm/s]
associate( &
nbedrock => col%nbedrock , & ! Input: [real(r8) (:,:) ] depth to bedrock (m)
@@ -1700,7 +1759,11 @@ subroutine PerchedLateralFlow(bounds, num_hydrologyc, filter_hydrologyc, &
frost_table => soilhydrology_inst%frost_table_col , & ! Input: [real(r8) (:) ] frost table depth (m)
zwt => soilhydrology_inst%zwt_col , & ! Input: [real(r8) (:) ] water table depth (m)
zwt_perched => soilhydrology_inst%zwt_perched_col , & ! Input: [real(r8) (:) ] perched water table depth (m)
-
+ tdepth => wateratm2lndbulk_inst%tdepth_grc , & ! Input: [real(r8) (:) ] depth of water in tributary channels (m)
+ tdepth_bankfull => wateratm2lndbulk_inst%tdepthmax_grc , & ! Input: [real(r8) (:) ] bankfull depth of tributary channels (m)
+ stream_water_volume => waterstatebulk_inst%stream_water_volume_lun , & ! Input: [real(r8) (:) ] stream water volume (m3)
+
+
qflx_drain_perched => waterfluxbulk_inst%qflx_drain_perched_col , & ! Output: [real(r8) (:) ] perched wt sub-surface runoff (mm H2O /s)
h2osoi_liq => waterstatebulk_inst%h2osoi_liq_col , & ! Output: [real(r8) (:,:) ] liquid water (kg/m2)
@@ -1716,14 +1779,14 @@ subroutine PerchedLateralFlow(bounds, num_hydrologyc, filter_hydrologyc, &
c = filter_hydrologyc(fc)
k_frost(c) = nbedrock(c)
k_perch(c) = nbedrock(c)
- do k = 1, nbedrock(c)
+ do k = 1,nbedrock(c)
if (frost_table(c) >= zi(c,k-1) .and. frost_table(c) < zi(c,k)) then
k_frost(c) = k
exit
endif
enddo
- do k = 1, nbedrock(c)
+ do k = 1,nbedrock(c)
if (zwt_perched(c) >= zi(c,k-1) .and. zwt_perched(c) < zi(c,k)) then
k_perch(c) = k
exit
@@ -1734,48 +1797,183 @@ subroutine PerchedLateralFlow(bounds, num_hydrologyc, filter_hydrologyc, &
! compute drainage from perched saturated region
do fc = 1, num_hydrologyc
c = filter_hydrologyc(fc)
+ l = col%landunit(c)
+ g = col%gridcell(c)
+ qflx_drain_perched(c) = 0._r8
+ qflx_drain_perched_out(c) = 0._r8
+ qflx_drain_perched_vol(c) = 0._r8
- qflx_drain_perched(c) = 0._r8
if (frost_table(c) > zwt_perched(c)) then
+ ! Hillslope columns
+ if (col%is_hillslope_column(c) .and. col%active(c)) then
+
+ ! calculate head gradient
+
+ if (head_gradient_method == kinematic) then
+ ! kinematic wave approximation
+ head_gradient = col%hill_slope(c)
+ else if (head_gradient_method == darcy) then
+ ! darcy's law
+ if (col%cold(c) /= ispval) then
+ head_gradient = (col%hill_elev(c)-zwt_perched(c)) &
+ - (col%hill_elev(col%cold(c))-zwt_perched(col%cold(c)))
+ head_gradient = head_gradient / (col%hill_distance(c) - col%hill_distance(col%cold(c)))
+ else
+ if (use_hillslope_routing) then
+ stream_water_depth = stream_water_volume(l) &
+ /lun%stream_channel_length(l)/lun%stream_channel_width(l)
+ stream_channel_depth = lun%stream_channel_depth(l)
+ else
+ stream_water_depth = tdepth(g)
+ stream_channel_depth = tdepth_bankfull(g)
+ endif
+
+ ! flow between channel and lowest column
+ ! bankfull height is defined to be zero
+ head_gradient = (col%hill_elev(c)-zwt_perched(c)) &
+ ! ignore overbankfull storage
+ - max(min((stream_water_depth - stream_channel_depth),0._r8), &
+ (col%hill_elev(c)-frost_table(c)))
+
+ head_gradient = head_gradient / (col%hill_distance(c))
+
+ ! head_gradient cannot be negative when channel is empty
+ if (stream_water_depth <= 0._r8) then
+ head_gradient = max(head_gradient, 0._r8)
+ endif
+ endif
+ else
+ call endrun(msg="head_gradient_method must be kinematic or darcy"//errmsg(sourcefile, __LINE__))
+ endif
- ! specify maximum drainage rate
- q_perch_max = params_inst%perched_baseflow_scalar &
- * sin(col%topo_slope(c) * (rpi/180._r8))
+ ! Determine source and destination columns
+ if (head_gradient >= 0._r8) then
+ c_src = c
+ c_dst = col%cold(c)
+ else
+ c_src = col%cold(c)
+ c_dst = c
+ endif
- wtsub = 0._r8
- q_perch = 0._r8
- do k = k_perch(c), k_frost(c)-1
- q_perch = q_perch + hksat(c,k)*dz(c,k)
- wtsub = wtsub + dz(c,k)
- end do
- if (wtsub > 0._r8) q_perch = q_perch/wtsub
+ ! Calculate transmissivity of source column
+ transmis = 0._r8
+
+ if (transmissivity_method == layersum) then
+ if (head_gradient_method == kinematic) then
+ if(k_perch(c_src) < k_frost(c_src)) then
+ do k = k_perch(c_src), k_frost(c_src)-1
+ if(k == k_perch(c_src)) then
+ transmis = transmis + 1.e-3_r8*hksat(c_src,k)*(zi(c_src,k) - zwt_perched(c_src))
+ else
+ transmis = transmis + 1.e-3_r8*hksat(c_src,k)*dz(c_src,k)
+ endif
+ enddo
+ endif
+ else if (head_gradient_method == darcy) then
+ if(c_src == ispval) then
+ ! lowland, losing stream (c_src == ispval)
+ ! use hksat of c_dst for transmissivity
+ transmis = (1.e-3_r8*hksat(c,k_perch(c_dst)))*stream_water_depth
+ else
+ ! if k_perch equals k_frost, no perched saturated zone exists
+ if(k_perch(c_src) < k_frost(c_src)) then
+ do k = k_perch(c_src), k_frost(c_src)-1
+ if(k == k_perch(c_src)) then
+ transmis = transmis + 1.e-3_r8*hksat(c_src,k)*(zi(c_src,k) - zwt_perched(c_src))
+ else
+ if(c_dst == ispval) then
+ ! lowland, gaining stream
+ ! only include layers above stream channel bottom
+ if ((col%hill_elev(c_src)-z(c_src,k)) > (-stream_channel_depth)) then
+
+ transmis = transmis + 1.e-3_r8*hksat(c_src,k)*dz(c_src,k)
+ endif
+ else
+ ! uplands
+ ! only include layers above dst water table elevation
+ if ((col%hill_elev(c_src)-z(c_src,k)) > (col%hill_elev(c_dst) - zwt_perched(c_dst))) then
+
+ transmis = transmis + 1.e-3_r8*hksat(c_src,k)*dz(c_src,k)
+ endif
+ endif
+ endif
+ enddo
+ endif
+ endif
+ endif
+ else if (transmissivity_method == uniform_transmissivity) then
+ ! constant conductivity based on shallowest saturated layer hydraulic conductivity
+ transmis = (1.e-3_r8*hksat(c_src,k_perch(c_src))) &
+ *(zi(c_src,k_frost(c_src)) - zwt_perched(c_src) )
+ endif
- qflx_drain_perched(c) = q_perch_max * q_perch &
- *(frost_table(c) - zwt_perched(c))
+ ! adjust by 'anisotropy factor'
+ transmis = k_anisotropic*transmis
+
+ qflx_drain_perched_vol(c) = transmis*col%hill_width(c)*head_gradient
+ qflx_drain_perched_out(c) = 1.e3_r8*(qflx_drain_perched_vol(c)/col%hill_area(c))
+
+ else
+ ! Non-hillslope columns
+ ! specify maximum drainage rate
+ q_perch_max = params_inst%perched_baseflow_scalar &
+ * sin(col%topo_slope(c) * (rpi/180._r8))
+
+ wtsub = 0._r8
+ q_perch = 0._r8
+ ! this should be consistent with hillslope and k_perch=k_frost means no
+             ! saturated zone; should probably change q_perch to transmis and change
+ ! units and q_perch_max
+ do k = k_perch(c), k_frost(c)-1
+ q_perch = q_perch + hksat(c,k)*dz(c,k)
+ wtsub = wtsub + dz(c,k)
+ end do
+ if (wtsub > 0._r8) q_perch = q_perch/wtsub
+
+ qflx_drain_perched_out(c) = q_perch_max * q_perch &
+ *(frost_table(c) - zwt_perched(c))
+ endif
endif
+
enddo
+ ! compute net drainage from perched saturated region
+ do fc = 1, num_hydrologyc
+ c = filter_hydrologyc(fc)
+ ! drainage-out
+ qflx_drain_perched(c) = qflx_drain_perched(c) + qflx_drain_perched_out(c)
+ if (col%is_hillslope_column(c) .and. col%active(c)) then
+ ! drainage-in
+ if (col%cold(c) /= ispval) then
+ qflx_drain_perched(col%cold(c)) = &
+ qflx_drain_perched(col%cold(c)) - &
+ 1.e3_r8*(qflx_drain_perched_vol(c))/col%hill_area(col%cold(c))
+ endif
+ endif
+ enddo
+
! remove drainage from soil moisture storage
do fc = 1, num_hydrologyc
c = filter_hydrologyc(fc)
! remove drainage from perched saturated layers
- drainage_tot = qflx_drain_perched(c) * dtime
-
+ drainage_tot = qflx_drain_perched(c) * dtime
+ ! ignore frozen layer (k_frost)
do k = k_perch(c), k_frost(c)-1
+
s_y = watsat(c,k) &
* ( 1. - (1.+1.e3*zwt_perched(c)/sucsat(c,k))**(-1./bsw(c,k)))
s_y=max(s_y,params_inst%aq_sp_yield_min)
-
- if (k == k_perch(c)) then
+ if (k==k_perch(c)) then
drainage_layer=min(drainage_tot,(s_y*(zi(c,k) - zwt_perched(c))*1.e3))
else
drainage_layer=min(drainage_tot,(s_y*(dz(c,k))*1.e3))
endif
-
+
drainage_layer=max(drainage_layer,0._r8)
drainage_tot = drainage_tot - drainage_layer
h2osoi_liq(c,k) = h2osoi_liq(c,k) - drainage_layer
+
enddo
! if drainage_tot is greater than available water
@@ -1886,17 +2084,24 @@ end subroutine ThetaBasedWaterTable
!#6
!-----------------------------------------------------------------------
- subroutine LateralFlowPowerLaw(bounds, num_hydrologyc, filter_hydrologyc, &
+ subroutine SubsurfaceLateralFlow(bounds, &
+ num_hydrologyc, filter_hydrologyc, &
num_urbanc, filter_urbanc,soilhydrology_inst, soilstate_inst, &
- waterstatebulk_inst, waterfluxbulk_inst)
+ waterstatebulk_inst, waterfluxbulk_inst, wateratm2lndbulk_inst)
!
! !DESCRIPTION:
! Calculate subsurface drainage
!
! !USES:
- use clm_varcon , only : pondmx, watmin,rpi, secspday, nlvic
- use column_varcon , only : icol_roof, icol_road_imperv, icol_road_perv
- use GridcellType , only : grc
+ use clm_time_manager , only : get_step_size
+ use clm_varpar , only : nlevsoi, nlevgrnd, nlayer, nlayert
+ use clm_varctl , only : nhillslope
+ use clm_varcon , only : pondmx, watmin,rpi, secspday
+ use column_varcon , only : icol_road_perv
+ use abortutils , only : endrun
+ use GridcellType , only : grc
+ use landunit_varcon , only : istsoil, istcrop
+ use clm_varctl , only : use_hillslope_routing
!
! !ARGUMENTS:
@@ -1906,47 +2111,46 @@ subroutine LateralFlowPowerLaw(bounds, num_hydrologyc, filter_hydrologyc, &
integer , intent(in) :: filter_urbanc(:) ! column filter for urban points
integer , intent(in) :: filter_hydrologyc(:) ! column filter for soil points
type(soilstate_type) , intent(in) :: soilstate_inst
+ type(wateratm2lndbulk_type) , intent(in) :: wateratm2lndbulk_inst
type(soilhydrology_type) , intent(inout) :: soilhydrology_inst
- type(waterstatebulk_type) , intent(inout) :: waterstatebulk_inst
- type(waterfluxbulk_type) , intent(inout) :: waterfluxbulk_inst
+ type(waterstatebulk_type), intent(inout) :: waterstatebulk_inst
+ type(waterfluxbulk_type) , intent(inout) :: waterfluxbulk_inst
+
!
! !LOCAL VARIABLES:
- character(len=32) :: subname = 'Drainage' ! subroutine name
- integer :: c,j,fc,i ! indices
+ character(len=32) :: subname = 'SubsurfaceLateralFlow' ! subroutine name
+ integer :: c,j,fc,i,l,g ! indices
real(r8) :: dtime ! land model time step (sec)
real(r8) :: xs(bounds%begc:bounds%endc) ! water needed to bring soil moisture to watmin (mm)
real(r8) :: dzmm(bounds%begc:bounds%endc,1:nlevsoi) ! layer thickness (mm)
integer :: jwt(bounds%begc:bounds%endc) ! index of the soil layer right above the water table (-)
- real(r8) :: rsub_bot(bounds%begc:bounds%endc) ! subsurface runoff - bottom drainage (mm/s)
- real(r8) :: rsub_top(bounds%begc:bounds%endc) ! subsurface runoff - topographic control (mm/s)
+ real(r8) :: drainage(bounds%begc:bounds%endc) ! subsurface drainage (mm/s)
real(r8) :: xsi(bounds%begc:bounds%endc) ! excess soil water above saturation at layer i (mm)
- real(r8) :: xsia(bounds%begc:bounds%endc) ! available pore space at layer i (mm)
real(r8) :: xs1(bounds%begc:bounds%endc) ! excess soil water above saturation at layer 1 (mm)
- real(r8) :: smpfz(1:nlevsoi) ! matric potential of layer right above water table (mm)
- real(r8) :: wtsub ! summation of hk*dzmm for layers below water table (mm**2/s)
real(r8) :: dzsum ! summation of dzmm of layers below water table (mm)
real(r8) :: icefracsum ! summation of icefrac*dzmm of layers below water table (-)
- real(r8) :: fracice_rsub(bounds%begc:bounds%endc) ! fractional impermeability of soil layers (-)
+ real(r8) :: ice_imped_col(bounds%begc:bounds%endc) ! column average hydraulic conductivity reduction due to presence of soil ice (-)
+ real(r8) :: ice_imped(bounds%begc:bounds%endc,1:nlevsoi) ! hydraulic conductivity reduction due to presence of soil ice (-)
real(r8) :: available_h2osoi_liq ! available soil liquid water in a layer
- real(r8) :: h2osoi_vol
- real(r8) :: imped
- real(r8) :: rsub_top_tot
- real(r8) :: rsub_top_layer
- real(r8) :: theta_unsat
- real(r8) :: f_unsat
- real(r8) :: s_y
- integer :: k
- real(r8) :: s1
- real(r8) :: s2
- real(r8) :: m
- real(r8) :: b
- real(r8) :: vol_ice
- real(r8) :: dsmax_tmp(bounds%begc:bounds%endc) ! temporary variable for ARNO subsurface runoff calculation
- real(r8) :: rsub_tmp ! temporary variable for ARNO subsurface runoff calculation
- real(r8) :: frac ! temporary variable for ARNO subsurface runoff calculation
- real(r8) :: rel_moist ! relative moisture, temporary variable
- real(r8) :: wtsub_vic ! summation of hk*dzmm for layers in the third VIC layer
- integer :: g
+ real(r8) :: h2osoi_vol ! volumetric water content (mm3/mm3)
+ real(r8) :: drainage_tot ! total drainage to be removed from column (mm)
+ real(r8) :: drainage_layer ! drainage to be removed from current layer (mm)
+ real(r8) :: s_y ! specific yield (unitless)
+ real(r8) :: vol_ice ! volumetric ice content (mm3/mm3)
+ logical, parameter :: no_lateral_flow = .false. ! flag for testing
+ real(r8) :: transmis ! transmissivity (m2/s)
+ real(r8) :: head_gradient ! hydraulic head gradient (m/m)
+ real(r8) :: stream_water_depth ! depth of water in stream channel (m)
+ real(r8) :: stream_channel_depth ! depth of stream channel (m)
+ real(r8) :: available_stream_water ! stream water (m3)
+ real(r8), parameter :: n_baseflow = 1 ! drainage power law exponent
+ real(r8), parameter :: k_anisotropic = 1._r8 ! anisotropy scalar
+ real(r8) :: qflx_latflow_out_vol(bounds%begc:bounds%endc) ! volumetric lateral flow (m3/s)
+ real(r8) :: qflx_net_latflow(bounds%begc:bounds%endc) ! net lateral flow in column (mm/s)
+ real(r8) :: qflx_latflow_avg(bounds%begc:bounds%endc) ! average lateral flow (mm/s)
+ real(r8) :: larea ! area of hillslope in landunit
+ integer :: c0, c_src, c_dst ! indices
+
!-----------------------------------------------------------------------
associate( &
@@ -1962,28 +2166,21 @@ subroutine LateralFlowPowerLaw(bounds, num_hydrologyc, filter_hydrologyc, &
watsat => soilstate_inst%watsat_col , & ! Input: [real(r8) (:,:) ] volumetric soil water at saturation (porosity)
eff_porosity => soilstate_inst%eff_porosity_col , & ! Input: [real(r8) (:,:) ] effective porosity = porosity - vol_ice
hk_l => soilstate_inst%hk_l_col , & ! Input: [real(r8) (:,:) ] hydraulic conductivity (mm/s)
+ qflx_latflow_out => waterfluxbulk_inst%qflx_latflow_out_col, & ! Output: [real(r8) (:) ] lateral saturated outflow (mm/s)
+ qflx_latflow_in => waterfluxbulk_inst%qflx_latflow_in_col, & ! Output: [real(r8) (:) ] lateral saturated inflow (mm/s)
+ volumetric_discharge => waterfluxbulk_inst%volumetric_discharge_col , & ! Output: [real(r8) (:) ] discharge from column (m3/s)
+
+ tdepth => wateratm2lndbulk_inst%tdepth_grc , & ! Input: [real(r8) (:) ] depth of water in tributary channels (m)
+ tdepth_bankfull => wateratm2lndbulk_inst%tdepthmax_grc , & ! Input: [real(r8) (:) ] bankfull depth of tributary channels (m)
depth => soilhydrology_inst%depth_col , & ! Input: [real(r8) (:,:) ] VIC soil depth
- c_param => soilhydrology_inst%c_param_col , & ! Input: [real(r8) (:) ] baseflow exponent (Qb)
- Dsmax => soilhydrology_inst%dsmax_col , & ! Input: [real(r8) (:) ] max. velocity of baseflow (mm/day)
- max_moist => soilhydrology_inst%max_moist_col , & ! Input: [real(r8) (:,:) ] maximum soil moisture (ice + liq)
- moist => soilhydrology_inst%moist_col , & ! Input: [real(r8) (:,:) ] soil layer moisture (mm)
- Ds => soilhydrology_inst%ds_col , & ! Input: [real(r8) (:) ] fracton of Dsmax where non-linear baseflow begins
- Wsvic => soilhydrology_inst%Wsvic_col , & ! Input: [real(r8) (:) ] fraction of maximum soil moisutre where non-liear base flow occurs
icefrac => soilhydrology_inst%icefrac_col , & ! Output: [real(r8) (:,:) ] fraction of ice in layer
frost_table => soilhydrology_inst%frost_table_col , & ! Input: [real(r8) (:) ] frost table depth (m)
zwt => soilhydrology_inst%zwt_col , & ! Input: [real(r8) (:) ] water table depth (m)
- wa => waterstatebulk_inst%wa_col , & ! Input: [real(r8) (:) ] water in the unconfined aquifer (mm)
- ice => soilhydrology_inst%ice_col , & ! Input: [real(r8) (:,:) ] soil layer moisture (mm)
- qcharge => soilhydrology_inst%qcharge_col , & ! Input: [real(r8) (:) ] aquifer recharge rate (mm/s)
- origflag => soilhydrology_inst%origflag , & ! Input: logical
- h2osfcflag => soilhydrology_inst%h2osfcflag , & ! Input: integer
+ stream_water_volume => waterstatebulk_inst%stream_water_volume_lun, & ! Input: [real(r8) (:) ] stream water volume (m3)
qflx_snwcp_liq => waterfluxbulk_inst%qflx_snwcp_liq_col , & ! Output: [real(r8) (:) ] excess rainfall due to snow capping (mm H2O /s) [+]
qflx_ice_runoff_xs => waterfluxbulk_inst%qflx_ice_runoff_xs_col , & ! Output: [real(r8) (:) ] solid runoff from excess ice in soil (mm H2O /s) [+]
- qflx_liqdew_to_top_layer => waterfluxbulk_inst%qflx_liqdew_to_top_layer_col , & ! Output: [real(r8) (:) ] rate of liquid water deposited on top soil or snow layer (dew) (mm H2O /s) [+]
- qflx_soliddew_to_top_layer => waterfluxbulk_inst%qflx_soliddew_to_top_layer_col , & ! Output: [real(r8) (:) ] rate of solid water deposited on top soil or snow layer (frost) (mm H2O /s) [+]
- qflx_solidevap_from_top_layer => waterfluxbulk_inst%qflx_solidevap_from_top_layer_col, & ! Output: [real(r8) (:) ] rate of ice evaporated from top soil or snow layer (sublimation) (mm H2O /s) [+]
qflx_drain => waterfluxbulk_inst%qflx_drain_col , & ! Output: [real(r8) (:) ] sub-surface runoff (mm H2O /s)
qflx_qrgwl => waterfluxbulk_inst%qflx_qrgwl_col , & ! Output: [real(r8) (:) ] qflx_surf at glaciers, wetlands, lakes (mm H2O /s)
qflx_rsub_sat => waterfluxbulk_inst%qflx_rsub_sat_col , & ! Output: [real(r8) (:) ] soil saturation excess [mm h2o/s]
@@ -2003,7 +2200,8 @@ subroutine LateralFlowPowerLaw(bounds, num_hydrologyc, filter_hydrologyc, &
dzmm(c,j) = dz(c,j)*1.e3_r8
vol_ice = min(watsat(c,j), h2osoi_ice(c,j)/(dz(c,j)*denice))
- icefrac(c,j) = min(1._r8,vol_ice/watsat(c,j))
+ icefrac(c,j) = min(1._r8,vol_ice/watsat(c,j))
+ ice_imped(c,j)=10._r8**(-params_inst%e_ice*icefrac(c,j))
end do
end do
@@ -2012,80 +2210,298 @@ subroutine LateralFlowPowerLaw(bounds, num_hydrologyc, filter_hydrologyc, &
do fc = 1, num_hydrologyc
c = filter_hydrologyc(fc)
qflx_drain(c) = 0._r8
- rsub_bot(c) = 0._r8
qflx_rsub_sat(c) = 0._r8
- rsub_top(c) = 0._r8
- fracice_rsub(c) = 0._r8
- end do
-
- ! The layer index of the first unsaturated layer,
- ! i.e., the layer right above the water table
-
- do fc = 1, num_hydrologyc
- c = filter_hydrologyc(fc)
- jwt(c) = nlevsoi
- ! allow jwt to equal zero when zwt is in top layer
- do j = 1,nlevsoi
- if(zwt(c) <= zi(c,j)) then
- jwt(c) = j-1
- exit
- end if
- enddo
- end do
-
- !-- Topographic runoff -------------------------
- do fc = 1, num_hydrologyc
- c = filter_hydrologyc(fc)
-
- dzsum = 0._r8
- icefracsum = 0._r8
- do j = max(jwt(c),1), nlevsoi
- dzsum = dzsum + dzmm(c,j)
- icefracsum = icefracsum + icefrac(c,j) * dzmm(c,j)
- end do
- imped=10._r8**(-params_inst%e_ice*(icefracsum/dzsum))
- !@@
- ! baseflow is power law expression relative to bedrock layer
- if(zwt(c) <= zi(c,nbedrock(c))) then
- rsub_top(c) = imped * baseflow_scalar * tan(rpi/180._r8*col%topo_slope(c))* &
- (zi(c,nbedrock(c)) - zwt(c))**(params_inst%n_baseflow)
- else
- rsub_top(c) = 0._r8
- endif
-
- !-- Now remove water via rsub_top
- rsub_top_tot = - rsub_top(c)* dtime
-
- !should never be positive... but include for completeness
- if(rsub_top_tot > 0.) then !rising water table
-
- call endrun(subgrid_index=c, subgrid_level=subgrid_level_column, &
- msg="RSUB_TOP IS POSITIVE in Drainage!"//errmsg(sourcefile, __LINE__))
-
+ drainage(c) = 0._r8
+ qflx_latflow_in(c) = 0._r8
+ qflx_latflow_out(c) = 0._r8
+ qflx_net_latflow(c) = 0._r8
+ volumetric_discharge(c) = 0._r8
+ qflx_latflow_out_vol(c) = 0._r8
+ end do
+
+ ! The layer index of the first unsaturated layer,
+ ! i.e., the layer right above the water table
+
+ do fc = 1, num_hydrologyc
+ c = filter_hydrologyc(fc)
+ jwt(c) = nlevsoi
+ ! allow jwt to equal zero when zwt is in top layer
+ do j = 1,nlevsoi
+ if(zwt(c) <= zi(c,j)) then
+ jwt(c) = j-1
+ exit
+ end if
+ enddo
+ end do
+
+ ! Calculate ice impedance factor (after jwt calculated)
+ do fc = 1, num_hydrologyc
+ c = filter_hydrologyc(fc)
+ dzsum = 0._r8
+ icefracsum = 0._r8
+ do j = max(jwt(c),1), nlevsoi
+ dzsum = dzsum + dzmm(c,j)
+ icefracsum = icefracsum + icefrac(c,j) * dzmm(c,j)
+ end do
+ ice_imped_col(c)=10._r8**(-params_inst%e_ice*(icefracsum/dzsum))
+ enddo
+
+ do fc = 1, num_hydrologyc
+ c = filter_hydrologyc(fc)
+ l = col%landunit(c)
+ g = col%gridcell(c)
+ ! Hillslope columns
+ if (col%is_hillslope_column(c) .and. col%active(c)) then
+
+ ! method for calculating head gradient
+ if (head_gradient_method == kinematic) then
+ head_gradient = col%hill_slope(c)
+ else if (head_gradient_method == darcy) then
+ if (col%cold(c) /= ispval) then
+ head_gradient = (col%hill_elev(c)-zwt(c)) &
+ - (col%hill_elev(col%cold(c))-zwt(col%cold(c)))
+ head_gradient = head_gradient / (col%hill_distance(c) - col%hill_distance(col%cold(c)))
+ else
+ if (use_hillslope_routing) then
+ stream_water_depth = stream_water_volume(l) &
+ /lun%stream_channel_length(l)/lun%stream_channel_width(l)
+ stream_channel_depth = lun%stream_channel_depth(l)
+ else
+ stream_water_depth = tdepth(g)
+ stream_channel_depth = tdepth_bankfull(g)
+ endif
+
+ ! flow between channel and lowest column
+ ! bankfull height is defined to be zero
+ head_gradient = (col%hill_elev(c)-zwt(c)) &
+ ! ignore overbankfull storage
+ - min((stream_water_depth - stream_channel_depth),0._r8)
+
+ head_gradient = head_gradient / (col%hill_distance(c))
+ ! head_gradient cannot be negative when channel is empty
+ if (stream_water_depth <= 0._r8) then
+ head_gradient = max(head_gradient, 0._r8)
+ endif
+ ! add vertical drainage for losing streams
+ ! (this could be a separate term from lateral flow...)
+ if (head_gradient < 0._r8) then
+ ! head_gradient = head_gradient - 1._r8
+ ! adjust lateral gradient w/ k_anisotropic
+ head_gradient = head_gradient - 1._r8/k_anisotropic
+ endif
+ endif
+ else
+ call endrun(msg="head_gradient_method must be kinematic or darcy"//errmsg(sourcefile, __LINE__))
+ end if
+
+ !scs: in cases of bad data, where hand differences in
+ ! adjacent bins are very large, cap maximum head_gradient
+ ! should a warning be used instead?
+ head_gradient = min(max(head_gradient,-2._r8),2._r8)
+
+ ! Determine source and destination columns
+ if (head_gradient >= 0._r8) then
+ c_src = c
+ c_dst = col%cold(c)
+ else
+ c_src = col%cold(c)
+ c_dst = c
+ endif
+
+ ! Calculate transmissivity of source column
+ transmis = 0._r8
+ if(c_src /= ispval) then
+ ! transmissivity non-zero only when saturated conditions exist
+ if(zwt(c_src) <= zi(c_src,nbedrock(c_src))) then
+ ! sum of layer transmissivities
+ if (transmissivity_method == layersum) then
+ do j = jwt(c_src)+1, nbedrock(c_src)
+ if(j == jwt(c_src)+1) then
+ transmis = transmis + 1.e-3_r8*ice_imped(c_src,j)*hksat(c_src,j)*(zi(c_src,j) - zwt(c_src))
+ else
+ if(c_dst == ispval) then
+ ! lowland, gaining stream
+ ! only include layers above stream channel bottom
+ if ((col%hill_elev(c_src)-z(c_src,j)) > (-stream_channel_depth)) then
+
+ transmis = transmis + 1.e-3_r8*ice_imped(c_src,j)*hksat(c_src,j)*dz(c_src,j)
+ endif
+ else
+ ! uplands
+ if ((col%hill_elev(c_src)-z(c_src,j)) > (col%hill_elev(c_dst) - zwt(c_dst))) then
+ transmis = transmis + 1.e-3_r8*ice_imped(c_src,j)*hksat(c_src,j)*dz(c_src,j)
+ endif
+ endif
+ endif
+ end do
+ ! constant conductivity based on shallowest saturated layer hk
+ else if (transmissivity_method == uniform_transmissivity) then
+ transmis = (1.e-3_r8*ice_imped(c_src,jwt(c_src)+1)*hksat(c_src,jwt(c_src)+1)) &
+ *(zi(c_src,nbedrock(c_src)) - zwt(c_src) )
+ else
+ call endrun(msg="transmissivity_method must be LayerSum or Uniform"//errmsg(sourcefile, __LINE__))
+ endif
+ endif
+ else
+ ! transmissivity of losing stream (c_src == ispval)
+ transmis = (1.e-3_r8*ice_imped(c,jwt(c)+1)*hksat(c,jwt(c)+1))*stream_water_depth
+ endif
+ ! adjust transmissivity by 'anisotropy factor'
+ transmis = k_anisotropic*transmis
+
+ ! the qflx_latflow_out_vol calculations use the
+ ! transmissivity to determine whether saturated flow
+ ! conditions exist, b/c gradients will be nonzero
+ ! even when no saturated layers are present
+ ! qflx_latflow_out_vol(c) = ice_imped(c)*transmis*col%hill_width(c)*head_gradient
+ ! include ice impedance in transmissivity
+ qflx_latflow_out_vol(c) = transmis*col%hill_width(c)*head_gradient
+
+ ! When head gradient is negative (losing stream channel),
+ ! limit outflow by available stream channel water
+ if (use_hillslope_routing .and. (qflx_latflow_out_vol(c) < 0._r8)) then
+ available_stream_water = stream_water_volume(l)/lun%stream_channel_number(l)/nhillslope
+ if(abs(qflx_latflow_out_vol(c))*dtime > available_stream_water) then
+ qflx_latflow_out_vol(c) = -available_stream_water/dtime
+ endif
+ endif
+
+ ! volumetric_discharge from lowest column is qflx_latflow_out_vol
+ ! scaled by total area of column in gridcell divided by column area
+ if (col%cold(c) == ispval) then
+ volumetric_discharge(c) = qflx_latflow_out_vol(c) &
+ *(grc%area(g)*1.e6_r8*col%wtgcell(c)/col%hill_area(c))
+ endif
+
+ ! convert volumetric flow to equivalent flux
+ qflx_latflow_out(c) = 1.e3_r8*qflx_latflow_out_vol(c)/col%hill_area(c)
+
+ ! hilltop column has no inflow
+ if (col%colu(c) == ispval) then
+ qflx_latflow_in(c) = 0._r8
+ endif
+
+ ! current outflow is inflow to downhill column normalized by downhill area
+ if (col%cold(c) /= ispval) then
+ qflx_latflow_in(col%cold(c)) = qflx_latflow_in(col%cold(c)) + &
+ 1.e3_r8*qflx_latflow_out_vol(c)/col%hill_area(col%cold(c))
+ endif
+
+ else
+ ! Non-hillslope columns
+ ! baseflow is power law expression relative to bedrock layer
+ if(zwt(c) <= zi(c,nbedrock(c))) then
+ qflx_latflow_out(c) = ice_imped_col(c) * baseflow_scalar &
+ * tan(rpi/180._r8*col%topo_slope(c))* &
+ (zi(c,nbedrock(c)) - zwt(c))**(params_inst%n_baseflow)
+ endif
+ ! convert flux to volumetric flow
+ qflx_latflow_out_vol(c) = 1.e-3_r8*qflx_latflow_out(c)*(grc%area(g)*1.e6_r8*col%wtgcell(c))
+ volumetric_discharge(c) = qflx_latflow_out_vol(c)
+ endif
+ enddo
+
+ ! recalculate average flux for no-lateral flow case
+ if(no_lateral_flow) then
+ if (head_gradient_method /= kinematic) then
+ call endrun(msg="head_gradient_method must be kinematic for no_lateral_flow = .true.! "//errmsg(sourcefile, __LINE__))
+ endif
+ do fc = 1, num_hydrologyc
+ c = filter_hydrologyc(fc)
+ if (col%is_hillslope_column(c) .and. col%active(c)) then
+ l = col%landunit(c)
+ !need to sum all columns w/ same hillslope id for each column
+ qflx_latflow_avg(c) = 0._r8
+ larea = 0._r8
+ do c0 = lun%coli(l), lun%colf(l)
+ if(col%hillslope_ndx(c0) == col%hillslope_ndx(c)) then
+ qflx_latflow_avg(c) = qflx_latflow_avg(c) + qflx_latflow_out_vol(c0)
+ larea = larea + col%hill_area(c0)
+ endif
+ enddo
+ qflx_latflow_avg(c) = 1.e3_r8*qflx_latflow_avg(c)/larea
+ else
+ qflx_latflow_avg(c) = qflx_latflow_out(c)
+ endif
+ enddo
+ endif
+
+ !-- Topographic runoff -------------------------
+ do fc = 1, num_hydrologyc
+ c = filter_hydrologyc(fc)
+
+ ! net lateral flow (positive out)
+ qflx_net_latflow(c) = qflx_latflow_out(c) - qflx_latflow_in(c)
+ if(no_lateral_flow) then
+ qflx_net_latflow(c) = qflx_latflow_avg(c)
+ endif
+
+ !@@
+ ! baseflow
+ if(zwt(c) <= zi(c,nbedrock(c))) then
+ ! apply net lateral flow here
+ drainage(c) = qflx_net_latflow(c)
+ else
+ drainage(c) = 0._r8
+ endif
+
+ !-- Now remove water via drainage
+ drainage_tot = - drainage(c) * dtime
+
+ if(drainage_tot > 0.) then !rising water table
+ do j = jwt(c)+1,1,-1
+
+ ! ensure water is not added to frozen layers
+ if (zi(c,j) < frost_table(c)) then
+ ! analytical expression for specific yield
+ s_y = watsat(c,j) &
+ * ( 1. - (1.+1.e3*zwt(c)/sucsat(c,j))**(-1./bsw(c,j)))
+ s_y=max(s_y,params_inst%aq_sp_yield_min)
+
+ drainage_layer=min(drainage_tot,(s_y*dz(c,j)*1.e3))
+
+ drainage_layer=max(drainage_layer,0._r8)
+ h2osoi_liq(c,j) = h2osoi_liq(c,j) + drainage_layer
+
+ drainage_tot = drainage_tot - drainage_layer
+
+ if (drainage_tot <= 0.) then
+ zwt(c) = zwt(c) - drainage_layer/s_y/1000._r8
+ exit
+ else
+ zwt(c) = zi(c,j-1)
+ endif
+ endif
+
+ enddo
+
+ !-- remove residual drainage --------------------------------
+ h2osfc(c) = h2osfc(c) + drainage_tot
+
else ! deepening water table
do j = jwt(c)+1, nbedrock(c)
- ! use analytical expression for specific yield
+ ! analytical expression for specific yield
s_y = watsat(c,j) &
* ( 1. - (1.+1.e3*zwt(c)/sucsat(c,j))**(-1./bsw(c,j)))
- s_y=max(s_y, params_inst%aq_sp_yield_min)
- rsub_top_layer=max(rsub_top_tot,-(s_y*(zi(c,j) - zwt(c))*1.e3))
- rsub_top_layer=min(rsub_top_layer,0._r8)
- h2osoi_liq(c,j) = h2osoi_liq(c,j) + rsub_top_layer
-
- rsub_top_tot = rsub_top_tot - rsub_top_layer
+ s_y=max(s_y,params_inst%aq_sp_yield_min)
+
+ drainage_layer=max(drainage_tot,-(s_y*(zi(c,j) - zwt(c))*1.e3))
+ drainage_layer=min(drainage_layer,0._r8)
+ h2osoi_liq(c,j) = h2osoi_liq(c,j) + drainage_layer
- if (rsub_top_tot >= 0.) then
- zwt(c) = zwt(c) - rsub_top_layer/s_y/1000._r8
+ drainage_tot = drainage_tot - drainage_layer
+ if (drainage_tot >= 0.) then
+ zwt(c) = zwt(c) - drainage_layer/s_y/1000._r8
exit
else
zwt(c) = zi(c,j)
endif
enddo
- !-- remove residual rsub_top --------------------------------
+ !-- remove residual drainage -----------------------
! make sure no extra water removed from soil column
- rsub_top(c) = rsub_top(c) + rsub_top_tot/dtime
+ drainage(c) = drainage(c) + drainage_tot/dtime
endif
zwt(c) = max(0.0_r8,zwt(c))
@@ -2100,7 +2516,7 @@ subroutine LateralFlowPowerLaw(bounds, num_hydrologyc, filter_hydrologyc, &
c = filter_hydrologyc(fc)
xsi(c) = max(h2osoi_liq(c,j)-eff_porosity(c,j)*dzmm(c,j),0._r8)
h2osoi_liq(c,j) = min(eff_porosity(c,j)*dzmm(c,j), h2osoi_liq(c,j))
- h2osoi_liq(c,j-1) = h2osoi_liq(c,j-1) + xsi(c)
+ h2osoi_liq(c,j-1) = h2osoi_liq(c,j-1) + xsi(c)
end do
end do
@@ -2173,16 +2589,16 @@ subroutine LateralFlowPowerLaw(bounds, num_hydrologyc, filter_hydrologyc, &
! Instead of removing water from aquifer where it eventually
! shows up as excess drainage to the ocean, take it back out of
! drainage
- qflx_rsub_sat(c) = qflx_rsub_sat(c) - xs(c)/dtime
+ qflx_rsub_sat(c) = qflx_rsub_sat(c) - xs(c)/dtime
end do
+
do fc = 1, num_hydrologyc
c = filter_hydrologyc(fc)
! Sub-surface runoff and drainage
-
- qflx_drain(c) = qflx_rsub_sat(c) + rsub_top(c)
+ qflx_drain(c) = qflx_rsub_sat(c) + drainage(c)
! Set imbalance for snow capping
@@ -2190,6 +2606,7 @@ subroutine LateralFlowPowerLaw(bounds, num_hydrologyc, filter_hydrologyc, &
end do
+
! No drainage for urban columns (except for pervious road as computed above)
do fc = 1, num_urbanc
@@ -2203,7 +2620,7 @@ subroutine LateralFlowPowerLaw(bounds, num_hydrologyc, filter_hydrologyc, &
end associate
- end subroutine LateralFlowPowerLaw
+ end subroutine SubsurfaceLateralFlow
!#7
!-----------------------------------------------------------------------
diff --git a/src/biogeophys/SoilHydrologyType.F90 b/src/biogeophys/SoilHydrologyType.F90
index 4dfca06811..07ad2ca45b 100644
--- a/src/biogeophys/SoilHydrologyType.F90
+++ b/src/biogeophys/SoilHydrologyType.F90
@@ -19,8 +19,6 @@ Module SoilHydrologyType
type, public :: soilhydrology_type
integer :: h2osfcflag ! true => surface water is active (namelist)
- integer :: origflag ! used to control soil hydrology properties (namelist)
-
real(r8), pointer :: num_substeps_col (:) ! col adaptive timestep counter
! NON-VIC
real(r8), pointer :: frost_table_col (:) ! col frost table depth
@@ -28,7 +26,6 @@ Module SoilHydrologyType
real(r8), pointer :: zwts_col (:) ! col water table depth, the shallower of the two water depths
real(r8), pointer :: zwt_perched_col (:) ! col perched water table depth
real(r8), pointer :: qcharge_col (:) ! col aquifer recharge rate (mm/s)
- real(r8), pointer :: fracice_col (:,:) ! col fractional impermeability (-)
real(r8), pointer :: icefrac_col (:,:) ! col fraction of ice
real(r8), pointer :: h2osfc_thresh_col (:) ! col level at which h2osfc "percolates" (time constant)
real(r8), pointer :: xs_urban_col (:) ! col excess soil water above urban ponding limit
@@ -121,7 +118,6 @@ subroutine InitAllocate(this, bounds)
allocate(this%zwts_col (begc:endc)) ; this%zwts_col (:) = nan
allocate(this%qcharge_col (begc:endc)) ; this%qcharge_col (:) = nan
- allocate(this%fracice_col (begc:endc,nlevgrnd)) ; this%fracice_col (:,:) = nan
allocate(this%icefrac_col (begc:endc,nlevgrnd)) ; this%icefrac_col (:,:) = nan
allocate(this%h2osfc_thresh_col (begc:endc)) ; this%h2osfc_thresh_col (:) = nan
allocate(this%xs_urban_col (begc:endc)) ; this%xs_urban_col (:) = nan
@@ -340,16 +336,14 @@ subroutine ReadNL( this, NLFilename )
! !LOCAL VARIABLES:
integer :: ierr ! error code
integer :: unitn ! unit for namelist file
- integer :: origflag=0 !use to control soil hydraulic properties
integer :: h2osfcflag=1 !If surface water is active or not
character(len=32) :: subname = 'SoilHydrology_readnl' ! subroutine name
!-----------------------------------------------------------------------
- namelist / clm_soilhydrology_inparm / h2osfcflag, origflag
+ namelist / clm_soilhydrology_inparm / h2osfcflag
! preset values
- origflag = 0
h2osfcflag = 1
if ( masterproc )then
@@ -371,10 +365,8 @@ subroutine ReadNL( this, NLFilename )
end if
call shr_mpi_bcast(h2osfcflag, mpicom)
- call shr_mpi_bcast(origflag, mpicom)
this%h2osfcflag = h2osfcflag
- this%origflag = origflag
end subroutine ReadNL
diff --git a/src/biogeophys/SoilWaterMovementMod.F90 b/src/biogeophys/SoilWaterMovementMod.F90
index b1487e2779..85bcf42c5e 100644
--- a/src/biogeophys/SoilWaterMovementMod.F90
+++ b/src/biogeophys/SoilWaterMovementMod.F90
@@ -575,10 +575,8 @@ subroutine soilwater_zengdecker2009(bounds, num_hydrologyc, filter_hydrologyc, &
zi => col%zi , & ! Input: [real(r8) (:,:) ] interface level below a "z" level (m)
dz => col%dz , & ! Input: [real(r8) (:,:) ] layer thickness (m)
- origflag => soilhydrology_inst%origflag , & ! Input: constant
qcharge => soilhydrology_inst%qcharge_col , & ! Input: [real(r8) (:) ] aquifer recharge rate (mm/s)
zwt => soilhydrology_inst%zwt_col , & ! Input: [real(r8) (:) ] water table depth (m)
- fracice => soilhydrology_inst%fracice_col , & ! Input: [real(r8) (:,:) ] fractional impermeability (-)
icefrac => soilhydrology_inst%icefrac_col , & ! Input: [real(r8) (:,:) ] fraction of ice
hkdepth => soilhydrology_inst%hkdepth_col , & ! Input: [real(r8) (:) ] decay factor (m)
@@ -720,22 +718,13 @@ subroutine soilwater_zengdecker2009(bounds, num_hydrologyc, filter_hydrologyc, &
c = filter_hydrologyc(fc)
! compute hydraulic conductivity based on liquid water content only
- if (origflag == 1) then
- s1 = 0.5_r8*(h2osoi_vol(c,j) + h2osoi_vol(c,min(nlevsoi, j+1))) / &
- (0.5_r8*(watsat(c,j)+watsat(c,min(nlevsoi, j+1))))
- else
- s1 = 0.5_r8*(vwc_liq(c,j) + vwc_liq(c,min(nlevsoi, j+1))) / &
- (0.5_r8*(watsat(c,j)+watsat(c,min(nlevsoi, j+1))))
- endif
+ s1 = 0.5_r8*(vwc_liq(c,j) + vwc_liq(c,min(nlevsoi, j+1))) / &
+ (0.5_r8*(watsat(c,j)+watsat(c,min(nlevsoi, j+1))))
s1 = min(1._r8, s1)
s2 = hksat(c,j)*s1**(2._r8*bsw(c,j)+2._r8)
- ! replace fracice with impedance factor, as in zhao 97,99
- if (origflag == 1) then
- imped(c,j)=(1._r8-0.5_r8*(fracice(c,j)+fracice(c,min(nlevsoi, j+1))))
- else
- imped(c,j)=10._r8**(-params_inst%e_ice*(0.5_r8*(icefrac(c,j)+icefrac(c,min(nlevsoi, j+1)))))
- endif
+ imped(c,j)=10._r8**(-params_inst%e_ice*(0.5_r8*(icefrac(c,j)+icefrac(c,min(nlevsoi, j+1)))))
+
hk(c,j) = imped(c,j)*s1*s2
dhkdw(c,j) = imped(c,j)*(2._r8*bsw(c,j)+3._r8)*s2* &
(1._r8/(watsat(c,j)+watsat(c,min(nlevsoi, j+1))))
@@ -751,11 +740,7 @@ subroutine soilwater_zengdecker2009(bounds, num_hydrologyc, filter_hydrologyc, &
! compute matric potential and derivative based on liquid water content only
- if (origflag == 1) then
- s_node = max(h2osoi_vol(c,j)/watsat(c,j), 0.01_r8)
- else
- s_node = max(vwc_liq(c,j)/watsat(c,j), 0.01_r8)
- endif
+ s_node = max(vwc_liq(c,j)/watsat(c,j), 0.01_r8)
s_node = min(1.0_r8, s_node)
!call soil_water_retention_curve%soil_suction(sucsat(c,j), s_node, bsw(c,j), smp(c,j), dsmpds)
@@ -765,11 +750,7 @@ subroutine soilwater_zengdecker2009(bounds, num_hydrologyc, filter_hydrologyc, &
!do not turn on the line below, which will cause bit to bit error, jyt, 2014 Mar 6
!dsmpdw(c,j) = dsmpds/watsat(c,j)
- if (origflag == 1) then
- dsmpdw(c,j) = -bsw(c,j)*smp(c,j)/(s_node*watsat(c,j))
- else
- dsmpdw(c,j) = -bsw(c,j)*smp(c,j)/vwc_liq(c,j)
- endif
+ dsmpdw(c,j) = -bsw(c,j)*smp(c,j)/vwc_liq(c,j)
smp_l(c,j) = smp(c,j)
hk_l(c,j) = hk(c,j)
@@ -861,11 +842,7 @@ subroutine soilwater_zengdecker2009(bounds, num_hydrologyc, filter_hydrologyc, &
else ! water table is below soil column
! compute aquifer soil moisture as average of layer 10 and saturation
- if(origflag == 1) then
- s_node = max(0.5*(1.0_r8+h2osoi_vol(c,j)/watsat(c,j)), 0.01_r8)
- else
- s_node = max(0.5*((vwc_zwt(c)+vwc_liq(c,j))/watsat(c,j)), 0.01_r8)
- endif
+ s_node = max(0.5*((vwc_zwt(c)+vwc_liq(c,j))/watsat(c,j)), 0.01_r8)
s_node = min(1.0_r8, s_node)
! compute smp for aquifer layer
@@ -940,7 +917,7 @@ subroutine soilwater_zengdecker2009(bounds, num_hydrologyc, filter_hydrologyc, &
s_node = max(h2osoi_vol(c,jwt(c)+1)/watsat(c,jwt(c)+1), 0.01_r8)
s1 = min(1._r8, s_node)
- !scs: this is the expression for unsaturated hk
+ !this is the expression for unsaturated hk
ka = imped(c,jwt(c)+1)*hksat(c,jwt(c)+1) &
*s1**(2._r8*bsw(c,jwt(c)+1)+3._r8)
@@ -953,12 +930,12 @@ subroutine soilwater_zengdecker2009(bounds, num_hydrologyc, filter_hydrologyc, &
smp1 = max(smpmin(c), smp(c,max(1,jwt(c))))
wh = smp1 - zq(c,max(1,jwt(c)))
- !scs: original formulation
+ !original formulation
if(jwt(c) == 0) then
qcharge(c) = -ka * (wh_zwt-wh) /((zwt(c)+1.e-3)*1000._r8)
else
! qcharge(c) = -ka * (wh_zwt-wh)/((zwt(c)-z(c,jwt(c)))*1000._r8)
- !scs: 1/2, assuming flux is at zwt interface, saturation deeper than zwt
+ !1/2, assuming flux is at zwt interface, saturation deeper than zwt
qcharge(c) = -ka * (wh_zwt-wh)/((zwt(c)-z(c,jwt(c)))*1000._r8*2.0)
endif
@@ -1164,6 +1141,7 @@ subroutine soilwater_moisture_form(bounds, num_hydrologyc, &
real(r8) :: vLiqRes(bounds%begc:bounds%endc,1:nlevsoi) ! residual for the volumetric liquid water content (v/v)
real(r8) :: dwat_temp
+ real(r8) :: over_saturation
!-----------------------------------------------------------------------
associate(&
@@ -1177,6 +1155,7 @@ subroutine soilwater_moisture_form(bounds, num_hydrologyc, &
qcharge => soilhydrology_inst%qcharge_col , & ! Input: [real(r8) (:) ] aquifer recharge rate (mm/s)
zwt => soilhydrology_inst%zwt_col , & ! Input: [real(r8) (:) ] water table depth (m)
+ watsat => soilstate_inst%watsat_col , & ! Input: [real(r8) (:,:) ] volumetric soil water at saturation (porosity)
smp_l => soilstate_inst%smp_l_col , & ! Input: [real(r8) (:,:) ] soil matrix potential [mm]
hk_l => soilstate_inst%hk_l_col , & ! Input: [real(r8) (:,:) ] hydraulic conductivity (mm/s)
h2osoi_ice => waterstatebulk_inst%h2osoi_ice_col , & ! Input: [real(r8) (:,:) ] ice water (kg/m2)
@@ -1413,10 +1392,10 @@ subroutine soilwater_moisture_form(bounds, num_hydrologyc, &
end do ! substep loop
-! save number of adaptive substeps used during time step
+ ! save number of adaptive substeps used during time step
nsubsteps(c) = nsubstep
-! check for negative moisture values
+ ! check for negative moisture values
do j = 2, nlayers
if(h2osoi_liq(c,j) < -1e-6_r8) then
write(*,*) 'layer, h2osoi_liq: ', c,j,h2osoi_liq(c,j)
@@ -1494,7 +1473,7 @@ subroutine compute_hydraulic_properties(c, nlayers, &
character(len=32) :: subname = 'calculate_hydraulic_properties' ! subroutine name
!-----------------------------------------------------------------------
-!scs: originally, associate statements selected sections rather than
+! originally, associate statements selected sections rather than
! entire arrays, but due to pgi bug, removed array section selections
! using array sections allowed consistent 1d indexing throughout
associate(&
@@ -1621,7 +1600,7 @@ subroutine compute_moisture_fluxes_and_derivs(c, nlayers, &
real(r8) :: num, den ! used in calculating qin, qout
real(r8) :: dhkds1, dhkds2 !temporary variable
real(r8),parameter :: m_to_mm = 1.e3_r8 !convert meters to mm
-!scs: temporarily use local variables for the following
+ ! temporarily use local variables for the following
real(r8) :: vwc_liq_ub ! liquid volumetric water content at upper boundary
real(r8) :: vwc_liq_lb ! liquid volumetric water content at lower boundary
character(len=32) :: subname = 'calculate_moisture_fluxes_and_derivs' ! subroutine name
@@ -1704,12 +1683,11 @@ subroutine compute_moisture_fluxes_and_derivs(c, nlayers, &
dhkds1 = 0.5_r8 * dhkdw(j) / watsat(c,j) ! derivative w.r.t. volumetric liquid water in the upper layer
dhkds2 = 0.5_r8 * dhkdw(j) / watsat(c,j+1) ! derivative w.r.t. volumetric liquid water in the lower layer
-!scs: this is how zd is done
+ ! this is how zd is done
if (zdflag == 1) then
dhkds1 = dhkdw(j)/(watsat(c,j)+watsat(c,min(nlevsoi, j+1)))
dhkds2 = dhkds1
endif
-!scs
! compute flux at the bottom of the j-th layer
! NOTE: hk(j) is hydraulic conductivity at the bottom of the j-th
@@ -1739,12 +1717,11 @@ subroutine compute_moisture_fluxes_and_derivs(c, nlayers, &
! layer interface w.r.t relative saturation at the interface
dhkds1 = 0.5_r8 * dhkdw(j) / watsat(c,j) ! derivative w.r.t. volumetric liquid water in the upper layer
dhkds2 = 0.5_r8 * dhkdw(j) / watsat(c,j+1) ! derivative w.r.t. volumetric liquid water in the lower layer
-!scs: this is how zd is done
+ ! this is how zd is done
if (zdflag == 1) then
dhkds1 = dhkdw(j)/(watsat(c,j)+watsat(c,min(nlevsoi, j+1)))
dhkds2 = dhkds1
endif
-!scs
! compute flux at the bottom of the j-th layer
! NOTE: hk(j) is hydraulic conductivity at the bottom of the j-th layer
@@ -1801,12 +1778,12 @@ subroutine compute_moisture_fluxes_and_derivs(c, nlayers, &
! condition when the water table is a long way below the soil column
dhkds1 = dhkdw(j) / watsat(c,j)
-!scs: this is how zd is done
+ ! this is how zd is done
if (zdflag == 1) then
dhkds1 = dhkdw(j)/(watsat(c,j)+watsat(c,min(nlevsoi, j+1)))
dhkds2 = dhkds1
endif
-!scs
+
! compute flux
num = -smp(j) ! NOTE: assume saturation at water table depth (smp=0)
den = m_to_mm * (zwt(c) - z(c,j))
@@ -1824,7 +1801,7 @@ subroutine compute_moisture_fluxes_and_derivs(c, nlayers, &
! compute the relative saturation at the lower boundary
s1 = vwc_liq_lb / watsat(c,j)
-!scs: mc's original expression s1 = (vwc_liq_lb - watres(c,j)) / (watsat(c,j) - watres(c,j))
+ ! mc's original expression s1 = (vwc_liq_lb - watres(c,j)) / (watsat(c,j) - watres(c,j))
s1 = min(s1, 1._r8)
s1 = max(0.01_r8, s1)
diff --git a/src/biogeophys/SurfaceAlbedoMod.F90 b/src/biogeophys/SurfaceAlbedoMod.F90
index d23320d5e7..6628f0fa4d 100644
--- a/src/biogeophys/SurfaceAlbedoMod.F90
+++ b/src/biogeophys/SurfaceAlbedoMod.F90
@@ -261,6 +261,8 @@ subroutine SurfaceAlbedo(bounds,nc, &
use abortutils , only : endrun
use clm_varctl , only : use_subgrid_fluxes, use_snicar_frc, use_fates
use CLMFatesInterfaceMod, only : hlm_fates_interface_type
+ use landunit_varcon , only : istsoil
+ use clm_varctl , only : downscale_hillslope_meteorology
! !ARGUMENTS:
type(bounds_type) , intent(in) :: bounds ! bounds
@@ -305,7 +307,6 @@ subroutine SurfaceAlbedo(bounds,nc, &
real(r8) :: ws (bounds%begp:bounds%endp) ! fraction of LAI+SAI that is SAI
real(r8) :: blai(bounds%begp:bounds%endp) ! lai buried by snow: tlai - elai
real(r8) :: bsai(bounds%begp:bounds%endp) ! sai buried by snow: tsai - esai
- real(r8) :: coszen_gcell (bounds%begg:bounds%endg) ! cosine solar zenith angle for next time step (grc)
real(r8) :: coszen_patch (bounds%begp:bounds%endp) ! cosine solar zenith angle for next time step (patch)
real(r8) :: rho(bounds%begp:bounds%endp,numrad) ! leaf/stem refl weighted by fraction LAI and SAI
real(r8) :: tau(bounds%begp:bounds%endp,numrad) ! leaf/stem tran weighted by fraction LAI and SAI
@@ -334,6 +335,7 @@ subroutine SurfaceAlbedo(bounds,nc, &
real(r8) :: mss_cnc_aer_in_fdb (bounds%begc:bounds%endc,-nlevsno+1:0,sno_nbr_aer) ! mass concentration of all aerosol species for feedback calculation (col,lyr,aer) [kg kg-1]
real(r8), parameter :: mpe = 1.e-06_r8 ! prevents overflow for division by zero
integer , parameter :: nband =numrad ! number of solar radiation waveband classes
+ real(r8) :: zenith_angle ! solar zenith angle (radians)
!-----------------------------------------------------------------------
associate(&
@@ -369,6 +371,8 @@ subroutine SurfaceAlbedo(bounds,nc, &
vcmaxcintsha => surfalb_inst%vcmaxcintsha_patch , & ! Output: [real(r8) (:) ] leaf to canopy scaling coefficient, shaded leaf vcmax
ncan => surfalb_inst%ncan_patch , & ! Output: [integer (:) ] number of canopy layers
nrad => surfalb_inst%nrad_patch , & ! Output: [integer (:) ] number of canopy layers, above snow for radiative transfer
+ azsun_grc => surfalb_inst%azsun_grc , & ! Output: [real(r8) (:) ] azimuth angle of sun (radians)
+ coszen_grc => surfalb_inst%coszen_grc , & ! Output: [real(r8) (:) ] gridcell cosine of solar zenith angle
coszen_col => surfalb_inst%coszen_col , & ! Output: [real(r8) (:) ] cosine of solar zenith angle
albgrd => surfalb_inst%albgrd_col , & ! Output: [real(r8) (:,:) ] ground albedo (direct)
albgri => surfalb_inst%albgri_col , & ! Output: [real(r8) (:,:) ] ground albedo (diffuse)
@@ -426,16 +430,29 @@ subroutine SurfaceAlbedo(bounds,nc, &
! Cosine solar zenith angle for next time step
do g = bounds%begg,bounds%endg
- coszen_gcell(g) = shr_orb_cosz (nextsw_cday, grc%lat(g), grc%lon(g), declinp1)
+ coszen_grc(g) = shr_orb_cosz (nextsw_cday, grc%lat(g), grc%lon(g), declinp1)
end do
+
do c = bounds%begc,bounds%endc
g = col%gridcell(c)
- coszen_col(c) = coszen_gcell(g)
+ if (col%is_hillslope_column(c) .and. downscale_hillslope_meteorology) then
+ ! calculate local incidence angle based on column slope and aspect
+ zenith_angle = acos(coszen_grc(g))
+
+ azsun_grc(g) = shr_orb_azimuth(nextsw_cday, grc%lat(g), grc%lon(g), declinp1, zenith_angle)
+ ! hill_slope is [m/m], convert to radians
+ coszen_col(c) = shr_orb_cosinc(zenith_angle,azsun_grc(g),atan(col%hill_slope(c)),col%hill_aspect(c))
+
+ if(coszen_grc(g) > 0._r8 .and. coszen_col(c) < 0._r8) coszen_col(c) = 0._r8
+
+ else
+ coszen_col(c) = coszen_grc(g)
+ endif
end do
do fp = 1,num_nourbanp
p = filter_nourbanp(fp)
- g = patch%gridcell(p)
- coszen_patch(p) = coszen_gcell(g)
+ c = patch%column(p)
+ coszen_patch(p) = coszen_col(c)
end do
! Initialize output because solar radiation only done if coszen > 0
diff --git a/src/biogeophys/SurfaceAlbedoType.F90 b/src/biogeophys/SurfaceAlbedoType.F90
index a8b645b84a..ddb57d88f7 100644
--- a/src/biogeophys/SurfaceAlbedoType.F90
+++ b/src/biogeophys/SurfaceAlbedoType.F90
@@ -16,6 +16,8 @@ module SurfaceAlbedoType
! !PUBLIC DATA MEMBERS:
type, public :: surfalb_type
+ real(r8), pointer :: azsun_grc (:) ! azimuth angle of sun
+ real(r8), pointer :: coszen_grc (:) ! gridcell cosine of solar zenith angle
real(r8), pointer :: coszen_col (:) ! col cosine of solar zenith angle
real(r8), pointer :: albd_patch (:,:) ! patch surface albedo (direct) (numrad)
real(r8), pointer :: albi_patch (:,:) ! patch surface albedo (diffuse) (numrad)
@@ -123,11 +125,15 @@ subroutine InitAllocate(this, bounds)
! !LOCAL VARIABLES:
integer :: begp, endp
integer :: begc, endc
+ integer :: begg, endg
!---------------------------------------------------------------------
begp = bounds%begp; endp = bounds%endp
begc = bounds%begc; endc = bounds%endc
+ begg = bounds%begg; endg = bounds%endg
+ allocate(this%azsun_grc (begg:endg)) ; this%azsun_grc (:) = nan
+ allocate(this%coszen_grc (begg:endg)) ; this%coszen_grc (:) = nan
allocate(this%coszen_col (begc:endc)) ; this%coszen_col (:) = nan
allocate(this%albgrd_col (begc:endc,numrad)) ; this%albgrd_col (:,:) = nan
allocate(this%albgri_col (begc:endc,numrad)) ; this%albgri_col (:,:) = nan
@@ -210,15 +216,27 @@ subroutine InitHistory(this, bounds)
! !LOCAL VARIABLES:
integer :: begp, endp
integer :: begc, endc
+ integer :: begg, endg
character(len=cs) :: defaultoutput
!---------------------------------------------------------------------
begp = bounds%begp; endp = bounds%endp
begc = bounds%begc; endc = bounds%endc
+ begg = bounds%begg; endg = bounds%endg
+
+ this%azsun_grc(begg:endg) = spval
+ call hist_addfld1d (fname='AZSUN', units='radians', &
+ avgflag='A', long_name='azimuth angle of sun', &
+ ptr_lnd=this%azsun_grc, default='inactive')
+
+ this%coszen_grc(begg:endg) = spval
+ call hist_addfld1d (fname='COSZEN_GRC', units='none', &
+ avgflag='A', long_name='cosine of solar zenith angle', &
+ ptr_lnd=this%coszen_grc, default='inactive')
this%coszen_col(begc:endc) = spval
call hist_addfld1d (fname='COSZEN', units='none', &
- avgflag='A', long_name='cosine of solar zenith angle', &
+ avgflag='A', long_name='cosine of solar zenith angle (downscaled if downscaling is activated)', &
ptr_col=this%coszen_col, default='inactive')
this%albgrd_col(begc:endc,:) = spval
@@ -418,6 +436,11 @@ subroutine Restart(this, bounds, ncid, flag, &
begp = bounds%begp; endp = bounds%endp
begc = bounds%begc; endc = bounds%endc
+ call restartvar(ncid=ncid, flag=flag, varname='coszen_grc', xtype=ncd_double, &
+ dim1name='gridcell', &
+ long_name='cosine of solar zenith angle', units='unitless', &
+ interpinic_flag='interp', readvar=readvar, data=this%coszen_grc)
+
call restartvar(ncid=ncid, flag=flag, varname='coszen', xtype=ncd_double, &
dim1name='column', &
long_name='cosine of solar zenith angle', units='unitless', &
diff --git a/src/biogeophys/SurfaceRadiationMod.F90 b/src/biogeophys/SurfaceRadiationMod.F90
index 03557c6476..5de3ba6e09 100644
--- a/src/biogeophys/SurfaceRadiationMod.F90
+++ b/src/biogeophys/SurfaceRadiationMod.F90
@@ -383,6 +383,7 @@ subroutine CanopySunShadeFracs(filter_nourbanp, num_nourbanp, &
! local variables
integer :: fp ! non-urban filter patch index
integer :: p ! patch index
+ integer :: c ! column index
integer :: g ! gridcell index
integer :: iv ! canopy layer index
integer,parameter :: ipar = 1 ! The band index for PAR
@@ -390,7 +391,7 @@ subroutine CanopySunShadeFracs(filter_nourbanp, num_nourbanp, &
associate( tlai_z => surfalb_inst%tlai_z_patch, & ! tlai increment for canopy layer
fsun_z => surfalb_inst%fsun_z_patch, & ! sunlit fraction of canopy layer
elai => canopystate_inst%elai_patch, & ! one-sided leaf area index
- forc_solad => atm2lnd_inst%forc_solad_grc, & ! direct beam radiation (W/m**2)
+ forc_solad_col => atm2lnd_inst%forc_solad_downscaled_col, & ! direct beam radiation, column (W/m**2)
forc_solai => atm2lnd_inst%forc_solai_grc, & ! diffuse radiation (W/m**2)
fabd_sun_z => surfalb_inst%fabd_sun_z_patch, & ! absorbed sunlit leaf direct PAR
fabd_sha_z => surfalb_inst%fabd_sha_z_patch, & ! absorbed shaded leaf direct PAR
@@ -440,10 +441,11 @@ subroutine CanopySunShadeFracs(filter_nourbanp, num_nourbanp, &
! are canopy integrated so that layer values equal big leaf values.
g = patch%gridcell(p)
+ c = patch%column(p)
do iv = 1, nrad(p)
- parsun_z(p,iv) = forc_solad(g,ipar)*fabd_sun_z(p,iv) + forc_solai(g,ipar)*fabi_sun_z(p,iv)
- parsha_z(p,iv) = forc_solad(g,ipar)*fabd_sha_z(p,iv) + forc_solai(g,ipar)*fabi_sha_z(p,iv)
+ parsun_z(p,iv) = forc_solad_col(c,ipar)*fabd_sun_z(p,iv) + forc_solai(g,ipar)*fabi_sun_z(p,iv)
+ parsha_z(p,iv) = forc_solad_col(c,ipar)*fabd_sha_z(p,iv) + forc_solai(g,ipar)*fabi_sha_z(p,iv)
end do
end do ! end of fp = 1,num_nourbanp loop
@@ -533,7 +535,7 @@ subroutine SurfaceRadiation(bounds, num_nourbanp, filter_nourbanp, &
associate( &
snl => col%snl , & ! Input: [integer (:) ] negative number of snow layers [nbr]
- forc_solad => atm2lnd_inst%forc_solad_grc , & ! Input: [real(r8) (:,:) ] direct beam radiation (W/m**2)
+ forc_solad_col => atm2lnd_inst%forc_solad_downscaled_col , & ! Input: [real(r8) (:,:) ] direct beam radiation, column (W/m**2)
forc_solai => atm2lnd_inst%forc_solai_grc , & ! Input: [real(r8) (:,:) ] diffuse radiation (W/m**2)
snow_depth => waterdiagnosticbulk_inst%snow_depth_col , & ! Input: [real(r8) (:) ] snow height (m)
@@ -682,7 +684,7 @@ subroutine SurfaceRadiation(bounds, num_nourbanp, filter_nourbanp, &
! Absorbed by canopy
- cad(p,ib) = forc_solad(g,ib)*fabd(p,ib)
+ cad(p,ib) = forc_solad_col(c,ib)*fabd(p,ib)
cai(p,ib) = forc_solai(g,ib)*fabi(p,ib)
sabv(p) = sabv(p) + cad(p,ib) + cai(p,ib)
fsa(p) = fsa(p) + cad(p,ib) + cai(p,ib)
@@ -695,8 +697,8 @@ subroutine SurfaceRadiation(bounds, num_nourbanp, filter_nourbanp, &
! Transmitted = solar fluxes incident on ground
- trd(p,ib) = forc_solad(g,ib)*ftdd(p,ib)
- tri(p,ib) = forc_solad(g,ib)*ftid(p,ib) + forc_solai(g,ib)*ftii(p,ib)
+ trd(p,ib) = forc_solad_col(c,ib)*ftdd(p,ib)
+ tri(p,ib) = forc_solad_col(c,ib)*ftid(p,ib) + forc_solai(g,ib)*ftii(p,ib)
! Solar radiation absorbed by ground surface
! calculate absorbed solar by soil/snow separately
absrad = trd(p,ib)*(1._r8-albsod(c,ib)) + tri(p,ib)*(1._r8-albsoi(c,ib))
@@ -887,29 +889,30 @@ subroutine SurfaceRadiation(bounds, num_nourbanp, filter_nourbanp, &
do fp = 1,num_nourbanp
p = filter_nourbanp(fp)
g = patch%gridcell(p)
+ c = patch%column(p)
! NDVI and reflected solar radiation
- rvis = albd(p,1)*forc_solad(g,1) + albi(p,1)*forc_solai(g,1)
- rnir = albd(p,2)*forc_solad(g,2) + albi(p,2)*forc_solai(g,2)
+ rvis = albd(p,1)*forc_solad_col(c,1) + albi(p,1)*forc_solai(g,1)
+ rnir = albd(p,2)*forc_solad_col(c,2) + albi(p,2)*forc_solai(g,2)
fsr(p) = rvis + rnir
if (use_SSRE) then
- rvisSF = albdSF(p,1)*forc_solad(g,1) + albiSF(p,1)*forc_solai(g,1)
- rnirSF = albdSF(p,2)*forc_solad(g,2) + albiSF(p,2)*forc_solai(g,2)
+ rvisSF = albdSF(p,1)*forc_solad_col(c,1) + albiSF(p,1)*forc_solai(g,1)
+ rnirSF = albdSF(p,2)*forc_solad_col(c,2) + albiSF(p,2)*forc_solai(g,2)
fsrSF(p) = rvisSF + rnirSF
ssre_fsr(p) = fsr(p)-fsrSF(p)
end if
- fsds_vis_d(p) = forc_solad(g,1)
- fsds_nir_d(p) = forc_solad(g,2)
+ fsds_vis_d(p) = forc_solad_col(c,1)
+ fsds_nir_d(p) = forc_solad_col(c,2)
fsds_vis_i(p) = forc_solai(g,1)
fsds_nir_i(p) = forc_solai(g,2)
- fsr_vis_d(p) = albd(p,1)*forc_solad(g,1)
- fsr_nir_d(p) = albd(p,2)*forc_solad(g,2)
+ fsr_vis_d(p) = albd(p,1)*forc_solad_col(c,1)
+ fsr_nir_d(p) = albd(p,2)*forc_solad_col(c,2)
fsr_vis_i(p) = albi(p,1)*forc_solai(g,1)
fsr_nir_i(p) = albi(p,2)*forc_solai(g,2)
if (use_SSRE) then
- fsrSF_vis_d(p) = albdSF(p,1)*forc_solad(g,1)
- fsrSF_nir_d(p) = albdSF(p,2)*forc_solad(g,2)
+ fsrSF_vis_d(p) = albdSF(p,1)*forc_solad_col(c,1)
+ fsrSF_nir_d(p) = albdSF(p,2)*forc_solad_col(c,2)
fsrSF_vis_i(p) = albiSF(p,1)*forc_solai(g,1)
fsrSF_nir_i(p) = albiSF(p,2)*forc_solai(g,2)
@@ -919,10 +922,10 @@ subroutine SurfaceRadiation(bounds, num_nourbanp, filter_nourbanp, &
ssre_fsr_nir_i(p) = fsrSF_nir_i(p)-fsr_nir_i(p)
end if
if ( is_near_local_noon( grc%londeg(g), deltasec=nint(dtime)/2 ) )then
- fsds_vis_d_ln(p) = forc_solad(g,1)
- fsds_nir_d_ln(p) = forc_solad(g,2)
- fsr_vis_d_ln(p) = albd(p,1)*forc_solad(g,1)
- fsr_nir_d_ln(p) = albd(p,2)*forc_solad(g,2)
+ fsds_vis_d_ln(p) = forc_solad_col(c,1)
+ fsds_nir_d_ln(p) = forc_solad_col(c,2)
+ fsr_vis_d_ln(p) = albd(p,1)*forc_solad_col(c,1)
+ fsr_nir_d_ln(p) = albd(p,2)*forc_solad_col(c,2)
fsds_vis_i_ln(p) = forc_solai(g,1)
parveg_ln(p) = parveg(p)
else
@@ -935,8 +938,8 @@ subroutine SurfaceRadiation(bounds, num_nourbanp, filter_nourbanp, &
end if
if (use_SSRE) then
if ( is_near_local_noon( grc%londeg(g), deltasec=nint(dtime)/2 ) )then
- fsrSF_vis_d_ln(p) = albdSF(p,1)*forc_solad(g,1)
- fsrSF_nir_d_ln(p) = albdSF(p,2)*forc_solad(g,2)
+ fsrSF_vis_d_ln(p) = albdSF(p,1)*forc_solad_col(c,1)
+ fsrSF_nir_d_ln(p) = albdSF(p,2)*forc_solad_col(c,2)
else
fsrSF_vis_d_ln(p) = spval
fsrSF_nir_d_ln(p) = spval
@@ -946,8 +949,8 @@ subroutine SurfaceRadiation(bounds, num_nourbanp, filter_nourbanp, &
! (OPTIONAL)
c = patch%column(p)
if (snl(c) < 0) then
- fsds_sno_vd(p) = forc_solad(g,1)
- fsds_sno_nd(p) = forc_solad(g,2)
+ fsds_sno_vd(p) = forc_solad_col(c,1)
+ fsds_sno_nd(p) = forc_solad_col(c,2)
fsds_sno_vi(p) = forc_solai(g,1)
fsds_sno_ni(p) = forc_solai(g,2)
@@ -972,6 +975,7 @@ subroutine SurfaceRadiation(bounds, num_nourbanp, filter_nourbanp, &
do fp = 1,num_urbanp
p = filter_urbanp(fp)
g = patch%gridcell(p)
+ c = patch%column(p)
if(elai(p)==0.0_r8.and.fabd(p,1)>0._r8)then
if ( local_debug ) write(iulog,*) 'absorption without LAI',elai(p),tlai(p),fabd(p,1),p
@@ -979,15 +983,15 @@ subroutine SurfaceRadiation(bounds, num_nourbanp, filter_nourbanp, &
! Solar incident
- fsds_vis_d(p) = forc_solad(g,1)
- fsds_nir_d(p) = forc_solad(g,2)
+ fsds_vis_d(p) = forc_solad_col(c,1)
+ fsds_nir_d(p) = forc_solad_col(c,2)
fsds_vis_i(p) = forc_solai(g,1)
fsds_nir_i(p) = forc_solai(g,2)
! Determine local noon incident solar
if ( is_near_local_noon( grc%londeg(g), deltasec=nint(dtime)/2 ) )then
- fsds_vis_d_ln(p) = forc_solad(g,1)
- fsds_nir_d_ln(p) = forc_solad(g,2)
+ fsds_vis_d_ln(p) = forc_solad_col(c,1)
+ fsds_nir_d_ln(p) = forc_solad_col(c,2)
fsds_vis_i_ln(p) = forc_solai(g,1)
parveg_ln(p) = 0._r8
else
@@ -1000,8 +1004,8 @@ subroutine SurfaceRadiation(bounds, num_nourbanp, filter_nourbanp, &
! Solar reflected
! per unit ground area (roof, road) and per unit wall area (sunwall, shadewall)
- fsr_vis_d(p) = albd(p,1) * forc_solad(g,1)
- fsr_nir_d(p) = albd(p,2) * forc_solad(g,2)
+ fsr_vis_d(p) = albd(p,1) * forc_solad_col(c,1)
+ fsr_nir_d(p) = albd(p,2) * forc_solad_col(c,2)
fsr_vis_i(p) = albi(p,1) * forc_solai(g,1)
fsr_nir_i(p) = albi(p,2) * forc_solai(g,2)
diff --git a/src/biogeophys/SurfaceWaterMod.F90 b/src/biogeophys/SurfaceWaterMod.F90
index b293dd792c..562c64cc18 100644
--- a/src/biogeophys/SurfaceWaterMod.F90
+++ b/src/biogeophys/SurfaceWaterMod.F90
@@ -456,6 +456,7 @@ subroutine QflxH2osfcSurf(bounds, num_hydrologyc, filter_hydrologyc, &
real(r8) :: dtime ! land model time step (sec)
real(r8) :: frac_infclust ! fraction of submerged area that is connected
real(r8) :: k_wet ! linear reservoir coefficient for h2osfc
+ real(r8),parameter :: min_hill_slope = 1e-3_r8 ! minimum value of hillslope for outflow
character(len=*), parameter :: subname = 'QflxH2osfcSurf'
!-----------------------------------------------------------------------
@@ -483,6 +484,10 @@ subroutine QflxH2osfcSurf(bounds, num_hydrologyc, filter_hydrologyc, &
if(h2osfc(c) > h2osfc_thresh(c) .and. h2osfcflag/=0) then
! spatially variable k_wet
k_wet=1.0e-4_r8 * sin((rpi/180._r8) * topo_slope(c))
+ if (col%is_hillslope_column(c)) then
+ ! require a minimum value to ensure non-zero outflow
+ k_wet = 1e-4_r8 * max(col%hill_slope(c),min_hill_slope)
+ endif
qflx_h2osfc_surf(c) = k_wet * frac_infclust * (h2osfc(c) - h2osfc_thresh(c))
qflx_h2osfc_surf(c)=min(qflx_h2osfc_surf(c),(h2osfc(c) - h2osfc_thresh(c))/dtime)
diff --git a/src/biogeophys/UrbanRadiationMod.F90 b/src/biogeophys/UrbanRadiationMod.F90
index 0b6412f2d2..ccb3f196b7 100644
--- a/src/biogeophys/UrbanRadiationMod.F90
+++ b/src/biogeophys/UrbanRadiationMod.F90
@@ -117,9 +117,9 @@ subroutine UrbanRadiation (bounds , &
canyon_hwr => lun%canyon_hwr , & ! Input: [real(r8) (:) ] ratio of building height to street width
wtroad_perv => lun%wtroad_perv , & ! Input: [real(r8) (:) ] weight of pervious road wrt total road
- forc_solad => atm2lnd_inst%forc_solad_grc , & ! Input: [real(r8) (:,:) ] direct beam radiation (vis=forc_sols , nir=forc_soll ) (W/m**2)
+ forc_solad => atm2lnd_inst%forc_solad_not_downscaled_grc , & ! Input: [real(r8) (:,:) ] direct beam radiation (vis=forc_sols , nir=forc_soll ) (W/m**2)
forc_solai => atm2lnd_inst%forc_solai_grc , & ! Input: [real(r8) (:,:) ] diffuse beam radiation (vis=forc_sols , nir=forc_soll ) (W/m**2)
- forc_solar => atm2lnd_inst%forc_solar_grc , & ! Input: [real(r8) (:) ] incident solar radiation (W/m**2)
+ forc_solar => atm2lnd_inst%forc_solar_not_downscaled_grc , & ! Input: [real(r8) (:) ] incident solar radiation (W/m**2)
forc_lwrad => atm2lnd_inst%forc_lwrad_not_downscaled_grc , & ! Input: [real(r8) (:) ] downward infrared (longwave) radiation (W/m**2)
frac_sno => waterdiagnosticbulk_inst%frac_sno_col , & ! Input: [real(r8) (:) ] fraction of ground covered by snow (0 to 1)
diff --git a/src/biogeophys/WaterDiagnosticBulkType.F90 b/src/biogeophys/WaterDiagnosticBulkType.F90
index 057062777f..dd556a2df6 100644
--- a/src/biogeophys/WaterDiagnosticBulkType.F90
+++ b/src/biogeophys/WaterDiagnosticBulkType.F90
@@ -16,7 +16,7 @@ module WaterDiagnosticBulkType
use shr_log_mod , only : errMsg => shr_log_errMsg
use decompMod , only : bounds_type
use abortutils , only : endrun
- use clm_varctl , only : use_cn, iulog, use_luna
+ use clm_varctl , only : use_cn, iulog, use_luna, use_hillslope
use clm_varpar , only : nlevgrnd, nlevsno, nlevcan, nlevsoi
use clm_varcon , only : spval
use LandunitType , only : lun
@@ -83,6 +83,9 @@ module WaterDiagnosticBulkType
real(r8), pointer :: qflx_prec_intr_patch (:) ! patch interception of precipitation (mm H2O/s)
real(r8), pointer :: qflx_prec_grnd_col (:) ! col water onto ground including canopy runoff (mm H2O/s)
+ ! Hillslope stream variables
+ real(r8), pointer :: stream_water_depth_lun (:) ! landunit depth of water in the streams (m)
+
contains
! Public interfaces
@@ -231,6 +234,7 @@ subroutine InitBulkAllocate(this, bounds)
allocate(this%fdry_patch (begp:endp)) ; this%fdry_patch (:) = nan
allocate(this%qflx_prec_intr_patch (begp:endp)) ; this%qflx_prec_intr_patch (:) = nan
allocate(this%qflx_prec_grnd_col (begc:endc)) ; this%qflx_prec_grnd_col (:) = nan
+ allocate(this%stream_water_depth_lun (begl:endl)) ; this%stream_water_depth_lun (:) = nan
end subroutine InitBulkAllocate
@@ -252,12 +256,14 @@ subroutine InitBulkHistory(this, bounds)
! !LOCAL VARIABLES:
integer :: begp, endp
integer :: begc, endc
+ integer :: begl, endl
integer :: begg, endg
real(r8), pointer :: data2dptr(:,:), data1dptr(:) ! temp. pointers for slicing larger arrays
!------------------------------------------------------------------------
begp = bounds%begp; endp= bounds%endp
begc = bounds%begc; endc= bounds%endc
+ begl = bounds%begl; endl= bounds%endl
begg = bounds%begg; endg= bounds%endg
this%h2osno_total_col(begc:endc) = spval
@@ -580,6 +586,14 @@ subroutine InitBulkHistory(this, bounds)
long_name=this%info%lname('interception'), &
ptr_patch=this%qflx_prec_intr_patch, set_lake=0._r8)
+ if (use_hillslope) then
+ this%stream_water_depth_lun(begl:endl) = spval
+ call hist_addfld1d (fname=this%info%fname('STREAM_WATER_DEPTH'), &
+ units='m', avgflag='A', &
+ long_name=this%info%lname('depth of water in stream channel (hillslope hydrology only)'), &
+ ptr_lunit=this%stream_water_depth_lun, l2g_scale_type='natveg', default='inactive')
+ endif
+
end subroutine InitBulkHistory
!-----------------------------------------------------------------------
diff --git a/src/biogeophys/WaterFluxType.F90 b/src/biogeophys/WaterFluxType.F90
index f7c55d44e1..23980a21c9 100644
--- a/src/biogeophys/WaterFluxType.F90
+++ b/src/biogeophys/WaterFluxType.F90
@@ -10,7 +10,7 @@ module WaterFluxType
use clm_varpar , only : nlevsno, nlevsoi
use clm_varcon , only : spval
use decompMod , only : bounds_type
- use decompMod , only : subgrid_level_patch, subgrid_level_column, subgrid_level_gridcell
+ use decompMod , only : subgrid_level_patch, subgrid_level_column, subgrid_level_landunit, subgrid_level_gridcell
use LandunitType , only : lun
use ColumnType , only : col
use AnnualFluxDribbler, only : annual_flux_dribbler_type, annual_flux_dribbler_gridcell
@@ -26,7 +26,7 @@ module WaterFluxType
class(water_info_base_type), pointer :: info
- ! water fluxes are in units or mm/s
+ ! water fluxes are in units of mm/s
real(r8), pointer :: qflx_through_snow_patch (:) ! patch canopy throughfall of snow (mm H2O/s)
real(r8), pointer :: qflx_through_liq_patch (:) ! patch canopy throughfal of liquid (rain+irrigation) (mm H2O/s)
@@ -72,6 +72,10 @@ module WaterFluxType
real(r8), pointer :: qflx_infl_col (:) ! col infiltration (mm H2O /s)
real(r8), pointer :: qflx_surf_col (:) ! col total surface runoff (mm H2O /s)
real(r8), pointer :: qflx_drain_col (:) ! col sub-surface runoff (mm H2O /s)
+ real(r8), pointer :: qflx_latflow_in_col (:) ! col hillslope lateral flow input (mm/s)
+ real(r8), pointer :: qflx_latflow_out_col (:) ! col hillslope lateral flow output (mm/s)
+ real(r8), pointer :: volumetric_discharge_col (:) ! col hillslope discharge (m3/s)
+ real(r8), pointer :: volumetric_streamflow_lun(:) ! lun stream discharge (m3/s)
real(r8), pointer :: qflx_drain_perched_col (:) ! col sub-surface runoff from perched wt (mm H2O /s)
real(r8), pointer :: qflx_top_soil_col (:) ! col net water input into soil from top (mm/s)
real(r8), pointer :: qflx_floodc_col (:) ! col flood water flux at column level
@@ -278,6 +282,18 @@ subroutine InitAllocate(this, bounds, tracer_vars)
call AllocateVar1d(var = this%qflx_drain_perched_col, name = 'qflx_drain_perched_col', &
container = tracer_vars, &
bounds = bounds, subgrid_level = subgrid_level_column)
+ call AllocateVar1d(var = this%qflx_latflow_in_col, name = 'qflx_latflow_in_col', &
+ container = tracer_vars, &
+ bounds = bounds, subgrid_level = subgrid_level_column)
+ call AllocateVar1d(var = this%qflx_latflow_out_col, name = 'qflx_latflow_out_col', &
+ container = tracer_vars, &
+ bounds = bounds, subgrid_level = subgrid_level_column)
+ call AllocateVar1d(var = this%volumetric_discharge_col, name = 'volumetric_discharge_col', &
+ container = tracer_vars, &
+ bounds = bounds, subgrid_level = subgrid_level_column)
+ call AllocateVar1d(var = this%volumetric_streamflow_lun, name = 'volumetric_streamflow_lun', &
+ container = tracer_vars, &
+ bounds = bounds, subgrid_level = subgrid_level_landunit)
call AllocateVar1d(var = this%qflx_top_soil_col, name = 'qflx_top_soil_col', &
container = tracer_vars, &
bounds = bounds, subgrid_level = subgrid_level_column)
@@ -386,6 +402,8 @@ subroutine InitHistory(this, bounds)
!
! !USES:
use histFileMod , only : hist_addfld1d, hist_addfld2d, no_snow_normal
+ use clm_varctl , only : use_hillslope, use_hillslope_routing
+
!
! !ARGUMENTS:
class(waterflux_type), intent(in) :: this
@@ -394,12 +412,14 @@ subroutine InitHistory(this, bounds)
! !LOCAL VARIABLES:
integer :: begp, endp
integer :: begc, endc
+ integer :: begl, endl
integer :: begg, endg
real(r8), pointer :: data2dptr(:,:), data1dptr(:) ! temp. pointers for slicing larger arrays
!------------------------------------------------------------------------
begp = bounds%begp; endp= bounds%endp
begc = bounds%begc; endc= bounds%endc
+ begl = bounds%begl; endl= bounds%endl
begg = bounds%begg; endg= bounds%endg
this%qflx_through_liq_patch(begp:endp) = spval
@@ -483,6 +503,37 @@ subroutine InitHistory(this, bounds)
long_name=this%info%lname('sub-surface drainage'), &
ptr_col=this%qflx_drain_col, c2l_scale_type='urbanf')
+ if (use_hillslope) then
+ this%qflx_latflow_out_col(begc:endc) = spval
+ call hist_addfld1d ( &
+ fname=this%info%fname('QLATFLOWOUT'), &
+ units='mm/s', &
+ avgflag='A', &
+ long_name=this%info%lname('hillcol lateral outflow'), &
+ l2g_scale_type='natveg', c2l_scale_type='urbanf', &
+ ptr_col=this%qflx_latflow_out_col)
+
+ this%volumetric_discharge_col(begc:endc) = spval
+ call hist_addfld1d ( &
+ fname=this%info%fname('VOLUMETRIC_DISCHARGE'), &
+ units='m3/s', &
+ avgflag='A', &
+ long_name=this%info%lname('hillslope discharge from column'), &
+ l2g_scale_type='natveg', c2l_scale_type='urbanf', &
+ ptr_col=this%volumetric_discharge_col,default='inactive')
+
+ if (use_hillslope_routing) then
+ this%volumetric_streamflow_lun(begl:endl) = spval
+ call hist_addfld1d ( &
+ fname=this%info%fname('VOLUMETRIC_STREAMFLOW'), &
+ units='m3/s', &
+ avgflag='A', &
+ long_name=this%info%lname('volumetric streamflow from hillslope'), &
+ l2g_scale_type='natveg', &
+ ptr_lunit=this%volumetric_streamflow_lun)
+ endif
+ endif
+
this%qflx_drain_perched_col(begc:endc) = spval
call hist_addfld1d ( &
fname=this%info%fname('QDRAI_PERCH'), &
@@ -810,6 +861,8 @@ subroutine InitCold(this, bounds)
!
! !USES:
use landunit_varcon, only : istsoil, istcrop
+ use clm_varctl , only : use_hillslope_routing
+
!
! !ARGUMENTS:
class(waterflux_type), intent(in) :: this
@@ -861,9 +914,19 @@ subroutine InitCold(this, bounds)
if (lun%itype(l) == istsoil .or. lun%itype(l) == istcrop) then
this%qflx_drain_col(c) = 0._r8
this%qflx_surf_col(c) = 0._r8
+ this%qflx_latflow_in_col(c) = 0._r8
+ this%qflx_latflow_out_col(c) = 0._r8
+ this%volumetric_discharge_col(c) = 0._r8
end if
end do
-
+ if (use_hillslope_routing) then
+ do l = bounds%begl, bounds%endl
+ if (lun%itype(l) == istsoil .or. lun%itype(l) == istcrop) then
+ this%volumetric_streamflow_lun(l) = 0._r8
+ end if
+ end do
+ endif
+
end subroutine InitCold
!------------------------------------------------------------------------
diff --git a/src/biogeophys/WaterStateType.F90 b/src/biogeophys/WaterStateType.F90
index cdbefa2a04..390e9e8691 100644
--- a/src/biogeophys/WaterStateType.F90
+++ b/src/biogeophys/WaterStateType.F90
@@ -12,10 +12,10 @@ module WaterStateType
use shr_log_mod , only : errMsg => shr_log_errMsg
use abortutils , only : endrun
use decompMod , only : bounds_type
- use decompMod , only : subgrid_level_patch, subgrid_level_column, subgrid_level_gridcell
+ use decompMod , only : subgrid_level_patch, subgrid_level_column, subgrid_level_landunit, subgrid_level_gridcell
use clm_varctl , only : use_bedrock, use_excess_ice, iulog
use spmdMod , only : masterproc
- use clm_varctl , only : use_fates
+ use clm_varctl , only : use_fates, use_hillslope
use clm_varpar , only : nlevgrnd, nlevsoi, nlevurb, nlevmaxurbgrnd, nlevsno
use clm_varcon , only : spval
use LandunitType , only : lun
@@ -58,6 +58,9 @@ module WaterStateType
type(excessicestream_type), private :: exicestream ! stream type for excess ice initialization NUOPC only
+ ! Hillslope stream variables
+ real(r8), pointer :: stream_water_volume_lun(:) ! landunit volume of water in the streams (m3)
+
contains
procedure, public :: Init
@@ -158,6 +161,9 @@ subroutine InitAllocate(this, bounds, tracer_vars)
call AllocateVar1d(var = this%dynbal_baseline_ice_col, name = 'dynbal_baseline_ice_col', &
container = tracer_vars, &
bounds = bounds, subgrid_level = subgrid_level_column)
+ call AllocateVar1d(var = this%stream_water_volume_lun, name = 'stream_water_volume_lun', &
+ container = tracer_vars, &
+ bounds = bounds, subgrid_level = subgrid_level_landunit)
!excess ice vars
call AllocateVar2d(var = this%excess_ice_col, name = 'excess_ice_col', &
container = tracer_vars, &
@@ -178,6 +184,7 @@ subroutine InitHistory(this, bounds, use_aquifer_layer)
! !USES:
use histFileMod , only : hist_addfld1d, hist_addfld2d, no_snow_normal
use clm_varctl , only : use_soil_moisture_streams
+ use GridcellType , only : grc
!
! !ARGUMENTS:
class(waterstate_type), intent(in) :: this
@@ -187,12 +194,14 @@ subroutine InitHistory(this, bounds, use_aquifer_layer)
! !LOCAL VARIABLES:
integer :: begp, endp
integer :: begc, endc
+ integer :: begl, endl
integer :: begg, endg
real(r8), pointer :: data2dptr(:,:), data1dptr(:) ! temp. pointers for slicing larger arrays
!------------------------------------------------------------------------
begp = bounds%begp; endp= bounds%endp
begc = bounds%begc; endc= bounds%endc
+ begl = bounds%begl; endl= bounds%endl
begg = bounds%begg; endg= bounds%endg
data2dptr => this%h2osoi_liq_col(:,-nlevsno+1:0)
@@ -284,6 +293,14 @@ subroutine InitHistory(this, bounds, use_aquifer_layer)
ptr_col=this%wa_col, l2g_scale_type='veg')
end if
+ if (use_hillslope) then
+ this%stream_water_volume_lun(begl:endl) = spval
+ call hist_addfld1d (fname=this%info%fname('STREAM_WATER_VOLUME'), units='m3', &
+ avgflag='A', &
+ long_name=this%info%lname('volume of water in stream channel (hillslope hydrology only)'), &
+ ptr_lunit=this%stream_water_volume_lun, l2g_scale_type='natveg', default='inactive')
+ end if
+
! Add excess ice fields to history
if (use_excess_ice) then
@@ -345,7 +362,7 @@ subroutine InitCold(this, bounds, &
this%h2osfc_col(bounds%begc:bounds%endc) = 0._r8
this%snocan_patch(bounds%begp:bounds%endp) = 0._r8
this%liqcan_patch(bounds%begp:bounds%endp) = 0._r8
-
+ this%stream_water_volume_lun(bounds%begl:bounds%endl) = 0._r8
!--------------------------------------------
! Set soil water
@@ -709,6 +726,13 @@ subroutine Restart(this, bounds, ncid, flag, &
units='kg/m2', &
interpinic_flag='interp', readvar=readvar, data=this%dynbal_baseline_ice_col)
+ call restartvar(ncid=ncid, flag=flag, &
+ varname=this%info%fname('STREAM_WATER_VOLUME'), &
+ xtype=ncd_double, &
+ dim1name='landunit', &
+ long_name=this%info%lname('water in stream channel'), &
+ units='m3', &
+ interpinic_flag='interp', readvar=readvar, data=this%stream_water_volume_lun)
! Restart excess ice vars
if (.not. use_excess_ice) then
! no need to even define the restart vars
diff --git a/src/biogeophys/Wateratm2lndBulkType.F90 b/src/biogeophys/Wateratm2lndBulkType.F90
index 03ee7522f3..4aacbe11c2 100644
--- a/src/biogeophys/Wateratm2lndBulkType.F90
+++ b/src/biogeophys/Wateratm2lndBulkType.F90
@@ -30,6 +30,8 @@ module Wateratm2lndBulkType
real(r8), pointer :: volrmch_grc (:) ! rof volr main channel (m3)
real(r8), pointer :: volr_grc (:) ! rof volr total volume (m3)
+ real(r8), pointer :: tdepth_grc (:) ! rof tributary water depth (m)
+ real(r8), pointer :: tdepthmax_grc (:) ! rof tributary bankfull water depth (m)
real(r8), pointer :: forc_rh_grc (:) ! atmospheric relative humidity (%)
real(r8) , pointer :: prec365_col (:) ! col 365-day running mean of tot. precipitation (see comment in UpdateAccVars regarding why this is col-level despite other prec accumulators being patch-level)
real(r8) , pointer :: prec60_patch (:) ! patch 60-day running mean of tot. precipitation (mm/s)
@@ -117,6 +119,8 @@ subroutine InitBulkAllocate(this, bounds)
begc = bounds%begc; endc= bounds%endc
begg = bounds%begg; endg= bounds%endg
+ allocate(this%tdepth_grc (begg:endg)) ; this%tdepth_grc (:) = ival
+ allocate(this%tdepthmax_grc (begg:endg)) ; this%tdepthmax_grc (:) = ival
allocate(this%volr_grc (begg:endg)) ; this%volr_grc (:) = ival
allocate(this%volrmch_grc (begg:endg)) ; this%volrmch_grc (:) = ival
allocate(this%forc_rh_grc (begg:endg)) ; this%forc_rh_grc (:) = ival
@@ -154,6 +158,15 @@ subroutine InitBulkHistory(this, bounds)
begp = bounds%begp; endp= bounds%endp
begg = bounds%begg; endg= bounds%endg
+ this%tdepth_grc(begg:endg) = spval
+ call hist_addfld1d (fname='TDEPTH', units='m', &
+ avgflag='A', long_name='tributary water depth', &
+ ptr_lnd=this%tdepth_grc, default = 'inactive')
+
+ this%tdepthmax_grc(begg:endg) = spval
+ call hist_addfld1d (fname='TDEPTHMAX', units='m', &
+ avgflag='A', long_name='tributary bankfull water depth', &
+ ptr_lnd=this%tdepthmax_grc, default = 'inactive')
this%volr_grc(begg:endg) = spval
call hist_addfld1d (fname=this%info%fname('VOLR'), units='m3', &
@@ -462,6 +475,8 @@ subroutine Clean(this)
! rof->lnd
deallocate(this%forc_flood_grc)
+ deallocate(this%tdepth_grc)
+ deallocate(this%tdepthmax_grc)
deallocate(this%volr_grc)
deallocate(this%volrmch_grc)
diff --git a/src/biogeophys/Waterlnd2atmType.F90 b/src/biogeophys/Waterlnd2atmType.F90
index 54972e9b00..80214bebbb 100644
--- a/src/biogeophys/Waterlnd2atmType.F90
+++ b/src/biogeophys/Waterlnd2atmType.F90
@@ -32,6 +32,7 @@ module Waterlnd2atmType
real(r8), pointer :: qflx_rofliq_qsub_grc (:) ! rof liq -- subsurface runoff component
real(r8), pointer :: qflx_rofliq_qgwl_grc (:) ! rof liq -- glacier, wetland and lakes water balance residual component
real(r8), pointer :: qflx_rofliq_drain_perched_grc (:) ! rof liq -- perched water table runoff component
+ real(r8), pointer :: qflx_rofliq_stream_grc (:) ! rof liq -- stream channel runoff component
real(r8), pointer :: qflx_ice_runoff_col(:) ! rof ice forcing, col level
real(r8), pointer :: qflx_rofice_grc (:) ! rof ice forcing, grc level
real(r8), pointer :: qflx_liq_from_ice_col(:) ! liquid runoff from converted ice runoff
@@ -120,6 +121,10 @@ subroutine InitAllocate(this, bounds, tracer_vars)
container = tracer_vars, &
bounds = bounds, subgrid_level = subgrid_level_gridcell, &
ival=ival)
+ call AllocateVar1d(var = this%qflx_rofliq_stream_grc, name = 'qflx_rofliq_stream_grc', &
+ container = tracer_vars, &
+ bounds = bounds, subgrid_level = subgrid_level_gridcell, &
+ ival=ival)
call AllocateVar1d(var = this%qflx_ice_runoff_col, name = 'qflx_ice_runoff_col', &
container = tracer_vars, &
bounds = bounds, subgrid_level = subgrid_level_column, &
diff --git a/src/biogeophys/test/CMakeLists.txt b/src/biogeophys/test/CMakeLists.txt
index 49f80533de..5c15858210 100644
--- a/src/biogeophys/test/CMakeLists.txt
+++ b/src/biogeophys/test/CMakeLists.txt
@@ -1,6 +1,7 @@
add_subdirectory(Daylength_test)
add_subdirectory(Irrigation_test)
add_subdirectory(HumanStress_test)
+add_subdirectory(HillslopeHydrology_test)
add_subdirectory(SnowHydrology_test)
add_subdirectory(Photosynthesis_test)
add_subdirectory(Balance_test)
diff --git a/src/biogeophys/test/HillslopeHydrology_test/CMakeLists.txt b/src/biogeophys/test/HillslopeHydrology_test/CMakeLists.txt
new file mode 100644
index 0000000000..f40baf96ed
--- /dev/null
+++ b/src/biogeophys/test/HillslopeHydrology_test/CMakeLists.txt
@@ -0,0 +1,6 @@
+set (pfunit_sources
+ test_hillslopehydrologyUtils.pf)
+
+add_pfunit_ctest(HillslopeHydrologyUtils
+ TEST_SOURCES "${pfunit_sources}"
+ LINK_LIBRARIES clm csm_share esmf_wrf_timemgr)
diff --git a/src/biogeophys/test/HillslopeHydrology_test/test_hillslopehydrologyUtils.pf b/src/biogeophys/test/HillslopeHydrology_test/test_hillslopehydrologyUtils.pf
new file mode 100644
index 0000000000..63db42cffd
--- /dev/null
+++ b/src/biogeophys/test/HillslopeHydrology_test/test_hillslopehydrologyUtils.pf
@@ -0,0 +1,249 @@
+module test_hillslopehydrologyUtils
+
+ ! Tests of the HillslopeHydrologyUtils module
+
+ use funit
+ use unittestSubgridMod
+ use ColumnType , only : col
+ use LandunitType , only : lun
+ use landunit_varcon , only : istwet
+ use decompMod , only : bounds_type
+ use clm_varpar , only : nlevsoi, nlevgrnd
+ use shr_kind_mod , only : r8 => shr_kind_r8
+ use HillslopeHydrologyUtilsMod, only : HillslopeSoilThicknessProfile_linear
+
+ implicit none
+
+ ! From clm_instInit
+ real(r8), parameter :: soil_depth_lowland = 8.5_r8
+ real(r8), parameter :: soil_depth_upland = 2._r8
+
+ integer, parameter :: nbedrock_dummy_value = 9999
+
+ @TestCase
+ type, extends(TestCase) :: TestInit
+ contains
+ procedure :: setUp
+ procedure :: tearDown
+ end type TestInit
+
+contains
+
+ subroutine setUp(this)
+ ! Set up variables needed for tests: various subgrid type variables, along with
+ ! bounds.
+ !
+ class(TestInit), intent(inout) :: this
+ integer :: g, l, c
+
+ ! Set up subgrid structure
+ ! The weights (of both landunits and columns) and column types in the following are
+ ! arbitrary, since they are not important for these tests
+
+ call unittest_subgrid_setup_start()
+
+ ! Set up gridcell with one landunit and two columns
+ call unittest_add_gridcell()
+ call unittest_add_landunit(my_gi=gi, ltype=istwet, wtgcell=0.25_r8)
+ call unittest_add_column(my_li=li, ctype=1, wtlunit=0.5_r8)
+ call unittest_add_column(my_li=li, ctype=1, wtlunit=0.5_r8)
+
+ call unittest_subgrid_setup_end()
+
+ ! These will be enabled by specific tests
+ col%active(begc:endc) = .false.
+ col%is_hillslope_column(begc:endc) = .false.
+
+ ! Set up hill_distance
+ l = bounds%begl
+ do c = lun%coli(l), lun%colf(l)
+ col%hill_distance(c) = real(c, kind=r8)
+ end do
+
+
+ end subroutine setUp
+
+ subroutine tearDown(this)
+ ! clean up stuff set up in setup()
+ use clm_varcon, only: clm_varcon_clean
+ class(TestInit), intent(inout) :: this
+
+ call unittest_subgrid_teardown()
+ call clm_varcon_clean()
+
+ end subroutine tearDown
+
+ ! Set up ground/soil structure
+ subroutine ground_a(bounds)
+ use clm_varcon, only: clm_varcon_init, zisoi
+ type(bounds_type), intent(in) :: bounds
+ real(r8), allocatable :: my_zisoi(:)
+
+ nlevsoi = 5
+ allocate(my_zisoi(1:nlevsoi))
+ my_zisoi = [0.01_r8, 0.02_r8, 2._r8, 4._r8, 6._r8]
+ nlevgrnd = size(my_zisoi)
+ call clm_varcon_init( is_simple_buildtemp = .true.)
+ zisoi(0) = 0._r8
+ zisoi(1:nlevgrnd) = my_zisoi(:)
+ col%nbedrock(bounds%begc:bounds%endc) = nbedrock_dummy_value
+
+ deallocate(my_zisoi)
+ end subroutine ground_a
+
+ ! Set up ground/soil structure
+ subroutine ground_b(bounds)
+ use clm_varcon, only: clm_varcon_init, zisoi
+ type(bounds_type), intent(in) :: bounds
+ real(r8), allocatable :: my_zisoi(:)
+
+ nlevsoi = 3
+ allocate(my_zisoi(1:nlevsoi))
+ my_zisoi = [0.01_r8, 0.02_r8, 1._r8]
+ nlevgrnd = size(my_zisoi)
+ call clm_varcon_init( is_simple_buildtemp = .true.)
+ zisoi(0) = 0._r8
+ zisoi(1:nlevgrnd) = my_zisoi(:)
+ col%nbedrock(bounds%begc:bounds%endc) = nbedrock_dummy_value
+
+ deallocate(my_zisoi)
+ end subroutine ground_b
+
+ @Test
+ subroutine test_HillslopeSoilThicknessProfile_linear(this)
+ class(TestInit), intent(inout) :: this
+ integer, allocatable :: nbedrock_expected(:)
+ integer :: l, c
+
+ l = bounds%begl
+
+ call ground_a(bounds)
+ col%active(bounds%begc:bounds%endc) = .true.
+ col%is_hillslope_column(bounds%begc:bounds%endc) = .true.
+
+ ! Get expected values
+ ! Column 1 soil_depth_col = 8.5
+ ! Column 2 soil_depth_col = 2.0
+ allocate(nbedrock_expected(bounds%begc:bounds%endc))
+ nbedrock_expected(lun%coli(l)) = nbedrock_dummy_value
+ nbedrock_expected(lun%coli(l) + 1) = 3
+
+ call HillslopeSoilThicknessProfile_linear(bounds, soil_depth_lowland, soil_depth_upland)
+
+ @assertEqual(nbedrock_expected(lun%coli(l):lun%colf(l)), col%nbedrock(lun%coli(l):lun%colf(l)))
+
+ deallocate(nbedrock_expected)
+
+ end subroutine test_HillslopeSoilThicknessProfile_linear
+
+ @Test
+ subroutine test_HillslopeSoilThicknessProfile_linear_tooshallow(this)
+ class(TestInit), intent(inout) :: this
+ integer, allocatable :: nbedrock_expected(:)
+ integer :: l, c
+
+ l = bounds%begl
+
+ call ground_b(bounds)
+ col%active(bounds%begc:bounds%endc) = .true.
+ col%is_hillslope_column(bounds%begc:bounds%endc) = .true.
+
+ ! Get expected values
+ ! Column 1 soil_depth_col = 8.5
+ ! Column 2 soil_depth_col = 2.0; still too deep for ground_b()
+ allocate(nbedrock_expected(bounds%begc:bounds%endc))
+ nbedrock_expected(lun%coli(l)) = nbedrock_dummy_value
+ nbedrock_expected(lun%coli(l) + 1) = nbedrock_dummy_value
+
+ call HillslopeSoilThicknessProfile_linear(bounds, soil_depth_lowland, soil_depth_upland)
+
+ @assertEqual(nbedrock_expected(lun%coli(l):lun%colf(l)), col%nbedrock(lun%coli(l):lun%colf(l)))
+
+ deallocate(nbedrock_expected)
+
+ end subroutine test_HillslopeSoilThicknessProfile_linear_tooshallow
+
+ @Test
+ subroutine test_HillslopeSoilThicknessProfile_linear_noslope(this)
+ class(TestInit), intent(inout) :: this
+ integer, allocatable :: nbedrock_expected(:)
+ integer :: l, c
+ real(r8) :: toosmall_distance
+
+ l = bounds%begl
+
+ call ground_a(bounds)
+ col%active(bounds%begc:bounds%endc) = .true.
+ col%is_hillslope_column(bounds%begc:bounds%endc) = .true.
+
+ ! Get expected values, setting toosmall_distance to something high enough that the (abs(max_hill_dist - min_hill_dist) > toosmall_distance) conditional will fail, causing m = 0.0
+ toosmall_distance = 100._r8
+ ! Column 1 soil_depth_col = 2.0
+ ! Column 2 soil_depth_col = 2.0
+ allocate(nbedrock_expected(bounds%begc:bounds%endc))
+ nbedrock_expected(lun%coli(l)) = 3
+ nbedrock_expected(lun%coli(l) + 1) = 3
+
+ call HillslopeSoilThicknessProfile_linear(bounds, soil_depth_lowland, soil_depth_upland, toosmall_distance_in=toosmall_distance)
+
+ @assertEqual(nbedrock_expected(lun%coli(l):lun%colf(l)), col%nbedrock(lun%coli(l):lun%colf(l)))
+
+ deallocate(nbedrock_expected)
+
+ end subroutine test_HillslopeSoilThicknessProfile_linear_noslope
+
+ @Test
+ subroutine test_HillslopeSoilThicknessProfile_linear_inactive(this)
+ class(TestInit), intent(inout) :: this
+ integer, allocatable :: nbedrock_expected(:)
+ integer :: l, c
+
+ l = bounds%begl
+
+ call ground_a(bounds)
+ col%active(bounds%begc:bounds%endc) = .false.
+ col%is_hillslope_column(bounds%begc:bounds%endc) = .true.
+
+ ! Get expected values
+ ! Column 1 soil_depth_col = 8.5
+ ! Column 2 soil_depth_col = 2.0, but not active
+ allocate(nbedrock_expected(bounds%begc:bounds%endc))
+ nbedrock_expected(lun%coli(l)) = nbedrock_dummy_value
+ nbedrock_expected(lun%coli(l) + 1) = nbedrock_dummy_value
+
+ call HillslopeSoilThicknessProfile_linear(bounds, soil_depth_lowland, soil_depth_upland)
+
+ @assertEqual(nbedrock_expected(lun%coli(l):lun%colf(l)), col%nbedrock(lun%coli(l):lun%colf(l)))
+
+ deallocate(nbedrock_expected)
+
+ end subroutine test_HillslopeSoilThicknessProfile_linear_inactive
+
+ @Test
+ subroutine test_HillslopeSoilThicknessProfile_linear_nohillslope(this)
+ class(TestInit), intent(inout) :: this
+ integer, allocatable :: nbedrock_expected(:)
+ integer :: l, c
+
+ l = bounds%begl
+
+ call ground_a(bounds)
+ col%active(bounds%begc:bounds%endc) = .true.
+ col%is_hillslope_column(bounds%begc:bounds%endc) = .false.
+
+ ! Get expected values
+ ! Column 1 soil_depth_col = 8.5
+ ! Column 2 soil_depth_col = 2.0, but not is_hillslope_column
+ allocate(nbedrock_expected(bounds%begc:bounds%endc))
+ nbedrock_expected(lun%coli(l)) = nbedrock_dummy_value
+ nbedrock_expected(lun%coli(l) + 1) = nbedrock_dummy_value
+
+ call HillslopeSoilThicknessProfile_linear(bounds, soil_depth_lowland, soil_depth_upland)
+
+ @assertEqual(nbedrock_expected(lun%coli(l):lun%colf(l)), col%nbedrock(lun%coli(l):lun%colf(l)))
+
+ deallocate(nbedrock_expected)
+
+ end subroutine test_HillslopeSoilThicknessProfile_linear_nohillslope
+
+end module test_hillslopehydrologyUtils
diff --git a/src/cpl/lilac/lnd_import_export.F90 b/src/cpl/lilac/lnd_import_export.F90
index 281666c3e7..bab24ed37f 100644
--- a/src/cpl/lilac/lnd_import_export.F90
+++ b/src/cpl/lilac/lnd_import_export.F90
@@ -154,11 +154,11 @@ subroutine import_fields( importState, bounds, first_call, rc)
if (ChkErr(rc,__LINE__,u_FILE_u)) return
call state_getimport(importState, 'c2l_fb_atm', 'Faxa_swvdr', bounds, &
- output=atm2lnd_inst%forc_solad_grc(:,1), rc=rc)
+ output=atm2lnd_inst%forc_solad_not_downscaled_grc(:,1), rc=rc)
if (ChkErr(rc,__LINE__,u_FILE_u)) return
call state_getimport(importState, 'c2l_fb_atm', 'Faxa_swndr', bounds, &
- output=atm2lnd_inst%forc_solad_grc(:,2), rc=rc)
+ output=atm2lnd_inst%forc_solad_not_downscaled_grc(:,2), rc=rc)
if (ChkErr(rc,__LINE__,u_FILE_u)) return
call state_getimport(importState, 'c2l_fb_atm', 'Faxa_swvdf', bounds, &
diff --git a/src/cpl/mct/lnd_import_export.F90 b/src/cpl/mct/lnd_import_export.F90
index 3f7e67af68..537abd49d9 100644
--- a/src/cpl/mct/lnd_import_export.F90
+++ b/src/cpl/mct/lnd_import_export.F90
@@ -10,6 +10,7 @@ module lnd_import_export
use Waterlnd2atmBulkType , only: waterlnd2atmbulk_type
use Wateratm2lndBulkType , only: wateratm2lndbulk_type
use clm_cpl_indices
+ use GridcellType , only : grc
!
implicit none
!===============================================================================
@@ -96,8 +97,8 @@ subroutine lnd_import( bounds, x2l, glc_present, atm2lnd_inst, glc2lnd_inst, wat
atm2lnd_inst%forc_topo_grc(g) = x2l(index_x2l_Sa_topo,i) ! Atm surface height (m)
atm2lnd_inst%forc_u_grc(g) = x2l(index_x2l_Sa_u,i) ! forc_uxy Atm state m/s
atm2lnd_inst%forc_v_grc(g) = x2l(index_x2l_Sa_v,i) ! forc_vxy Atm state m/s
- atm2lnd_inst%forc_solad_grc(g,2) = x2l(index_x2l_Faxa_swndr,i) ! forc_sollxy Atm flux W/m^2
- atm2lnd_inst%forc_solad_grc(g,1) = x2l(index_x2l_Faxa_swvdr,i) ! forc_solsxy Atm flux W/m^2
+ atm2lnd_inst%forc_solad_not_downscaled_grc(g,2) = x2l(index_x2l_Faxa_swndr,i) ! forc_sollxy Atm flux W/m^2
+ atm2lnd_inst%forc_solad_not_downscaled_grc(g,1) = x2l(index_x2l_Faxa_swvdr,i) ! forc_solsxy Atm flux W/m^2
atm2lnd_inst%forc_solai_grc(g,2) = x2l(index_x2l_Faxa_swndf,i) ! forc_solldxy Atm flux W/m^2
atm2lnd_inst%forc_solai_grc(g,1) = x2l(index_x2l_Faxa_swvdf,i) ! forc_solsdxy Atm flux W/m^2
diff --git a/src/cpl/nuopc/lnd_import_export.F90 b/src/cpl/nuopc/lnd_import_export.F90
index 5ed5ff76d1..11cc807640 100644
--- a/src/cpl/nuopc/lnd_import_export.F90
+++ b/src/cpl/nuopc/lnd_import_export.F90
@@ -9,7 +9,7 @@ module lnd_import_export
use NUOPC_Model , only : NUOPC_ModelGet
use shr_kind_mod , only : r8 => shr_kind_r8, cx=>shr_kind_cx, cxx=>shr_kind_cxx, cs=>shr_kind_cs
use shr_sys_mod , only : shr_sys_abort
- use clm_varctl , only : iulog
+ use clm_varctl , only : iulog, use_hillslope_routing
use clm_time_manager , only : get_nstep
use decompmod , only : bounds_type, get_proc_bounds
use lnd2atmType , only : lnd2atm_type
@@ -99,6 +99,8 @@ module lnd_import_export
character(*), parameter :: Flrr_flood = 'Flrr_flood'
character(*), parameter :: Flrr_volr = 'Flrr_volr'
character(*), parameter :: Flrr_volrmch = 'Flrr_volrmch'
+ character(*), parameter :: Sr_tdepth = 'Sr_tdepth'
+ character(*), parameter :: Sr_tdepth_max = 'Sr_tdepth_max'
character(*), parameter :: Sg_ice_covered_elev = 'Sg_ice_covered_elev'
character(*), parameter :: Sg_topo_elev = 'Sg_topo_elev'
character(*), parameter :: Flgg_hflx_elev = 'Flgg_hflx_elev'
@@ -388,6 +390,8 @@ subroutine advertise_fields(gcomp, flds_scalar_name, glc_present, cism_evolve, r
call fldlist_add(fldsToLnd_num, fldsToLnd, Flrr_flood )
call fldlist_add(fldsToLnd_num, fldsToLnd, Flrr_volr )
call fldlist_add(fldsToLnd_num, fldsToLnd, Flrr_volrmch )
+ call fldlist_add(fldsToLnd_num, fldsToLnd, Sr_tdepth )
+ call fldlist_add(fldsToLnd_num, fldsToLnd, Sr_tdepth_max )
end if
if (glc_present) then
@@ -549,9 +553,9 @@ subroutine import_fields( gcomp, bounds, glc_present, rof_prognostic, &
if (ChkErr(rc,__LINE__,u_FILE_u)) return
call state_getimport_1d(importState, Faxa_lwdn , atm2lnd_inst%forc_lwrad_not_downscaled_grc(begg:), rc=rc)
if (ChkErr(rc,__LINE__,u_FILE_u)) return
- call state_getimport_1d(importState, Faxa_swvdr, atm2lnd_inst%forc_solad_grc(begg:,1), rc=rc)
+ call state_getimport_1d(importState, Faxa_swvdr, atm2lnd_inst%forc_solad_not_downscaled_grc(begg:,1), rc=rc)
if (ChkErr(rc,__LINE__,u_FILE_u)) return
- call state_getimport_1d(importState, Faxa_swndr, atm2lnd_inst%forc_solad_grc(begg:,2), rc=rc)
+ call state_getimport_1d(importState, Faxa_swndr, atm2lnd_inst%forc_solad_not_downscaled_grc(begg:,2), rc=rc)
if (ChkErr(rc,__LINE__,u_FILE_u)) return
call state_getimport_1d(importState, Faxa_swvdf, atm2lnd_inst%forc_solai_grc(begg:,1), rc=rc)
if (ChkErr(rc,__LINE__,u_FILE_u)) return
@@ -608,6 +612,20 @@ subroutine import_fields( gcomp, bounds, glc_present, rof_prognostic, &
wateratm2lndbulk_inst%volrmch_grc(:) = 0._r8
end if
+ if (fldchk(importState, Sr_tdepth)) then
+ call state_getimport_1d(importState, Sr_tdepth, wateratm2lndbulk_inst%tdepth_grc(begg:), rc=rc)
+ if (ChkErr(rc,__LINE__,u_FILE_u)) return
+ else
+ wateratm2lndbulk_inst%tdepth_grc(:) = 0._r8
+ end if
+
+ if (fldchk(importState, Sr_tdepth_max)) then
+ call state_getimport_1d(importState, Sr_tdepth_max, wateratm2lndbulk_inst%tdepthmax_grc(begg:), rc=rc)
+ if (ChkErr(rc,__LINE__,u_FILE_u)) return
+ else
+ wateratm2lndbulk_inst%tdepthmax_grc(:) = 0._r8
+ end if
+
!--------------------------
! Derived quantities for required fields
! and corresponding error checks
@@ -891,6 +909,10 @@ subroutine export_fields( gcomp, bounds, glc_present, rof_prognostic, &
do g = begg, endg
data1d(g) = waterlnd2atmbulk_inst%qflx_rofliq_qsub_grc(g) + &
waterlnd2atmbulk_inst%qflx_rofliq_drain_perched_grc(g)
+ if (use_hillslope_routing) then
+ data1d(g) = data1d(g) + &
+ waterlnd2atmbulk_inst%qflx_rofliq_stream_grc(g)
+ endif
end do
call state_setexport_1d(exportState, Flrl_rofsub, data1d(begg:), init_spval=.true., rc=rc)
if (ChkErr(rc,__LINE__,u_FILE_u)) return
diff --git a/src/cpl/utils/lnd_import_export_utils.F90 b/src/cpl/utils/lnd_import_export_utils.F90
index 4b7941da5b..1b40cb0e6c 100644
--- a/src/cpl/utils/lnd_import_export_utils.F90
+++ b/src/cpl/utils/lnd_import_export_utils.F90
@@ -76,8 +76,11 @@ subroutine derive_quantities( bounds, atm2lnd_inst, wateratm2lndbulk_inst, &
atm2lnd_inst%forc_wind_grc(g) = sqrt(atm2lnd_inst%forc_u_grc(g)**2 + atm2lnd_inst%forc_v_grc(g)**2)
- atm2lnd_inst%forc_solar_grc(g) = atm2lnd_inst%forc_solad_grc(g,1) + atm2lnd_inst%forc_solai_grc(g,1) + &
- atm2lnd_inst%forc_solad_grc(g,2) + atm2lnd_inst%forc_solai_grc(g,2)
+ atm2lnd_inst%forc_solar_not_downscaled_grc(g) = &
+ atm2lnd_inst%forc_solad_not_downscaled_grc(g,1) &
+ + atm2lnd_inst%forc_solai_grc(g,1) &
+ + atm2lnd_inst%forc_solad_not_downscaled_grc(g,2) &
+ + atm2lnd_inst%forc_solai_grc(g,2)
wateratm2lndbulk_inst%forc_rain_not_downscaled_grc(g) = forc_rainc(g) + forc_rainl(g)
wateratm2lndbulk_inst%forc_snow_not_downscaled_grc(g) = forc_snowc(g) + forc_snowl(g)
@@ -118,8 +121,8 @@ subroutine check_for_errors( bounds, atm2lnd_inst, wateratm2lndbulk_inst )
call shr_sys_abort( subname//&
' ERROR: Longwave down sent from the atmosphere model is negative or zero' )
end if
- if ( (atm2lnd_inst%forc_solad_grc(g,1) < 0.0_r8) .or. &
- (atm2lnd_inst%forc_solad_grc(g,2) < 0.0_r8) .or. &
+ if ( (atm2lnd_inst%forc_solad_not_downscaled_grc(g,1) < 0.0_r8) .or. &
+ (atm2lnd_inst%forc_solad_not_downscaled_grc(g,2) < 0.0_r8) .or. &
(atm2lnd_inst%forc_solai_grc(g,1) < 0.0_r8) .or. &
(atm2lnd_inst%forc_solai_grc(g,2) < 0.0_r8) ) then
call shr_sys_abort( subname//&
@@ -141,6 +144,7 @@ end subroutine check_for_errors
!=============================================================================
subroutine check_for_nans(array, fname, begg, direction)
+ use GridcellType , only : grc
! input/output variables
real(r8) , intent(in) :: array(:)
@@ -159,7 +163,7 @@ subroutine check_for_nans(array, fname, begg, direction)
write(iulog,*) 'Which are NaNs = ', isnan(array)
do i = 1, size(array)
if (isnan(array(i))) then
- write(iulog,*) "NaN found in field ", trim(fname), ' at gridcell index ',begg+i-1
+ write(iulog,*) "NaN found in field ", trim(fname), ' at gridcell index/lon/lat: ',begg+i-1,grc%londeg(begg+i-1),grc%latdeg(begg+i-1)
end if
end do
call shr_sys_abort(' ERROR: One or more of the CTSM cap '//direction//' fields are NaN ' )
diff --git a/src/main/ColumnType.F90 b/src/main/ColumnType.F90
index 5f57b3ed23..ab7ee8e261 100644
--- a/src/main/ColumnType.F90
+++ b/src/main/ColumnType.F90
@@ -68,8 +68,20 @@ module ColumnType
real(r8), pointer :: z_lake (:,:) ! layer depth for lake (m)
real(r8), pointer :: lakedepth (:) ! variable lake depth (m)
integer , pointer :: nbedrock (:) ! variable depth to bedrock index
+ ! hillslope hydrology variables
+ integer, pointer :: col_ndx (:) ! column index of column (hillslope hydrology)
+ integer, pointer :: colu (:) ! column index of uphill column (hillslope hydrology)
+ integer, pointer :: cold (:) ! column index of downhill column (hillslope hydrology)
+ integer, pointer :: hillslope_ndx (:) ! hillslope identifier
+ real(r8), pointer :: hill_elev (:) ! mean elevation of column relative to stream channel (m)
+ real(r8), pointer :: hill_slope (:) ! mean along-hill slope (m/m)
+ real(r8), pointer :: hill_area (:) ! mean surface area (m2)
+ real(r8), pointer :: hill_width (:) ! across-hill width of bottom boundary of column (m)
+ real(r8), pointer :: hill_distance (:) ! along-hill distance of column from bottom of hillslope (m)
+ real(r8), pointer :: hill_aspect (:) ! azimuth angle of column wrt north, positive to east (radians)
! other column characteristics
+ logical , pointer :: is_hillslope_column(:) ! true if this column is a hillslope element
logical , pointer :: hydrologically_active(:) ! true if this column is a hydrologically active type
logical , pointer :: urbpoi (:) ! true=>urban point
@@ -130,13 +142,22 @@ subroutine Init(this, begc, endc)
allocate(this%lakedepth (begc:endc)) ; this%lakedepth (:) = spval
allocate(this%dz_lake (begc:endc,nlevlak)) ; this%dz_lake (:,:) = nan
allocate(this%z_lake (begc:endc,nlevlak)) ; this%z_lake (:,:) = nan
-
+ allocate(this%col_ndx (begc:endc)) ; this%col_ndx(:) = ispval
+ allocate(this%colu (begc:endc)) ; this%colu (:) = ispval
+ allocate(this%cold (begc:endc)) ; this%cold (:) = ispval
+ allocate(this%hillslope_ndx(begc:endc)) ; this%hillslope_ndx (:) = ispval
+ allocate(this%hill_elev(begc:endc)) ; this%hill_elev (:) = spval
+ allocate(this%hill_slope(begc:endc)) ; this%hill_slope (:) = spval
+ allocate(this%hill_area(begc:endc)) ; this%hill_area (:) = spval
+ allocate(this%hill_width(begc:endc)) ; this%hill_width (:) = spval
+ allocate(this%hill_distance(begc:endc)) ; this%hill_distance (:) = spval
+ allocate(this%hill_aspect(begc:endc)) ; this%hill_aspect (:) = spval
allocate(this%nbedrock (begc:endc)) ; this%nbedrock (:) = ispval
allocate(this%levgrnd_class(begc:endc,nlevmaxurbgrnd)) ; this%levgrnd_class(:,:) = ispval
allocate(this%micro_sigma (begc:endc)) ; this%micro_sigma (:) = nan
allocate(this%topo_slope (begc:endc)) ; this%topo_slope (:) = nan
allocate(this%topo_std (begc:endc)) ; this%topo_std (:) = nan
-
+ allocate(this%is_hillslope_column(begc:endc)) ; this%is_hillslope_column(:) = .false.
allocate(this%hydrologically_active(begc:endc)) ; this%hydrologically_active(:) = .false.
allocate(this%urbpoi (begc:endc)) ; this%urbpoi (:) = .false.
@@ -174,9 +195,19 @@ subroutine Clean(this)
deallocate(this%topo_std )
deallocate(this%nbedrock )
deallocate(this%levgrnd_class)
+ deallocate(this%is_hillslope_column)
deallocate(this%hydrologically_active)
- deallocate(this%urbpoi)
-
+ deallocate(this%col_ndx )
+ deallocate(this%colu )
+ deallocate(this%cold )
+ deallocate(this%hillslope_ndx)
+ deallocate(this%hill_elev )
+ deallocate(this%hill_slope )
+ deallocate(this%hill_area )
+ deallocate(this%hill_width )
+ deallocate(this%hill_distance)
+ deallocate(this%hill_aspect )
+ deallocate(this%urbpoi )
end subroutine Clean
!-----------------------------------------------------------------------
diff --git a/src/main/LandunitType.F90 b/src/main/LandunitType.F90
index 22770d2334..3a5c68c4f3 100644
--- a/src/main/LandunitType.F90
+++ b/src/main/LandunitType.F90
@@ -32,6 +32,7 @@ module LandunitType
integer , pointer :: coli (:) ! beginning column index per landunit
integer , pointer :: colf (:) ! ending column index for each landunit
integer , pointer :: ncolumns (:) ! number of columns for each landunit
+ integer , pointer :: nhillslopes (:) ! number of hillslopes for each landunit
integer , pointer :: patchi (:) ! beginning patch index for each landunit
integer , pointer :: patchf (:) ! ending patch index for each landunit
integer , pointer :: npatches (:) ! number of patches for each landunit
@@ -52,6 +53,13 @@ module LandunitType
real(r8), pointer :: z_0_town (:) ! urban landunit momentum roughness length (m)
real(r8), pointer :: z_d_town (:) ! urban landunit displacement height (m)
+ ! hillslope variables
+ real(r8), pointer :: stream_channel_depth (:) ! stream channel bankfull depth (m)
+ real(r8), pointer :: stream_channel_width (:) ! stream channel bankfull width (m)
+ real(r8), pointer :: stream_channel_length (:) ! stream channel length (m)
+ real(r8), pointer :: stream_channel_slope (:) ! stream channel slope (m/m)
+ real(r8), pointer :: stream_channel_number (:) ! number of channels in landunit
+
contains
procedure, public :: Init ! Allocate and initialize
@@ -82,6 +90,7 @@ subroutine Init(this, begl, endl)
allocate(this%coli (begl:endl)); this%coli (:) = ispval
allocate(this%colf (begl:endl)); this%colf (:) = ispval
allocate(this%ncolumns (begl:endl)); this%ncolumns (:) = ispval
+ allocate(this%nhillslopes (begl:endl)); this%nhillslopes(:) = ispval
allocate(this%patchi (begl:endl)); this%patchi (:) = ispval
allocate(this%patchf (begl:endl)); this%patchf (:) = ispval
allocate(this%npatches (begl:endl)); this%npatches (:) = ispval
@@ -102,6 +111,13 @@ subroutine Init(this, begl, endl)
allocate(this%z_0_town (begl:endl)); this%z_0_town (:) = nan
allocate(this%z_d_town (begl:endl)); this%z_d_town (:) = nan
+ ! Hillslope variables initialized in HillslopeHydrologyMod
+ allocate(this%stream_channel_depth(begl:endl)); this%stream_channel_depth (:) = nan
+ allocate(this%stream_channel_width(begl:endl)); this%stream_channel_width (:) = nan
+ allocate(this%stream_channel_length(begl:endl)); this%stream_channel_length (:) = nan
+ allocate(this%stream_channel_slope(begl:endl)); this%stream_channel_slope (:) = nan
+ allocate(this%stream_channel_number(begl:endl)); this%stream_channel_number (:) = nan
+
end subroutine Init
!------------------------------------------------------------------------
@@ -119,6 +135,7 @@ subroutine Clean(this)
deallocate(this%coli )
deallocate(this%colf )
deallocate(this%ncolumns )
+ deallocate(this%nhillslopes )
deallocate(this%patchi )
deallocate(this%patchf )
deallocate(this%npatches )
@@ -134,7 +151,11 @@ subroutine Clean(this)
deallocate(this%wtlunit_roof )
deallocate(this%z_0_town )
deallocate(this%z_d_town )
-
+ deallocate(this%stream_channel_depth)
+ deallocate(this%stream_channel_width)
+ deallocate(this%stream_channel_length)
+ deallocate(this%stream_channel_slope)
+ deallocate(this%stream_channel_number)
end subroutine Clean
end module LandunitType
diff --git a/src/main/TopoMod.F90 b/src/main/TopoMod.F90
index e14762cc21..b081c77482 100644
--- a/src/main/TopoMod.F90
+++ b/src/main/TopoMod.F90
@@ -13,8 +13,9 @@ module TopoMod
use LandunitType , only : lun
use glc2lndMod , only : glc2lnd_type
use glcBehaviorMod , only : glc_behavior_type
- use landunit_varcon, only : istice
+ use landunit_varcon, only : istice, istsoil
use filterColMod , only : filter_col_type, col_filter_from_logical_array_active_only
+ use clm_varctl , only : use_hillslope, downscale_hillslope_meteorology
!
! !PUBLIC TYPES:
implicit none
@@ -139,8 +140,14 @@ subroutine InitCold(this, bounds)
! For other landunits, arbitrarily initialize topo_col to 0 m; for landunits
! where this matters, this will get overwritten in the run loop by values sent
! from CISM
- this%topo_col(c) = 0._r8
- this%needs_downscaling_col(c) = .false.
+ if (col%is_hillslope_column(c) .and. downscale_hillslope_meteorology) then
+ this%topo_col(c) = col%hill_elev(c)
+ this%needs_downscaling_col(c) = .true.
+ else
+ this%topo_col(c) = 0._r8
+ this%needs_downscaling_col(c) = .false.
+ endif
+
end if
end do
@@ -218,7 +225,9 @@ subroutine UpdateTopo(this, bounds, num_icec, filter_icec, &
!
! !LOCAL VARIABLES:
integer :: begc, endc
- integer :: c, g
+ integer :: c, l, g
+ real(r8), allocatable :: mean_hillslope_elevation(:)
+ real(r8):: mhe_norm
character(len=*), parameter :: subname = 'UpdateTopo'
!-----------------------------------------------------------------------
@@ -240,18 +249,48 @@ subroutine UpdateTopo(this, bounds, num_icec, filter_icec, &
this%topo_col(begc:endc), &
this%needs_downscaling_col(begc:endc))
- ! For any point that isn't downscaled, set its topo value to the atmosphere's
- ! topographic height. This shouldn't matter, but is useful if topo_col is written to
- ! the history file.
- !
+ ! calculate area-weighted mean hillslope elevation on each landunit
+ if (use_hillslope) then
+ allocate(mean_hillslope_elevation(bounds%begl:bounds%endl))
+ mean_hillslope_elevation(:) = 0._r8
+ do l = bounds%begl, bounds%endl
+ mhe_norm = 0._r8
+ do c = lun%coli(l), lun%colf(l)
+ if (col%is_hillslope_column(c)) then
+ mean_hillslope_elevation(l) = mean_hillslope_elevation(l) &
+ + col%hill_elev(c)*col%hill_area(c)
+ mhe_norm = mhe_norm + col%hill_area(c)
+ endif
+ enddo
+ if (mhe_norm > 0) then
+ mean_hillslope_elevation(l) = mean_hillslope_elevation(l)/mhe_norm
+ endif
+ enddo
+ endif
+
! This could operate over a filter like 'allc' in order to just operate over active
! points, but I'm not sure that would speed things up much, and would require passing
! in this additional filter.
+
do c = bounds%begc, bounds%endc
if (.not. this%needs_downscaling_col(c)) then
+ ! For any point that isn't already set to be downscaled, set its topo value to the
+ ! atmosphere's topographic height. This is important for the hillslope block
+ ! below. For non-hillslope columns, this shouldn't matter, but is useful if
+ ! topo_col is written to the history file.
g = col%gridcell(c)
this%topo_col(c) = atm_topo(g)
end if
+ ! If needs_downscaling_col was already set, then that implies
+ ! that topo_col was previously set by update_glc2lnd_topo.
+ ! In that case, topo_col should be used as a starting point,
+ ! rather than the atmosphere's topo value.
+ if (col%is_hillslope_column(c) .and. downscale_hillslope_meteorology) then
+ l = col%landunit(c)
+ this%topo_col(c) = this%topo_col(c) &
+ + (col%hill_elev(c) - mean_hillslope_elevation(l))
+ this%needs_downscaling_col(c) = .true.
+ endif
end do
call glc_behavior%update_glc_classes(bounds, this%topo_col(begc:endc))
diff --git a/src/main/atm2lndMod.F90 b/src/main/atm2lndMod.F90
index 11e05f1496..5da4ff6333 100644
--- a/src/main/atm2lndMod.F90
+++ b/src/main/atm2lndMod.F90
@@ -18,12 +18,14 @@ module atm2lndMod
use decompMod , only : bounds_type, subgrid_level_gridcell, subgrid_level_column
use atm2lndType , only : atm2lnd_type
use TopoMod , only : topo_type
+ use SurfaceAlbedoType, only : surfalb_type
use filterColMod , only : filter_col_type
use LandunitType , only : lun
use ColumnType , only : col
use landunit_varcon, only : istice
use WaterType , only : water_type
use Wateratm2lndBulkType, only : wateratm2lndbulk_type
+
!
! !PUBLIC TYPES:
implicit none
@@ -46,6 +48,9 @@ module atm2lndMod
private :: build_normalization ! Compute normalization factors so that downscaled fields are conservative
private :: check_downscale_consistency ! Check consistency of downscaling
+ private :: downscale_hillslope_solar ! Downscale incoming direct solar radiation based on local slope and aspect.
+ private :: downscale_hillslope_precipitation ! Downscale precipitation based on local topographic height.
+
character(len=*), parameter, private :: sourcefile = &
__FILE__
!-----------------------------------------------------------------------
@@ -91,7 +96,7 @@ end subroutine set_atm2lnd_water_tracers
!-----------------------------------------------------------------------
subroutine downscale_forcings(bounds, &
- topo_inst, atm2lnd_inst, wateratm2lndbulk_inst, eflx_sh_precip_conversion)
+ topo_inst, atm2lnd_inst, surfalb_inst, wateratm2lndbulk_inst, eflx_sh_precip_conversion)
!
! !DESCRIPTION:
! Downscale atmospheric forcing fields from gridcell to column.
@@ -111,12 +116,14 @@ subroutine downscale_forcings(bounds, &
!
! !USES:
use clm_varcon , only : rair, cpair, grav
+ use clm_varctl , only : use_hillslope,downscale_hillslope_meteorology
use QsatMod , only : Qsat
!
! !ARGUMENTS:
type(bounds_type) , intent(in) :: bounds
class(topo_type) , intent(in) :: topo_inst
type(atm2lnd_type) , intent(inout) :: atm2lnd_inst
+ class(surfalb_type) , intent(in) :: surfalb_inst
type(wateratm2lndbulk_type) , intent(inout) :: wateratm2lndbulk_inst
real(r8) , intent(out) :: eflx_sh_precip_conversion(bounds%begc:) ! sensible heat flux from precipitation conversion (W/m**2) [+ to atm]
!
@@ -143,6 +150,8 @@ subroutine downscale_forcings(bounds, &
! Gridcell-level metadata:
forc_topo_g => atm2lnd_inst%forc_topo_grc , & ! Input: [real(r8) (:)] atmospheric surface height (m)
+ forc_rain_g => wateratm2lndbulk_inst%forc_rain_not_downscaled_grc , & ! Input: [real(r8) (:)] rain rate [mm/s]
+ forc_snow_g => wateratm2lndbulk_inst%forc_snow_not_downscaled_grc , & ! Input: [real(r8) (:)] snow rate [mm/s]
! Column-level metadata:
topo_c => topo_inst%topo_col , & ! Input: [real(r8) (:)] column surface height (m)
@@ -153,13 +162,19 @@ subroutine downscale_forcings(bounds, &
forc_q_g => wateratm2lndbulk_inst%forc_q_not_downscaled_grc , & ! Input: [real(r8) (:)] atmospheric specific humidity (kg/kg)
forc_pbot_g => atm2lnd_inst%forc_pbot_not_downscaled_grc , & ! Input: [real(r8) (:)] atmospheric pressure (Pa)
forc_rho_g => atm2lnd_inst%forc_rho_not_downscaled_grc , & ! Input: [real(r8) (:)] atmospheric density (kg/m**3)
-
+ forc_solad_g => atm2lnd_inst%forc_solad_not_downscaled_grc , & ! Input: [real(r8) (:,:)] gridcell direct incoming solar radiation
+ forc_solar_g => atm2lnd_inst%forc_solar_not_downscaled_grc, & ! Input: [real(r8) (:)] gridcell total incoming solar radiation
+
! Column-level downscaled fields:
+ forc_rain_c => wateratm2lndbulk_inst%forc_rain_downscaled_col , & ! Output: [real(r8) (:)] rain rate [mm/s]
+ forc_snow_c => wateratm2lndbulk_inst%forc_snow_downscaled_col , & ! Output: [real(r8) (:)] snow rate [mm/s]
+ forc_q_c => wateratm2lndbulk_inst%forc_q_downscaled_col , & ! Output: [real(r8) (:)] atmospheric specific humidity (kg/kg)
forc_t_c => atm2lnd_inst%forc_t_downscaled_col , & ! Output: [real(r8) (:)] atmospheric temperature (Kelvin)
forc_th_c => atm2lnd_inst%forc_th_downscaled_col , & ! Output: [real(r8) (:)] atmospheric potential temperature (Kelvin)
- forc_q_c => wateratm2lndbulk_inst%forc_q_downscaled_col , & ! Output: [real(r8) (:)] atmospheric specific humidity (kg/kg)
forc_pbot_c => atm2lnd_inst%forc_pbot_downscaled_col , & ! Output: [real(r8) (:)] atmospheric pressure (Pa)
- forc_rho_c => atm2lnd_inst%forc_rho_downscaled_col & ! Output: [real(r8) (:)] atmospheric density (kg/m**3)
+ forc_rho_c => atm2lnd_inst%forc_rho_downscaled_col , & ! Output: [real(r8) (:)] atmospheric density (kg/m**3)
+ forc_solad_c => atm2lnd_inst%forc_solad_downscaled_col , & ! Output: [real(r8) (:,:)] column direct incoming solar radiation
+ forc_solar_c => atm2lnd_inst%forc_solar_downscaled_col & ! Output: [real(r8) (:)] column total incoming solar radiation
)
! Initialize column forcing (needs to be done for ALL active columns)
@@ -167,11 +182,15 @@ subroutine downscale_forcings(bounds, &
if (col%active(c)) then
g = col%gridcell(c)
+ forc_rain_c(c) = forc_rain_g(g)
+ forc_snow_c(c) = forc_snow_g(g)
forc_t_c(c) = forc_t_g(g)
forc_th_c(c) = forc_th_g(g)
forc_q_c(c) = forc_q_g(g)
forc_pbot_c(c) = forc_pbot_g(g)
forc_rho_c(c) = forc_rho_g(g)
+ forc_solar_c(c) = forc_solar_g(g)
+ forc_solad_c(c,1:numrad) = forc_solad_g(g,1:numrad)
end if
end do
@@ -247,6 +266,12 @@ subroutine downscale_forcings(bounds, &
end do
+ ! adjust hillslope precipitation before repartitioning rain/snow
+ if (use_hillslope .and. downscale_hillslope_meteorology) then
+ call downscale_hillslope_solar(bounds, atm2lnd_inst, surfalb_inst)
+ call downscale_hillslope_precipitation(bounds, topo_inst, atm2lnd_inst, wateratm2lndbulk_inst)
+ endif
+
call partition_precip(bounds, atm2lnd_inst, wateratm2lndbulk_inst, &
eflx_sh_precip_conversion(bounds%begc:bounds%endc))
@@ -312,10 +337,6 @@ subroutine partition_precip(bounds, atm2lnd_inst, wateratm2lndbulk_inst, eflx_sh
SHR_ASSERT_ALL_FL((ubound(eflx_sh_precip_conversion) == (/bounds%endc/)), sourcefile, __LINE__)
associate(&
- ! Gridcell-level non-downscaled fields:
- forc_rain_g => wateratm2lndbulk_inst%forc_rain_not_downscaled_grc , & ! Input: [real(r8) (:)] rain rate [mm/s]
- forc_snow_g => wateratm2lndbulk_inst%forc_snow_not_downscaled_grc , & ! Input: [real(r8) (:)] snow rate [mm/s]
-
! Column-level downscaled fields:
forc_t_c => atm2lnd_inst%forc_t_downscaled_col , & ! Input: [real(r8) (:)] atmospheric temperature (Kelvin)
forc_rain_c => wateratm2lndbulk_inst%forc_rain_downscaled_col , & ! Output: [real(r8) (:)] rain rate [mm/s]
@@ -328,8 +349,6 @@ subroutine partition_precip(bounds, atm2lnd_inst, wateratm2lndbulk_inst, eflx_sh
do c = bounds%begc,bounds%endc
if (col%active(c)) then
g = col%gridcell(c)
- forc_rain_c(c) = forc_rain_g(g)
- forc_snow_c(c) = forc_snow_g(g)
rain_to_snow_conversion_c(c) = 0._r8
snow_to_rain_conversion_c(c) = 0._r8
eflx_sh_precip_conversion(c) = 0._r8
@@ -719,4 +738,250 @@ subroutine check_downscale_consistency(bounds, atm2lnd_inst, wateratm2lndbulk_in
end subroutine check_downscale_consistency
+ subroutine downscale_hillslope_solar(bounds, atm2lnd_inst, surfalb_inst)
+ !
+ ! !DESCRIPTION:
+ ! Downscale incoming direct solar radiation based on local slope and aspect.
+ !
+ ! This is currently applied over columns
+ !
+ ! USES
+ use clm_varpar , only : numrad
+
+ ! !ARGUMENTS:
+ type(bounds_type) , intent(in) :: bounds
+ type(surfalb_type) , intent(in) :: surfalb_inst
+ type(atm2lnd_type) , intent(inout) :: atm2lnd_inst
+ !
+ ! !LOCAL VARIABLES:
+ integer :: c,l,g,n ! indices
+ real(r8) :: norm(numrad)
+ real(r8) :: sum_solar(bounds%begg:bounds%endg,numrad)
+ real(r8) :: sum_wtgcell(bounds%begg:bounds%endg)
+ real(r8) :: illum_frac(bounds%begg:bounds%endg)
+ real(r8), parameter :: illumination_threshold = 0.05
+ logical :: checkConservation = .true.
+
+ character(len=*), parameter :: subname = 'downscale_hillslope_solar'
+ !-----------------------------------------------------------------------
+
+ associate(&
+ ! Gridcell-level fields:
+ forc_solai_grc => atm2lnd_inst%forc_solai_grc , & ! Input: [real(r8) (:,:)] gridcell diffuse incoming solar radiation
+ forc_solad_grc => atm2lnd_inst%forc_solad_not_downscaled_grc , & ! Input: [real(r8) (:,:)] gridcell direct incoming solar radiation
+ coszen_grc => surfalb_inst%coszen_grc , & ! Input: [real(r8) (:)] cosine of solar zenith angle
+
+ ! Column-level fields:
+ coszen_col => surfalb_inst%coszen_col , & ! Input: [real(r8) (:)] cosine of solar zenith angle
+ forc_solar_col => atm2lnd_inst%forc_solar_downscaled_col , & ! Output: [real(r8) (:)] column total incoming solar radiation
+ forc_solad_col => atm2lnd_inst%forc_solad_downscaled_col & ! Output: [real(r8) (:,:)] column direct incoming solar radiation
+ )
+
+ ! Initialize column forcing
+ sum_solar(bounds%begg:bounds%endg,1:numrad) = 0._r8
+ sum_wtgcell(bounds%begg:bounds%endg) = 0._r8
+ illum_frac(bounds%begg:bounds%endg) = 0._r8
+ do c = bounds%begc,bounds%endc
+ if (col%is_hillslope_column(c) .and. col%active(c)) then
+ g = col%gridcell(c)
+ if (coszen_grc(g) > 0._r8) then
+ forc_solad_col(c,1:numrad) = forc_solad_grc(g,1:numrad)*(coszen_col(c)/coszen_grc(g))
+ if (coszen_col(c) > 0._r8) then
+ illum_frac(g) = illum_frac(g) + col%wtgcell(c)
+ endif
+ endif
+
+ sum_solar(g,1:numrad) = sum_solar(g,1:numrad) + col%wtgcell(c)*forc_solad_col(c,1:numrad)
+ sum_wtgcell(g) = sum_wtgcell(g) + col%wtgcell(c)
+ end if
+ end do
+
+ ! Calculate illuminated fraction of gridcell
+ do g = bounds%begg,bounds%endg
+ if (sum_wtgcell(g) > 0._r8) then
+ illum_frac(g) = illum_frac(g)/sum_wtgcell(g)
+ endif
+ enddo
+
+ ! Normalize column level solar
+ do c = bounds%begc,bounds%endc
+ if (col%is_hillslope_column(c) .and. col%active(c)) then
+ g = col%gridcell(c)
+ do n = 1,numrad
+ ! absorbed energy is solar flux x area landunit (sum_wtgcell)
+ if(sum_solar(g,n) > 0._r8 .and. illum_frac(g) > illumination_threshold) then
+ norm(n) = sum_wtgcell(g)*forc_solad_grc(g,n)/sum_solar(g,n)
+ forc_solad_col(c,n) = forc_solad_col(c,n)*norm(n)
+ else
+ forc_solad_col(c,n) = forc_solad_grc(g,n)
+ endif
+ enddo
+ forc_solar_col(c) = sum(forc_solad_col(c,1:numrad))+sum(forc_solai_grc(g,1:numrad))
+ end if
+
+ end do
+
+ ! check conservation
+ if(checkConservation) then
+ sum_solar(bounds%begg:bounds%endg,1:numrad) = 0._r8
+ sum_wtgcell(bounds%begg:bounds%endg) = 0._r8
+ ! Calculate normalization (area-weighted solar flux)
+ do c = bounds%begc,bounds%endc
+ if (col%is_hillslope_column(c) .and. col%active(c)) then
+ g = col%gridcell(c)
+ do n = 1,numrad
+ sum_solar(g,n) = sum_solar(g,n) + col%wtgcell(c)*forc_solad_col(c,n)
+ enddo
+ sum_wtgcell(g) = sum_wtgcell(g) + col%wtgcell(c)
+ end if
+ end do
+ do g = bounds%begg,bounds%endg
+ do n = 1,numrad
+ if(abs(sum_solar(g,n) - sum_wtgcell(g)*forc_solad_grc(g,n)) > 1.e-6) then
+ write(iulog,*) 'downscaled solar not conserved', g, n, sum_solar(g,n), sum_wtgcell(g)*forc_solad_grc(g,n)
+ call endrun(subgrid_index=g, subgrid_level=subgrid_level_gridcell, &
+ msg=' ERROR: Energy conservation error downscaling solar'//&
+ errMsg(sourcefile, __LINE__))
+ endif
+ enddo
+ enddo
+ endif
+
+
+ end associate
+
+ end subroutine downscale_hillslope_solar
+
+ !-----------------------------------------------------------------------
+ subroutine downscale_hillslope_precipitation(bounds, &
+ topo_inst, atm2lnd_inst, wateratm2lndbulk_inst)
+ !
+ ! !DESCRIPTION:
+ ! Downscale precipitation from gridcell to column.
+ !
+ ! Downscaling is done based on the difference between each CLM column's elevation and
+ ! the atmosphere's surface elevation (which is the elevation at which the atmospheric
+ ! forcings are valid).
+ !
+ ! !USES:
+ use clm_varcon , only : rair, cpair, grav
+ !
+ ! !ARGUMENTS:
+ type(bounds_type) , intent(in) :: bounds
+ class(topo_type) , intent(in) :: topo_inst
+ type(atm2lnd_type) , intent(in) :: atm2lnd_inst
+ type(wateratm2lndbulk_type) , intent(inout) :: wateratm2lndbulk_inst
+ !
+ ! !LOCAL VARIABLES:
+ integer :: g, l, c, fc ! indices
+
+ ! temporaries for topo downscaling
+ real(r8) :: precip_anom, topo_anom
+ real(r8) :: norm_rain(bounds%begg:bounds%endg)
+ real(r8) :: norm_snow(bounds%begg:bounds%endg)
+ real(r8) :: sum_wt(bounds%begg:bounds%endg)
+ real(r8), parameter :: rain_scalar = 1.5e-3_r8 ! (1/m)
+ real(r8), parameter :: snow_scalar = 1.5e-3_r8 ! (1/m)
+ logical :: checkConservation = .true.
+ character(len=*), parameter :: subname = 'downscale_hillslope_precipitation'
+ !-----------------------------------------------------------------------
+
+ associate(&
+ ! Gridcell-level metadata:
+ forc_topo_g => atm2lnd_inst%forc_topo_grc , & ! Input: [real(r8) (:)] atmospheric surface height (m)
+ forc_rain_g => wateratm2lndbulk_inst%forc_rain_not_downscaled_grc , & ! Input: [real(r8) (:)] rain rate [mm/s]
+ forc_snow_g => wateratm2lndbulk_inst%forc_snow_not_downscaled_grc , & ! Input: [real(r8) (:)] snow rate [mm/s]
+ ! Column-level metadata:
+ topo_c => topo_inst%topo_col , & ! Input: [real(r8) (:)] column surface height (m)
+
+ ! Column-level downscaled fields:
+ forc_rain_c => wateratm2lndbulk_inst%forc_rain_downscaled_col , & ! Output: [real(r8) (:)] rain rate [mm/s]
+ forc_snow_c => wateratm2lndbulk_inst%forc_snow_downscaled_col & ! Output: [real(r8) (:)] snow rate [mm/s]
+ )
+
+ ! Redistribute precipitation based on departure
+ ! of column elevation from mean elevation
+
+ do c = bounds%begc,bounds%endc
+ g = col%gridcell(c)
+ if (col%is_hillslope_column(c) .and. col%active(c)) then
+
+ ! spatially uniform normalization, but separate rain/snow
+ topo_anom = max(-1._r8,(topo_c(c) - forc_topo_g(g))*rain_scalar) ! rain
+ precip_anom = forc_rain_g(g) * topo_anom
+ forc_rain_c(c) = forc_rain_c(c) + precip_anom
+
+ topo_anom = max(-1._r8,(topo_c(c) - forc_topo_g(g))*snow_scalar) ! snow
+ precip_anom = forc_snow_g(g) * topo_anom
+ forc_snow_c(c) = forc_snow_c(c) + precip_anom
+
+ end if
+ end do
+
+ ! Initialize arrays of total landunit precipitation
+ norm_rain(bounds%begg:bounds%endg) = 0._r8
+ norm_snow(bounds%begg:bounds%endg) = 0._r8
+ sum_wt(bounds%begg:bounds%endg) = 0._r8
+ ! Calculate normalization (area-weighted average precipitation)
+ do c = bounds%begc,bounds%endc
+ g = col%gridcell(c)
+ if (col%is_hillslope_column(c) .and. col%active(c)) then
+ norm_rain(g) = norm_rain(g) + col%wtgcell(c)*forc_rain_c(c)
+ norm_snow(g) = norm_snow(g) + col%wtgcell(c)*forc_snow_c(c)
+ sum_wt(g) = sum_wt(g) + col%wtgcell(c)
+ end if
+ end do
+ do g = bounds%begg,bounds%endg
+ if(sum_wt(g) > 0._r8) then
+ norm_rain(g) = norm_rain(g) / sum_wt(g)
+ norm_snow(g) = norm_snow(g) / sum_wt(g)
+ endif
+ enddo
+
+ ! Normalize column precipitation to conserve gridcell average
+ do c = bounds%begc,bounds%endc
+ g = col%gridcell(c)
+ if (col%is_hillslope_column(c) .and. col%active(c)) then
+ if (norm_rain(g) > 0._r8) then
+ forc_rain_c(c) = forc_rain_c(c) * forc_rain_g(g) / norm_rain(g)
+ else
+ forc_rain_c(c) = forc_rain_g(g)
+ endif
+ if (norm_snow(g) > 0._r8) then
+ forc_snow_c(c) = forc_snow_c(c) * forc_snow_g(g) / norm_snow(g)
+ else
+ forc_snow_c(c) = forc_snow_g(g)
+ endif
+ end if
+ end do
+
+ ! check conservation
+ if(checkConservation) then
+ norm_rain(bounds%begg:bounds%endg) = 0._r8
+ norm_snow(bounds%begg:bounds%endg) = 0._r8
+ sum_wt(bounds%begg:bounds%endg) = 0._r8
+ ! Calculate normalization (area-weighted average precipitation)
+ do c = bounds%begc,bounds%endc
+ g = col%gridcell(c)
+ if (col%is_hillslope_column(c) .and. col%active(c)) then
+ norm_rain(g) = norm_rain(g) + col%wtgcell(c)*forc_rain_c(c)
+ norm_snow(g) = norm_snow(g) + col%wtgcell(c)*forc_snow_c(c)
+ sum_wt(g) = sum_wt(g) + col%wtgcell(c)
+ end if
+ end do
+ do g = bounds%begg,bounds%endg
+ if(abs(norm_rain(g) - sum_wt(g)*forc_rain_g(g)) > 1.e-6) then
+ write(iulog,*) 'rain not conserved', g, norm_rain(g), sum_wt(g)*forc_rain_g(g)
+ endif
+ if(abs(norm_snow(g) - sum_wt(g)*forc_snow_g(g)) > 1.e-6) then
+ write(iulog,*) 'snow not conserved', g, norm_snow(g), sum_wt(g)*forc_snow_g(g)
+ endif
+ enddo
+ endif
+
+ end associate
+
+ end subroutine downscale_hillslope_precipitation
+
+
end module atm2lndMod
diff --git a/src/main/atm2lndType.F90 b/src/main/atm2lndType.F90
index 53013caf24..b99e0c8ba1 100644
--- a/src/main/atm2lndType.F90
+++ b/src/main/atm2lndType.F90
@@ -80,9 +80,10 @@ module atm2lndType
real(r8), pointer :: forc_vp_grc (:) => null() ! atmospheric vapor pressure (Pa)
real(r8), pointer :: forc_pco2_grc (:) => null() ! CO2 partial pressure (Pa)
real(r8), pointer :: forc_pco2_240_patch (:) => null() ! 10-day mean CO2 partial pressure (Pa)
- real(r8), pointer :: forc_solad_grc (:,:) => null() ! direct beam radiation (numrad) (vis=forc_sols , nir=forc_soll )
+ real(r8), pointer :: forc_solad_not_downscaled_grc (:,:) => null() ! direct beam radiation (numrad) (vis=forc_sols , nir=forc_soll )
real(r8), pointer :: forc_solai_grc (:,:) => null() ! diffuse radiation (numrad) (vis=forc_solsd, nir=forc_solld)
- real(r8), pointer :: forc_solar_grc (:) => null() ! incident solar radiation
+ real(r8), pointer :: forc_solar_not_downscaled_grc (:) => null() ! incident solar radiation
+ real(r8), pointer :: forc_solar_downscaled_col (:) => null() ! incident solar radiation
real(r8), pointer :: forc_ndep_grc (:) => null() ! nitrogen deposition rate (gN/m2/s)
real(r8), pointer :: forc_pc13o2_grc (:) => null() ! C13O2 partial pressure (Pa)
real(r8), pointer :: forc_po2_grc (:) => null() ! O2 partial pressure (Pa)
@@ -104,7 +105,7 @@ module atm2lndType
real(r8), pointer :: forc_pbot_downscaled_col (:) => null() ! downscaled atm pressure (Pa)
real(r8), pointer :: forc_rho_downscaled_col (:) => null() ! downscaled atm density (kg/m**3)
real(r8), pointer :: forc_lwrad_downscaled_col (:) => null() ! downscaled atm downwrd IR longwave radiation (W/m**2)
-
+ real(r8), pointer :: forc_solad_downscaled_col (:,:) => null() ! direct beam radiation (numrad) (vis=forc_sols , nir=forc_soll )
! time averaged quantities
real(r8) , pointer :: fsd24_patch (:) => null() ! patch 24hr average of direct beam radiation
@@ -475,9 +476,9 @@ subroutine InitAllocate(this, bounds)
allocate(this%forc_hgt_q_grc (begg:endg)) ; this%forc_hgt_q_grc (:) = ival
allocate(this%forc_vp_grc (begg:endg)) ; this%forc_vp_grc (:) = ival
allocate(this%forc_pco2_grc (begg:endg)) ; this%forc_pco2_grc (:) = ival
- allocate(this%forc_solad_grc (begg:endg,numrad)) ; this%forc_solad_grc (:,:) = ival
+ allocate(this%forc_solad_not_downscaled_grc (begg:endg,numrad)) ; this%forc_solad_not_downscaled_grc (:,:) = ival
allocate(this%forc_solai_grc (begg:endg,numrad)) ; this%forc_solai_grc (:,:) = ival
- allocate(this%forc_solar_grc (begg:endg)) ; this%forc_solar_grc (:) = ival
+ allocate(this%forc_solar_not_downscaled_grc (begg:endg)) ; this%forc_solar_not_downscaled_grc (:) = ival
allocate(this%forc_ndep_grc (begg:endg)) ; this%forc_ndep_grc (:) = ival
allocate(this%forc_pc13o2_grc (begg:endg)) ; this%forc_pc13o2_grc (:) = ival
allocate(this%forc_po2_grc (begg:endg)) ; this%forc_po2_grc (:) = ival
@@ -503,6 +504,8 @@ subroutine InitAllocate(this, bounds)
allocate(this%forc_th_downscaled_col (begc:endc)) ; this%forc_th_downscaled_col (:) = ival
allocate(this%forc_rho_downscaled_col (begc:endc)) ; this%forc_rho_downscaled_col (:) = ival
allocate(this%forc_lwrad_downscaled_col (begc:endc)) ; this%forc_lwrad_downscaled_col (:) = ival
+ allocate(this%forc_solad_downscaled_col (begc:endc,numrad)) ; this%forc_solad_downscaled_col (:,:) = ival
+ allocate(this%forc_solar_downscaled_col (begc:endc)) ; this%forc_solar_downscaled_col (:) = ival
allocate(this%fsd24_patch (begp:endp)) ; this%fsd24_patch (:) = nan
allocate(this%fsd240_patch (begp:endp)) ; this%fsd240_patch (:) = nan
@@ -555,24 +558,25 @@ subroutine InitHistory(this, bounds)
avgflag='A', long_name='atmospheric surface height', &
ptr_lnd=this%forc_topo_grc)
+ this%forc_solar_not_downscaled_grc(begg:endg) = spval
+ call hist_addfld1d (fname='FSDS_from_atm', units='W/m^2', &
+ avgflag='A', long_name='atmospheric incident solar radiation received from atmosphere (pre-downscaling)', &
+ ptr_lnd=this%forc_solar_not_downscaled_grc)
+
+ this%forc_o3_grc(begg:endg) = spval
call hist_addfld1d (fname='ATM_O3', units='mol/mol', &
avgflag='A', long_name='atmospheric ozone partial pressure', &
ptr_lnd=this%forc_o3_grc, default = 'inactive')
- this%forc_solar_grc(begg:endg) = spval
- call hist_addfld1d (fname='FSDS', units='W/m^2', &
- avgflag='A', long_name='atmospheric incident solar radiation', &
- ptr_lnd=this%forc_solar_grc)
-
this%forc_pco2_grc(begg:endg) = spval
call hist_addfld1d (fname='PCO2', units='Pa', &
avgflag='A', long_name='atmospheric partial pressure of CO2', &
ptr_lnd=this%forc_pco2_grc)
- this%forc_solar_grc(begg:endg) = spval
+ this%forc_solar_not_downscaled_grc(begg:endg) = spval
call hist_addfld1d (fname='SWdown', units='W/m^2', &
avgflag='A', long_name='atmospheric incident solar radiation', &
- ptr_gcell=this%forc_solar_grc, default='inactive')
+ ptr_gcell=this%forc_solar_not_downscaled_grc, default='inactive')
if (use_lch4) then
this%forc_pch4_grc(begg:endg) = spval
@@ -586,42 +590,46 @@ subroutine InitHistory(this, bounds)
avgflag='A', long_name='atmospheric air temperature received from atmosphere (pre-downscaling)', &
ptr_gcell=this%forc_t_not_downscaled_grc, default='inactive')
+ this%forc_solar_downscaled_col(begc:endc) = spval
+ call hist_addfld1d (fname='FSDS', units='W/m^2', &
+ avgflag='A', long_name='atmospheric incident solar radiation (downscaled for glacier and hillslope columns)', &
+ ptr_col=this%forc_solar_downscaled_col)
+
this%forc_t_downscaled_col(begc:endc) = spval
call hist_addfld1d (fname='TBOT', units='K', &
- avgflag='A', long_name='atmospheric air temperature (downscaled to columns in glacier regions)', &
+ avgflag='A', long_name='atmospheric air temperature (downscaled for glacier and hillslope columns)', &
ptr_col=this%forc_t_downscaled_col)
call hist_addfld1d (fname='Tair', units='K', &
- avgflag='A', long_name='atmospheric air temperature (downscaled to columns in glacier regions)', &
+ avgflag='A', long_name='atmospheric air temperature (downscaled for glacier and hillslope columns)', &
ptr_col=this%forc_t_downscaled_col, default='inactive')
this%forc_pbot_downscaled_col(begc:endc) = spval
call hist_addfld1d (fname='PBOT', units='Pa', &
- avgflag='A', long_name='atmospheric pressure at surface (downscaled to columns in glacier regions)', &
+ avgflag='A', long_name='atmospheric pressure at surface (downscaled for glacier and hillslope columns)', &
ptr_col=this%forc_pbot_downscaled_col)
call hist_addfld1d (fname='PSurf', units='Pa', &
- avgflag='A', long_name='atmospheric pressure at surface (downscaled to columns in glacier regions)', &
+ avgflag='A', long_name='atmospheric pressure at surface (downscaled for glacier and hillslope columns)', &
ptr_col=this%forc_pbot_downscaled_col, default='inactive')
this%forc_lwrad_downscaled_col(begc:endc) = spval
call hist_addfld1d (fname='FLDS', units='W/m^2', &
- avgflag='A', long_name='atmospheric longwave radiation (downscaled to columns in glacier regions)', &
+ avgflag='A', long_name='atmospheric longwave radiation (downscaled for glacier and hillslope columns)', &
ptr_col=this%forc_lwrad_downscaled_col)
call hist_addfld1d (fname='LWdown', units='W/m^2', &
- avgflag='A', long_name='atmospheric longwave radiation (downscaled to columns in glacier regions)', &
+ avgflag='A', long_name='atmospheric longwave radiation (downscaled for glacier and hillslope columns)', &
ptr_col=this%forc_lwrad_downscaled_col, default='inactive')
call hist_addfld1d (fname='FLDS_ICE', units='W/m^2', &
avgflag='A', &
- long_name='atmospheric longwave radiation (downscaled to columns in glacier regions) (ice landunits only)', &
+ long_name='atmospheric longwave radiation (downscaled for glacier and hillslope columns) (ice landunits only)', &
ptr_col=this%forc_lwrad_downscaled_col, l2g_scale_type='ice', &
default='inactive')
this%forc_th_downscaled_col(begc:endc) = spval
call hist_addfld1d (fname='THBOT', units='K', &
- avgflag='A', long_name='atmospheric air potential temperature (downscaled to columns in glacier regions)', &
+ avgflag='A', long_name='atmospheric air potential temperature (downscaled for glacier and hillslope columns)', &
ptr_col=this%forc_th_downscaled_col)
-
! Time averaged quantities
this%fsi24_patch(begp:endp) = spval
call hist_addfld1d (fname='FSI24', units='K', &
@@ -858,7 +866,7 @@ subroutine UpdateAccVars (this, bounds)
! Accumulate and extract forc_solad24 & forc_solad240
do p = begp,endp
g = patch%gridcell(p)
- rbufslp(p) = this%forc_solad_grc(g,1)
+ rbufslp(p) = this%forc_solad_not_downscaled_grc(g,1)
end do
call update_accum_field ('FSD240', rbufslp , nstep)
call extract_accum_field ('FSD240', this%fsd240_patch , nstep)
@@ -997,9 +1005,9 @@ subroutine Clean(this)
deallocate(this%forc_hgt_q_grc)
deallocate(this%forc_vp_grc)
deallocate(this%forc_pco2_grc)
- deallocate(this%forc_solad_grc)
+ deallocate(this%forc_solad_not_downscaled_grc)
deallocate(this%forc_solai_grc)
- deallocate(this%forc_solar_grc)
+ deallocate(this%forc_solar_not_downscaled_grc)
deallocate(this%forc_ndep_grc)
deallocate(this%forc_pc13o2_grc)
deallocate(this%forc_po2_grc)
@@ -1020,6 +1028,8 @@ subroutine Clean(this)
deallocate(this%forc_th_downscaled_col)
deallocate(this%forc_rho_downscaled_col)
deallocate(this%forc_lwrad_downscaled_col)
+ deallocate(this%forc_solad_downscaled_col)
+ deallocate(this%forc_solar_downscaled_col)
deallocate(this%fsd24_patch)
deallocate(this%fsd240_patch)
diff --git a/src/main/clm_driver.F90 b/src/main/clm_driver.F90
index 33e9412ba9..00a98e61b4 100644
--- a/src/main/clm_driver.F90
+++ b/src/main/clm_driver.F90
@@ -511,7 +511,7 @@ subroutine clm_drv(doalb, nextsw_cday, declinp1, declin, rstwr, nlend, rdate, ro
atm_topo = atm2lnd_inst%forc_topo_grc(bounds_clump%begg:bounds_clump%endg))
call downscale_forcings(bounds_clump, &
- topo_inst, atm2lnd_inst, water_inst%wateratm2lndbulk_inst, &
+ topo_inst, atm2lnd_inst, surfalb_inst, water_inst%wateratm2lndbulk_inst, &
eflx_sh_precip_conversion = energyflux_inst%eflx_sh_precip_conversion_col(bounds_clump%begc:bounds_clump%endc))
call set_atm2lnd_water_tracers(bounds_clump, &
@@ -1092,7 +1092,7 @@ subroutine clm_drv(doalb, nextsw_cday, declinp1, declin, rstwr, nlend, rdate, ro
filter(nc)%num_hydrologyc, filter(nc)%hydrologyc, &
filter(nc)%num_urbanc, filter(nc)%urbanc, &
filter(nc)%num_do_smb_c, filter(nc)%do_smb_c, &
- atm2lnd_inst, glc2lnd_inst, temperature_inst, &
+ glc2lnd_inst, temperature_inst, &
soilhydrology_inst, soilstate_inst, water_inst%waterstatebulk_inst, &
water_inst%waterdiagnosticbulk_inst, water_inst%waterbalancebulk_inst, &
water_inst%waterfluxbulk_inst, water_inst%wateratm2lndbulk_inst, &
diff --git a/src/main/clm_initializeMod.F90 b/src/main/clm_initializeMod.F90
index 3354c1e7d0..ab59ea5447 100644
--- a/src/main/clm_initializeMod.F90
+++ b/src/main/clm_initializeMod.F90
@@ -14,10 +14,10 @@ module clm_initializeMod
use clm_varctl , only : use_fates_sp, use_fates_bgc, use_fates
use clm_varctl , only : is_cold_start
use clm_varctl , only : iulog
- use clm_varctl , only : use_lch4, use_cn, use_cndv, use_c13, use_c14
+ use clm_varctl , only : use_lch4, use_cn, use_cndv, use_c13, use_c14, nhillslope
use clm_varctl , only : use_soil_moisture_streams
use clm_instur , only : wt_lunit, urban_valid, wt_nat_patch, wt_cft, fert_cft
- use clm_instur , only : irrig_method, wt_glc_mec, topo_glc_mec, haslake, pct_urban_max
+ use clm_instur , only : irrig_method, wt_glc_mec, topo_glc_mec, haslake, ncolumns_hillslope, pct_urban_max
use perf_mod , only : t_startf, t_stopf
use readParamsMod , only : readParameters
use ncdio_pio , only : file_desc_t
@@ -64,7 +64,8 @@ subroutine initialize1(dtime)
use UrbanParamsType , only: IsSimpleBuildTemp
use dynSubgridControlMod , only: dynSubgridControl_init
use SoilBiogeochemDecompCascadeConType , only : decomp_cascade_par_init
- use CropReprPoolsMod , only: crop_repr_pools_init
+ use CropReprPoolsMod , only: crop_repr_pools_init
+ use HillslopeHydrologyMod, only: hillslope_properties_init
!
! !ARGUMENTS
integer, intent(in) :: dtime ! model time step (seconds)
@@ -114,6 +115,7 @@ subroutine initialize1(dtime)
if (masterproc) call control_print()
call dynSubgridControl_init(NLFilename)
call crop_repr_pools_init()
+ call hillslope_properties_init(NLFilename)
call t_stopf('clm_init1')
@@ -135,6 +137,7 @@ subroutine initialize2(ni,nj)
use clm_varctl , only : finidat, finidat_interp_source, finidat_interp_dest, fsurdat
use clm_varctl , only : use_cn, use_fates, use_fates_luh
use clm_varctl , only : use_crop, ndep_from_cpl, fates_spitfire_mode
+ use clm_varctl , only : use_hillslope
use clm_varorb , only : eccen, mvelpp, lambm0, obliqr
use clm_varctl , only : use_cropcal_streams
use landunit_varcon , only : landunit_varcon_init, max_lunit, numurbl
@@ -176,9 +179,10 @@ subroutine initialize2(ni,nj)
use NutrientCompetitionFactoryMod , only : create_nutrient_competition_method
use FATESFireFactoryMod , only : scalar_lightning
use dynFATESLandUseChangeMod , only : dynFatesLandUseInit
+ use HillslopeHydrologyMod , only : InitHillslope
!
! !ARGUMENTS
- integer, intent(in) :: ni, nj ! global grid sizes
+ integer, intent(in) :: ni, nj ! global grid sizes
!
! !LOCAL VARIABLES:
integer :: c,g,i,j,k,l,n,p ! indices
@@ -236,6 +240,9 @@ subroutine initialize2(ni,nj)
allocate (wt_glc_mec (begg:endg, maxpatch_glc ))
allocate (topo_glc_mec (begg:endg, maxpatch_glc ))
allocate (haslake (begg:endg ))
+ if (use_hillslope) then
+ allocate (ncolumns_hillslope (begg:endg ))
+ endif
allocate (pct_urban_max(begg:endg, numurbl ))
allocate (wt_nat_patch (begg:endg, surfpft_lb:surfpft_ub ))
@@ -293,6 +300,11 @@ subroutine initialize2(ni,nj)
! Set global seg maps for gridcells, landlunits, columns and patches
call decompInit_glcp(ni, nj, glc_behavior)
+ if (use_hillslope) then
+ ! Initialize hillslope properties
+ call InitHillslope(bounds_proc, fsurdat)
+ endif
+
! Set filters
call allocFilters()
@@ -318,6 +330,7 @@ subroutine initialize2(ni,nj)
! end of the run for error checking, pct_urban_max is kept through the end of the run
! for reweighting in subgridWeights.
deallocate (wt_lunit, wt_cft, wt_glc_mec, haslake)
+ if (use_hillslope) deallocate (ncolumns_hillslope)
! Determine processor bounds and clumps for this processor
call get_proc_bounds(bounds_proc)
diff --git a/src/main/clm_instMod.F90 b/src/main/clm_instMod.F90
index 1ca450b48d..43390ca8b7 100644
--- a/src/main/clm_instMod.F90
+++ b/src/main/clm_instMod.F90
@@ -200,6 +200,9 @@ subroutine clm_instInit(bounds)
use SoilWaterRetentionCurveFactoryMod , only : create_soil_water_retention_curve
use decompMod , only : get_proc_bounds
use BalanceCheckMod , only : GetBalanceCheckSkipSteps
+ use clm_varctl , only : use_hillslope
+ use HillslopeHydrologyMod , only : SetHillslopeSoilThickness
+ use initVerticalMod , only : setSoilLayerClass
!
! !ARGUMENTS
type(bounds_type), intent(in) :: bounds ! processor bounds
@@ -268,6 +271,14 @@ subroutine clm_instInit(bounds)
urbanparams_inst%thick_wall(begl:endl), &
urbanparams_inst%thick_roof(begl:endl))
+ ! Set hillslope column bedrock values
+ if (use_hillslope) then
+ call SetHillslopeSoilThickness(bounds,fsurdat, &
+ soil_depth_lowland_in=8.5_r8,&
+ soil_depth_upland_in =2.0_r8)
+ call setSoilLayerClass(bounds)
+ endif
+
!-----------------------------------------------
! Set cold-start values for snow levels, snow layers and snow interfaces
!-----------------------------------------------
diff --git a/src/main/clm_varctl.F90 b/src/main/clm_varctl.F90
index 615f3b2606..bdcb653b64 100644
--- a/src/main/clm_varctl.F90
+++ b/src/main/clm_varctl.F90
@@ -152,6 +152,12 @@ module clm_varctl
! true => separate crop landunit is not created by default
logical, public :: create_crop_landunit = .false.
+ ! number of hillslopes per landunit
+ integer, public :: nhillslope = 0
+
+ ! maximum number of hillslope columns per landunit
+ integer, public :: max_columns_hillslope = 1
+
! do not irrigate by default
logical, public :: irrigate = .false.
@@ -378,7 +384,15 @@ module clm_varctl
integer, public :: soil_layerstruct_userdefined_nlevsoi = iundef
!----------------------------------------------------------
- !excess ice physics switch
+ ! hillslope hydrology switch
+ !----------------------------------------------------------
+
+ logical, public :: use_hillslope = .false. ! true => use multi-column hillslope hydrology
+ logical, public :: downscale_hillslope_meteorology = .false. ! true => downscale meteorological forcing in hillslope model
+ logical, public :: use_hillslope_routing = .false. ! true => use surface water routing in hillslope hydrology
+
+ !----------------------------------------------------------
+ ! excess ice physics switch
!----------------------------------------------------------
logical, public :: use_excess_ice = .false. ! true. => use excess ice physics
diff --git a/src/main/clm_varsur.F90 b/src/main/clm_varsur.F90
index d360941d23..c49c8bb052 100644
--- a/src/main/clm_varsur.F90
+++ b/src/main/clm_varsur.F90
@@ -45,13 +45,17 @@ module clm_instur
! subgrid glacier_mec sfc elevation
real(r8), pointer :: topo_glc_mec(:,:)
-
+
! whether we have lake to initialise in each grid cell
logical , pointer :: haslake(:)
+
+ ! subgrid hillslope hydrology constituents
+ integer, pointer :: ncolumns_hillslope(:)
! whether we have urban to initialize in each grid cell
! (second dimension goes 1:numurbl)
real(r8), pointer :: pct_urban_max(:,:)
+
!-----------------------------------------------------------------------
end module clm_instur
diff --git a/src/main/controlMod.F90 b/src/main/controlMod.F90
index d95c0e28e0..eaa7c5c1e4 100644
--- a/src/main/controlMod.F90
+++ b/src/main/controlMod.F90
@@ -45,7 +45,7 @@ module controlMod
use SoilBiogeochemLittVertTranspMod , only: som_adv_flux, max_depth_cryoturb
use SoilBiogeochemVerticalProfileMod , only: surfprof_exp
use SoilBiogeochemNitrifDenitrifMod , only: no_frozen_nitrif_denitrif
- use SoilHydrologyMod , only: soilHydReadNML
+ use SoilHydrologyMod , only: soilHydReadNML, hillslope_hydrology_ReadNML
use CNFireFactoryMod , only: CNFireReadNML
use CanopyFluxesMod , only: CanopyFluxesReadNML
use shr_drydep_mod , only: n_drydep
@@ -257,6 +257,11 @@ subroutine control_init(dtime)
namelist /clm_inparm/ use_biomass_heat_storage
+ namelist /clm_inparm/ use_hillslope
+
+ namelist /clm_inparm/ downscale_hillslope_meteorology
+
+ namelist /clm_inparm/ use_hillslope_routing
namelist /clm_inparm/ use_hydrstress
@@ -574,8 +579,10 @@ subroutine control_init(dtime)
end if
call soilHydReadNML( NLFilename )
-
- if( use_cn ) then
+ if ( use_hillslope ) then
+ call hillslope_hydrology_ReadNML( NLFilename )
+ endif
+ if ( use_cn ) then
call CNFireReadNML( NLFilename )
call CNPrecisionControlReadNML( NLFilename )
call CNNDynamicsReadNML ( NLFilename )
@@ -816,6 +823,11 @@ subroutine control_spmd()
call mpi_bcast (use_biomass_heat_storage, 1, MPI_LOGICAL, 0, mpicom, ier)
+ call mpi_bcast (use_hillslope, 1, MPI_LOGICAL, 0, mpicom, ier)
+
+ call mpi_bcast (downscale_hillslope_meteorology, 1, MPI_LOGICAL, 0, mpicom, ier)
+
+ call mpi_bcast (use_hillslope_routing, 1, MPI_LOGICAL, 0, mpicom, ier)
call mpi_bcast (use_hydrstress, 1, MPI_LOGICAL, 0, mpicom, ier)
@@ -1066,6 +1078,7 @@ subroutine control_print ()
write(iulog,'(a,d20.10)') ' Max snow depth (mm) =', h2osno_max
write(iulog,'(a,i8)') ' glc number of elevation classes =', maxpatch_glc
+
if (glc_do_dynglacier) then
write(iulog,*) ' glc CLM glacier areas and topography WILL evolve dynamically'
else
@@ -1098,6 +1111,9 @@ subroutine control_print ()
end if
write(iulog,*) ' land-ice albedos (unitless 0-1) = ', albice
+ write(iulog,*) ' hillslope hydrology = ', use_hillslope
+ write(iulog,*) ' downscale hillslope meteorology = ', downscale_hillslope_meteorology
+ write(iulog,*) ' hillslope routing = ', use_hillslope_routing
write(iulog,*) ' pre-defined soil layer structure = ', soil_layerstruct_predefined
write(iulog,*) ' user-defined soil layer structure = ', soil_layerstruct_userdefined
write(iulog,*) ' user-defined number of soil layers = ', soil_layerstruct_userdefined_nlevsoi
diff --git a/src/main/histFileMod.F90 b/src/main/histFileMod.F90
index fb1a25db37..d419f97630 100644
--- a/src/main/histFileMod.F90
+++ b/src/main/histFileMod.F90
@@ -16,7 +16,7 @@ module histFileMod
use clm_varctl , only : iulog, use_fates, compname, use_cn, use_crop
use clm_varcon , only : spval, ispval
use clm_varcon , only : grlnd, nameg, namel, namec, namep
- use decompMod , only : get_proc_bounds, get_proc_global, bounds_type, get_global_index_array
+ use decompMod , only : get_proc_bounds, get_proc_global, bounds_type, get_global_index, get_global_index_array
use decompMod , only : subgrid_level_gridcell, subgrid_level_landunit, subgrid_level_column
use GridcellType , only : grc
use LandunitType , only : lun
@@ -2329,6 +2329,7 @@ subroutine htape_create (t, histrest)
use landunit_varcon , only : max_lunit
use clm_varctl , only : caseid, ctitle, fsurdat, finidat, paramfile
use clm_varctl , only : version, hostname, username, conventions, source
+ use clm_varctl , only : use_hillslope,nhillslope,max_columns_hillslope
use domainMod , only : ldomain
use fileutils , only : get_filename
!
@@ -2466,6 +2467,10 @@ subroutine htape_create (t, histrest)
call ncd_defdim(lnfid, 'ltype', max_lunit, dimid)
call ncd_defdim(lnfid, 'nlevcan',nlevcan, dimid)
call ncd_defdim(lnfid, 'nvegwcs',nvegwcs, dimid)
+ if (use_hillslope) then
+ call ncd_defdim(lnfid, 'nhillslope',nhillslope, dimid)
+ call ncd_defdim(lnfid, 'max_columns_hillslope',max_columns_hillslope, dimid)
+ endif
call ncd_defdim(lnfid, 'mxsowings' , mxsowings , dimid)
call ncd_defdim(lnfid, 'mxharvests' , mxharvests , dimid)
call htape_add_ltype_metadata(lnfid)
@@ -2487,7 +2492,6 @@ subroutine htape_create (t, histrest)
call ncd_defdim(lnfid, 'scale_type_string_length', scale_type_strlen, dimid)
call ncd_defdim( lnfid, 'levdcmp', nlevdecomp_full, dimid)
-
if(use_fates)then
call ncd_defdim(lnfid, 'fates_levscag', nlevsclass * nlevage, dimid)
call ncd_defdim(lnfid, 'fates_levscagpf', nlevsclass * nlevage * numpft_fates, dimid)
@@ -2730,6 +2734,7 @@ subroutine htape_timeconst3D(t, &
'lake', & ! ZLAKE
'lake' & ! DZLAKE
]
+
!-----------------------------------------------------------------------
SHR_ASSERT_ALL_FL((ubound(watsat_col) == (/bounds%endc, nlevmaxurbgrnd/)), sourcefile, __LINE__)
@@ -3024,7 +3029,8 @@ subroutine htape_timeconst(t, mode)
!
! !USES:
use clm_varpar , only : nlevsoi
- use clm_varcon , only : zsoi, zlak, secspday, isecspday, isecsphr, isecspmin
+ use clm_varctl , only : use_hillslope
+ use clm_varcon , only : zsoi, zlak, secspday, isecspday, isecsphr, isecspmin, ispval
use domainMod , only : ldomain, lon1d, lat1d
use clm_time_manager, only : get_nstep, get_curr_date, get_curr_time
use clm_time_manager, only : get_ref_date, get_calendar, NO_LEAP_C, GREGORIAN_C
@@ -3079,7 +3085,7 @@ subroutine htape_timeconst(t, mode)
!
integer :: sec_hist_nhtfrq ! hist_nhtfrq converted to seconds
! !LOCAL VARIABLES:
- integer :: vid,n,i,j,m ! indices
+ integer :: vid,n,i,j,m,c ! indices
integer :: nstep ! current step
integer :: mcsec ! seconds of current date
integer :: mdcur ! current day
@@ -3105,6 +3111,9 @@ subroutine htape_timeconst(t, mode)
real(r8), pointer :: histo(:,:) ! temporary
integer :: status
real(r8) :: zsoi_1d(1)
+ type(bounds_type) :: bounds
+ integer :: ier ! error status
+ integer, pointer :: icarr(:) ! temporary
character(len=*),parameter :: subname = 'htape_timeconst'
!-----------------------------------------------------------------------
@@ -3112,6 +3121,9 @@ subroutine htape_timeconst(t, mode)
!*** Time constant grid variables only on first time-sample of file ***
!-------------------------------------------------------------------------------
+ call get_proc_bounds(bounds)
+
+
if (tape(t)%ntimes == 1) then
if (mode == 'define') then
call ncd_defvar(varname='levgrnd', xtype=tape(t)%ncprec, &
@@ -3126,6 +3138,36 @@ subroutine htape_timeconst(t, mode)
call ncd_defvar(varname='levdcmp', xtype=tape(t)%ncprec, dim1name='levdcmp', &
long_name='coordinate levels for soil decomposition variables', units='m', ncid=nfid(t))
+ if (use_hillslope .and. .not.tape(t)%dov2xy)then
+ call ncd_defvar(varname='hillslope_distance', xtype=ncd_double, &
+ dim1name=namec, long_name='hillslope column distance', &
+ units='m', ncid=nfid(t))
+ call ncd_defvar(varname='hillslope_width', xtype=ncd_double, &
+ dim1name=namec, long_name='hillslope column width', &
+ units='m', ncid=nfid(t))
+ call ncd_defvar(varname='hillslope_area', xtype=ncd_double, &
+ dim1name=namec, long_name='hillslope column area', &
+ units='m', ncid=nfid(t))
+ call ncd_defvar(varname='hillslope_elev', xtype=ncd_double, &
+ dim1name=namec, long_name='hillslope column elevation', &
+ units='m', ncid=nfid(t))
+ call ncd_defvar(varname='hillslope_slope', xtype=ncd_double, &
+ dim1name=namec, long_name='hillslope column slope', &
+ units='m', ncid=nfid(t))
+ call ncd_defvar(varname='hillslope_aspect', xtype=ncd_double, &
+ dim1name=namec, long_name='hillslope column aspect', &
+ units='m', ncid=nfid(t))
+ call ncd_defvar(varname='hillslope_index', xtype=ncd_int, &
+ dim1name=namec, long_name='hillslope index', &
+ ncid=nfid(t))
+ call ncd_defvar(varname='hillslope_cold', xtype=ncd_int, &
+ dim1name=namec, long_name='hillslope downhill column index', &
+ ncid=nfid(t))
+ call ncd_defvar(varname='hillslope_colu', xtype=ncd_int, &
+ dim1name=namec, long_name='hillslope uphill column index', &
+ ncid=nfid(t))
+ end if
+
if(use_fates)then
call ncd_defvar(varname='fates_levscls', xtype=tape(t)%ncprec, dim1name='fates_levscls', &
@@ -3214,6 +3256,44 @@ subroutine htape_timeconst(t, mode)
zsoi_1d(1) = 1._r8
call ncd_io(varname='levdcmp', data=zsoi_1d, ncid=nfid(t), flag='write')
end if
+
+ if (use_hillslope .and. .not.tape(t)%dov2xy) then
+ call ncd_io(varname='hillslope_distance' , data=col%hill_distance, dim1name=namec, ncid=nfid(t), flag='write')
+ call ncd_io(varname='hillslope_width' , data=col%hill_width, dim1name=namec, ncid=nfid(t), flag='write')
+ call ncd_io(varname='hillslope_area' , data=col%hill_area, dim1name=namec, ncid=nfid(t), flag='write')
+ call ncd_io(varname='hillslope_elev' , data=col%hill_elev, dim1name=namec, ncid=nfid(t), flag='write')
+ call ncd_io(varname='hillslope_slope' , data=col%hill_slope, dim1name=namec, ncid=nfid(t), flag='write')
+ call ncd_io(varname='hillslope_aspect' , data=col%hill_aspect, dim1name=namec, ncid=nfid(t), flag='write')
+ call ncd_io(varname='hillslope_index' , data=col%hillslope_ndx, dim1name=namec, ncid=nfid(t), flag='write')
+
+ ! write global indices rather than local indices
+ allocate(icarr(bounds%begc:bounds%endc),stat=ier)
+ if (ier /= 0) then
+ call endrun(msg=' allocation error of icarr'//errMsg(sourcefile, __LINE__))
+ end if
+
+ do c = bounds%begc,bounds%endc
+ if (col%cold(c) /= ispval) then
+ icarr(c)= get_global_index(subgrid_index=col%cold(c), subgrid_level=subgrid_level_column)
+ else
+ icarr(c)= col%cold(c)
+ endif
+ enddo
+
+ call ncd_io(varname='hillslope_cold' , data=icarr, dim1name=namec, ncid=nfid(t), flag='write')
+
+ do c = bounds%begc,bounds%endc
+ if (col%colu(c) /= ispval) then
+ icarr(c)= get_global_index(subgrid_index=col%colu(c), subgrid_level=subgrid_level_column)
+ else
+ icarr(c)= col%colu(c)
+ endif
+ enddo
+
+ call ncd_io(varname='hillslope_colu' , data=icarr, dim1name=namec, ncid=nfid(t), flag='write')
+ deallocate(icarr)
+ endif
+
if(use_fates)then
call ncd_io(varname='fates_scmap_levscag',data=fates_hdim_scmap_levscag, ncid=nfid(t), flag='write')
call ncd_io(varname='fates_agmap_levscag',data=fates_hdim_agmap_levscag, ncid=nfid(t), flag='write')
@@ -3765,6 +3845,9 @@ subroutine hfields_1dinfo(t, mode)
call ncd_defvar(varname='cols1d_active', xtype=ncd_log, dim1name=namec, &
long_name='true => do computations on this column', ifill_value=0, ncid=ncid)
+ call ncd_defvar(varname='cols1d_nbedrock', xtype=ncd_int, dim1name=namec, &
+ long_name='column bedrock depth index', ifill_value=ispval, ncid=ncid)
+
! Define patch info
call ncd_defvar(varname='pfts1d_lon', xtype=ncd_double, dim1name=namep, &
@@ -3912,6 +3995,7 @@ subroutine hfields_1dinfo(t, mode)
call ncd_io(varname='cols1d_itype_lunit', data=icarr , dim1name=namec, ncid=ncid, flag='write')
call ncd_io(varname='cols1d_active' , data=col%active , dim1name=namec, ncid=ncid, flag='write')
+ call ncd_io(varname='cols1d_nbedrock', data=col%nbedrock , dim1name=namec, ncid=ncid, flag='write')
! Write patch info
@@ -4107,7 +4191,7 @@ subroutine hist_htapes_wrapup( rstwr, nlend, bounds, &
call htape_timeconst(t, mode='define')
! Define 3D time-constant field variables on first history tapes
- if ( do_3Dtconst) then
+ if ( do_3Dtconst .and. t == 1) then
call htape_timeconst3D(t, &
bounds, watsat_col, sucsat_col, bsw_col, hksat_col, &
cellsand_col, cellclay_col, mode='define')
@@ -4127,7 +4211,7 @@ subroutine hist_htapes_wrapup( rstwr, nlend, bounds, &
call htape_timeconst(t, mode='write')
! Write 3D time constant history variables to first history tapes
- if ( do_3Dtconst .and. tape(t)%ntimes == 1 )then
+ if ( do_3Dtconst .and. t == 1 .and. tape(t)%ntimes == 1 )then
call htape_timeconst3D(t, &
bounds, watsat_col, sucsat_col, bsw_col, hksat_col, &
cellsand_col, cellclay_col, mode='write')
@@ -4581,7 +4665,6 @@ subroutine hist_restart_ncd (bounds, ncid, flag, rdate)
start(1)=1
-
!
! Add history namelist data to each history restart tape
!
diff --git a/src/main/initGridCellsMod.F90 b/src/main/initGridCellsMod.F90
index 99303c32da..44bc9361b2 100644
--- a/src/main/initGridCellsMod.F90
+++ b/src/main/initGridCellsMod.F90
@@ -216,7 +216,7 @@ subroutine set_landunit_veg_compete (ltype, gi, li, ci, pi)
integer , intent(inout) :: pi ! patch index
!
! !LOCAL VARIABLES:
- integer :: m ! index
+ integer :: m, ci2 ! index
integer :: npatches ! number of patches in landunit
integer :: ncols
integer :: nlunits
@@ -224,6 +224,7 @@ subroutine set_landunit_veg_compete (ltype, gi, li, ci, pi)
integer :: ncols_added ! number of columns actually added
integer :: nlunits_added ! number of landunits actually added
real(r8) :: wtlunit2gcell ! landunit weight in gridcell
+ real(r8) :: wtcol2lunit ! column weight in landunit
real(r8) :: p_wt ! patch weight (0-1)
!------------------------------------------------------------------------
@@ -240,31 +241,37 @@ subroutine set_landunit_veg_compete (ltype, gi, li, ci, pi)
if (nlunits > 0) then
call add_landunit(li=li, gi=gi, ltype=ltype, wtgcell=wtlunit2gcell)
nlunits_added = nlunits_added + 1
-
- ! Assume one column on the landunit
- call add_column(ci=ci, li=li, ctype=1, wtlunit=1.0_r8)
- ncols_added = ncols_added + 1
-
- ! For FATES: the total number of patches may not match what is in the surface
- ! file, and therefor the weighting can't be used. The weightings in
- ! wt_nat_patch may be meaningful (like with fixed biogeography), but they
- ! they need a mapping table to connect to the allocated patches (in fates)
- ! so the wt_nat_patch array is not applicable to these area weights
- ! A subsequent call, via the clmfates interface will update these weights
- ! by using said mapping table
-
- do m = natpft_lb,natpft_ub
- if (natveg_patch_exists(gi, m)) then
- if(use_fates .and. .not.use_fates_sp)then
- p_wt = 1.0_r8/real(natpft_size,r8)
- else
- p_wt = wt_nat_patch(gi,m)
+
+ ! Potentially create multiple columns (e.g., for hillslope hydrology), but each
+ ! with the same PFT breakdown.
+ !
+ ! Set column weight arbitrarily for now. If we have multiple columns because we're
+ ! using hillslope hydrology, then col%wtlunit will be modified in InitHillslope.
+ wtcol2lunit = 1.0_r8/real(ncols,r8)
+ do ci2 = 1,ncols
+ call add_column(ci=ci, li=li, ctype=1, wtlunit=wtcol2lunit)
+ ncols_added = ncols_added + 1
+
+ ! For FATES: the total number of patches may not match what is in the surface
+ ! file, and therefore the weighting can't be used. The weightings in
+ ! wt_nat_patch may be meaningful (like with fixed biogeography), but
+ ! they need a mapping table to connect to the allocated patches (in fates)
+ ! so the wt_nat_patch array is not applicable to these area weights
+ ! A subsequent call, via the clmfates interface will update these weights
+ ! by using said mapping table
+
+ do m = natpft_lb,natpft_ub
+ if (natveg_patch_exists(gi, m)) then
+ if(use_fates .and. .not.use_fates_sp)then
+ p_wt = 1.0_r8/real(natpft_size,r8)
+ else
+ p_wt = wt_nat_patch(gi,m)
+ end if
+ call add_patch(pi=pi, ci=ci, ptype=m, wtcol=p_wt)
+ npatches_added = npatches_added + 1
end if
- call add_patch(pi=pi, ci=ci, ptype=m, wtcol=p_wt)
- npatches_added = npatches_added + 1
- end if
+ end do
end do
-
end if
SHR_ASSERT_FL(nlunits_added == nlunits, sourcefile, __LINE__)
diff --git a/src/main/initVerticalMod.F90 b/src/main/initVerticalMod.F90
index 1bf79706f9..e88c4e1a18 100644
--- a/src/main/initVerticalMod.F90
+++ b/src/main/initVerticalMod.F90
@@ -40,7 +40,8 @@ module initVerticalMod
public :: initVertical
public :: find_soil_layer_containing_depth
public :: readParams
-
+ public :: setSoilLayerClass
+
! !PRIVATE MEMBER FUNCTIONS:
private :: hasBedrock ! true if the given column type includes bedrock layers
type, private :: params_type
@@ -80,9 +81,75 @@ subroutine readParams( ncid )
end subroutine readParams
+ !------------------------------------------------------------------------
+ subroutine setSoilLayerClass(bounds)
+
+ !
+ ! !ARGUMENTS:
+ type(bounds_type), intent(in) :: bounds
+ !
+ ! LOCAL VARIABLES:
+ integer :: c,l,j ! indices
+
+ ! Possible values for levgrnd_class. The important thing is that, for a given column,
+ ! layers that are fundamentally different (e.g., soil vs bedrock) have different
+ ! values. This information is used in the vertical interpolation in init_interp.
+ !
+ ! IMPORTANT: These values should not be changed lightly. e.g., try to avoid changing
+ ! the values assigned to LEVGRND_CLASS_STANDARD, LEVGRND_CLASS_DEEP_BEDROCK, etc. The
+ ! problem with changing these is that init_interp expects that layers with a value of
+ ! (e.g.) 1 on the source file correspond to layers with a value of 1 on the
+ ! destination file. So if you change the values of these constants, you either need to
+ ! adequately inform users of this change, or build in some translation mechanism in
+ ! init_interp (such as via adding more metadata to the restart file on the meaning of
+ ! these different values).
+ !
+ ! The distinction between "shallow" and "deep" bedrock is not made explicitly
+ ! elsewhere. But, since these classes have somewhat different behavior, they are
+ ! distinguished explicitly here.
+ integer, parameter :: LEVGRND_CLASS_STANDARD = 1
+ integer, parameter :: LEVGRND_CLASS_DEEP_BEDROCK = 2
+ integer, parameter :: LEVGRND_CLASS_SHALLOW_BEDROCK = 3
+
+ character(len=*), parameter :: subname = 'setSoilLayerClass'
+
+ ! ------------------------------------------------------------------------
+ ! Set classes of layers
+ ! ------------------------------------------------------------------------
+
+ do c = bounds%begc, bounds%endc
+ l = col%landunit(c)
+ if (hasBedrock(col_itype=col%itype(c), lun_itype=lun%itype(l))) then
+ ! NOTE(wjs, 2015-10-17) We are assuming that points with bedrock have both
+ ! "shallow" and "deep" bedrock. Currently, this is not true for lake columns:
+ ! lakes do not distinguish between "shallow" bedrock and "normal" soil.
+ ! However, that was just due to an oversight that is supposed to be corrected
+ ! soon; so to keep things simple we assume that any point with bedrock
+ ! potentially has both shallow and deep bedrock.
+ col%levgrnd_class(c, 1:col%nbedrock(c)) = LEVGRND_CLASS_STANDARD
+ if (col%nbedrock(c) < nlevsoi) then
+ col%levgrnd_class(c, (col%nbedrock(c) + 1) : nlevsoi) = LEVGRND_CLASS_SHALLOW_BEDROCK
+ end if
+ col%levgrnd_class(c, (nlevsoi + 1) : nlevmaxurbgrnd) = LEVGRND_CLASS_DEEP_BEDROCK
+ else
+ col%levgrnd_class(c, 1:nlevmaxurbgrnd) = LEVGRND_CLASS_STANDARD
+ end if
+ end do
+
+ do j = 1, nlevmaxurbgrnd
+ do c = bounds%begc, bounds%endc
+ if (col%z(c,j) == spval) then
+ col%levgrnd_class(c,j) = ispval
+ end if
+ end do
+ end do
+
+ end subroutine setSoilLayerClass
+
!------------------------------------------------------------------------
subroutine initVertical(bounds, glc_behavior, thick_wall, thick_roof)
- use clm_varcon, only : zmin_bedrock
+ use clm_varcon , only : zmin_bedrock
+
!
! !ARGUMENTS:
type(bounds_type) , intent(in) :: bounds
@@ -91,7 +158,7 @@ subroutine initVertical(bounds, glc_behavior, thick_wall, thick_roof)
real(r8) , intent(in) :: thick_roof(bounds%begl:)
!
! LOCAL VARAIBLES:
- integer :: c,l,g,i,j,lev ! indices
+ integer :: c,l,g,i,j,lev ! indices
type(file_desc_t) :: ncid ! netcdf id
logical :: readvar
integer :: dimid ! dimension id
@@ -115,27 +182,6 @@ subroutine initVertical(bounds, glc_behavior, thick_wall, thick_roof)
integer :: begc, endc
integer :: begl, endl
integer :: jmin_bedrock
-
- ! Possible values for levgrnd_class. The important thing is that, for a given column,
- ! layers that are fundamentally different (e.g., soil vs bedrock) have different
- ! values. This information is used in the vertical interpolation in init_interp.
- !
- ! IMPORTANT: These values should not be changed lightly. e.g., try to avoid changing
- ! the values assigned to LEVGRND_CLASS_STANDARD, LEVGRND_CLASS_DEEP_BEDROCK, etc. The
- ! problem with changing these is that init_interp expects that layers with a value of
- ! (e.g.) 1 on the source file correspond to layers with a value of 1 on the
- ! destination file. So if you change the values of these constants, you either need to
- ! adequately inform users of this change, or build in some translation mechanism in
- ! init_interp (such as via adding more metadata to the restart file on the meaning of
- ! these different values).
- !
- ! The distinction between "shallow" and "deep" bedrock is not made explicitly
- ! elsewhere. But, since these classes have somewhat different behavior, they are
- ! distinguished explicitly here.
- integer, parameter :: LEVGRND_CLASS_STANDARD = 1
- integer, parameter :: LEVGRND_CLASS_DEEP_BEDROCK = 2
- integer, parameter :: LEVGRND_CLASS_SHALLOW_BEDROCK = 3
-
character(len=*), parameter :: subname = 'initVertical'
!------------------------------------------------------------------------
@@ -224,7 +270,7 @@ subroutine initVertical(bounds, glc_behavior, thick_wall, thick_roof)
dzsoi(j) = soil_layerstruct_userdefined(j)
end do
else if (soil_layerstruct_predefined == '49SL_10m') then
- !scs: 10 meter soil column, nlevsoi set to 49 in clm_varpar
+ ! 10 meter soil column, nlevsoi set to 49 in clm_varpar
do j = 1, 10
dzsoi(j) = 1.e-2_r8 ! 10-mm layers
enddo
@@ -639,36 +685,11 @@ subroutine initVertical(bounds, glc_behavior, thick_wall, thick_roof)
end if
end do
- ! ------------------------------------------------------------------------
+ ! ----------------------------------------------
! Set classes of layers
- ! ------------------------------------------------------------------------
+ ! ----------------------------------------------
- do c = bounds%begc, bounds%endc
- l = col%landunit(c)
- if (hasBedrock(col_itype=col%itype(c), lun_itype=lun%itype(l))) then
- ! NOTE(wjs, 2015-10-17) We are assuming that points with bedrock have both
- ! "shallow" and "deep" bedrock. Currently, this is not true for lake columns:
- ! lakes do not distinguish between "shallow" bedrock and "normal" soil.
- ! However, that was just due to an oversight that is supposed to be corrected
- ! soon; so to keep things simple we assume that any point with bedrock
- ! potentially has both shallow and deep bedrock.
- col%levgrnd_class(c, 1:col%nbedrock(c)) = LEVGRND_CLASS_STANDARD
- if (col%nbedrock(c) < nlevsoi) then
- col%levgrnd_class(c, (col%nbedrock(c) + 1) : nlevsoi) = LEVGRND_CLASS_SHALLOW_BEDROCK
- end if
- col%levgrnd_class(c, (nlevsoi + 1) : nlevmaxurbgrnd) = LEVGRND_CLASS_DEEP_BEDROCK
- else
- col%levgrnd_class(c, 1:nlevmaxurbgrnd) = LEVGRND_CLASS_STANDARD
- end if
- end do
-
- do j = 1, nlevmaxurbgrnd
- do c = bounds%begc, bounds%endc
- if (col%z(c,j) == spval) then
- col%levgrnd_class(c,j) = ispval
- end if
- end do
- end do
+ call setSoilLayerClass(bounds)
!-----------------------------------------------
! Read in topographic index and slope
@@ -707,7 +728,13 @@ subroutine initVertical(bounds, glc_behavior, thick_wall, thick_roof)
do c = begc,endc
! microtopographic parameter, units are meters (try smooth function of slope)
slope0 = params_inst%slopemax**(1._r8/params_inst%slopebeta)
- col%micro_sigma(c) = (col%topo_slope(c) + slope0)**(params_inst%slopebeta)
+
+ if (col%is_hillslope_column(c)) then
+ col%micro_sigma(c) = (atan(col%hill_slope(c)) + slope0)**(params_inst%slopebeta)
+ else
+ col%micro_sigma(c) = (col%topo_slope(c) + slope0)**(params_inst%slopebeta)
+ endif
+
end do
call ncd_pio_closefile(ncid)
diff --git a/src/main/lnd2atmMod.F90 b/src/main/lnd2atmMod.F90
index 27769a69de..1cda0cff91 100644
--- a/src/main/lnd2atmMod.F90
+++ b/src/main/lnd2atmMod.F90
@@ -15,7 +15,7 @@ module lnd2atmMod
use clm_varctl , only : iulog, use_lch4
use shr_drydep_mod , only : n_drydep
use decompMod , only : bounds_type
- use subgridAveMod , only : p2g, c2g
+ use subgridAveMod , only : p2g, c2g, l2g
use filterColMod , only : filter_col_type, col_filter_from_logical_array
use lnd2atmType , only : lnd2atm_type
use atm2lndType , only : atm2lnd_type
@@ -159,6 +159,7 @@ subroutine lnd2atm(bounds, &
!
! !USES:
use ch4varcon , only : ch4offline
+ use clm_varctl , only : use_hillslope_routing
!
! !ARGUMENTS:
type(bounds_type) , intent(in) :: bounds
@@ -179,8 +180,11 @@ subroutine lnd2atm(bounds, &
real(r8) , intent(in) :: net_carbon_exchange_grc( bounds%begg: ) ! net carbon exchange between land and atmosphere, positive for source (gC/m2/s)
!
! !LOCAL VARIABLES:
- integer :: c, g ! indices
+ integer :: c, l, g ! indices
real(r8) :: eflx_sh_ice_to_liq_grc(bounds%begg:bounds%endg) ! sensible heat flux generated from the ice to liquid conversion, averaged to gridcell
+ real(r8), allocatable :: qflx_surf_col_to_rof(:) ! surface runoff that is sent directly to rof
+ real(r8), allocatable :: qflx_drain_col_to_rof(:) ! drainage that is sent directly to rof
+ real(r8), allocatable :: qflx_drain_perched_col_to_rof(:) ! perched drainage that is sent directly to rof
real(r8), parameter :: amC = 12.0_r8 ! Atomic mass number for Carbon
real(r8), parameter :: amO = 16.0_r8 ! Atomic mass number for Oxygen
real(r8), parameter :: amCO2 = amC + 2.0_r8*amO ! Atomic mass number for CO2
@@ -336,15 +340,80 @@ subroutine lnd2atm(bounds, &
! lnd -> rof
!----------------------------------------------------
- call c2g( bounds, &
- water_inst%waterfluxbulk_inst%qflx_surf_col (bounds%begc:bounds%endc), &
- water_inst%waterlnd2atmbulk_inst%qflx_rofliq_qsur_grc (bounds%begg:bounds%endg), &
- c2l_scale_type= 'urbanf', l2g_scale_type='unity' )
+ if (use_hillslope_routing) then
+ ! streamflow is volume/time, so sum over landunits (do not weight)
+ water_inst%waterlnd2atmbulk_inst%qflx_rofliq_stream_grc(bounds%begg:bounds%endg) = 0._r8
+ do l = bounds%begl, bounds%endl
+ if(lun%active(l)) then
+ g = lun%gridcell(l)
+ water_inst%waterlnd2atmbulk_inst%qflx_rofliq_stream_grc(g) = &
+ water_inst%waterlnd2atmbulk_inst%qflx_rofliq_stream_grc(g) &
+ + water_inst%waterfluxbulk_inst%volumetric_streamflow_lun(l) &
+ *1e3_r8/(grc%area(g)*1.e6_r8)
+ endif
+ enddo
+
+ ! If hillslope routing is used, exclude inputs to stream channel from gridcell averages to avoid double counting
+ allocate( &
+ qflx_surf_col_to_rof(bounds%begc:bounds%endc), &
+ qflx_drain_col_to_rof(bounds%begc:bounds%endc), &
+ qflx_drain_perched_col_to_rof(bounds%begc:bounds%endc))
+
+ qflx_surf_col_to_rof(bounds%begc:bounds%endc) = 0._r8
+ qflx_drain_col_to_rof(bounds%begc:bounds%endc) = 0._r8
+ qflx_drain_perched_col_to_rof(bounds%begc:bounds%endc) = 0._r8
+
+ do c = bounds%begc, bounds%endc
+ ! Exclude hillslope columns from gridcell average
+ ! hillslope runoff is sent to stream rather than directly
+ ! to rof, and is accounted for in qflx_rofliq_stream_grc
+ if (col%active(c) .and. .not. col%is_hillslope_column(c)) then
+ qflx_surf_col_to_rof(c) = qflx_surf_col_to_rof(c) &
+ + water_inst%waterfluxbulk_inst%qflx_surf_col(c)
+ qflx_drain_col_to_rof(c) = qflx_drain_col_to_rof(c) &
+ + water_inst%waterfluxbulk_inst%qflx_drain_col(c)
+ qflx_drain_perched_col_to_rof(c) = &
+ qflx_drain_perched_col_to_rof(c) &
+ + water_inst%waterfluxbulk_inst%qflx_drain_perched_col(c)
+ endif
+ enddo
+
+ call c2g( bounds, &
+ qflx_surf_col_to_rof (bounds%begc:bounds%endc), &
+ water_inst%waterlnd2atmbulk_inst%qflx_rofliq_qsur_grc (bounds%begg:bounds%endg), &
+ c2l_scale_type= 'urbanf', l2g_scale_type='unity')
+
+ call c2g( bounds, &
+ qflx_drain_col_to_rof (bounds%begc:bounds%endc), &
+ water_inst%waterlnd2atmbulk_inst%qflx_rofliq_qsub_grc (bounds%begg:bounds%endg), &
+ c2l_scale_type= 'urbanf', l2g_scale_type='unity')
+
+ call c2g( bounds, &
+ qflx_drain_perched_col_to_rof (bounds%begc:bounds%endc), &
+ water_inst%waterlnd2atmbulk_inst%qflx_rofliq_drain_perched_grc(bounds%begg:bounds%endg), &
+ c2l_scale_type= 'urbanf', l2g_scale_type='unity')
+
+ deallocate(qflx_surf_col_to_rof,qflx_drain_col_to_rof, &
+ qflx_drain_perched_col_to_rof)
+
+ else
+
+ call c2g( bounds, &
+ water_inst%waterfluxbulk_inst%qflx_surf_col (bounds%begc:bounds%endc), &
+ water_inst%waterlnd2atmbulk_inst%qflx_rofliq_qsur_grc (bounds%begg:bounds%endg), &
+ c2l_scale_type= 'urbanf', l2g_scale_type='unity' )
+
+ call c2g( bounds, &
+ water_inst%waterfluxbulk_inst%qflx_drain_col (bounds%begc:bounds%endc), &
+ water_inst%waterlnd2atmbulk_inst%qflx_rofliq_qsub_grc (bounds%begg:bounds%endg), &
+ c2l_scale_type= 'urbanf', l2g_scale_type='unity' )
+
+ call c2g( bounds, &
+ water_inst%waterfluxbulk_inst%qflx_drain_perched_col (bounds%begc:bounds%endc), &
+ water_inst%waterlnd2atmbulk_inst%qflx_rofliq_drain_perched_grc(bounds%begg:bounds%endg), &
+ c2l_scale_type= 'urbanf', l2g_scale_type='unity' )
- call c2g( bounds, &
- water_inst%waterfluxbulk_inst%qflx_drain_col (bounds%begc:bounds%endc), &
- water_inst%waterlnd2atmbulk_inst%qflx_rofliq_qsub_grc (bounds%begg:bounds%endg), &
- c2l_scale_type= 'urbanf', l2g_scale_type='unity' )
+ endif
do c = bounds%begc, bounds%endc
if (col%active(c)) then
@@ -383,12 +452,6 @@ subroutine lnd2atm(bounds, &
water_inst%waterfluxbulk_inst%qflx_liq_dynbal_grc(g)
enddo
- call c2g( bounds, &
- water_inst%waterfluxbulk_inst%qflx_drain_perched_col (bounds%begc:bounds%endc), &
- water_inst%waterlnd2atmbulk_inst%qflx_rofliq_drain_perched_grc(bounds%begg:bounds%endg), &
- c2l_scale_type= 'urbanf', l2g_scale_type='unity' )
-
-
call c2g( bounds, &
water_inst%waterfluxbulk_inst%qflx_sfc_irrig_col (bounds%begc:bounds%endc), &
water_inst%waterlnd2atmbulk_inst%qirrig_grc(bounds%begg:bounds%endg), &
diff --git a/src/main/lnd2glcMod.F90 b/src/main/lnd2glcMod.F90
index 34f50266ad..27fa7639d7 100644
--- a/src/main/lnd2glcMod.F90
+++ b/src/main/lnd2glcMod.F90
@@ -20,7 +20,7 @@ module lnd2glcMod
use decompMod , only : get_proc_bounds, bounds_type, subgrid_level_column
use domainMod , only : ldomain
use clm_varpar , only : maxpatch_glc
- use clm_varctl , only : iulog
+ use clm_varctl , only : iulog, use_hillslope
use clm_varcon , only : spval, tfrz
use column_varcon , only : col_itype_to_ice_class
use landunit_varcon , only : istice, istsoil
@@ -204,7 +204,16 @@ subroutine update_lnd2glc(this, bounds, num_do_smb_c, filter_do_smb_c, &
! Make sure we haven't already assigned the coupling fields for this point
! (this could happen, for example, if there were multiple columns in the
! istsoil landunit, which we aren't prepared to handle)
- if (fields_assigned(g,n)) then
+ !
+ ! BUG(wjs, 2022-07-17, ESCOMP/CTSM#204) We have a known bug in the handling of bare
+ ! land fluxes when we potentially have multiple vegetated columns in a grid cell.
+ ! The most common configuration where this is the case is when use_hillslope is
+ ! true. In order to allow hillslope hydrology runs to work for now, we are
+ ! bypassing this error check when use_hillslope is true - under the assumption
+ ! that, for now, people aren't going to be interested in SMB in a run with
+ ! hillslope hydrology. Once we resolve ESCOMP/CTSM#204, we should remove the '.and.
+ ! .not. use_hillslope' part of this conditional.
+ if (fields_assigned(g,n) .and. .not. use_hillslope) then
write(iulog,*) subname//' ERROR: attempt to assign coupling fields twice for the same index.'
write(iulog,*) 'One possible cause is having multiple columns in the istsoil landunit,'
write(iulog,*) 'which this routine cannot handle.'
diff --git a/src/main/subgridAveMod.F90 b/src/main/subgridAveMod.F90
index c5ce4a4a98..68431582ce 100644
--- a/src/main/subgridAveMod.F90
+++ b/src/main/subgridAveMod.F90
@@ -100,6 +100,70 @@ module subgridAveMod
contains
+ !-----------------------------------------------------------------------
+ subroutine set_c2l_scale (bounds, c2l_scale_type, scale_c2l)
+ !
+ ! !DESCRIPTION:
+ ! Set scale_c2l for different c2l_scale_type values
+ !
+ ! !ARGUMENTS:
+ type(bounds_type), intent(in) :: bounds
+ character(len=*), intent(in) :: c2l_scale_type ! scale factor type for averaging (see note at top of module)
+ real(r8), intent(out) :: scale_c2l(bounds%begc:bounds%endc) ! scale factor for column->landunit mapping
+
+ !
+ ! !LOCAL VARIABLES:
+ integer :: c,l ! indices
+ !------------------------------------------------------------------------
+
+ ! Enforce expected array sizes
+ SHR_ASSERT_ALL_FL((ubound(scale_c2l) == (/bounds%endc/)), sourcefile, __LINE__)
+
+ if (c2l_scale_type == 'unity') then
+ do c = bounds%begc,bounds%endc
+ scale_c2l(c) = 1.0_r8
+ end do
+ else if (c2l_scale_type == 'urbanf') then
+ do c = bounds%begc,bounds%endc
+ l = col%landunit(c)
+ if (lun%urbpoi(l)) then
+ if (col%itype(c) == icol_sunwall) then
+ scale_c2l(c) = 3.0 * lun%canyon_hwr(l)
+ else if (col%itype(c) == icol_shadewall) then
+ scale_c2l(c) = 3.0 * lun%canyon_hwr(l)
+ else if (col%itype(c) == icol_road_perv .or. col%itype(c) == icol_road_imperv) then
+ scale_c2l(c) = 3.0_r8
+ else if (col%itype(c) == icol_roof) then
+ scale_c2l(c) = 1.0_r8
+ end if
+ else
+ scale_c2l(c) = 1.0_r8
+ end if
+ end do
+ else if (c2l_scale_type == 'urbans') then
+ do c = bounds%begc,bounds%endc
+ l = col%landunit(c)
+ if (lun%urbpoi(l)) then
+ if (col%itype(c) == icol_sunwall) then
+ scale_c2l(c) = (3.0 * lun%canyon_hwr(l)) / (2.*lun%canyon_hwr(l) + 1.)
+ else if (col%itype(c) == icol_shadewall) then
+ scale_c2l(c) = (3.0 * lun%canyon_hwr(l)) / (2.*lun%canyon_hwr(l) + 1.)
+ else if (col%itype(c) == icol_road_perv .or. col%itype(c) == icol_road_imperv) then
+ scale_c2l(c) = 3.0 / (2.*lun%canyon_hwr(l) + 1.)
+ else if (col%itype(c) == icol_roof) then
+ scale_c2l(c) = 1.0_r8
+ end if
+ else
+ scale_c2l(c) = 1.0_r8
+ end if
+ end do
+ else
+ write(iulog,*)'set_c2l_scale: scale type ',c2l_scale_type,' not supported'
+ call endrun(msg=errMsg(sourcefile, __LINE__))
+ end if
+
+ end subroutine set_c2l_scale
+
!-----------------------------------------------------------------------
subroutine p2c_1d (bounds, parr, carr, p2c_scale_type)
!
@@ -310,48 +374,7 @@ subroutine p2l_1d (bounds, parr, larr, p2c_scale_type, c2l_scale_type)
SHR_ASSERT_ALL_FL((ubound(parr) == (/bounds%endp/)), sourcefile, __LINE__)
SHR_ASSERT_ALL_FL((ubound(larr) == (/bounds%endl/)), sourcefile, __LINE__)
- if (c2l_scale_type == 'unity') then
- do c = bounds%begc,bounds%endc
- scale_c2l(c) = 1.0_r8
- end do
- else if (c2l_scale_type == 'urbanf') then
- do c = bounds%begc,bounds%endc
- l = col%landunit(c)
- if (lun%urbpoi(l)) then
- if (col%itype(c) == icol_sunwall) then
- scale_c2l(c) = 3.0 * lun%canyon_hwr(l)
- else if (col%itype(c) == icol_shadewall) then
- scale_c2l(c) = 3.0 * lun%canyon_hwr(l)
- else if (col%itype(c) == icol_road_perv .or. col%itype(c) == icol_road_imperv) then
- scale_c2l(c) = 3.0_r8
- else if (col%itype(c) == icol_roof) then
- scale_c2l(c) = 1.0_r8
- end if
- else
- scale_c2l(c) = 1.0_r8
- end if
- end do
- else if (c2l_scale_type == 'urbans') then
- do c = bounds%begc,bounds%endc
- l = col%landunit(c)
- if (lun%urbpoi(l)) then
- if (col%itype(c) == icol_sunwall) then
- scale_c2l(c) = (3.0 * lun%canyon_hwr(l)) / (2.*lun%canyon_hwr(l) + 1.)
- else if (col%itype(c) == icol_shadewall) then
- scale_c2l(c) = (3.0 * lun%canyon_hwr(l)) / (2.*lun%canyon_hwr(l) + 1.)
- else if (col%itype(c) == icol_road_perv .or. col%itype(c) == icol_road_imperv) then
- scale_c2l(c) = 3.0 / (2.*lun%canyon_hwr(l) + 1.)
- else if (col%itype(c) == icol_roof) then
- scale_c2l(c) = 1.0_r8
- end if
- else
- scale_c2l(c) = 1.0_r8
- end if
- end do
- else
- write(iulog,*)'p2l_1d error: scale type ',c2l_scale_type,' not supported'
- call endrun(msg=errMsg(sourcefile, __LINE__))
- end if
+ call set_c2l_scale (bounds, c2l_scale_type, scale_c2l)
if (p2c_scale_type == 'unity') then
do p = bounds%begp,bounds%endp
@@ -418,48 +441,7 @@ subroutine p2l_2d(bounds, num2d, parr, larr, p2c_scale_type, c2l_scale_type)
SHR_ASSERT_ALL_FL((ubound(parr) == (/bounds%endp, num2d/)), sourcefile, __LINE__)
SHR_ASSERT_ALL_FL((ubound(larr) == (/bounds%endl, num2d/)), sourcefile, __LINE__)
- if (c2l_scale_type == 'unity') then
- do c = bounds%begc,bounds%endc
- scale_c2l(c) = 1.0_r8
- end do
- else if (c2l_scale_type == 'urbanf') then
- do c = bounds%begc,bounds%endc
- l = col%landunit(c)
- if (lun%urbpoi(l)) then
- if (col%itype(c) == icol_sunwall) then
- scale_c2l(c) = 3.0 * lun%canyon_hwr(l)
- else if (col%itype(c) == icol_shadewall) then
- scale_c2l(c) = 3.0 * lun%canyon_hwr(l)
- else if (col%itype(c) == icol_road_perv .or. col%itype(c) == icol_road_imperv) then
- scale_c2l(c) = 3.0_r8
- else if (col%itype(c) == icol_roof) then
- scale_c2l(c) = 1.0_r8
- end if
- else
- scale_c2l(c) = 1.0_r8
- end if
- end do
- else if (c2l_scale_type == 'urbans') then
- do c = bounds%begc,bounds%endc
- l = col%landunit(c)
- if (lun%urbpoi(l)) then
- if (col%itype(c) == icol_sunwall) then
- scale_c2l(c) = (3.0 * lun%canyon_hwr(l)) / (2.*lun%canyon_hwr(l) + 1.)
- else if (col%itype(c) == icol_shadewall) then
- scale_c2l(c) = (3.0 * lun%canyon_hwr(l)) / (2.*lun%canyon_hwr(l) + 1.)
- else if (col%itype(c) == icol_road_perv .or. col%itype(c) == icol_road_imperv) then
- scale_c2l(c) = 3.0 / (2.*lun%canyon_hwr(l) + 1.)
- else if (col%itype(c) == icol_roof) then
- scale_c2l(c) = 1.0_r8
- end if
- else
- scale_c2l(c) = 1.0_r8
- end if
- end do
- else
- write(iulog,*)'p2l_2d error: scale type ',c2l_scale_type,' not supported'
- call endrun(msg=errMsg(sourcefile, __LINE__))
- end if
+ call set_c2l_scale (bounds, c2l_scale_type, scale_c2l)
if (p2c_scale_type == 'unity') then
do p = bounds%begp,bounds%endp
@@ -532,48 +514,7 @@ subroutine p2g_1d(bounds, parr, garr, p2c_scale_type, c2l_scale_type, l2g_scale_
call build_scale_l2g(bounds, l2g_scale_type, &
scale_l2g(bounds%begl:bounds%endl))
- if (c2l_scale_type == 'unity') then
- do c = bounds%begc,bounds%endc
- scale_c2l(c) = 1.0_r8
- end do
- else if (c2l_scale_type == 'urbanf') then
- do c = bounds%begc,bounds%endc
- l = col%landunit(c)
- if (lun%urbpoi(l)) then
- if (col%itype(c) == icol_sunwall) then
- scale_c2l(c) = 3.0 * lun%canyon_hwr(l)
- else if (col%itype(c) == icol_shadewall) then
- scale_c2l(c) = 3.0 * lun%canyon_hwr(l)
- else if (col%itype(c) == icol_road_perv .or. col%itype(c) == icol_road_imperv) then
- scale_c2l(c) = 3.0_r8
- else if (col%itype(c) == icol_roof) then
- scale_c2l(c) = 1.0_r8
- end if
- else
- scale_c2l(c) = 1.0_r8
- end if
- end do
- else if (c2l_scale_type == 'urbans') then
- do c = bounds%begc,bounds%endc
- l = col%landunit(c)
- if (lun%urbpoi(l)) then
- if (col%itype(c) == icol_sunwall) then
- scale_c2l(c) = (3.0 * lun%canyon_hwr(l)) / (2.*lun%canyon_hwr(l) + 1.)
- else if (col%itype(c) == icol_shadewall) then
- scale_c2l(c) = (3.0 * lun%canyon_hwr(l)) / (2.*lun%canyon_hwr(l) + 1.)
- else if (col%itype(c) == icol_road_perv .or. col%itype(c) == icol_road_imperv) then
- scale_c2l(c) = 3.0 / (2.*lun%canyon_hwr(l) + 1.)
- else if (col%itype(c) == icol_roof) then
- scale_c2l(c) = 1.0_r8
- end if
- else
- scale_c2l(c) = 1.0_r8
- end if
- end do
- else
- write(iulog,*)'p2g_1d error: scale type ',c2l_scale_type,' not supported'
- call endrun(msg=errMsg(sourcefile, __LINE__))
- end if
+ call set_c2l_scale (bounds, c2l_scale_type, scale_c2l)
if (p2c_scale_type == 'unity') then
do p = bounds%begp,bounds%endp
@@ -648,48 +589,7 @@ subroutine p2g_2d(bounds, num2d, parr, garr, p2c_scale_type, c2l_scale_type, l2g
call build_scale_l2g(bounds, l2g_scale_type, &
scale_l2g(bounds%begl:bounds%endl))
- if (c2l_scale_type == 'unity') then
- do c = bounds%begc,bounds%endc
- scale_c2l(c) = 1.0_r8
- end do
- else if (c2l_scale_type == 'urbanf') then
- do c = bounds%begc,bounds%endc
- l = col%landunit(c)
- if (lun%urbpoi(l)) then
- if (col%itype(c) == icol_sunwall) then
- scale_c2l(c) = 3.0 * lun%canyon_hwr(l)
- else if (col%itype(c) == icol_shadewall) then
- scale_c2l(c) = 3.0 * lun%canyon_hwr(l)
- else if (col%itype(c) == icol_road_perv .or. col%itype(c) == icol_road_imperv) then
- scale_c2l(c) = 3.0_r8
- else if (col%itype(c) == icol_roof) then
- scale_c2l(c) = 1.0_r8
- end if
- else
- scale_c2l(c) = 1.0_r8
- end if
- end do
- else if (c2l_scale_type == 'urbans') then
- do c = bounds%begc,bounds%endc
- l = col%landunit(c)
- if (lun%urbpoi(l)) then
- if (col%itype(c) == icol_sunwall) then
- scale_c2l(c) = (3.0 * lun%canyon_hwr(l)) / (2.*lun%canyon_hwr(l) + 1.)
- else if (col%itype(c) == icol_shadewall) then
- scale_c2l(c) = (3.0 * lun%canyon_hwr(l)) / (2.*lun%canyon_hwr(l) + 1.)
- else if (col%itype(c) == icol_road_perv .or. col%itype(c) == icol_road_imperv) then
- scale_c2l(c) = 3.0 / (2.*lun%canyon_hwr(l) + 1.)
- else if (col%itype(c) == icol_roof) then
- scale_c2l(c) = 1.0_r8
- end if
- else
- scale_c2l(c) = 1.0_r8
- end if
- end do
- else
- write(iulog,*)'p2g_2d error: scale type ',c2l_scale_type,' not supported'
- call endrun(msg=errMsg(sourcefile, __LINE__))
- end if
+ call set_c2l_scale (bounds, c2l_scale_type, scale_c2l)
if (p2c_scale_type == 'unity') then
do p = bounds%begp,bounds%endp
@@ -770,48 +670,7 @@ subroutine c2l_1d (bounds, carr, larr, c2l_scale_type, include_inactive)
l_include_inactive = .false.
end if
- if (c2l_scale_type == 'unity') then
- do c = bounds%begc,bounds%endc
- scale_c2l(c) = 1.0_r8
- end do
- else if (c2l_scale_type == 'urbanf') then
- do c = bounds%begc,bounds%endc
- l = col%landunit(c)
- if (lun%urbpoi(l)) then
- if (col%itype(c) == icol_sunwall) then
- scale_c2l(c) = 3.0 * lun%canyon_hwr(l)
- else if (col%itype(c) == icol_shadewall) then
- scale_c2l(c) = 3.0 * lun%canyon_hwr(l)
- else if (col%itype(c) == icol_road_perv .or. col%itype(c) == icol_road_imperv) then
- scale_c2l(c) = 3.0_r8
- else if (col%itype(c) == icol_roof) then
- scale_c2l(c) = 1.0_r8
- end if
- else
- scale_c2l(c) = 1.0_r8
- end if
- end do
- else if (c2l_scale_type == 'urbans') then
- do c = bounds%begc,bounds%endc
- l = col%landunit(c)
- if (lun%urbpoi(l)) then
- if (col%itype(c) == icol_sunwall) then
- scale_c2l(c) = (3.0 * lun%canyon_hwr(l)) / (2.*lun%canyon_hwr(l) + 1.)
- else if (col%itype(c) == icol_shadewall) then
- scale_c2l(c) = (3.0 * lun%canyon_hwr(l)) / (2.*lun%canyon_hwr(l) + 1.)
- else if (col%itype(c) == icol_road_perv .or. col%itype(c) == icol_road_imperv) then
- scale_c2l(c) = 3.0 / (2.*lun%canyon_hwr(l) + 1.)
- else if (col%itype(c) == icol_roof) then
- scale_c2l(c) = 1.0_r8
- end if
- else
- scale_c2l(c) = 1.0_r8
- end if
- end do
- else
- write(iulog,*)'c2l_1d error: scale type ',c2l_scale_type,' not supported'
- call endrun(msg=errMsg(sourcefile, __LINE__))
- end if
+ call set_c2l_scale (bounds, c2l_scale_type, scale_c2l)
larr(bounds%begl : bounds%endl) = spval
sumwt(bounds%begl : bounds%endl) = 0._r8
@@ -866,48 +725,7 @@ subroutine c2l_2d (bounds, num2d, carr, larr, c2l_scale_type)
SHR_ASSERT_ALL_FL((ubound(carr) == (/bounds%endc, num2d/)), sourcefile, __LINE__)
SHR_ASSERT_ALL_FL((ubound(larr) == (/bounds%endl, num2d/)), sourcefile, __LINE__)
- if (c2l_scale_type == 'unity') then
- do c = bounds%begc,bounds%endc
- scale_c2l(c) = 1.0_r8
- end do
- else if (c2l_scale_type == 'urbanf') then
- do c = bounds%begc,bounds%endc
- l = col%landunit(c)
- if (lun%urbpoi(l)) then
- if (col%itype(c) == icol_sunwall) then
- scale_c2l(c) = 3.0 * lun%canyon_hwr(l)
- else if (col%itype(c) == icol_shadewall) then
- scale_c2l(c) = 3.0 * lun%canyon_hwr(l)
- else if (col%itype(c) == icol_road_perv .or. col%itype(c) == icol_road_imperv) then
- scale_c2l(c) = 3.0_r8
- else if (col%itype(c) == icol_roof) then
- scale_c2l(c) = 1.0_r8
- end if
- else
- scale_c2l(c) = 1.0_r8
- end if
- end do
- else if (c2l_scale_type == 'urbans') then
- do c = bounds%begc,bounds%endc
- l = col%landunit(c)
- if (lun%urbpoi(l)) then
- if (col%itype(c) == icol_sunwall) then
- scale_c2l(c) = (3.0 * lun%canyon_hwr(l)) / (2.*lun%canyon_hwr(l) + 1.)
- else if (col%itype(c) == icol_shadewall) then
- scale_c2l(c) = (3.0 * lun%canyon_hwr(l)) / (2.*lun%canyon_hwr(l) + 1.)
- else if (col%itype(c) == icol_road_perv .or. col%itype(c) == icol_road_imperv) then
- scale_c2l(c) = 3.0 / (2.*lun%canyon_hwr(l) + 1.)
- else if (col%itype(c) == icol_roof) then
- scale_c2l(c) = 1.0_r8
- end if
- else
- scale_c2l(c) = 1.0_r8
- end if
- end do
- else
- write(iulog,*)'c2l_2d error: scale type ',c2l_scale_type,' not supported'
- call endrun(msg=errMsg(sourcefile, __LINE__))
- end if
+ call set_c2l_scale (bounds, c2l_scale_type, scale_c2l)
larr(bounds%begl : bounds%endl, :) = spval
do j = 1,num2d
@@ -968,48 +786,7 @@ subroutine c2g_1d(bounds, carr, garr, c2l_scale_type, l2g_scale_type)
call build_scale_l2g(bounds, l2g_scale_type, &
scale_l2g(bounds%begl:bounds%endl))
- if (c2l_scale_type == 'unity') then
- do c = bounds%begc,bounds%endc
- scale_c2l(c) = 1.0_r8
- end do
- else if (c2l_scale_type == 'urbanf') then
- do c = bounds%begc,bounds%endc
- l = col%landunit(c)
- if (lun%urbpoi(l)) then
- if (col%itype(c) == icol_sunwall) then
- scale_c2l(c) = 3.0 * lun%canyon_hwr(l)
- else if (col%itype(c) == icol_shadewall) then
- scale_c2l(c) = 3.0 * lun%canyon_hwr(l)
- else if (col%itype(c) == icol_road_perv .or. col%itype(c) == icol_road_imperv) then
- scale_c2l(c) = 3.0_r8
- else if (col%itype(c) == icol_roof) then
- scale_c2l(c) = 1.0_r8
- end if
- else
- scale_c2l(c) = 1.0_r8
- end if
- end do
- else if (c2l_scale_type == 'urbans') then
- do c = bounds%begc,bounds%endc
- l = col%landunit(c)
- if (lun%urbpoi(l)) then
- if (col%itype(c) == icol_sunwall) then
- scale_c2l(c) = (3.0 * lun%canyon_hwr(l)) / (2.*lun%canyon_hwr(l) + 1.)
- else if (col%itype(c) == icol_shadewall) then
- scale_c2l(c) = (3.0 * lun%canyon_hwr(l)) / (2.*lun%canyon_hwr(l) + 1.)
- else if (col%itype(c) == icol_road_perv .or. col%itype(c) == icol_road_imperv) then
- scale_c2l(c) = 3.0 / (2.*lun%canyon_hwr(l) + 1.)
- else if (col%itype(c) == icol_roof) then
- scale_c2l(c) = 1.0_r8
- end if
- else
- scale_c2l(c) = 1.0_r8
- end if
- end do
- else
- write(iulog,*)'c2l_1d error: scale type ',c2l_scale_type,' not supported'
- call endrun(msg=errMsg(sourcefile, __LINE__))
- end if
+ call set_c2l_scale (bounds, c2l_scale_type, scale_c2l)
garr(bounds%begg : bounds%endg) = spval
sumwt(bounds%begg : bounds%endg) = 0._r8
@@ -1070,48 +847,7 @@ subroutine c2g_2d(bounds, num2d, carr, garr, c2l_scale_type, l2g_scale_type)
call build_scale_l2g(bounds, l2g_scale_type, &
scale_l2g(bounds%begl:bounds%endl))
- if (c2l_scale_type == 'unity') then
- do c = bounds%begc,bounds%endc
- scale_c2l(c) = 1.0_r8
- end do
- else if (c2l_scale_type == 'urbanf') then
- do c = bounds%begc,bounds%endc
- l = col%landunit(c)
- if (lun%urbpoi(l)) then
- if (col%itype(c) == icol_sunwall) then
- scale_c2l(c) = 3.0 * lun%canyon_hwr(l)
- else if (col%itype(c) == icol_shadewall) then
- scale_c2l(c) = 3.0 * lun%canyon_hwr(l)
- else if (col%itype(c) == icol_road_perv .or. col%itype(c) == icol_road_imperv) then
- scale_c2l(c) = 3.0_r8
- else if (col%itype(c) == icol_roof) then
- scale_c2l(c) = 1.0_r8
- end if
- else
- scale_c2l(c) = 1.0_r8
- end if
- end do
- else if (c2l_scale_type == 'urbans') then
- do c = bounds%begc,bounds%endc
- l = col%landunit(c)
- if (lun%urbpoi(l)) then
- if (col%itype(c) == icol_sunwall) then
- scale_c2l(c) = (3.0 * lun%canyon_hwr(l)) / (2.*lun%canyon_hwr(l) + 1.)
- else if (col%itype(c) == icol_shadewall) then
- scale_c2l(c) = (3.0 * lun%canyon_hwr(l)) / (2.*lun%canyon_hwr(l) + 1.)
- else if (col%itype(c) == icol_road_perv .or. col%itype(c) == icol_road_imperv) then
- scale_c2l(c) = 3.0 / (2.*lun%canyon_hwr(l) + 1.)
- else if (col%itype(c) == icol_roof) then
- scale_c2l(c) = 1.0_r8
- end if
- else
- scale_c2l(c) = 1.0_r8
- end if
- end do
- else
- write(iulog,*)'c2g_2d error: scale type ',c2l_scale_type,' not supported'
- call endrun(msg=errMsg(sourcefile, __LINE__))
- end if
+ call set_c2l_scale (bounds, c2l_scale_type, scale_c2l)
garr(bounds%begg : bounds%endg,:) = spval
do j = 1,num2d
diff --git a/src/main/subgridMod.F90 b/src/main/subgridMod.F90
index 7020f42be5..4118ac73ed 100644
--- a/src/main/subgridMod.F90
+++ b/src/main/subgridMod.F90
@@ -75,6 +75,8 @@ subroutine subgrid_get_gcellinfo (gi, glc_behavior, &
! atm_topo is arbitrary for the sake of getting these counts. We don't have a true
! atm_topo value at the point of this call, so use 0.
real(r8), parameter :: atm_topo = 0._r8
+
+
!------------------------------------------------------------------------------
npatches = 0
@@ -85,6 +87,11 @@ subroutine subgrid_get_gcellinfo (gi, glc_behavior, &
call subgrid_get_info_natveg(gi, npatches_temp, ncols_temp, nlunits_temp)
call accumulate_counters()
+ ! call this after natveg call because we allocate space for
+ ! FATES cohorts based on the number of naturally vegetated columns
+ ! and nothing else
+ call subgrid_get_info_cohort(gi, ncols_temp, ncohorts)
+
call subgrid_get_info_urban_tbd(gi, npatches_temp, ncols_temp, nlunits_temp)
call accumulate_counters()
@@ -107,8 +114,6 @@ subroutine subgrid_get_gcellinfo (gi, glc_behavior, &
call subgrid_get_info_crop(gi, npatches_temp, ncols_temp, nlunits_temp)
call accumulate_counters()
- call subgrid_get_info_cohort(gi,ncohorts)
-
contains
subroutine accumulate_counters
! Accumulate running sums of patches, columns and landunits.
@@ -131,6 +136,8 @@ subroutine subgrid_get_info_natveg(gi, npatches, ncols, nlunits)
!
! !USES
use clm_varpar, only : natpft_lb, natpft_ub
+ use clm_instur, only : ncolumns_hillslope
+ use clm_varctl, only : use_hillslope
!
! !ARGUMENTS:
integer, intent(in) :: gi ! grid cell index
@@ -154,9 +161,16 @@ subroutine subgrid_get_info_natveg(gi, npatches, ncols, nlunits)
end do
if (npatches > 0) then
- ! Assume that the vegetated landunit has one column
- ncols = 1
nlunits = 1
+ if (use_hillslope) then
+ ! ensure ncols is > 0
+ ncols = max(ncolumns_hillslope(gi),1)
+ else
+ ncols = 1
+ endif
+ ! Assume that each PFT present in the grid cell is present in every column
+ npatches = ncols*npatches
+
else
! As noted in natveg_patch_exists, we expect a naturally vegetated landunit in
! every grid cell. This means that npatches should be at least 1 in every grid
@@ -220,7 +234,7 @@ end function natveg_patch_exists
! -----------------------------------------------------------------------------
- subroutine subgrid_get_info_cohort(gi, ncohorts)
+ subroutine subgrid_get_info_cohort(gi, ncols, ncohorts)
!
! !DESCRIPTION:
! Obtain cohort counts per each gridcell.
@@ -230,6 +244,7 @@ subroutine subgrid_get_info_cohort(gi, ncohorts)
!
! !ARGUMENTS:
integer, intent(in) :: gi ! grid cell index
+ integer, intent(in) :: ncols ! number of nat veg columns in this grid cell
integer, intent(out) :: ncohorts ! number of cohorts in this grid cell
!
! !LOCAL VARIABLES:
@@ -248,11 +263,10 @@ subroutine subgrid_get_info_cohort(gi, ncohorts)
! restart vector will just be a little sparse.
! -------------------------------------------------------------------------
- ncohorts = fates_maxElementsPerSite
+ ncohorts = ncols*fates_maxElementsPerSite
end subroutine subgrid_get_info_cohort
-
!-----------------------------------------------------------------------
subroutine subgrid_get_info_urban_tbd(gi, npatches, ncols, nlunits)
!
diff --git a/src/main/surfrdMod.F90 b/src/main/surfrdMod.F90
index 23e96e7c1a..03a0082e97 100644
--- a/src/main/surfrdMod.F90
+++ b/src/main/surfrdMod.F90
@@ -721,7 +721,7 @@ subroutine surfrd_veg_all(begg, endg, ncid, ns, actual_numcft)
! Determine weight arrays for non-dynamic landuse mode
!
! !USES:
- use clm_varctl , only : create_crop_landunit, use_fates, n_dom_pfts
+ use clm_varctl , only : create_crop_landunit, use_fates, n_dom_pfts, use_hillslope
use clm_varpar , only : natpft_lb, natpft_ub, natpft_size, cft_size, cft_lb, cft_ub
use clm_varpar , only : surfpft_lb, surfpft_ub
use clm_instur , only : wt_lunit, wt_nat_patch, wt_cft, fert_cft
@@ -815,7 +815,12 @@ subroutine surfrd_veg_all(begg, endg, ncid, ns, actual_numcft)
' must also have a separate crop landunit, and vice versa)'//&
errMsg(sourcefile, __LINE__))
end if
-
+
+ ! Obtain hillslope hydrology information and modify pft weights
+ if (use_hillslope) then
+ call surfrd_hillslope(begg, endg, ncid, ns)
+ endif
+
! Convert from percent to fraction
wt_lunit(begg:endg,istsoil) = wt_lunit(begg:endg,istsoil) / 100._r8
wt_lunit(begg:endg,istcrop) = wt_lunit(begg:endg,istcrop) / 100._r8
@@ -883,6 +888,115 @@ subroutine surfrd_veg_dgvm(begg, endg)
end subroutine surfrd_veg_dgvm
!-----------------------------------------------------------------------
+ subroutine surfrd_hillslope(begg, endg, ncid, ns)
+ !
+ ! !DESCRIPTION:
+ ! Determine number of hillslopes and columns for hillslope hydrology mode
+ !
+ ! !USES:
+ use clm_instur, only : ncolumns_hillslope, wt_nat_patch
+ use clm_varctl, only : nhillslope,max_columns_hillslope
+ use clm_varpar, only : natpft_size, natpft_lb, natpft_ub
+ use ncdio_pio, only : ncd_inqdid, ncd_inqdlen
+ use pftconMod , only : noveg
+ use HillslopeHydrologyMod, only : pft_distribution_method, pft_standard, pft_from_file, pft_uniform_dominant_pft, pft_lowland_dominant_pft, pft_lowland_upland
+ use array_utils, only: find_k_max_indices
+ use surfrdUtilsMod, only: collapse_to_dominant
+
+ !
+ ! !ARGUMENTS:
+ integer, intent(in) :: begg, endg
+ type(file_desc_t),intent(inout) :: ncid ! netcdf id
+ integer ,intent(in) :: ns ! domain size
+ !
+ ! !LOCAL VARIABLES:
+ integer :: g, nh, m, n ! index
+ integer :: dimid,varid ! netCDF id's
+ integer :: ier ! error status
+ integer, allocatable :: max_indices(:) ! largest weight pft indices
+ logical :: readvar ! is variable on dataset
+ integer,pointer :: arrayl(:) ! local array (needed because ncd_io expects a pointer)
+ character(len=32) :: subname = 'surfrd_hillslope' ! subroutine name
+ logical, allocatable :: do_not_collapse(:)
+ integer :: n_dominant
+ !-----------------------------------------------------------------------
+
+ ! number of hillslopes per landunit
+ call ncd_inqdid(ncid,'nhillslope',dimid,readvar)
+ if (.not. readvar) then
+ call endrun( msg=' ERROR: nhillslope not on surface data file'//errMsg(sourcefile, __LINE__))
+ else
+ call ncd_inqdlen(ncid,dimid,nh)
+ nhillslope = nh
+ endif
+ ! maximum number of columns per landunit
+ call ncd_inqdid(ncid,'nmaxhillcol',dimid,readvar)
+ if (.not. readvar) then
+ call endrun( msg=' ERROR: nmaxhillcol not on surface data file'//errMsg(sourcefile, __LINE__))
+ else
+ call ncd_inqdlen(ncid,dimid,nh)
+ max_columns_hillslope = nh
+ endif
+ ! actual number of columns per landunit
+ allocate(arrayl(begg:endg))
+ call ncd_io(ncid=ncid, varname='nhillcolumns', flag='read', data=arrayl, &
+ dim1name=grlnd, readvar=readvar)
+ if (.not. readvar) then
+ call endrun( msg=' ERROR: nhillcolumns not on surface data file'//errMsg(sourcefile, __LINE__))
+ else
+ ncolumns_hillslope(begg:endg) = arrayl(begg:endg)
+ endif
+ deallocate(arrayl)
+
+ ! pft_from_file and pft_lowland_upland assume that 1 pft
+ ! will exist on each hillslope column. In preparation, set one
+ ! pft weight to 100 and the rest to 0. The vegetation type
+ ! (patch%itype) will be reassigned when initHillslope is called later.
+ if(pft_distribution_method == pft_from_file .or. &
+ pft_distribution_method == pft_lowland_upland) then
+ do g = begg, endg
+ ! If hillslopes will be used in a gridcell, modify wt_nat_patch, otherwise use original patch distribution
+ if(ncolumns_hillslope(g) > 0) then
+ ! First patch gets 100% weight; all other natural patches are zeroed out
+ wt_nat_patch(g,:) = 0._r8
+ wt_nat_patch(g,natpft_lb) = 100._r8
+ endif
+ enddo
+
+ else if (pft_distribution_method == pft_uniform_dominant_pft &
+ .or. pft_distribution_method == pft_lowland_dominant_pft) then
+
+ ! If hillslopes will be used in a gridcell, modify wt_nat_patch,
+ ! otherwise use original patch distribution
+ allocate(do_not_collapse(begg:endg))
+ do_not_collapse(begg:endg) = .false.
+ do g = begg, endg
+ if (ncolumns_hillslope(g) == 0) then
+ do_not_collapse(g) = .true.
+ end if
+ end do
+
+ if (pft_distribution_method == pft_uniform_dominant_pft) then
+ ! pft_uniform_dominant_pft uses the patch with the
+ ! largest weight for all hillslope columns in the gridcell
+ n_dominant = 1
+ else if (pft_distribution_method == pft_lowland_dominant_pft) then
+ ! pft_lowland_dominant_pft uses the two patches with the
+ ! largest weights for the hillslope columns in the gridcell
+ n_dominant = 2
+ else
+ call endrun( msg=' ERROR: unrecognized hillslope_pft_distribution_method'//errMsg(sourcefile, __LINE__))
+ end if
+
+ call collapse_to_dominant(wt_nat_patch(begg:endg,:), natpft_lb, natpft_ub, begg, endg, n_dominant, do_not_collapse)
+ deallocate(do_not_collapse)
+
+ else if (pft_distribution_method /= pft_standard) then
+ call endrun( msg=' ERROR: unrecognized hillslope_pft_distribution_method'//errMsg(sourcefile, __LINE__))
+ endif
+
+ end subroutine surfrd_hillslope
+
subroutine surfrd_lakemask(begg, endg)
!
! !DESCRIPTION:
diff --git a/src/main/surfrdUtilsMod.F90 b/src/main/surfrdUtilsMod.F90
index 6b581a59c1..97f5b7d80f 100644
--- a/src/main/surfrdUtilsMod.F90
+++ b/src/main/surfrdUtilsMod.F90
@@ -235,7 +235,7 @@ subroutine collapse_individual_lunits(wt_lunit, begg, endg, toosmall_soil, &
end subroutine collapse_individual_lunits
!-----------------------------------------------------------------------
- subroutine collapse_to_dominant(weight, lower_bound, upper_bound, begg, endg, n_dominant)
+ subroutine collapse_to_dominant(weight, lower_bound, upper_bound, begg, endg, n_dominant, do_not_collapse)
!
! DESCRIPTION
! Collapse to the top N dominant pfts or landunits (n_dominant)
@@ -251,6 +251,7 @@ subroutine collapse_to_dominant(weight, lower_bound, upper_bound, begg, endg, n_
integer, intent(in) :: lower_bound ! lower bound of pft or landunit indices
integer, intent(in) :: upper_bound ! upper bound of pft or landunit indices
integer, intent(in) :: n_dominant ! # dominant pfts or landunits
+ logical, intent(in), optional :: do_not_collapse(begg:endg)
! This array modified in-place
! Weights of pfts or landunits per grid cell
! Dimensioned [g, lower_bound:upper_bound]
@@ -277,6 +278,14 @@ subroutine collapse_to_dominant(weight, lower_bound, upper_bound, begg, endg, n_
if (n_dominant > 0 .and. n_dominant < upper_bound) then
allocate(max_indices(n_dominant))
do g = begg, endg
+
+ ! original sum of all the weights
+ wt_sum(g) = sum(weight(g,:))
+
+ if (present(do_not_collapse) .and. do_not_collapse(g)) then
+ cycle
+ end if
+
max_indices = 0 ! initialize
call find_k_max_indices(weight(g,:), lower_bound, n_dominant, &
max_indices)
@@ -286,7 +295,6 @@ subroutine collapse_to_dominant(weight, lower_bound, upper_bound, begg, endg, n_
! Typically the original sum of weights = 1, but if
! collapse_urban = .true., it equals the sum of the urban landunits.
! Also set the remaining weights to 0.
- wt_sum(g) = sum(weight(g,:)) ! original sum of all the weights
wt_dom_sum = 0._r8 ! initialize the dominant pft or landunit sum
do n = 1, n_dominant
m = max_indices(n)
diff --git a/src/main/test/atm2lnd_test/test_downscale_forcings.pf b/src/main/test/atm2lnd_test/test_downscale_forcings.pf
index d688ad809d..ddd097d16c 100644
--- a/src/main/test/atm2lnd_test/test_downscale_forcings.pf
+++ b/src/main/test/atm2lnd_test/test_downscale_forcings.pf
@@ -9,6 +9,7 @@ module test_downscale_forcings
use unittestSimpleSubgridSetupsMod
use unittestArrayMod
use atm2lndType, only : atm2lnd_type, atm2lnd_params_type
+ use SurfaceAlbedoType, only : surfalb_type
use Wateratm2lndBulkType, only : wateratm2lndbulk_type
use WaterInfoBulkType, only : water_info_bulk_type
use TopoMod, only : topo_type
@@ -25,6 +26,7 @@ module test_downscale_forcings
@TestCase
type, extends(TestCase) :: TestDownscaleForcings
type(atm2lnd_type) :: atm2lnd_inst
+ type(surfalb_type) :: surfalb_inst
type(wateratm2lndbulk_type) :: wateratm2lndbulk_inst
type(topo_type_always_downscale) :: topo_inst
real(r8), allocatable :: eflx_sh_precip_conversion(:)
@@ -204,8 +206,13 @@ contains
class(TestDownscaleForcings), intent(inout) :: this
this%eflx_sh_precip_conversion = col_array()
- call downscale_forcings(bounds, this%topo_inst, &
- this%atm2lnd_inst, this%wateratm2lndbulk_inst, &
+ call downscale_forcings(bounds, &
+ this%topo_inst, &
+ this%atm2lnd_inst, &
+ ! Currently surfalb_inst is only used for hillslope downscaling; we need to pass
+ ! it to satisfy the interface but we haven't bothered setting it up
+ this%surfalb_inst, &
+ this%wateratm2lndbulk_inst, &
this%eflx_sh_precip_conversion)
end subroutine call_downscale_forcings
diff --git a/src/main/test/atm2lnd_test/test_partition_precip.pf b/src/main/test/atm2lnd_test/test_partition_precip.pf
index 48c12c3f3c..56febc1b30 100644
--- a/src/main/test/atm2lnd_test/test_partition_precip.pf
+++ b/src/main/test/atm2lnd_test/test_partition_precip.pf
@@ -5,6 +5,7 @@ module test_partition_precip
use funit
use atm2lndMod
use atm2lndType
+ use ColumnType, only : col
use shr_kind_mod, only : r8 => shr_kind_r8
use unittestSubgridMod
use unittestSimpleSubgridSetupsMod
@@ -64,6 +65,7 @@ contains
logical :: l_repartition_rain_snow
type(atm2lnd_params_type) :: atm2lnd_params
+ integer :: c, g
if (present(repartition_rain_snow)) then
l_repartition_rain_snow = repartition_rain_snow
@@ -89,6 +91,15 @@ contains
this%wateratm2lndbulk_inst%forc_rain_not_downscaled_grc(bounds%begg:bounds%endg) = rain(:)
this%wateratm2lndbulk_inst%forc_snow_not_downscaled_grc(bounds%begg:bounds%endg) = snow(:)
this%atm2lnd_inst%forc_t_downscaled_col(bounds%begc:bounds%endc) = temperature(:)
+
+ ! In the production code, column-level versions of forc_rain and forc_snow are
+ ! initialized to the gridcell-level versions prior to the call to partition_precip; do
+ ! that here
+ do c = bounds%begc, bounds%endc
+ g = col%gridcell(c)
+ this%wateratm2lndbulk_inst%forc_rain_downscaled_col(c) = this%wateratm2lndbulk_inst%forc_rain_not_downscaled_grc(g)
+ this%wateratm2lndbulk_inst%forc_snow_downscaled_col(c) = this%wateratm2lndbulk_inst%forc_snow_not_downscaled_grc(g)
+ end do
end subroutine set_inputs
@Test
diff --git a/src/main/test/surfrdUtils_test/test_surfrdUtils.pf b/src/main/test/surfrdUtils_test/test_surfrdUtils.pf
index 98191fbe99..f2fcae7af9 100644
--- a/src/main/test/surfrdUtils_test/test_surfrdUtils.pf
+++ b/src/main/test/surfrdUtils_test/test_surfrdUtils.pf
@@ -129,7 +129,7 @@ contains
call check_sums_equal_1( wt_in_out, begg, "test_check_sums_add_to_1", &
"should not trigger an error")
- @assertEqual(wt_in_out(begg:,:), wt_expected(begg:,:), tolerance=tol)
+ @assertEqual(wt_expected(begg:,:), wt_in_out(begg:,:), tolerance=tol)
deallocate( wt_expected )
deallocate( wt_in_out )
@@ -249,7 +249,7 @@ contains
call check_sums_equal_1( wt_in_out, begg, "test_check_sums_add_to_1", &
"should not trigger an error for wt_in_out")
- @assertEqual(wt_in_out(begg:,:), wt_expected(begg:,:), tolerance=tol)
+ @assertEqual(wt_expected(begg:,:), wt_in_out(begg:,:), tolerance=tol)
end do
@@ -318,7 +318,7 @@ contains
isturb_MIN, isturb_MAX, begg, endg, &
n_dom_urban)
- @assertEqual(wt_in_out(begg:,:), wt_expected(begg:,:), tolerance=tol)
+ @assertEqual(wt_expected(begg:,:), wt_in_out(begg:,:), tolerance=tol)
deallocate( wt_expected )
deallocate( wt_in_out )
@@ -444,7 +444,7 @@ contains
call check_sums_equal_1( wt_in_out, begg, "test_check_sums_add_to_1", &
"should not trigger an error")
- @assertEqual(wt_in_out(begg:,:), wt_expected(begg:,:), tolerance=tol)
+ @assertEqual(wt_expected(begg:,:), wt_in_out(begg:,:), tolerance=tol)
end do ! loop of tests
@@ -558,7 +558,7 @@ contains
call check_sums_equal_1( wt_nat_patch_in_out, begg, "test_check_sums_add_to_1", &
"should not trigger an error")
- @assertEqual(wt_nat_patch_in_out(begg:,:), wt_nat_patch_expected(begg:,:), tolerance=tol)
+ @assertEqual(wt_nat_patch_expected(begg:,:), wt_nat_patch_in_out(begg:,:), tolerance=tol)
end do ! loop of tests
@@ -570,6 +570,143 @@ contains
end subroutine test_collapse_to_dom_pfts
+
+ @Test
+ subroutine test_collapse_to_dom_do_not_collapse()
+ ! Tests subroutine collapse_to_dominant when used with an optional logical array indicating which gridcells should actually be collapsed
+ !
+ use pftconMod, only: pftcon
+ use clm_instur, only: wt_nat_patch
+ use clm_varpar, only: natpft_lb, natpft_ub
+
+ implicit none
+ integer, parameter :: begg = 2, endg = 4, natpft_size = 15
+ real(r8), allocatable :: wt_nat_patch_expected(:,:)
+ real(r8), allocatable :: wt_nat_patch_in_out(:,:) ! used in subr. call
+ real(r8) :: expctd(9)
+ logical, allocatable :: do_not_collapse(:)
+
+ ! Set relevant pftcon values to defaults; override where necessary
+ call pftcon%InitForTesting()
+ natpft_ub = natpft_size - 1
+ allocate( wt_nat_patch(begg:endg,natpft_lb:natpft_ub) )
+ allocate( wt_nat_patch_expected(begg:endg,natpft_lb:natpft_ub) )
+ allocate( wt_nat_patch_in_out(begg:endg,natpft_lb:natpft_ub) )
+ allocate( do_not_collapse(begg:endg) )
+
+ ! INPUT VALUES
+ wt_nat_patch(begg:,:) = 0._r8 ! initialize
+ wt_nat_patch(begg:,0) = (/ 30._r8, 40._r8, 0._r8/) ! pft0
+ wt_nat_patch(begg:,1) = (/ 15._r8, 11._r8, 15._r8/) ! pft1
+ wt_nat_patch(begg:,2) = (/ 5._r8, 5._r8, 5._r8/) ! pft2
+ wt_nat_patch(begg:,3) = (/ 0._r8, 4._r8, 35._r8/) ! pft3
+ wt_nat_patch(begg:,4) = (/ 10._r8, 10._r8, 35._r8/) ! pft4
+ wt_nat_patch(begg:,5) = (/ 40._r8, 30._r8, 10._r8/) ! pft5
+ wt_nat_patch(:,:) = wt_nat_patch(:,:) / 100._r8
+ call check_sums_equal_1( wt_nat_patch, begg, "test_check_sums_add_to_1", &
+ "should not trigger an error")
+ do_not_collapse(begg:) = .true.
+
+ ! OUTPUT VALUES EXPECTED
+ wt_nat_patch_expected = wt_nat_patch
+
+ call check_sums_equal_1( wt_nat_patch_expected, begg, "test_check_sums_add_to_1", &
+ "should not trigger an error")
+
+ ! Collapse pfts
+ wt_nat_patch_in_out = wt_nat_patch ! reset argument for next call
+ call collapse_to_dominant(wt_nat_patch_in_out(begg:endg,:), &
+ natpft_lb, natpft_ub, begg, endg, &
+ 1, &
+ do_not_collapse(begg:endg))
+
+ ! Now check that are correct
+ call check_sums_equal_1( wt_nat_patch_in_out, begg, "test_check_sums_add_to_1", &
+ "should not trigger an error")
+
+ @assertEqual(wt_nat_patch_expected(begg:,:), wt_nat_patch_in_out(begg:,:), tolerance=0._r8)
+
+ deallocate( wt_nat_patch_expected )
+ deallocate( wt_nat_patch_in_out )
+ deallocate( wt_nat_patch )
+ deallocate( do_not_collapse )
+
+ call pftcon%clean()
+
+ end subroutine test_collapse_to_dom_do_not_collapse
+
+
+ @Test
+ subroutine test_collapse_to_dom_do_not_collapse_present_false()
+ ! Tests subroutine collapse_to_dominant when used with an optional logical array indicating which gridcells should actually be collapsed
+ !
+ use pftconMod, only: pftcon
+ use clm_instur, only: wt_nat_patch
+ use clm_varpar, only: natpft_lb, natpft_ub
+
+ implicit none
+ integer, parameter :: begg = 2, endg = 4, natpft_size = 15
+ real(r8), allocatable :: wt_nat_patch_expected(:,:)
+ real(r8), allocatable :: wt_nat_patch_in_out(:,:) ! used in subr. call
+ real(r8) :: expctd(9)
+ logical, allocatable :: do_not_collapse(:)
+
+ ! Set relevant pftcon values to defaults; override where necessary
+ call pftcon%InitForTesting()
+ natpft_ub = natpft_size - 1
+ allocate( wt_nat_patch(begg:endg,natpft_lb:natpft_ub) )
+ allocate( wt_nat_patch_expected(begg:endg,natpft_lb:natpft_ub) )
+ allocate( wt_nat_patch_in_out(begg:endg,natpft_lb:natpft_ub) )
+ allocate( do_not_collapse(begg:endg) )
+
+ ! INPUT VALUES
+ wt_nat_patch(begg:,:) = 0._r8 ! initialize
+ wt_nat_patch(begg:,0) = (/ 30._r8, 40._r8, 0._r8/) ! pft0
+ wt_nat_patch(begg:,1) = (/ 15._r8, 11._r8, 15._r8/) ! pft1
+ wt_nat_patch(begg:,2) = (/ 5._r8, 5._r8, 5._r8/) ! pft2
+ wt_nat_patch(begg:,3) = (/ 0._r8, 4._r8, 35._r8/) ! pft3
+ wt_nat_patch(begg:,4) = (/ 10._r8, 10._r8, 35._r8/) ! pft4
+ wt_nat_patch(begg:,5) = (/ 40._r8, 30._r8, 10._r8/) ! pft5
+ wt_nat_patch(:,:) = wt_nat_patch(:,:) / 100._r8
+ call check_sums_equal_1( wt_nat_patch, begg, "test_check_sums_add_to_1", &
+ "should not trigger an error")
+ do_not_collapse(begg:) = .false.
+
+ ! OUTPUT VALUES EXPECTED
+ expctd(1) = 40._r8 / 40._r8
+ expctd(2) = 35._r8 / 35._r8
+ wt_nat_patch_expected(begg:,:) = 0._r8 ! initialize
+ wt_nat_patch_expected(begg:,0) = (/ 0._r8, expctd(1), 0._r8 /) ! pft 0
+ wt_nat_patch_expected(begg:,3) = (/ 0._r8, 0._r8, expctd(2) /) ! pft 3
+ wt_nat_patch_expected(begg:,5) = (/ expctd(1), 0._r8, 0._r8 /) ! pft 5
+
+
+ call check_sums_equal_1( wt_nat_patch_expected, begg, "test_check_sums_add_to_1", &
+ "should not trigger an error")
+
+ ! Collapse pfts
+ wt_nat_patch_in_out = wt_nat_patch ! reset argument for next call
+ call collapse_to_dominant(wt_nat_patch_in_out(begg:endg,:), &
+ natpft_lb, natpft_ub, begg, endg, &
+ 1, &
+ do_not_collapse(begg:endg))
+
+ ! Now check that are correct
+ call check_sums_equal_1( wt_nat_patch_in_out, begg, "test_check_sums_add_to_1", &
+ "should not trigger an error")
+
+ @assertEqual(wt_nat_patch_expected(begg:,:), wt_nat_patch_in_out(begg:,:), tolerance=0._r8)
+
+ deallocate( wt_nat_patch_expected )
+ deallocate( wt_nat_patch_in_out )
+ deallocate( wt_nat_patch )
+ deallocate( do_not_collapse )
+
+ call pftcon%clean()
+
+ end subroutine test_collapse_to_dom_do_not_collapse_present_false
+
+
@Test
subroutine test_collapse_crop_types_none()
! This test sets cftsize = 0, ie crops are lumped together with unmanaged
@@ -598,8 +735,8 @@ contains
call collapse_crop_types( wt_cft, fert_cft, cftsize, begg, endg, verbose = .true.)
! Now check that are correct
- @assertEqual(wt_cft(begg:,:), wt_cft_expected(begg:,:))
- @assertEqual(fert_cft(begg:,:), fert_cft_expected(begg:,:))
+ @assertEqual(wt_cft_expected(begg:,:), wt_cft(begg:,:))
+ @assertEqual(fert_cft_expected(begg:,:), fert_cft(begg:,:))
call pftcon%clean()
end subroutine test_collapse_crop_types_none
@@ -645,11 +782,11 @@ contains
! Now check that are correct
call check_sums_equal_1( wt_cft/100.0_r8, begg, "test_check_sums_add_to_1", &
"should not trigger an error")
- @assertEqual(wt_cft(begg:,:), wt_cft_expected(begg:,:))
+ @assertEqual(wt_cft_expected(begg:,:), wt_cft(begg:,:))
! INTENTIONAL? As written, subr. collapse_crop_types does NOT take
! ----------- the avg fert_cft of the irrigated and unirrigated when
! irrigate = .false.. Assuming intentional for now.
- @assertEqual(fert_cft(begg:,:), fert_cft_expected(begg:,:))
+ @assertEqual(fert_cft_expected(begg:,:), fert_cft(begg:,:))
call pftcon%clean()
end subroutine test_collapse_crop_types_16_to_15
@@ -694,8 +831,8 @@ contains
! Now check that are correct
call check_sums_equal_1( wt_cft/100.0_r8, begg, "test_check_sums_add_to_1", &
"should not trigger an error")
- @assertEqual(wt_cft(begg:,:), wt_cft_expected(begg:,:))
- @assertEqual(fert_cft(begg:,:), fert_cft_expected(begg:,:))
+ @assertEqual(wt_cft_expected(begg:,:), wt_cft(begg:,:))
+ @assertEqual(fert_cft_expected(begg:,:), fert_cft(begg:,:))
call pftcon%clean()
end subroutine test_collapse_crop_types_16_to_16
@@ -750,8 +887,8 @@ contains
! Now check that are correct
call check_sums_equal_1( wt_cft/100.0_r8, begg, "test_check_sums_add_to_1", &
"should not trigger an error")
- @assertEqual(wt_cft(begg:,:2), wt_cft_expected(begg:,:2))
- @assertEqual(fert_cft(begg:,:2), fert_cft_expected(begg:,:2))
+ @assertEqual(wt_cft_expected(begg:,:2), wt_cft(begg:,:2))
+ @assertEqual(fert_cft_expected(begg:,:2), fert_cft(begg:,:2))
call pftcon%clean()
end subroutine test_collapse_crop_types_18_to_16
@@ -806,8 +943,8 @@ contains
! Now check that are correct
call check_sums_equal_1( wt_cft/100.0_r8, begg, "test_check_sums_add_to_1", &
"should not trigger an error")
- @assertEqual(wt_cft(begg:,:2), wt_cft_expected(begg:,:2))
- @assertEqual(fert_cft(begg:,1), fert_cft_expected(begg:,1))
+ @assertEqual(wt_cft_expected(begg:,:2), wt_cft(begg:,:2))
+ @assertEqual(fert_cft_expected(begg:,1), fert_cft(begg:,1))
call pftcon%clean()
end subroutine test_collapse_crop_types_18_to_15
@@ -855,8 +992,8 @@ contains
! Now check that are correct
call check_sums_equal_1( wt_cft/100.0_r8, begg, "test_check_sums_add_to_1", &
"should not trigger an error")
- @assertEqual(wt_cft(begg:,:), wt_cft_expected(begg:,:))
- @assertEqual(fert_cft(begg:,:), fert_cft_expected(begg:,:))
+ @assertEqual(wt_cft_expected(begg:,:), wt_cft(begg:,:))
+ @assertEqual(fert_cft_expected(begg:,:), fert_cft(begg:,:))
call pftcon%clean()
end subroutine test_collapse_crop_types_18_to_18
@@ -914,8 +1051,8 @@ contains
! Now check that are correct
call check_sums_equal_1( wt_cft/100.0_r8, begg, "test_check_sums_add_to_1", &
"should not trigger an error")
- @assertEqual(wt_cft(begg:,:), wt_cft_expected(begg:,:))
- @assertEqual(fert_cft(begg:,:), fert_cft_expected(begg:,:))
+ @assertEqual(wt_cft_expected(begg:,:), wt_cft(begg:,:))
+ @assertEqual(fert_cft_expected(begg:,:), fert_cft(begg:,:))
call pftcon%clean()
end subroutine test_collapse_crop_types_20_to_18
@@ -972,7 +1109,7 @@ contains
call check_sums_equal_1( wt_nat_patch, begg, "test_check_sums_add_to_1", &
"should not trigger an error")
@assertEqual(wtpft,wt_nat_patch)
- @assertEqual(wt_lunit(begg:,istsoil),(/1.00_r8,1.00_r8/))
+ @assertEqual((/1.00_r8,1.00_r8/), wt_lunit(begg:,istsoil))
deallocate( wt_nat_patch )
deallocate( wtpft )
@@ -1023,10 +1160,10 @@ contains
"should not trigger an error")
call check_sums_equal_1( wt_nat_patch, begg, "test_check_sums_add_to_1", &
"should not trigger an error")
- @assertEqual(wt_lunit(begg:,istsoil), (/1.00_r8,1.00_r8/))
- @assertEqual(wt_nat_patch(begg:,ndllf_evr_tmp_tree),(/0.25_r8,0.25_r8/))
- @assertEqual(wt_nat_patch(begg:,nc3crop), (/0.1875_r8,0.1875_r8/))
- @assertEqual(wt_nat_patch(begg:,nc3irrig), (/0.5625_r8,0.5625_r8/))
+ @assertEqual((/1.00_r8,1.00_r8/), wt_lunit(begg:,istsoil))
+ @assertEqual((/0.25_r8,0.25_r8/), wt_nat_patch(begg:,ndllf_evr_tmp_tree))
+ @assertEqual((/0.1875_r8,0.1875_r8/), wt_nat_patch(begg:,nc3crop))
+ @assertEqual((/0.5625_r8,0.5625_r8/), wt_nat_patch(begg:,nc3irrig))
call pftcon%clean()
end subroutine test_convert_cft_to_pft
@@ -1071,7 +1208,7 @@ contains
array(lb+1,lb2+2) = array(lb+1,lb2+2) + eps
call check_sums_equal_1( array, lb, "test_check_sums_add_to_1_fail", &
"should trigger an error", ier)
- @assertEqual(ier,-10)
+ @assertEqual(-10, ier)
end subroutine test_check_sums_add_to_1_fail
@Test
subroutine test_renormalize
@@ -1096,7 +1233,7 @@ contains
! Make the normalized result 100, so multiply the expected result by 100
expected(:,:) = expected(:,:)*100.0d00
call renormalize(array, lb, 100.0d00)
- @assertEqual(array, expected, tolerance=tol)
+ @assertEqual(expected, array, tolerance=tol)
! divide by 100 and should add to one
array = array / 100.0d00
call check_sums_equal_1( array, lb, "test_check_sums_add_to_1", &
@@ -1104,7 +1241,7 @@ contains
! Call again returning error code, make sure error code is zero
call check_sums_equal_1( array, lb, "test_check_sums_add_to_1", &
"should not trigger an error", ier)
- @assertEqual(ier,0)
+ @assertEqual(0, ier)
end subroutine test_renormalize
@Test
@@ -1118,7 +1255,7 @@ contains
array(:,:) = 0.0d00
expected(:,:) = array
call renormalize(array, lb, 100.0d00)
- @assertEqual(array, expected, tolerance=tol)
+ @assertEqual(expected, array, tolerance=tol)
end subroutine test_renormalize_zero
end module test_surfrdUtils
diff --git a/src/utils/clmfates_interfaceMod.F90 b/src/utils/clmfates_interfaceMod.F90
index 7039884847..da167d9266 100644
--- a/src/utils/clmfates_interfaceMod.F90
+++ b/src/utils/clmfates_interfaceMod.F90
@@ -2123,7 +2123,7 @@ subroutine wrap_sunfrac(this,nc,atm2lnd_inst,canopystate_inst)
call t_startf('fates_wrapsunfrac')
- associate( forc_solad => atm2lnd_inst%forc_solad_grc, &
+ associate( forc_solad => atm2lnd_inst%forc_solad_not_downscaled_grc, &
forc_solai => atm2lnd_inst%forc_solai_grc, &
fsun => canopystate_inst%fsun_patch, &
laisun => canopystate_inst%laisun_patch, &