From 8b97aa66264fd2da0606cb5bd32dd567646c0e70 Mon Sep 17 00:00:00 2001 From: Tom Vo Date: Thu, 31 Oct 2024 13:03:48 -0700 Subject: [PATCH] CDAT Migration: Update `mp_partition_driver.py` to use Dataset from `dataset_xr.py` (#883) --- .../regression_test_png.ipynb | 213 +++++++++++++++++ .../871-mp-partition/run_script.py | 8 + .../tropical_subseasonal_driver.py | 215 +++++++++++------- e3sm_diags/driver/mp_partition_driver.py | 29 +-- .../plot/{cartopy => }/mp_partition_plot.py | 0 5 files changed, 369 insertions(+), 96 deletions(-) create mode 100644 auxiliary_tools/cdat_regression_testing/871-mp-partition/regression_test_png.ipynb create mode 100644 auxiliary_tools/cdat_regression_testing/871-mp-partition/run_script.py rename e3sm_diags/plot/{cartopy => }/mp_partition_plot.py (100%) diff --git a/auxiliary_tools/cdat_regression_testing/871-mp-partition/regression_test_png.ipynb b/auxiliary_tools/cdat_regression_testing/871-mp-partition/regression_test_png.ipynb new file mode 100644 index 000000000..82ecaaa62 --- /dev/null +++ b/auxiliary_tools/cdat_regression_testing/871-mp-partition/regression_test_png.ipynb @@ -0,0 +1,213 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CDAT Migration Regression Testing Notebook (`.png` files)\n", + "\n", + "This notebook is used to perform regression testing between the development and\n", + "production versions of a diagnostic set.\n", + "\n", + "## How to use\n", + "\n", + "PREREQUISITE: The diagnostic set's netCDF stored in `.json` files in two directories\n", + "(dev and `main` branches).\n", + "\n", + "1. Make a copy of this notebook under `auxiliary_tools/cdat_regression_testing/`.\n", + "2. Run `mamba create -n cdat_regression_test -y -c conda-forge \"python<3.12\" xarray netcdf4 dask pandas matplotlib-base ipykernel`\n", + "3. Run `mamba activate cdat_regression_test`\n", + "4. Update `SET_DIR` and `SET_NAME` in the copy of your notebook.\n", + "5. Run all cells IN ORDER.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup Code\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import glob\n", + "\n", + "from auxiliary_tools.cdat_regression_testing.utils import get_image_diffs\n", + "\n", + "SET_NAME = \"mp_partition\"\n", + "SET_DIR = \"871-mp-partition\"\n", + "\n", + "DEV_PATH = f\"/global/cfs/cdirs/e3sm/www/cdat-migration-fy24/{SET_DIR}/{SET_NAME}/**\"\n", + "DEV_GLOB = sorted(glob.glob(DEV_PATH + \"/*.png\"))\n", + "DEV_NUM_FILES = len(DEV_GLOB)\n", + "\n", + "MAIN_PATH = f\"/global/cfs/cdirs/e3sm/www/cdat-migration-fy24/main/{SET_NAME}/**\"\n", + "MAIN_GLOB = sorted(glob.glob(MAIN_PATH + \"/*.png\"))\n", + "MAIN_NUM_FILES = len(MAIN_GLOB)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "def _check_if_files_found():\n", + " if DEV_NUM_FILES == 0 or MAIN_NUM_FILES == 0:\n", + " raise IOError(\n", + " \"No files found at DEV_PATH and/or MAIN_PATH. \"\n", + " f\"Please check {DEV_PATH} and {MAIN_PATH}.\"\n", + " )\n", + "\n", + "\n", + "def _check_if_matching_filecount():\n", + " if DEV_NUM_FILES != MAIN_NUM_FILES:\n", + " raise IOError(\n", + " \"Number of files do not match at DEV_PATH and MAIN_PATH \"\n", + " f\"({DEV_NUM_FILES} vs. 
{MAIN_NUM_FILES}).\"\n", + " )\n", + "\n", + " print(f\"Matching file count ({DEV_NUM_FILES} and {MAIN_NUM_FILES}).\")\n", + "\n", + "\n", + "def _check_if_missing_files():\n", + " missing_count = 0\n", + "\n", + " for fp_main in MAIN_GLOB:\n", + " fp_dev = fp_main.replace(SET_DIR, \"main\")\n", + "\n", + " if fp_dev not in MAIN_GLOB:\n", + " print(f\"No production file found to compare with {fp_dev}!\")\n", + " missing_count += 1\n", + "\n", + " for fp_dev in DEV_GLOB:\n", + " fp_main = fp_main.replace(\"main\", SET_DIR)\n", + "\n", + " if fp_main not in DEV_GLOB:\n", + " print(f\"No development file found to compare with {fp_main}!\")\n", + " missing_count += 1\n", + "\n", + " print(f\"Number of files missing: {missing_count}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. Check for matching and equal number of files\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "_check_if_files_found()" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Number of files missing: 0\n" + ] + } + ], + "source": [ + "_check_if_missing_files()" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Matching file count (1 and 1).\n" + ] + } + ], + "source": [ + "_check_if_matching_filecount()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2 Compare the plots between branches\n", + "\n", + "- Compare \"ref\" and \"test\" files\n", + "- \"diff\" files are ignored because getting relative diffs for these does not make sense (relative diff will be above tolerance)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Comparing:\n", + " * /global/cfs/cdirs/e3sm/www/cdat-migration-fy24/main/mp_partition/mixed-phase_partition/mixed-phase_partition.png\n", + " * /global/cfs/cdirs/e3sm/www/cdat-migration-fy24/871-mp-partition/mp_partition/mixed-phase_partition/mixed-phase_partition.png\n", + " * Difference path /global/cfs/cdirs/e3sm/www/cdat-migration-fy24/871-mp-partition/mp_partition/mixed-phase_partition_diff/mixed-phase_partition.png\n" + ] + } + ], + "source": [ + "for main_path, dev_path in zip(MAIN_GLOB, DEV_GLOB):\n", + " print(\"Comparing:\")\n", + " print(f\" * {main_path}\")\n", + " print(f\" * {dev_path}\")\n", + "\n", + " get_image_diffs(dev_path, main_path)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Results\n", + "\n", + "All plots are identical\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.14" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/auxiliary_tools/cdat_regression_testing/871-mp-partition/run_script.py b/auxiliary_tools/cdat_regression_testing/871-mp-partition/run_script.py new file mode 100644 index 000000000..a3ee53296 --- /dev/null +++ b/auxiliary_tools/cdat_regression_testing/871-mp-partition/run_script.py @@ -0,0 +1,8 @@ +from 
auxiliary_tools.cdat_regression_testing.base_run_script import run_set + +SET_NAME = "mp_partition" +SET_DIR = "871-mp-partition" +CFG_PATH: str | None = None +MULTIPROCESSING = True + +run_set(SET_NAME, SET_DIR, CFG_PATH, MULTIPROCESSING) diff --git a/auxiliary_tools/tropical_subseasonal_diags/tropical_subseasonal_driver.py b/auxiliary_tools/tropical_subseasonal_diags/tropical_subseasonal_driver.py index 4e9b209a0..c2aa5854c 100755 --- a/auxiliary_tools/tropical_subseasonal_diags/tropical_subseasonal_driver.py +++ b/auxiliary_tools/tropical_subseasonal_diags/tropical_subseasonal_driver.py @@ -11,7 +11,7 @@ import e3sm_diags from e3sm_diags.driver import utils from e3sm_diags.logger import custom_logger -from e3sm_diags.plot.cartopy.mp_partition_plot import plot +from e3sm_diags.plot.mp_partition_plot import plot import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap, BoundaryNorm @@ -21,17 +21,18 @@ logger = custom_logger(__name__) -# Script to compute and plot spectral powers of a subseasonal tropical field in +# Script to compute and plot spectral powers of a subseasonal tropical field in # zonal wavenumber-frequency space. Both the plot files and files containing the # associated numerical data shown in the plots are created. # Authors: Jim Benedict and Brian Medeiros -# Modified by Jill Zhang to integrate into E3SM Diags. +# Modified by Jill Zhang to integrate into E3SM Diags. + def find_nearest(array, value): array = np.asarray(array) idx = (np.abs(array - value)).argmin() - return idx,array[idx] + return idx, array[idx] """Return index of [array] closest in value to [value] Example: array = [ 0.21069679 0.61290182 0.63425412 0.84635244 0.91599191 0.00213826 @@ -41,156 +42,206 @@ def find_nearest(array, value): """ + def wf_analysis(x, **kwargs): """Return zonal wavenumber-frequency power spectra of x. The returned spectra are: spec_sym: Raw (non-normalized) power spectrum of the component of x that is symmetric about the equator. spec_asym: Raw (non-normalized) power spectrum of the component of x that is antisymmetric about the equator. nspec_sym: Normalized (by a smoothed red-noise background spectrum) power spectrum of the component of x that is symmetric about the equator. nspec_asym: Normalized (by a smoothed red-noise background spectrum) power spectrum of the component of x that is antisymmetric about the equator. - + The NCL version of 'wkSpaceTime' smooths the symmetric and antisymmetric components - along the frequency dimension using a 1-2-1 filter once. - + along the frequency dimension using a 1-2-1 filter once. + """ # Get the "raw" spectral power - # OPTIONAL kwargs: + # OPTIONAL kwargs: # segsize, noverlap, spd, latitude_bounds (tuple: (south, north)), dosymmetries, rmvLowFrq z2 = wf.spacetime_power(x, **kwargs) - z2avg = z2.mean(dim='component') - z2.loc[{'frequency':0}] = np.nan # get rid of spurious power at \nu = 0 (mean) - + z2avg = z2.mean(dim="component") + z2.loc[{"frequency": 0}] = np.nan # get rid of spurious power at \nu = 0 (mean) + # Following NCL's wkSpaceTime, apply one pass of a 1-2-1 filter along the frequency # domain to the raw (non-normalized) spectra/um. # Do not use 0 frequency when smoothing here. # Use weights that sum to 1 to ensure that smoothing is conservative. 
- z2s = wf.smoothFrq121(z2,1) + z2s = wf.smoothFrq121(z2, 1) # The background is supposed to be derived from both symmetric & antisymmetric # Inputs to the background spectrum calculation should be z2avg background = wf.smoothBackground_wavefreq(z2avg) # separate components - spec_sym = z2s[0,...] - spec_asy = z2s[1,...] + spec_sym = z2s[0, ...] + spec_asy = z2s[1, ...] # normalize: Following NCL's wkSpaceTime, use lightly smoothed version of spectra/um # as numerator nspec_sym = spec_sym / background nspec_asy = spec_asy / background - - spec = xr.merge([spec_sym.rename('spec_raw_sym'), spec_asy.rename('spec_raw_asy'), nspec_sym.rename('spec_norm_sym'), nspec_asy.rename('spec_norm_asy'), background.rename('spec_background')], compat='override') - spec_all = spec.drop('component') - spec_all['spec_raw_sym'].attrs = {"component": "symmetric", "type": "raw"} - spec_all['spec_raw_asy'].attrs = {"component": "antisymmetric", "type": "raw"} - spec_all['spec_norm_sym'].attrs = {"component": "symmetric", "type": "normalized"} - spec_all['spec_norm_asy'].attrs = {"component": "antisymmetric", "type": "normalized"} - spec_all['spec_background'].attrs = {"component": "", "type": "background"} + + spec = xr.merge( + [ + spec_sym.rename("spec_raw_sym"), + spec_asy.rename("spec_raw_asy"), + nspec_sym.rename("spec_norm_sym"), + nspec_asy.rename("spec_norm_asy"), + background.rename("spec_background"), + ], + compat="override", + ) + spec_all = spec.drop("component") + spec_all["spec_raw_sym"].attrs = {"component": "symmetric", "type": "raw"} + spec_all["spec_raw_asy"].attrs = {"component": "antisymmetric", "type": "raw"} + spec_all["spec_norm_sym"].attrs = {"component": "symmetric", "type": "normalized"} + spec_all["spec_norm_asy"].attrs = { + "component": "antisymmetric", + "type": "normalized", + } + spec_all["spec_background"].attrs = {"component": "", "type": "background"} return spec_all def calculate_spectrum(path, variable): - var = xr.open_mfdataset(glob.glob(f"{test_data_path}/{variable}_*.nc")).sel( - lat=slice(-15, 15))[variable] + lat=slice(-15, 15) + )[variable] # TODO: subset time - + # Unit conversion if var.name == "PRECT": if var.attrs["units"] == "m/s" or var.attrs["units"] == "m s{-1}": - print("\nBEFORE unit conversion: Max/min of data: " + str(var.values.max()) + " " + str(var.values.min())) - var.values = var.values * 1000. * 86400. # convert m/s to mm/d, do not alter metadata (yet) - var.attrs["units"] = "mm/d" # adjust metadata to reflect change in units - print("\nAFTER unit conversion: Max/min of data: " + str(var.values.max()) + " " + str(var.values.min())) + print( + "\nBEFORE unit conversion: Max/min of data: " + + str(var.values.max()) + + " " + + str(var.values.min()) + ) + var.values = ( + var.values * 1000.0 * 86400.0 + ) # convert m/s to mm/d, do not alter metadata (yet) + var.attrs["units"] = "mm/d" # adjust metadata to reflect change in units + print( + "\nAFTER unit conversion: Max/min of data: " + + str(var.values.max()) + + " " + + str(var.values.min()) + ) if var.name == "precipAvg": if var.attrs["units"] == "mm/hr": - print("\nBEFORE unit conversion: Max/min of data: " + str(var.values.max()) + " " + str(var.values.min())) - var.values = data.values * 24. 
# convert mm/hr to mm/d, do not alter metadata (yet) - var.attrs["units"] = "mm/d" # adjust metadata to reflect change in units - print("\nAFTER unit conversion: Max/min of data: " + str(var.values.max()) + " " + str(var.values.min())) + print( + "\nBEFORE unit conversion: Max/min of data: " + + str(var.values.max()) + + " " + + str(var.values.min()) + ) + var.values = ( + data.values * 24.0 + ) # convert mm/hr to mm/d, do not alter metadata (yet) + var.attrs["units"] = "mm/d" # adjust metadata to reflect change in units + print( + "\nAFTER unit conversion: Max/min of data: " + + str(var.values.max()) + + " " + + str(var.values.min()) + ) # Wavenumber Frequency Analysis spec_all = wf_analysis(var, **opt) - #spec_all.to_netcdf(outDataDir + "/full_spec.nc") + # spec_all.to_netcdf(outDataDir + "/full_spec.nc") return spec_all + # # Options ... right now these only go into wk.spacetime_power() # -do_zooming = False # Set to True to also make plots to zoom into MJO spectral region, - # in addition to the default (larger) spectral region -latBound = (-15,15) # latitude bounds for analysis -spd = 1 # SAMPLES PER DAY -nDayWin = 96 # Wheeler-Kiladis [WK] temporal window length (days) +do_zooming = False # Set to True to also make plots to zoom into MJO spectral region, +# in addition to the default (larger) spectral region +latBound = (-15, 15) # latitude bounds for analysis +spd = 1 # SAMPLES PER DAY +nDayWin = 96 # Wheeler-Kiladis [WK] temporal window length (days) nDaySkip = -60 # time (days) between temporal windows [segments] - # negative means there will be overlapping temporal segments -twoMonthOverlap = -1*nDaySkip +# negative means there will be overlapping temporal segments +twoMonthOverlap = -1 * nDaySkip vari = "PRECT" srcID = "model" outDataDir = "/global/cfs/cdirs/e3sm/www/chengzhu/tests/tropical_diags" outDataDir = "/Users/zhang40/Documents/repos/e3sm_diags/auxiliary_tools/tropical_subseasonal_diags/data" -opt = {'segsize': nDayWin, - 'noverlap': twoMonthOverlap, - 'spd': spd, - 'latitude_bounds': latBound, - 'dosymmetries': True, - 'rmvLowFrq':True} +opt = { + "segsize": nDayWin, + "noverlap": twoMonthOverlap, + "spd": spd, + "latitude_bounds": latBound, + "dosymmetries": True, + "rmvLowFrq": True, +} -#datapath = '/global/cfs/cdirs/e3sm/forsyth/E3SMv2/v2.LR.historical_0201/post/atm/180x360_aave/ts/daily/5yr' -datapath = '/Users/zhang40/Documents/e3sm_diags_data/e3sm_diags_test_data/E3SM_v2_daily' +# datapath = '/global/cfs/cdirs/e3sm/forsyth/E3SMv2/v2.LR.historical_0201/post/atm/180x360_aave/ts/daily/5yr' +datapath = "/Users/zhang40/Documents/e3sm_diags_data/e3sm_diags_test_data/E3SM_v2_daily" from e3sm_diags.parameter.core_parameter import CoreParameter + parameter = CoreParameter() -test_data_path = '/Users/zhang40/Documents/e3sm_diags_data/e3sm_diags_test_data/E3SM_v2_daily' +test_data_path = ( + "/Users/zhang40/Documents/e3sm_diags_data/e3sm_diags_test_data/E3SM_v2_daily" +) parameter.test_data_path = test_data_path parameter.test_timeseries_input = True -parameter.test_start_yr = '2000' -parameter.test_end_yr = '2014' +parameter.test_start_yr = "2000" +parameter.test_end_yr = "2014" parameter.ref_data_path = test_data_path parameter.ref_timeseries_input = True -parameter.ref_start_yr = '2000' -parameter.ref_end_yr = '2014' -parameter.variables = ['PRECT'] +parameter.ref_start_yr = "2000" +parameter.ref_end_yr = "2014" +parameter.variables = ["PRECT"] season = "ANN" test_data = utils.dataset.Dataset(parameter, test=True) -parameter.test_name_yrs = 
utils.general.get_name_and_yrs( - parameter, test_data, season -) +parameter.test_name_yrs = utils.general.get_name_and_yrs(parameter, test_data, season) ref_data = utils.dataset.Dataset(parameter, ref=True) -parameter.ref_name_yrs = utils.general.get_name_and_yrs( - parameter, ref_data, season -) +parameter.ref_name_yrs = utils.general.get_name_and_yrs(parameter, ref_data, season) for variable in parameter.variables: - #test = calculate_spectrum(parameter.test_data_path, variable) - #test.to_netcdf("data/full_spec_test.nc") - #ref = calculate_spectrum(parameter.ref_data_path, variable) - #ref.to_netcdf("data/full_spec_ref.nc") + # test = calculate_spectrum(parameter.test_data_path, variable) + # test.to_netcdf("data/full_spec_test.nc") + # ref = calculate_spectrum(parameter.ref_data_path, variable) + # ref.to_netcdf("data/full_spec_ref.nc") # Below uses intermediate saved files for development - test = xr.open_dataset("/Users/zhang40/Documents/repos/e3sm_diags/auxiliary_tools/tropical_subseasonal_diags/data/full_spec_ref.nc").load() - ref = xr.open_dataset("/Users/zhang40/Documents/repos/e3sm_diags/auxiliary_tools/tropical_subseasonal_diags/data/full_spec_ref.nc").load() + test = xr.open_dataset( + "/Users/zhang40/Documents/repos/e3sm_diags/auxiliary_tools/tropical_subseasonal_diags/data/full_spec_ref.nc" + ).load() + ref = xr.open_dataset( + "/Users/zhang40/Documents/repos/e3sm_diags/auxiliary_tools/tropical_subseasonal_diags/data/full_spec_ref.nc" + ).load() parameter.var_id = variable for diff_name in ["raw_sym", "raw_asy", "norm_sym", "norm_asy", "background"]: - - # Compute percentage difference - diff = 100 * (test[f"spec_{diff_name}"]-ref[f"spec_{diff_name}"])/ref[f"spec_{diff_name}"] - diff.name = f"spec_{diff_name}" - diff.attrs.update(test[f"spec_{diff_name}"].attrs) - parameter.spec_type = diff_name - plot(parameter, test[f"spec_{diff_name}"], ref[f"spec_{diff_name}"], diff) - if "norm" in diff_name: - parameter.spec_type = f"{diff_name}_zoom" - plot(parameter, test[f"spec_{diff_name}"], ref[f"spec_{diff_name}"], diff, do_zoom = True) - - - -display_name, url = create_viewer('.', parameter) + # Compute percentage difference + diff = ( + 100 + * (test[f"spec_{diff_name}"] - ref[f"spec_{diff_name}"]) + / ref[f"spec_{diff_name}"] + ) + diff.name = f"spec_{diff_name}" + diff.attrs.update(test[f"spec_{diff_name}"].attrs) + parameter.spec_type = diff_name + plot(parameter, test[f"spec_{diff_name}"], ref[f"spec_{diff_name}"], diff) + if "norm" in diff_name: + parameter.spec_type = f"{diff_name}_zoom" + plot( + parameter, + test[f"spec_{diff_name}"], + ref[f"spec_{diff_name}"], + diff, + do_zoom=True, + ) + + +display_name, url = create_viewer(".", parameter) print("Viewer Created: ", url) - diff --git a/e3sm_diags/driver/mp_partition_driver.py b/e3sm_diags/driver/mp_partition_driver.py index d1e3da56c..f1602ce0b 100644 --- a/e3sm_diags/driver/mp_partition_driver.py +++ b/e3sm_diags/driver/mp_partition_driver.py @@ -1,3 +1,9 @@ +""" +This analysis set for mixed-phase cloud partition/T5050 metrics is requested by +the E3SM Aerosol Working Group. The script is integrated in e3sm_diags by Jill +Zhang and Yuying Zhang, with contribution from Yunpeng Shan, Jiwen Fan, +Xue Zheng and Susannah Burrows. 
+""" from __future__ import annotations import glob @@ -9,10 +15,10 @@ import xarray as xr from scipy.stats import binned_statistic -import e3sm_diags -from e3sm_diags.driver import utils +from e3sm_diags import INSTALL_PATH +from e3sm_diags.driver.utils.dataset_xr import Dataset from e3sm_diags.logger import custom_logger -from e3sm_diags.plot.cartopy.mp_partition_plot import plot +from e3sm_diags.plot.mp_partition_plot import plot if TYPE_CHECKING: from e3sm_diags.parameter.mp_partition_parameter import MPpartitionParameter @@ -20,8 +26,6 @@ logger = custom_logger(__name__) -# This analysis set for mixed-phase cloud partition/T5050 metrics is requested by the E3SM Aerosol Working Group. The script is integrated in e3sm_diags by Jill Zhang and Yuying Zhang, with contribution from Yunpeng Shan, Jiwen Fan, Xue Zheng and Susannah Burrows. - def flatten_array(var): var_1d = var.stack(stacked=[...]).values @@ -69,7 +73,7 @@ def run_diag(parameter: MPpartitionParameter) -> MPpartitionParameter: # Read reference data first benchmark_data_path = os.path.join( - e3sm_diags.INSTALL_PATH, + INSTALL_PATH, "control_runs", "mixed-phase_partition_data_1985-2014.json", ) @@ -80,7 +84,7 @@ def run_diag(parameter: MPpartitionParameter) -> MPpartitionParameter: # parse file metrics_dict = json.loads(lcf_file) - test_data = utils.dataset.Dataset(parameter, test=True) + test_data = Dataset(parameter, data_type="test") # test = test_data.get_timeseries_variable("LANDFRAC") # print(dir(test)) # landfrac = test_data.get_timeseries_variable("LANDFRAC")(cdutil.region.domain(latitude=(-70.0, -30, "ccb"))) @@ -112,9 +116,7 @@ def run_diag(parameter: MPpartitionParameter) -> MPpartitionParameter: ) raise - parameter.test_name_yrs = utils.general.get_name_and_yrs( - parameter, test_data, season - ) + parameter.test_name_yrs = test_data.get_name_yrs_attr(season) # type: ignore metrics_dict["test"] = {} metrics_dict["test"]["T"], metrics_dict["test"]["LCF"] = compute_lcf( @@ -122,7 +124,8 @@ def run_diag(parameter: MPpartitionParameter) -> MPpartitionParameter: ) if run_type == "model-vs-model": - ref_data = utils.dataset.Dataset(parameter, ref=True) + ref_data = Dataset(parameter, data_type="ref") + ref_data_path = parameter.reference_data_path start_year = parameter.ref_start_yr end_year = parameter.ref_end_yr @@ -162,9 +165,7 @@ def run_diag(parameter: MPpartitionParameter) -> MPpartitionParameter: # cliq = ref_data.get_timeseries_variable("CLDLIQ")( # cdutil.region.domain(latitude=(-70.0, -30, "ccb")) # ) - parameter.ref_name_yrs = utils.general.get_name_and_yrs( - parameter, ref_data, season - ) + parameter.ref_name_yrs = ref_data.get_name_yrs_attr(season) # type: ignore metrics_dict["ref"] = {} metrics_dict["ref"]["T"], metrics_dict["ref"]["LCF"] = compute_lcf( cice, cliq, temp, landfrac diff --git a/e3sm_diags/plot/cartopy/mp_partition_plot.py b/e3sm_diags/plot/mp_partition_plot.py similarity index 100% rename from e3sm_diags/plot/cartopy/mp_partition_plot.py rename to e3sm_diags/plot/mp_partition_plot.py