From 2a94de6a6ea774a6006d2ab71463ecfb811d6512 Mon Sep 17 00:00:00 2001 From: Jake <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Date: Sat, 17 Feb 2024 18:54:41 +0000 Subject: [PATCH 1/6] type(utils): Specify that TrialData is SINDyTrialData, and type out amax func --- pyproject.toml | 2 +- src/gen_experiments/__init__.py | 6 +-- src/gen_experiments/config.py | 4 +- src/gen_experiments/data.py | 2 +- src/gen_experiments/gridsearch.py | 4 +- src/gen_experiments/odes.py | 12 ++--- src/gen_experiments/pdes.py | 10 ++-- src/gen_experiments/utils.py | 76 ++++++++++++++++++++++--------- 8 files changed, 74 insertions(+), 42 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 324197d..d2613b7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ name = "pysindy-experiments" dynamic = ["version"] description = "My general exam experiments" readme = "README.md" -requires-python = ">=3.9" +requires-python = ">=3.11" license = {file = "LICENSE"} keywords = ["Machine Learning", "Science", "Mathematics", "Experiments"] authors = [ diff --git a/src/gen_experiments/__init__.py b/src/gen_experiments/__init__.py index 7f38c6b..fbffc64 100644 --- a/src/gen_experiments/__init__.py +++ b/src/gen_experiments/__init__.py @@ -8,7 +8,7 @@ from pysindy import BaseDifferentiation, FiniteDifference, SINDy # type: ignore from . import gridsearch, odes, pdes -from .utils import TrialData +from .utils import SINDyTrialData this_module = importlib.import_module(__name__) BORING_ARRAY = np.ones((2, 2)) @@ -38,13 +38,13 @@ class NoExperiment: @staticmethod def run( *args: Any, return_all: bool = True, **kwargs: Any - ) -> Scores | tuple[Scores, TrialData]: + ) -> Scores | tuple[Scores, SINDyTrialData]: metrics = defaultdict( lambda: 1, main=1, ) if return_all: - trial_data: TrialData = { + trial_data: SINDyTrialData = { "dt": 1, "coeff_true": BORING_ARRAY[:1], "coeff_fit": BORING_ARRAY[:1], diff --git a/src/gen_experiments/config.py b/src/gen_experiments/config.py index 9efb66f..2e9137c 100644 --- a/src/gen_experiments/config.py +++ b/src/gen_experiments/config.py @@ -5,7 +5,7 @@ from gen_experiments.data import _signal_avg_power from gen_experiments.plotting import _PlotPrefs -from gen_experiments.utils import FullTrialData, NestedDict, SeriesDef, SeriesList +from gen_experiments.utils import FullSINDyTrialData, NestedDict, SeriesDef, SeriesList T = TypeVar("T") U = TypeVar("U") @@ -16,7 +16,7 @@ def ND(d: dict[T, U]) -> NestedDict[T, U]: def _convert_abs_rel_noise( - grid_vals: list, grid_params: list, recent_results: FullTrialData + grid_vals: list, grid_params: list, recent_results: FullSINDyTrialData ): """Convert abs_noise grid_vals to rel_noise""" signal = np.stack(recent_results["x_true"], axis=-1) diff --git a/src/gen_experiments/data.py b/src/gen_experiments/data.py index 51fc6e4..a3091ad 100644 --- a/src/gen_experiments/data.py +++ b/src/gen_experiments/data.py @@ -218,7 +218,7 @@ def gen_pde_data( return dt, t_train, x_train, x_test, x_dot_test, x_train_true -def _max_amplitude(signal: np.ndarray): +def _max_amplitude(signal: np.ndarray) -> float: return np.abs(scipy.fft.rfft(signal, axis=0)[1:]).max() / np.sqrt(len(signal)) diff --git a/src/gen_experiments/gridsearch.py b/src/gen_experiments/gridsearch.py index 676ad49..e7aa488 100644 --- a/src/gen_experiments/gridsearch.py +++ b/src/gen_experiments/gridsearch.py @@ -20,7 +20,7 @@ SavedData, SeriesDef, SeriesList, - TrialData, + SINDyTrialData, _amax_to_full_inds, _argopt, _grid_locator_match, @@ -115,7 +115,7 @@ def run( 
curr_results, grid_data = base_ex.run( new_seed, **curr_other_params, display=False, return_all=True ) - grid_data: TrialData + grid_data: SINDyTrialData intermediate_data.append( {"params": curr_other_params.flatten(), "pind": ind, "data": grid_data} ) diff --git a/src/gen_experiments/odes.py b/src/gen_experiments/odes.py index 8d0cc29..2f3acf3 100644 --- a/src/gen_experiments/odes.py +++ b/src/gen_experiments/odes.py @@ -12,8 +12,8 @@ plot_training_data, ) from .utils import ( - FullTrialData, - TrialData, + FullSINDyTrialData, + SINDyTrialData, _make_model, coeff_metrics, integration_metrics, @@ -161,7 +161,7 @@ def run( opt_params: dict, display: bool = True, return_all: bool = False, -) -> dict | tuple[dict, TrialData | FullTrialData]: +) -> dict | tuple[dict, SINDyTrialData | FullSINDyTrialData]: rhsfunc = ode_setup[group]["rhsfunc"] input_features = ode_setup[group]["input_features"] coeff_true = ode_setup[group]["coeff_true"] @@ -187,7 +187,7 @@ def run( coeff_true, coefficients, feature_names = unionize_coeff_matrices(model, coeff_true) sim_ind = -1 - trial_data: TrialData = { + trial_data: SINDyTrialData = { "dt": dt, "coeff_true": coeff_true, "coeff_fit": coefficients, @@ -202,7 +202,7 @@ def run( "model": model, } if display: - trial_data: FullTrialData = trial_data | simulate_test_data( + trial_data: FullSINDyTrialData = trial_data | simulate_test_data( trial_data["model"], trial_data["dt"], trial_data["x_test"] ) plot_ode_panel(trial_data) @@ -214,7 +214,7 @@ def run( return metrics -def plot_ode_panel(trial_data: FullTrialData): +def plot_ode_panel(trial_data: FullSINDyTrialData): trial_data["model"].print() plot_training_data( trial_data["x_train"], trial_data["x_true"], trial_data["smooth_train"] diff --git a/src/gen_experiments/pdes.py b/src/gen_experiments/pdes.py index c7f905e..8cb712b 100644 --- a/src/gen_experiments/pdes.py +++ b/src/gen_experiments/pdes.py @@ -5,8 +5,8 @@ from .data import gen_pde_data from .plotting import compare_coefficient_plots, plot_pde_training_data from .utils import ( - FullTrialData, - TrialData, + FullSINDyTrialData, + SINDyTrialData, _make_model, coeff_metrics, integration_metrics, @@ -149,7 +149,7 @@ def run( opt_params: dict, display: bool = True, return_all: bool = False, -) -> dict | tuple[dict, TrialData | FullTrialData]: +) -> dict | tuple[dict, SINDyTrialData | FullSINDyTrialData]: rhsfunc = pde_setup[group]["rhsfunc"]["func"] input_features = pde_setup[group]["input_features"] initial_condition = sim_params["init_cond"] @@ -177,7 +177,7 @@ def run( coeff_true, coefficients, feature_names = unionize_coeff_matrices(model, coeff_true) sim_ind = -1 - trial_data: TrialData = { + trial_data: SINDyTrialData = { "dt": dt, "coeff_true": coeff_true, "coeff_fit": coefficients, @@ -192,7 +192,7 @@ def run( "model": model, } if display: - trial_data: FullTrialData = trial_data | simulate_test_data( + trial_data: FullSINDyTrialData = trial_data | simulate_test_data( trial_data["model"], trial_data["dt"], trial_data["x_test"] ) trial_data["model"].print() diff --git a/src/gen_experiments/utils.py b/src/gen_experiments/utils.py index 0bc4da2..4d53aff 100644 --- a/src/gen_experiments/utils.py +++ b/src/gen_experiments/utils.py @@ -2,7 +2,16 @@ from dataclasses import dataclass from itertools import chain from types import EllipsisType as ellipsis -from typing import Annotated, Any, Collection, Optional, Sequence, TypedDict, TypeVar +from typing import ( + Annotated, + Any, + Collection, + Optional, + Sequence, + TypedDict, + TypeVar, + cast, +) 
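The rename above is mechanical, but the pattern it relies on is worth spelling out: a TypedDict such as SINDyTrialData lets plain dict literals carry per-key types that mypy can check, at no runtime cost. A trimmed, hypothetical sketch (only a few of the real keys; the full class lives in utils.py, and the names TrialRecord and max_coeff_error are this sketch's own):

    from typing import Annotated, TypedDict

    import numpy as np


    class TrialRecord(TypedDict):
        # Stand-in for SINDyTrialData carrying only a few of its keys.
        dt: float
        coeff_true: Annotated[np.ndarray, "(n_coord, n_features)"]
        coeff_fit: Annotated[np.ndarray, "(n_coord, n_features)"]


    def max_coeff_error(trial: TrialRecord) -> float:
        # mypy checks both the key names and the value types used here.
        return float(np.abs(trial["coeff_true"] - trial["coeff_fit"]).max())


    record: TrialRecord = {
        "dt": 0.01,
        "coeff_true": np.ones((2, 3)),
        "coeff_fit": np.zeros((2, 3)),
    }
    print(max_coeff_error(record))  # 1.0

Because the checked name is the annotation rather than the dict itself, renaming TrialData to SINDyTrialData only touches annotations; every call site keeps passing ordinary dicts.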
from warnings import warn import auto_ks as aks @@ -10,16 +19,23 @@ import numpy as np import pysindy as ps import sklearn -from numpy.typing import DTypeLike, NDArray +import sklearn.metrics +from numpy.typing import DTypeLike, NBitBase, NDArray +NpFlt = np.dtype[np.floating[NBitBase]] +Float1D = np.ndarray[tuple[int], NpFlt] +Float2D = np.ndarray[tuple[int, int], NpFlt] +Shape = TypeVar("Shape", bound=tuple[int, ...]) +FloatND = np.ndarray[Shape, np.dtype[np.floating[NBitBase]]] -class TrialData(TypedDict): + +class SINDyTrialData(TypedDict): dt: float - coeff_true: Annotated[np.ndarray, "(n_coord, n_features)"] - coeff_fit: Annotated[np.ndarray, "(n_coord, n_features)"] + coeff_true: Annotated[Float2D, "(n_coord, n_features)"] + coeff_fit: Annotated[Float2D, "(n_coord, n_features)"] feature_names: Annotated[list[str], "length=n_features"] input_features: Annotated[list[str], "length=n_coord"] - t_train: np.ndarray + t_train: Float1D x_train: np.ndarray x_true: np.ndarray smooth_train: np.ndarray @@ -28,20 +44,25 @@ class TrialData(TypedDict): model: ps.SINDy -class FullTrialData(TrialData): - t_sim: np.ndarray +class SINDyTrialUpdate(TypedDict): + t_sim: Float1D + t_test: Float1D + x_sim: FloatND + + +class FullSINDyTrialData(SINDyTrialData): + t_sim: Float1D x_sim: np.ndarray class SavedData(TypedDict): params: dict pind: tuple[int] - data: TrialData | FullTrialData + data: SINDyTrialData | FullSINDyTrialData T = TypeVar("T", bound=np.generic) GridsearchResult = Annotated[NDArray[T], "(n_metrics, n_plot_axis)"] # type: ignore - SeriesData = Annotated[ list[ tuple[ @@ -215,28 +236,28 @@ def finalize_param(lookup_func, pdict, lookup_key): return ps.SINDy( differentiation_method=diff, optimizer=opt, - t_default=dt, + t_default=dt, # type: ignore feature_library=features, feature_names=input_features, ) -def simulate_test_data(model: ps.SINDy, dt: float, x_test: np.ndarray) -> TrialData: +def simulate_test_data(model: ps.SINDy, dt: float, x_test: Float2D) -> SINDyTrialUpdate: """Add simulation data to grid_data This includes the t_sim and x_sim keys. Does not mutate argument. Returns: Complete GridPointData """ - t_test = np.arange(len(x_test) * dt, step=dt) + t_test = cast(Float1D, np.arange(0, len(x_test) * dt, step=dt)) t_sim = t_test try: - x_sim = model.simulate(x_test[0], t_test) + x_sim = cast(Float2D, model.simulate(x_test[0], t_test)) except ValueError: warn(message="Simulation blew up; returning zeros") x_sim = np.zeros_like(x_test) # truncate if integration returns wrong number of points - t_sim = t_test[: len(x_sim)] + t_sim = cast(Float1D, t_test[: len(x_sim)]) return {"t_sim": t_sim, "x_sim": x_sim, "t_test": t_test} @@ -350,7 +371,7 @@ def __setitem__(self, key, value): else: return super().__setitem__(key, value) - def update(self, other: dict): + def update(self, other: dict): # type: ignore try: for k, v in other.items(): self.__setitem__(k, v) @@ -414,9 +435,20 @@ def proj(curr_params, t): def _amax_to_full_inds( - amax_inds: Collection[tuple[int | slice, int] | ellipsis], - amax_arrays: list[list[GridsearchResult]], + amax_inds: Collection[tuple[int | slice, int] | ellipsis] | ellipsis, + amax_arrays: list[list[GridsearchResult[np.void]]], ) -> set[tuple[int, ...]]: + """Find full indexers to selected elements of argmax arrays + + Args: + amax_inds: selection statemtent of which argmaxes to return. + amax_arrays: arrays of indexes to full gridsearch that are responsible for + the computed max values. 
First level of nesting reflects series(?), second + level reflects which grid grid axis. + Returns: + all indexers to full gridsearch that are requested by amax_inds + """ + def np_to_primitive(tuple_like: np.void) -> tuple[int, ...]: return tuple(int(el) for el in tuple_like) @@ -438,15 +470,15 @@ def np_to_primitive(tuple_like: np.void) -> tuple[int, ...]: for el in arr.flatten() } elif isinstance(ind[0], int): - all_inds |= {np_to_primitive(plot_axis_results[ind])} + all_inds |= {np_to_primitive(cast(np.void, plot_axis_results[ind]))} else: # ind[0] is slice(None) all_inds |= {np_to_primitive(el) for el in plot_axis_results[ind]} return all_inds def _argopt( - arr: np.ndarray, axis: int | tuple[int, ...] = None, opt: str = "max" -) -> np.ndarray[tuple[int, ...]]: + arr: FloatND, axis: Optional[int | tuple[int, ...]] = None, opt: str = "max" +) -> NDArray[tuple[int, ...]]: """Calculate the argmax/min, but accept tuple axis. Ignores NaN values @@ -554,7 +586,7 @@ def strict_find_grid_match( *, params: Optional[dict[str, Any]] = None, ind_spec: Optional[tuple[int | slice, int] | ellipsis] = None, -) -> TrialData: +) -> SINDyTrialData: if params is None: params = {} if ind_spec is None: From 64b7f0572990ff65edfd220892f9db094c40ab1f Mon Sep 17 00:00:00 2001 From: Jake <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Date: Sat, 17 Feb 2024 19:42:09 +0000 Subject: [PATCH 2/6] typing: Add mypy settings and finish types in utils --- pyproject.toml | 16 ++++++++++++++++ src/gen_experiments/utils.py | 7 +++++-- tests/test_all.py | 2 ++ 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index d2613b7..95a2dc6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -93,3 +93,19 @@ markers = ["slow"] [tool.mypy] files = ["src/gen_experiments/__init__.py"] + +[[tool.mypy.overrides]] +module="auto_ks.*" +ignore_missing_imports=true + +[[tool.mypy.overrides]] +module="sklearn.*" +ignore_missing_imports=true + +[[tool.mypy.overrides]] +module="pysindy.*" +ignore_missing_imports=true + +[[tool.mypy.overrides]] +module="kalman.*" +ignore_missing_imports=true diff --git a/src/gen_experiments/utils.py b/src/gen_experiments/utils.py index 4d53aff..7841f4d 100644 --- a/src/gen_experiments/utils.py +++ b/src/gen_experiments/utils.py @@ -1,4 +1,5 @@ from collections import defaultdict +from collections.abc import Iterable from dataclasses import dataclass from itertools import chain from types import EllipsisType as ellipsis @@ -450,7 +451,7 @@ def _amax_to_full_inds( """ def np_to_primitive(tuple_like: np.void) -> tuple[int, ...]: - return tuple(int(el) for el in tuple_like) + return tuple(int(el) for el in cast(Iterable, tuple_like)) if amax_inds is ...: # grab each element from arrays in list of lists of arrays return { @@ -478,7 +479,7 @@ def np_to_primitive(tuple_like: np.void) -> tuple[int, ...]: def _argopt( arr: FloatND, axis: Optional[int | tuple[int, ...]] = None, opt: str = "max" -) -> NDArray[tuple[int, ...]]: +) -> NDArray[np.void]: """Calculate the argmax/min, but accept tuple axis. 
Ignores NaN values @@ -494,6 +495,8 @@ def _argopt( tuples of length m """ dtype: DTypeLike = [(f"f{axind}", "i") for axind in range(arr.ndim)] + if axis is None: + axis = () axis = (axis,) if isinstance(axis, int) else axis keep_axes = tuple(sorted(set(range(arr.ndim)) - set(axis))) keep_shape = tuple(arr.shape[ax] for ax in keep_axes) diff --git a/tests/test_all.py b/tests/test_all.py index dd5c789..962d0df 100644 --- a/tests/test_all.py +++ b/tests/test_all.py @@ -96,6 +96,8 @@ def test_argopt_empty_tuple_axis(): result = utils._argopt(arr, ()) expected = np.array([(0,), (1,), (2,), (3,)], dtype=[("f0", "i")]) np.testing.assert_array_equal(result, expected) + result = utils._argopt(arr, None) + pass def test_argopt_int_axis(): From e8a75c2185710318b59fb564cfd8982dfa5dc3de Mon Sep 17 00:00:00 2001 From: Jake <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Date: Sun, 18 Feb 2024 15:18:26 +0000 Subject: [PATCH 3/6] ok even better typing --- pyproject.toml | 4 ++++ src/gen_experiments/data.py | 35 ++++++++++++++++++----------------- src/gen_experiments/debug.py | 25 +++++++++++++++++++++++++ src/gen_experiments/utils.py | 4 ++-- 4 files changed, 49 insertions(+), 19 deletions(-) create mode 100644 src/gen_experiments/debug.py diff --git a/pyproject.toml b/pyproject.toml index 95a2dc6..1b9ce17 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -109,3 +109,7 @@ ignore_missing_imports=true [[tool.mypy.overrides]] module="kalman.*" ignore_missing_imports=true + +[[tool.mypy.overrides]] +module="scipy.*" +ignore_missing_imports=true diff --git a/src/gen_experiments/data.py b/src/gen_experiments/data.py index a3091ad..0d651f2 100644 --- a/src/gen_experiments/data.py +++ b/src/gen_experiments/data.py @@ -1,31 +1,31 @@ from math import ceil from pathlib import Path -from typing import Callable +from typing import Callable, Optional, cast from warnings import warn import mitosis import numpy as np import scipy -from gen_experiments.utils import GridsearchResultDetails +from gen_experiments.utils import Float1D, Float2D, GridsearchResultDetails INTEGRATOR_KEYWORDS = {"rtol": 1e-12, "method": "LSODA", "atol": 1e-12} TRIALS_FOLDER = Path(__file__).parent.absolute() / "trials" def gen_data( - rhs_func, - n_coord, - seed=None, - n_trajectories=1, - x0_center=None, - ic_stdev=3, - noise_abs=None, - noise_rel=None, - nonnegative=False, - dt=0.01, - t_end=10, -): + rhs_func: Callable, + n_coord: int, + seed: Optional[int] = None, + n_trajectories: int = 1, + x0_center: Optional[Float1D] = None, + ic_stdev: float = 3, + noise_abs: Optional[float] = None, + noise_rel: Optional[float] = None, + nonnegative: bool = False, + dt: float = 0.01, + t_end: float = 10, +) -> tuple[float, Float1D, Float2D, Float2D, Float2D, Float2D]: """Generate random training and test data Note that test data has no noise. 
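Returning to the _argopt helper typed in the patch above: it reduces over a tuple of axes and records, for each remaining grid position, the index of the optimum as a structured (np.void) record. A minimal, independent illustration of the reduce-over-a-tuple-of-axes step, not the project's implementation (the helper name and plain integer output are this sketch's own):

    import numpy as np


    def argmax_tuple_axis(arr: np.ndarray, axis: tuple[int, ...]) -> np.ndarray:
        # Reduce over the axes in `axis`; for each remaining position return
        # the indices along the reduced axes where the maximum occurs. (The
        # project's _argopt instead packs the full index over all axes into a
        # structured dtype and also handles NaNs and argmin.)
        keep = tuple(sorted(set(range(arr.ndim)) - set(axis)))
        moved = np.moveaxis(arr, keep, tuple(range(len(keep))))  # kept axes first
        flat = moved.reshape(*(arr.shape[ax] for ax in keep), -1)
        flat_arg = flat.argmax(axis=-1)
        reduced_shape = tuple(arr.shape[ax] for ax in axis)
        # One index per reduced axis, stacked along a trailing axis.
        return np.stack(np.unravel_index(flat_arg, reduced_shape), axis=-1)


    arr = np.arange(24, dtype=float).reshape(2, 3, 4)
    print(argmax_tuple_axis(arr, axis=(1, 2)))  # [[2 3]
                                                #  [2 3]]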
@@ -57,7 +57,7 @@ def gen_data( rng = np.random.default_rng(seed) if x0_center is None: x0_center = np.zeros((n_coord)) - t_train = np.arange(0, t_end, dt) + t_train = np.arange(0, t_end, dt, dtype=np.float_) t_train_span = (t_train[0], t_train[-1]) if nonnegative: shape = ((x0_center + 1) / ic_stdev) ** 2 @@ -123,10 +123,11 @@ def _alert_short(arr): x_train_true = np.copy(x_train) if noise_rel is not None: noise_abs = np.sqrt(_signal_avg_power(x_test) * noise_rel) - x_train = x_train + noise_abs * rng.standard_normal(x_train.shape) + x_train = x_train + cast(float, noise_abs) * rng.standard_normal(x_train.shape) x_train = list(x_train) x_test = list(x_test) x_dot_test = list(x_dot_test) + x_train_true = list(x_train_true) return dt, t_train, x_train, x_test, x_dot_test, x_train_true @@ -211,7 +212,7 @@ def gen_pde_data( x_train_true = np.copy(x_train) if noise_rel is not None: noise_abs = _max_amplitude(x_test) * noise_rel - x_train = x_train + noise_abs * rng.standard_normal(x_train.shape) + x_train = x_train + cast(float, noise_abs) * rng.standard_normal(x_train.shape) x_train = [np.moveaxis(x_train, 0, -2)] x_train_true = np.moveaxis(x_train_true, 0, -2) x_test = [np.moveaxis(x_test, [0, 1], [-1, -2])] diff --git a/src/gen_experiments/debug.py b/src/gen_experiments/debug.py new file mode 100644 index 0000000..a1193ca --- /dev/null +++ b/src/gen_experiments/debug.py @@ -0,0 +1,25 @@ +from typing import Annotated, Generic, TypedDict, TypeVar + +import numpy as np +from numpy.typing import DTypeLike, NBitBase, NDArray + +# T = TypeVar("T") + +# class Foo[T]: +# items: list[T] + +# def __init__(self, thing: T): +# self.items = [thing, thing] + +# Bar = + + +T = TypeVar("T", bound=np.generic) +Foo = NDArray[T] +Bar = Annotated[NDArray, "foobar"] + +lil_foo = NDArray[np.void] + + +def baz(qux: Foo[np.void]): + pass diff --git a/src/gen_experiments/utils.py b/src/gen_experiments/utils.py index 7841f4d..65580ee 100644 --- a/src/gen_experiments/utils.py +++ b/src/gen_experiments/utils.py @@ -63,12 +63,12 @@ class SavedData(TypedDict): T = TypeVar("T", bound=np.generic) -GridsearchResult = Annotated[NDArray[T], "(n_metrics, n_plot_axis)"] # type: ignore +GridsearchResult = Annotated[NDArray[T], "(n_metrics, n_plot_axis)"] SeriesData = Annotated[ list[ tuple[ Annotated[GridsearchResult, "metrics"], - Annotated[GridsearchResult, "arg_opts"], + Annotated[GridsearchResult[np.void], "arg_opts"], ] ], "len=n_grid_axes", From fa382fb77d3f126dc01a0fbf8546c97da3774319 Mon Sep 17 00:00:00 2001 From: Jake <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Date: Sun, 18 Feb 2024 18:04:44 +0000 Subject: [PATCH 4/6] feat(plots): Also remove ticks when labels=False in train/test plots --- src/gen_experiments/plotting.py | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/src/gen_experiments/plotting.py b/src/gen_experiments/plotting.py index 1c71180..0aa09bd 100644 --- a/src/gen_experiments/plotting.py +++ b/src/gen_experiments/plotting.py @@ -6,6 +6,7 @@ import numpy as np import scipy import seaborn as sns +from matplotlib.axes import Axes PAL = sns.color_palette("Set1") PLOT_KWS = {"alpha": 0.7, "linewidth": 3} @@ -128,7 +129,7 @@ def signed_sqrt(x): def plot_training_trajectory( - ax: plt.Axes, + ax: Axes, x_train: np.ndarray, x_true: np.ndarray, x_smooth: np.ndarray, @@ -156,6 +157,8 @@ def plot_training_trajectory( ) if labels: ax.set(xlabel="$x_0$", ylabel="$x_1$") + else: + ax.set(xticks=[], yticks=[]) elif x_train.shape[1] == 3: ax.plot( x_true[:, 
0], @@ -187,6 +190,8 @@ def plot_training_trajectory( ) if labels: ax.set(xlabel="$x$", ylabel="$y$", zlabel="$z$") + else: + ax.set(xticks=[], yticks=[], zticks=[]) else: raise ValueError("Can only plot 2d or 3d data.") @@ -226,7 +231,7 @@ def plot_pde_training_data(last_train, last_train_true, smoothed_last_train): def plot_test_sim_data_1d_panel( - axs: Sequence[plt.Axes], + axs: Sequence[Axes], x_test: np.ndarray, x_sim: np.ndarray, t_test: np.ndarray, @@ -240,31 +245,33 @@ def plot_test_sim_data_1d_panel( def _plot_test_sim_data_2d( - axs: Annotated[Sequence[plt.Axes], "len=2"], + axs: Annotated[Sequence[Axes], "len=2"], x_test: np.ndarray, x_sim: np.ndarray, labels: bool = True, ) -> None: axs[0].plot(x_test[:, 0], x_test[:, 1], "k", label="True Trajectory") - if labels: - axs[0].set(xlabel="$x_0$", ylabel="$x_1$") axs[1].plot(x_sim[:, 0], x_sim[:, 1], "r--", label="Simulation") - if labels: - axs[1].set(xlabel="$x_0$", ylabel="$x_1$") + for ax in axs: + if labels: + ax.set(xlabel="$x_0$", ylabel="$x_1$") + else: + ax.set(xticks=[], yticks=[]) def _plot_test_sim_data_3d( - axs: Annotated[Sequence[plt.Axes], "len=3"], + axs: Annotated[Sequence[Axes], "len=3"], x_test: np.ndarray, x_sim: np.ndarray, labels: bool = True, ) -> None: axs[0].plot(x_test[:, 0], x_test[:, 1], x_test[:, 2], "k", label="True Trajectory") - if labels: - axs[0].set(xlabel="$x_0$", ylabel="$x_1$", zlabel="$x_2$") axs[1].plot(x_sim[:, 0], x_sim[:, 1], x_sim[:, 2], "r--", label="Simulation") - if labels: - axs[1].set(xlabel="$x_0$", ylabel="$x_1$", zlabel="$x_2$") + for ax in axs: + if labels: + ax.set(xlabel="$x_0$", ylabel="$x_1$", zlabel="$x_2$") + else: + ax.set(xticks=[], yticks=[], zticks=[]) def plot_test_trajectories( From 69a65b6986ed877bec5be5d842a0593456e6af54 Mon Sep 17 00:00:00 2001 From: Jake <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Date: Sun, 18 Feb 2024 19:53:56 +0000 Subject: [PATCH 5/6] bld: Revert python version 3.9 --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 1b9ce17..94d1b14 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ name = "pysindy-experiments" dynamic = ["version"] description = "My general exam experiments" readme = "README.md" -requires-python = ">=3.11" +requires-python = ">=3.9" license = {file = "LICENSE"} keywords = ["Machine Learning", "Science", "Mathematics", "Experiments"] authors = [ From 5369b2db5b508a8add72e4c0bf732bdd31e999d1 Mon Sep 17 00:00:00 2001 From: Jake <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Date: Sun, 18 Feb 2024 20:04:41 +0000 Subject: [PATCH 6/6] types(mock): Set dtype arg to get right types --- pyproject.toml | 2 +- src/gen_experiments/__init__.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 94d1b14..81edcf7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -92,7 +92,7 @@ addopts = '-m "not slow"' markers = ["slow"] [tool.mypy] -files = ["src/gen_experiments/__init__.py"] +files = ["src/gen_experiments/__init__.py", "src/gen_experiments/utils.py"] [[tool.mypy.overrides]] module="auto_ks.*" diff --git a/src/gen_experiments/__init__.py b/src/gen_experiments/__init__.py index fbffc64..f25bde1 100644 --- a/src/gen_experiments/__init__.py +++ b/src/gen_experiments/__init__.py @@ -11,7 +11,7 @@ from .utils import SINDyTrialData this_module = importlib.import_module(__name__) -BORING_ARRAY = np.ones((2, 2)) +BORING_ARRAY = np.ones((2, 2), dtype=float) Scores = Mapping[str, float] 
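On the plotting patch above: the new else branches strip ticks from unlabeled panels via Axes.set, which forwards keyword arguments to the matching setters (set_xticks, set_yticks, and so on). A small self-contained sketch of that toggle (figure contents and output file name are illustrative only):

    import matplotlib.pyplot as plt
    import numpy as np
    from matplotlib.axes import Axes


    def plot_trajectory(ax: Axes, xy: np.ndarray, labels: bool = True) -> None:
        # Same labels/ticks toggle as the patched training and test plots:
        # either label the axes, or remove the ticks for a clean panel.
        ax.plot(xy[:, 0], xy[:, 1], "k")
        if labels:
            ax.set(xlabel="$x_0$", ylabel="$x_1$")
        else:
            ax.set(xticks=[], yticks=[])


    t = np.linspace(0, 2 * np.pi, 200)
    circle = np.column_stack([np.cos(t), np.sin(t)])
    fig, (ax_labeled, ax_bare) = plt.subplots(1, 2)
    plot_trajectory(ax_labeled, circle, labels=True)
    plot_trajectory(ax_bare, circle, labels=False)
    fig.savefig("tick_toggle_demo.png")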
@@ -51,7 +51,7 @@ def run( # "coefficients": boring_array, "feature_names": ["1"], "input_features": ["x", "y"], - "t_train": np.arange(0, 1, 1), + "t_train": np.arange(0, 1, 1, dtype=float), "x_train": BORING_ARRAY, "x_true": BORING_ARRAY, "smooth_train": BORING_ARRAY,
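The dtype arguments in this final patch exist so the mock trial data matches the floating-point array aliases introduced in utils.py. A small sketch of that interaction, with the aliases simplified to float64 here (the real ones are parameterized over np.floating[NBitBase]):

    import numpy as np

    # Simplified stand-ins for the Float1D/Float2D aliases from utils.py.
    Float1D = np.ndarray[tuple[int], np.dtype[np.float64]]
    Float2D = np.ndarray[tuple[int, int], np.dtype[np.float64]]


    def final_time(t: Float1D) -> float:
        return float(t[-1])


    # np.arange(0, 1, 1) with all-integer arguments infers an integer dtype,
    # which does not satisfy a floating alias; dtype=float restores the match.
    t_train: Float1D = np.arange(0, 1, 1, dtype=float)
    x_train: Float2D = np.ones((2, 2), dtype=float)
    print(final_time(t_train), x_train.shape)  # 0.0 (2, 2)

These runtime-subscripted forms of np.ndarray and np.dtype require a recent numpy (1.22 or later is sufficient) in addition to Python 3.9+ for the builtin tuple[...] syntax.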