From 0d1958c46298607c393152e65627fc11406d3fbd Mon Sep 17 00:00:00 2001 From: Apoorva Lal Date: Wed, 28 Aug 2024 10:00:15 -0700 Subject: [PATCH] add support for sparse X (#86) * add support for csr matrix * Appease mypy about notebook. * Test against csr in test_cross_fit_estimator.py * Test against csr in test_utils.py * Adapt S-Learner to work with csr matrix. * Test against csr matrix in test_learner and test_metalearner. * fix notebook metadata * reduce sparse problem size for docs * final touches --------- Co-authored-by: kklein Co-authored-by: kklein --- CHANGELOG.rst | 8 + docs/examples/example_estimating_ates.ipynb | 23 +- docs/examples/example_sparse_inputs.ipynb | 272 ++++++++++++++++++++ docs/examples/index.rst | 1 + metalearners/_typing.py | 3 +- metalearners/_utils.py | 7 + metalearners/cross_fit_estimator.py | 19 +- metalearners/drlearner.py | 7 +- metalearners/explainer.py | 4 +- metalearners/metalearner.py | 5 +- metalearners/rlearner.py | 9 +- metalearners/slearner.py | 24 +- metalearners/utils.py | 5 +- metalearners/xlearner.py | 7 +- tests/test_cross_fit_estimator.py | 10 +- tests/test_learner.py | 9 +- tests/test_metalearner.py | 25 +- tests/test_slearner.py | 29 ++- tests/test_utils.py | 26 +- 19 files changed, 419 insertions(+), 74 deletions(-) create mode 100644 docs/examples/example_sparse_inputs.ipynb diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 56fd87c7..34eb7f81 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -7,6 +7,14 @@ Changelog ========= +0.11.0 (2024-09-xx) +------------------- + +**New features** + +* Add support for using ``scipy.sparse.csr_matrix`` as datastructure for covariates ``X``. + + 0.10.0 (2024-08-13) ------------------- diff --git a/docs/examples/example_estimating_ates.ipynb b/docs/examples/example_estimating_ates.ipynb index 0eda7327..ec88ec48 100644 --- a/docs/examples/example_estimating_ates.ipynb +++ b/docs/examples/example_estimating_ates.ipynb @@ -150,7 +150,13 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [] + }, "outputs": [], "source": [ "metalearners_dr = DRLearner(\n", @@ -558,21 +564,16 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "py311", "language": "python", "name": "python3" }, "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.9" + "version": "3.11.7" + }, + "mystnb": { + "execution_timeout": 120 } }, "nbformat": 4, diff --git a/docs/examples/example_sparse_inputs.ipynb b/docs/examples/example_sparse_inputs.ipynb new file mode 100644 index 00000000..03cad0c2 --- /dev/null +++ b/docs/examples/example_sparse_inputs.ipynb @@ -0,0 +1,272 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "(example-sparse)=\n", + "\n", + " Example: Using Sparse Covariate Matrices\n", + "=============================\n", + "\n", + "Motivation\n", + "----------\n", + "\n", + "In many applications, we want to adjust for categorical covariates with many levels. As a natural pre-processing step, this may involve one-hot-encoding the covariates, which can lead to a high-dimensional covariate matrix, which is typically very sparse. Many scikit-style learners accept (scipy's) sparse matrices as input, which allows us to use them for treatment effect estimation as well. 
\n", + "\n", + "Example\n", + "-------" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import time, psutil, os, gc\n", + "import numpy as np\n", + "import pandas as pd\n", + "import scipy as sp\n", + "\n", + "from sklearn.dummy import DummyRegressor\n", + "from sklearn.preprocessing import OneHotEncoder\n", + "from sklearn.model_selection import train_test_split\n", + "from sklearn.metrics import mean_squared_error, r2_score\n", + "\n", + "from lightgbm import LGBMRegressor, LGBMClassifier\n", + "from metalearners import DRLearner\n", + "\n", + "# This is required for when nbconvert converts the cell-magic to regular function calls.\n", + "from IPython import get_ipython" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def get_memory_usage():\n", + " process = psutil.Process(os.getpid())\n", + " return process.memory_info().rss / 1024 / 1024 # in MB\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Causal Inference\n", + "\n", + "### DRLearner\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We generate some data where X comprises of 100 categorical variables with 1000 possible levels. Naively one-hot-encoding this data produces a very large matrix with many zeroes, which is an ideal application of `scipy.sparse.csr_matrix`. We then use the `DRLearner` to estimate the treatment effect. \n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def generate_causal_data(\n", + " n_samples=100_000,\n", + " n_categories=500,\n", + " n_features=100,\n", + " tau_magnitude=1.0,\n", + "):\n", + " ######################################################################\n", + " # Generate covariate matrix X\n", + " X = np.random.randint(0, n_categories, size=(n_samples, n_features))\n", + " ######################################################################\n", + " # Generate potential outcome y0\n", + " y0 = np.zeros(n_samples)\n", + " # Select a few features for main effects\n", + " main_effect_features = np.random.choice(n_features, 3, replace=False)\n", + " # Create main effects - fully dense\n", + " for i in main_effect_features:\n", + " category_effects = np.random.normal(0, 4, n_categories)\n", + " y0 += category_effects[X[:, i]]\n", + " # Select a couple of feature pairs for interaction effects\n", + " interaction_pairs = [\n", + " (i, j) for i in range(n_features) for j in range(i + 1, n_features)\n", + " ]\n", + " selected_interactions = np.random.choice(len(interaction_pairs), 2, replace=False)\n", + " # Create interaction effects\n", + " for idx in selected_interactions:\n", + " i, j = interaction_pairs[idx]\n", + " interaction_effect = np.random.choice(\n", + " [-1, 0, 1], size=(n_categories, n_categories), p=[0.25, 0.5, 0.25]\n", + " )\n", + " y0 += interaction_effect[X[:, i], X[:, j]]\n", + " # Normalize y0\n", + " y0 = (y0 - np.mean(y0)) / np.std(y0)\n", + " y0 += np.random.normal(0, 0.1, n_samples)\n", + " ######################################################################\n", + " # Generate treatment assignment W\n", + " propensity_score = np.zeros(n_samples)\n", + " for i in main_effect_features:\n", + " category_effects = np.random.normal(0, 4, n_categories)\n", + " propensity_score += category_effects[X[:, i]]\n", + " # same interactions enter pscore\n", + " # Create interaction effects\n", + " for idx in 
selected_interactions:\n", + " i, j = interaction_pairs[idx]\n", + " interaction_effect = np.random.choice(\n", + " [-1, 0, 1], size=(n_categories, n_categories), p=[0.25, 0.5, 0.25]\n", + " )\n", + " propensity_score += interaction_effect[X[:, i], X[:, j]]\n", + " # Convert to probabilities using logistic function\n", + " propensity_score = sp.special.expit(propensity_score)\n", + " # Generate binary treatment\n", + " W = np.random.binomial(1, propensity_score)\n", + " ######################################################################\n", + " # Generate treatment effect\n", + " tau = tau_magnitude * np.ones(n_samples)\n", + " # Generate final outcome\n", + " Y = y0 + W * tau\n", + " return X, W, Y, tau, propensity_score\n", + "\n", + "\n", + "X, W, Y, tau, propensity_score = generate_causal_data(\n", + " n_samples=1000, tau_magnitude=1.0\n", + ")\n", + "Xdf = pd.DataFrame(X)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# sparse and dense X matrices\n", + "e1 = OneHotEncoder(\n", + " sparse_output=True\n", + ") # onehot encoder generates sparse output automatically\n", + "\n", + "X_csr = e1.fit_transform(X)\n", + "X_np = pd.get_dummies(\n", + " Xdf, columns=Xdf.columns\n", + ").values # dense onehot encoding with pandas" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(f\"\\nSparse data memory: {X_csr.data.nbytes / 1024 / 1024:.2f}MB\")\n", + "print(f\"Dense data memory: {X_np.nbytes / 1024 / 1024:.2f}MB\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As expected, the memory footprint of the sparse matrix is considerably smaller than the dense matrix. \n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def fit_drlearner_wrapper(X, name):\n", + " start_memory = get_memory_usage()\n", + " start_time = time.time()\n", + " metalearners_dr = DRLearner(\n", + " nuisance_model_factory=LGBMRegressor,\n", + " treatment_model_factory=DummyRegressor,\n", + " propensity_model_factory=LGBMClassifier,\n", + " is_classification=False,\n", + " n_variants=2,\n", + " nuisance_model_params={\"verbose\": -1},\n", + " propensity_model_params={\"verbose\": -1},\n", + " )\n", + "\n", + " metalearners_dr.fit_all_nuisance(\n", + " X=X,\n", + " y=Y,\n", + " w=W,\n", + " )\n", + " metalearners_est = metalearners_dr.average_treatment_effect(\n", + " X=X,\n", + " y=Y,\n", + " w=W,\n", + " is_oos=False,\n", + " )\n", + " end_time = time.time()\n", + " end_memory = get_memory_usage()\n", + " runtime = end_time - start_time\n", + " memory_used = end_memory - start_memory\n", + " print(f\"{name} data - Runtime: {runtime:.2f}s, Memory used: {memory_used:.2f}MB\")\n", + " print(metalearners_est)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`scipy.sparse.csr_matrix` input" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fit_drlearner_wrapper(X_csr, \"Sparse\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`np.ndarray` input" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fit_drlearner_wrapper(X_np, \"Dense\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "py311", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": 
"ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.7" + }, + "mystnb": { + "execution_timeout": 120 + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/examples/index.rst b/docs/examples/index.rst index b91cea15..81401fdc 100644 --- a/docs/examples/index.rst +++ b/docs/examples/index.rst @@ -16,3 +16,4 @@ Examples Estimating CATEs for survival analysis What if I know the propensity score? Converting a MetaLearner to ONNX + Using Sparse Covariate Matrices diff --git a/metalearners/_typing.py b/metalearners/_typing.py index 95b66b8b..10a6f4d7 100644 --- a/metalearners/_typing.py +++ b/metalearners/_typing.py @@ -6,6 +6,7 @@ import numpy as np import pandas as pd +import scipy.sparse as sps PredictMethod = Literal["predict", "predict_proba"] @@ -21,7 +22,7 @@ # ruff is not happy about the usage of Union. Vector = Union[pd.Series, np.ndarray] # noqa -Matrix = Union[pd.DataFrame, np.ndarray] # noqa +Matrix = Union[pd.DataFrame, np.ndarray, sps.csr_matrix] # noqa class _ScikitModel(Protocol): diff --git a/metalearners/_utils.py b/metalearners/_utils.py index 2337c421..a5c02f37 100644 --- a/metalearners/_utils.py +++ b/metalearners/_utils.py @@ -9,6 +9,7 @@ import numpy as np import pandas as pd +import scipy from sklearn.base import check_array, check_X_y, is_classifier, is_regressor from sklearn.ensemble import ( HistGradientBoostingClassifier, @@ -24,6 +25,12 @@ default_rng = np.random.default_rng() +def safe_len(X: Matrix) -> int: + if scipy.sparse.issparse(X): + return X.shape[0] + return len(X) + + def index_matrix(matrix: Matrix, rows: Vector) -> Matrix: """Subselect certain rows from a matrix.""" if isinstance(rows, pd.Series): diff --git a/metalearners/cross_fit_estimator.py b/metalearners/cross_fit_estimator.py index aa112c03..568ca274 100644 --- a/metalearners/cross_fit_estimator.py +++ b/metalearners/cross_fit_estimator.py @@ -16,7 +16,12 @@ from typing_extensions import Self from metalearners._typing import Matrix, OosMethod, PredictMethod, SplitIndices, Vector -from metalearners._utils import _ScikitModel, index_matrix, validate_number_positive +from metalearners._utils import ( + _ScikitModel, + index_matrix, + safe_len, + validate_number_positive, +) OVERALL: OosMethod = "overall" MEDIAN: OosMethod = "median" @@ -157,7 +162,7 @@ def fit( (train_indices, test_indices) tuples indicating how to split the data at hand into train and test/estimation sets for different folds. 
""" - _validate_data_match_prior_split(len(X), self._test_indices) + _validate_data_match_prior_split(safe_len(X), self._test_indices) if fit_params is None: fit_params = dict() @@ -215,13 +220,13 @@ def _n_outputs(self, method: PredictMethod) -> int: def _predict_all(self, X: Matrix, method: PredictMethod) -> np.ndarray: n_outputs = self._n_outputs(method) predictions = self._initialize_prediction_tensor( - n_observations=len(X), + n_observations=safe_len(X), n_outputs=n_outputs, n_folds=self.n_folds, ) for i, estimator in enumerate(self._estimators): predictions[:, :, i] = np.reshape( - getattr(estimator, method)(X), (len(X), n_outputs) + getattr(estimator, method)(X), (safe_len(X), n_outputs) ) if n_outputs == 1: return predictions[:, 0, :] @@ -242,15 +247,15 @@ def _predict_in_sample( ) -> np.ndarray: if not self._test_indices: raise ValueError() - if len(X) != sum(len(fold) for fold in self._test_indices): + if safe_len(X) != sum(len(fold) for fold in self._test_indices): raise ValueError( "Trying to predict in-sample on data that is unlike data encountered in training. " f"Training data included {sum(len(fold) for fold in self._test_indices)} " - f"observations while prediction data includes {len(X)} observations." + f"observations while prediction data includes {safe_len(X)} observations." ) n_outputs = self._n_outputs(method) predictions = self._initialize_prediction_tensor( - n_observations=len(X), + n_observations=safe_len(X), n_outputs=n_outputs, n_folds=1, ) diff --git a/metalearners/drlearner.py b/metalearners/drlearner.py index 7bff6fb5..6c03ab36 100644 --- a/metalearners/drlearner.py +++ b/metalearners/drlearner.py @@ -28,6 +28,7 @@ get_predict_proba, index_matrix, infer_input_dict, + safe_len, validate_valid_treatment_variant_not_control, warning_experimental_feature, ) @@ -253,7 +254,7 @@ def predict( oos_method: OosMethod = OVERALL, ) -> np.ndarray: n_outputs = 2 if self.is_classification else 1 - estimates = np.zeros((len(X), self.n_variants - 1, n_outputs)) + estimates = np.zeros((safe_len(X), self.n_variants - 1, n_outputs)) for treatment_variant in range(1, self.n_variants): estimates_variant = self.predict_treatment( X, @@ -365,7 +366,7 @@ def average_treatment_effect( raise ValueError( "The nuisance models need to be fitted before computing the treatment effect." ) - gamma_matrix = np.zeros((len(X), self.n_variants - 1)) + gamma_matrix = np.zeros((safe_len(X), self.n_variants - 1)) for treatment_variant in range(1, self.n_variants): gamma_matrix[:, treatment_variant - 1] = self._pseudo_outcome( X=X, @@ -375,7 +376,7 @@ def average_treatment_effect( is_oos=is_oos, ) treatment_effect = gamma_matrix.mean(axis=0) - standard_error = gamma_matrix.std(axis=0) / np.sqrt(len(X)) + standard_error = gamma_matrix.std(axis=0) / np.sqrt(safe_len(X)) return treatment_effect, standard_error def _pseudo_outcome( diff --git a/metalearners/explainer.py b/metalearners/explainer.py index 721514c3..8449bc11 100644 --- a/metalearners/explainer.py +++ b/metalearners/explainer.py @@ -8,7 +8,7 @@ import shap from metalearners._typing import Matrix, _ScikitModel -from metalearners._utils import simplify_output_2d +from metalearners._utils import safe_len, simplify_output_2d from metalearners.metalearner import Params @@ -59,7 +59,7 @@ def from_estimates( The ``cate_estimates`` should be the raw outcome of a MetaLearner with 3 dimensions and should not be simplified. 
""" - if len(X) != len(cate_estimates) or len(X) == 0: + if safe_len(X) != len(cate_estimates) or safe_len(X) == 0: raise ValueError( "X and cate_estimates should contain the same number of observations " "and not be empty." diff --git a/metalearners/metalearner.py b/metalearners/metalearner.py index 5aca3169..1ec53c90 100644 --- a/metalearners/metalearner.py +++ b/metalearners/metalearner.py @@ -30,6 +30,7 @@ ONNX_PROBABILITIES_OUTPUTS, default_metric, index_matrix, + safe_len, validate_model_and_predict_method, validate_number_positive, ) @@ -120,7 +121,7 @@ def _filter_x_columns(X: Matrix, feature_set: Features) -> Matrix: if feature_set is None: X_filtered = X elif len(feature_set) == 0: - X_filtered = np.ones((len(X), 1)) + X_filtered = np.ones((safe_len(X), 1)) else: if isinstance(X, pd.DataFrame): X_filtered = X[list(feature_set)] @@ -1347,7 +1348,7 @@ def predict_conditional_average_outcomes( "typically set during fitting, is None." ) # TODO: Consider multiprocessing - n_obs = len(X) + n_obs = safe_len(X) nuisance_tensors = self._nuisance_tensors(n_obs) conditional_average_outcomes_list = nuisance_tensors[VARIANT_OUTCOME_MODEL] diff --git a/metalearners/rlearner.py b/metalearners/rlearner.py index b86e9764..95cc237e 100644 --- a/metalearners/rlearner.py +++ b/metalearners/rlearner.py @@ -20,6 +20,7 @@ get_predict_proba, index_matrix, infer_input_dict, + safe_len, validate_all_vectors_same_index, validate_valid_treatment_variant_not_control, warning_experimental_feature, @@ -277,7 +278,7 @@ def predict( oos_method: OosMethod = OVERALL, ) -> np.ndarray: n_outputs = 2 if self.is_classification else 1 - tau_hat = np.zeros((len(X), self.n_variants - 1, n_outputs)) + tau_hat = np.zeros((safe_len(X), self.n_variants - 1, n_outputs)) if is_oos: @@ -298,7 +299,7 @@ def predict( variant_estimates = np.stack( [-variant_estimates, variant_estimates], axis=-1 ) - variant_estimates = variant_estimates.reshape(len(X), n_outputs) + variant_estimates = variant_estimates.reshape(safe_len(X), n_outputs) tau_hat[:, treatment_variant - 1, :] = variant_estimates return tau_hat @@ -486,7 +487,7 @@ def _pseudo_outcome_and_weights( constant ``epsilon`` to the denominator in order to avoid numerical problems. """ if mask is None: - mask = np.ones(len(X), dtype=bool) + mask = np.ones(safe_len(X), dtype=bool) validate_valid_treatment_variant_not_control(treatment_variant, self.n_variants) @@ -560,7 +561,7 @@ def predict_conditional_average_outcomes( where :math:`K` is the number of treatment variants. """ - n_obs = len(X) + n_obs = safe_len(X) cate_estimates = self.predict( X=X, diff --git a/metalearners/slearner.py b/metalearners/slearner.py index 7bda6a13..bd4395d3 100644 --- a/metalearners/slearner.py +++ b/metalearners/slearner.py @@ -6,6 +6,7 @@ import numpy as np import pandas as pd +from scipy.sparse import csr_matrix, hstack from typing_extensions import Self from metalearners._typing import ( @@ -21,6 +22,7 @@ from metalearners._utils import ( convert_treatment, get_one, + safe_len, supports_categoricals, ) from metalearners.cross_fit_estimator import OVERALL, CrossFitEstimator @@ -56,21 +58,29 @@ def _append_treatment_to_covariates( # names are integers and some strings. 
X_with_w.columns = X_with_w.columns.astype(str) return X_with_w + elif isinstance(X, csr_matrix): + return hstack((X, w_dummies), format="csr") else: return np.concatenate([X, w_dummies], axis=1) + # This is necessary as each model works differently with categoricals, + # in some you need to specify them on instantiation while some others on + # fitting. This solutions converts it to a pd.DataFrame as most of the models + # have some "automatic" detection of categorical features based on pandas + # dtypes. Theoretically it would be possible to get around this conversion + # but it would require loads of model specific code. if isinstance(X, np.ndarray): - # This is necessary as each model works differently with categoricals, - # in some you need to specify them on instantiation while some others on - # fitting. This solutions converts it to a pd.DataFrame as most of the models - # have some "automatic" detection of categorical features based on pandas - # dtypes. Theoretically it would be possible to get around this conversion - # but it would require loads of model specific code. warnings.warn( "Converting the input covariates X from np.ndarray to a " f"pd.DataFrame as the {_BASE_MODEL} supports categorical variables." ) X = pd.DataFrame(X, copy=True) + elif isinstance(X, csr_matrix): + warnings.warn( + "Converting the input covariates X from a scipy csr_matrix to a " + f"pd.DataFrame as the {_BASE_MODEL} supports categorical variables." + ) + X = pd.DataFrame.sparse.from_spmatrix(X) X_with_w = pd.concat([X, pd.Series(w, dtype="category", name="treatment")], axis=1) X_with_w.columns = X_with_w.columns.astype(str) @@ -231,7 +241,7 @@ def evaluate( def predict_conditional_average_outcomes( self, X: Matrix, is_oos: bool, oos_method: OosMethod = OVERALL ) -> np.ndarray: - n_obs = len(X) + n_obs = safe_len(X) conditional_average_outcomes_list = [] for treatment_variant in range(self.n_variants): diff --git a/metalearners/utils.py b/metalearners/utils.py index 587bef67..7f8ece7f 100644 --- a/metalearners/utils.py +++ b/metalearners/utils.py @@ -9,6 +9,7 @@ from typing_extensions import Self from metalearners._typing import Matrix, Vector +from metalearners._utils import safe_len from metalearners.drlearner import DRLearner from metalearners.metalearner import MetaLearner from metalearners.rlearner import RLearner @@ -104,4 +105,6 @@ def predict(self, X: Matrix) -> np.ndarray[Any, Any]: return np.argmax(self.predict_proba(X), axis=1) def predict_proba(self, X: pd.DataFrame) -> np.ndarray[Any, Any]: - return np.full((len(X), 2), [1 - self.propensity_score, self.propensity_score]) + return np.full( + (safe_len(X), 2), [1 - self.propensity_score, self.propensity_score] + ) diff --git a/metalearners/xlearner.py b/metalearners/xlearner.py index 28bee892..016a726e 100644 --- a/metalearners/xlearner.py +++ b/metalearners/xlearner.py @@ -18,6 +18,7 @@ index_matrix, infer_input_dict, infer_probabilities_output, + safe_len, validate_valid_treatment_variant_not_control, warning_experimental_feature, ) @@ -231,7 +232,7 @@ def predict( "typically set during fitting, is None." 
) n_outputs = 2 if self.is_classification else 1 - tau_hat = np.zeros((len(X), self.n_variants - 1, n_outputs)) + tau_hat = np.zeros((safe_len(X), self.n_variants - 1, n_outputs)) # Propensity score model is always a classifier so we can't use MEDIAN propensity_score_oos = OVERALL if oos_method == MEDIAN else oos_method propensity_score = self.predict_nuisance( @@ -266,8 +267,8 @@ def predict( oos_method=oos_method, ) else: - tau_hat_treatment = np.zeros(len(X)) - tau_hat_control = np.zeros(len(X)) + tau_hat_treatment = np.zeros(safe_len(X)) + tau_hat_control = np.zeros(safe_len(X)) tau_hat_treatment[non_treatment_variant_indices] = ( self.predict_treatment( diff --git a/tests/test_cross_fit_estimator.py b/tests/test_cross_fit_estimator.py index 4165a06c..eb709735 100644 --- a/tests/test_cross_fit_estimator.py +++ b/tests/test_cross_fit_estimator.py @@ -6,6 +6,7 @@ import numpy as np import pytest from lightgbm import LGBMClassifier, LGBMRegressor +from scipy.sparse import csr_matrix from sklearn.base import is_classifier, is_regressor from sklearn.linear_model import LinearRegression, LogisticRegression from sklearn.metrics import accuracy_score, log_loss @@ -24,10 +25,10 @@ @pytest.mark.parametrize("predict_proba", [True, False]) @pytest.mark.parametrize("is_oos", [True, False]) @pytest.mark.parametrize("oos_method", ["overall", "mean", "median"]) -@pytest.mark.parametrize("use_np", [True, False]) +@pytest.mark.parametrize("backend", ["np", "pd", "csr"]) @pytest.mark.parametrize("pass_cv", [True, False]) def test_crossfitestimator_oos_smoke( - mindset_data, rng, use_clf, predict_proba, is_oos, oos_method, use_np, pass_cv + mindset_data, rng, use_clf, predict_proba, is_oos, oos_method, backend, pass_cv ): if not use_clf and predict_proba: pytest.skip() @@ -50,9 +51,12 @@ def test_crossfitestimator_oos_smoke( # Arbitrary cut-off y = y > 0.8 - if use_np: + if backend == "np": X = X.to_numpy() y = y.to_numpy() + elif backend == "csr": + X = csr_matrix(df.values) + y = y.to_numpy() cfe = CrossFitEstimator( n_folds=5, diff --git a/tests/test_learner.py b/tests/test_learner.py index 01d33754..afe26342 100644 --- a/tests/test_learner.py +++ b/tests/test_learner.py @@ -5,6 +5,7 @@ import pandas as pd import pytest from lightgbm import LGBMClassifier, LGBMRegressor +from scipy.sparse import csr_matrix from sklearn.linear_model import LinearRegression, LogisticRegression from sklearn.metrics import make_scorer, root_mean_squared_error from sklearn.model_selection import train_test_split @@ -939,16 +940,18 @@ def test_model_reusage(outcome_kind, request): ), ], ) -@pytest.mark.parametrize("use_pandas", [False, True]) -def test_evaluate_feature_set_smoke(metalearner_factory, feature_set, rng, use_pandas): +@pytest.mark.parametrize("backend", ["np", "pd", "csr"]) +def test_evaluate_feature_set_smoke(metalearner_factory, feature_set, rng, backend): n_samples = 100 X = rng.standard_normal((n_samples, 5)) y = rng.standard_normal(n_samples) w = rng.integers(0, 2, n_samples) - if use_pandas: + if backend == "pd": X = pd.DataFrame(X) y = pd.Series(y) w = pd.Series(w) + elif backend == "csr": + X = csr_matrix(X) ml = metalearner_factory( n_variants=2, diff --git a/tests/test_metalearner.py b/tests/test_metalearner.py index d9ac1f68..133d3d7f 100644 --- a/tests/test_metalearner.py +++ b/tests/test_metalearner.py @@ -9,6 +9,7 @@ import pandas as pd import pytest from lightgbm import LGBMClassifier, LGBMRegressor +from scipy.sparse import csr_matrix from shap import TreeExplainer, summary_plot from sklearn.base 
import BaseEstimator
 from sklearn.linear_model import LinearRegression, LogisticRegression
@@ -480,8 +481,8 @@ def test_combine_propensity_and_nuisance_specs(
         ),
     ],
 )
-@pytest.mark.parametrize("use_pandas", [False, True])
-def test_feature_set(feature_set, expected_n_features, use_pandas, rng):
+@pytest.mark.parametrize("backend", ["np", "pd", "csr"])
+def test_feature_set(feature_set, expected_n_features, backend, rng):
     ml = _TestMetaLearner(
         nuisance_model_factory=LGBMRegressor,
         is_classification=False,
@@ -495,10 +496,12 @@ def test_feature_set(feature_set, expected_n_features, use_pandas, rng):
     X = rng.standard_normal((sample_size, n_features))
     y = rng.standard_normal(sample_size)
     w = rng.integers(0, 2, sample_size)
-    if use_pandas:
+    if backend == "pd":
         X = pd.DataFrame(X)
         y = pd.Series(y)
         w = pd.Series(w)
+    elif backend == "csr":
+        X = csr_matrix(X)

     ml.fit(X, y, w)
     for model_kind, model_kind_list in ml._nuisance_models.items():
@@ -1078,15 +1081,17 @@ def test_n_jobs_base_learners(implementation, rng):
     "implementation",
     [TLearner, SLearner, XLearner, RLearner, DRLearner],
 )
-@pytest.mark.parametrize("use_pandas", [False, True])
-def test_validate_outcome_one_class(implementation, use_pandas, rng):
+@pytest.mark.parametrize("backend", ["np", "pd", "csr"])
+def test_validate_outcome_one_class(implementation, backend, rng):
     X = rng.standard_normal((10, 2))
     y = np.zeros(10)
     w = rng.integers(0, 2, 10)
-    if use_pandas:
+    if backend == "pd":
         X = pd.DataFrame(X)
         y = pd.Series(y)
         w = pd.Series(w)
+    elif backend == "csr":
+        X = csr_matrix(X)

     ml = implementation(
         True,
@@ -1106,15 +1111,17 @@ def test_validate_outcome_one_class(implementation, use_pandas, rng):
     "implementation",
     [TLearner, SLearner, XLearner, RLearner, DRLearner],
 )
-@pytest.mark.parametrize("use_pandas", [False, True])
-def test_validate_outcome_different_classes(implementation, use_pandas, rng):
+@pytest.mark.parametrize("backend", ["np", "pd", "csr"])
+def test_validate_outcome_different_classes(implementation, backend, rng):
     X = rng.standard_normal((4, 2))
     y = np.array([0, 1, 0, 0])
     w = np.array([0, 0, 1, 1])
-    if use_pandas:
+    if backend == "pd":
         X = pd.DataFrame(X)
         y = pd.Series(y)
         w = pd.Series(w)
+    elif backend == "csr":
+        X = csr_matrix(X)

     ml = implementation(
         True,
diff --git a/tests/test_slearner.py b/tests/test_slearner.py
index 37f6086c..15f0eaa1 100644
--- a/tests/test_slearner.py
+++ b/tests/test_slearner.py
@@ -5,6 +5,7 @@
 import pandas as pd
 import pytest
 from lightgbm import LGBMRegressor
+from scipy.sparse import csr_matrix
 from sklearn.linear_model import LinearRegression

 from metalearners.slearner import SLearner, _append_treatment_to_covariates
@@ -31,16 +32,20 @@ def test_feature_set_doesnt_raise(rng):
 @pytest.mark.parametrize(
     "model, supports_categoricals", [(LinearRegression, False), (LGBMRegressor, True)]
 )
-@pytest.mark.parametrize("use_pd", [False, True])
+@pytest.mark.parametrize("backend", ["np", "pd", "csr"])
 def test_append_treatment_to_covariates(
     model,
     supports_categoricals,
-    use_pd,
+    backend,
     sample_size,
     request,
 ):
-    dataset_name = "mixed" if use_pd else "numerical"
+    dataset_name = "mixed" if backend == "pd" else "numerical"
     covariates, _, _ = request.getfixturevalue(f"{dataset_name}_covariates")
+
+    if backend == "csr":
+        covariates = csr_matrix(covariates)
+
     treatment = np.array([0] * sample_size)
     n_variants = 4
     X_with_w = _append_treatment_to_covariates(
@@ -52,20 +57,28 @@ def test_append_treatment_to_covariates(
         list(range(n_variants))
     )

-    if not use_pd and not supports_categoricals:
-        assert isinstance(X_with_w, np.ndarray)
+    if backend in ["np", "csr"] and not supports_categoricals:
+        if backend == "np":
+            assert isinstance(X_with_w, np.ndarray)
+        elif backend == "csr":
+            assert isinstance(X_with_w, csr_matrix)
         assert (
             (
                 X_with_w[:, -3:]
-                == pd.get_dummies(treatment_pd, dtype=int, drop_first=True)
+                == pd.get_dummies(treatment_pd, dtype=int, drop_first=True).values
             )
             .all()
             .all()
         )
-        assert np.all(X_with_w[:, :-3] == covariates)
+        assert (X_with_w[:, :-3] != covariates).sum() < 1
     else:
         assert isinstance(X_with_w, pd.DataFrame)
-        covariates_pd = pd.DataFrame(covariates) if not use_pd else covariates
+        if backend == "np":
+            covariates_pd = pd.DataFrame(covariates)
+        elif backend == "csr":
+            covariates_pd = pd.DataFrame.sparse.from_spmatrix(covariates)
+        else:
+            covariates_pd = covariates
         covariates_pd.columns = covariates_pd.columns.astype(str)
         if not supports_categoricals:
             assert X_with_w[["treatment_1", "treatment_2", "treatment_3"]].equals(
diff --git a/tests/test_utils.py b/tests/test_utils.py
index d88b5d14..f634b772 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -5,6 +5,7 @@
 import pandas as pd
 import pytest
 from lightgbm import LGBMRegressor
+from scipy.sparse import csr_matrix

 from metalearners.metalearner import MetaLearner
 from metalearners.utils import (
@@ -59,8 +60,8 @@ def test_simplify_output_raises(input):
         simplify_output(input)


-@pytest.mark.parametrize("use_pd", [True, False])
-def test_fixed_binary_propensity(use_pd):
+@pytest.mark.parametrize("backend", ["np", "pd", "csr"])
+def test_fixed_binary_propensity(backend):
     propensity_score = 0.3
     dominant_class = propensity_score >= 0.5

@@ -69,19 +70,24 @@ def test_fixed_binary_propensity(use_pd):
     n_samples = 5
     X_train = np.ones((n_samples, 5))
     y_train = np.ones(n_samples)
-    if use_pd:
+
+    n_test_samples = 3
+    X_test = np.zeros((n_test_samples, 5))
+
+    expected_result = np.array(np.ones(n_test_samples) * dominant_class)
+
+    if backend == "pd":
         X_train = pd.DataFrame(X_train)
         y_train = pd.Series(y_train)
+        X_test = pd.DataFrame(X_test)
+    elif backend == "csr":
+        X_train = csr_matrix(X_train)
+        X_test = csr_matrix(X_test)

     model.fit(X_train, y_train)
-
-    n_test_samples = 3
-    X_test = np.zeros(n_test_samples)
-
     class_predictions = model.predict(X_test)
-    assert np.array_equal(
-        class_predictions, np.array(np.ones(n_test_samples) * dominant_class)
-    )
+
+    assert np.array_equal(class_predictions, expected_result)

     probability_estimates = model.predict_proba(X_test)
     assert np.array_equal(
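For reference, a minimal sketch (not part of the diff) of why the new `safe_len` helper in metalearners/_utils.py is needed: `len()` is not defined for a `scipy.sparse.csr_matrix`, so the helper falls back to `shape[0]`. The constructed matrices below are illustrative only and assume the patched package is installed.

    import numpy as np
    import pandas as pd
    from scipy.sparse import csr_matrix

    from metalearners._utils import safe_len  # helper added by this patch

    # Three equivalent 3x2 covariate matrices in the supported backends.
    X_np = np.zeros((3, 2))
    X_pd = pd.DataFrame(X_np)
    X_csr = csr_matrix(X_np)

    # len() works for ndarrays and DataFrames but raises a TypeError for a
    # csr_matrix, which is why safe_len dispatches to X.shape[0] for sparse input.
    assert safe_len(X_np) == safe_len(X_pd) == safe_len(X_csr) == 3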
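Likewise, a standalone sketch (not part of the diff; variable names are illustrative) of what the csr branch added to `_append_treatment_to_covariates` in metalearners/slearner.py does: the treatment is dummy-encoded and appended column-wise with `scipy.sparse.hstack`, keeping the result in csr format.

    import numpy as np
    import pandas as pd
    from scipy.sparse import csr_matrix, hstack

    rng = np.random.default_rng(0)
    X = csr_matrix(rng.integers(0, 2, size=(5, 3)))  # sparse covariates
    w = np.array([0, 1, 2, 1, 0])                    # three treatment variants

    # Dummy-encode the treatment, dropping the first level as the patched function does.
    w_dummies = pd.get_dummies(pd.Series(w, dtype="category"), dtype=int, drop_first=True)

    # Append the treatment indicators to the sparse covariates, staying in csr format.
    X_with_w = hstack((X, w_dummies.to_numpy()), format="csr")
    assert isinstance(X_with_w, csr_matrix)
    assert X_with_w.shape == (5, 3 + 2)  # covariates plus (n_variants - 1) dummy columns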
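Finally, an end-to-end sketch condensed from the new docs/examples/example_sparse_inputs.ipynb: one-hot encoding high-cardinality categoricals yields a `scipy.sparse.csr_matrix`, which can now be passed directly as `X`. The synthetic data and LightGBM settings below are illustrative; the estimator calls mirror those in the notebook.

    import numpy as np
    from lightgbm import LGBMClassifier, LGBMRegressor
    from sklearn.dummy import DummyRegressor
    from sklearn.preprocessing import OneHotEncoder

    from metalearners import DRLearner

    rng = np.random.default_rng(42)
    n_samples, n_features, n_categories = 1_000, 10, 50

    # High-cardinality categorical covariates, a binary treatment, a continuous outcome.
    X_raw = rng.integers(0, n_categories, size=(n_samples, n_features))
    w = rng.integers(0, 2, size=n_samples)
    y = rng.normal(size=n_samples) + w  # constant treatment effect of 1 by construction

    # OneHotEncoder with sparse_output=True returns a csr_matrix.
    X_csr = OneHotEncoder(sparse_output=True).fit_transform(X_raw)

    drlearner = DRLearner(
        nuisance_model_factory=LGBMRegressor,
        treatment_model_factory=DummyRegressor,
        propensity_model_factory=LGBMClassifier,
        is_classification=False,
        n_variants=2,
        nuisance_model_params={"verbose": -1},
        propensity_model_params={"verbose": -1},
    )
    drlearner.fit_all_nuisance(X=X_csr, y=y, w=w)
    ate, standard_error = drlearner.average_treatment_effect(X=X_csr, y=y, w=w, is_oos=False)
    print(ate, standard_error)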