Skip to content

Commit

Permalink
Add adaptive clipping parameter to init and test
Browse files Browse the repository at this point in the history
  • Loading branch information
FrancescMartiEscofetQC committed Jul 10, 2024
1 parent 8fa0c10 commit af0b641
Show file tree
Hide file tree
Showing 2 changed files with 66 additions and 4 deletions.
50 changes: 46 additions & 4 deletions metalearners/drlearner.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,16 @@
from joblib import Parallel, delayed
from typing_extensions import Self

from metalearners._typing import Matrix, OosMethod, Scoring, Vector
from metalearners._typing import (
Features,
Matrix,
ModelFactory,
OosMethod,
Params,
Scoring,
Vector,
_ScikitModel,
)
from metalearners._utils import (
clip_element_absolute_value_to_epsilon,
get_one,
Expand All @@ -15,7 +24,7 @@
index_matrix,
validate_valid_treatment_variant_not_control,
)
from metalearners.cross_fit_estimator import OVERALL
from metalearners.cross_fit_estimator import OVERALL, CrossFitEstimator
from metalearners.metalearner import (
NUISANCE,
PROPENSITY_MODEL,
Expand Down Expand Up @@ -82,6 +91,40 @@ def _supports_multi_treatment(cls) -> bool:
def _supports_multi_class(cls) -> bool:
return False

def __init__(
    self,
    is_classification: bool,
    n_variants: int,
    nuisance_model_factory: ModelFactory | None = None,
    treatment_model_factory: ModelFactory | None = None,
    propensity_model_factory: type[_ScikitModel] | None = None,
    nuisance_model_params: Params | dict[str, Params] | None = None,
    treatment_model_params: Params | dict[str, Params] | None = None,
    propensity_model_params: Params | None = None,
    fitted_nuisance_models: dict[str, list[CrossFitEstimator]] | None = None,
    fitted_propensity_model: CrossFitEstimator | None = None,
    feature_set: Features | dict[str, Features] | None = None,
    n_folds: int | dict[str, int] = 10,
    random_state: int | None = None,
    adaptive_clipping: bool = False,
):
    """Initialize the DR-Learner.

    All parameters except ``adaptive_clipping`` are forwarded unchanged to the
    base-class constructor. ``adaptive_clipping`` toggles the adaptive clipping
    of propensity estimates when computing the pseudo outcome; it defaults to
    ``False`` to preserve the previous behavior.
    """
    # Delegate shared initialization to the parent metalearner, passing the
    # arguments through in signature order.
    super().__init__(
        is_classification=is_classification,
        n_variants=n_variants,
        nuisance_model_factory=nuisance_model_factory,
        treatment_model_factory=treatment_model_factory,
        propensity_model_factory=propensity_model_factory,
        nuisance_model_params=nuisance_model_params,
        treatment_model_params=treatment_model_params,
        propensity_model_params=propensity_model_params,
        fitted_nuisance_models=fitted_nuisance_models,
        fitted_propensity_model=fitted_propensity_model,
        feature_set=feature_set,
        n_folds=n_folds,
        random_state=random_state,
    )
    # Stored as a public attribute; read later when computing pseudo outcomes.
    self.adaptive_clipping = adaptive_clipping

def fit(
self,
X: Matrix,
Expand Down Expand Up @@ -268,7 +311,6 @@ def _pseudo_outcome(
is_oos: bool,
oos_method: OosMethod = OVERALL,
epsilon: float = _EPSILON,
adaptive_clipping: bool = False,
) -> np.ndarray:
"""Compute the DR-Learner pseudo outcome."""
validate_valid_treatment_variant_not_control(treatment_variant, self.n_variants)
Expand Down Expand Up @@ -318,7 +360,7 @@ def _pseudo_outcome(
- y0_estimate
)

if adaptive_clipping:
if self.adaptive_clipping:
t_pseudo_outcome = y1_estimate - y0_estimate
pseudo_outcome = np.where(
propensity_estimates.min(axis=1) < epsilon,
Expand Down
20 changes: 20 additions & 0 deletions tests/test_drlearner.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
# Copyright (c) QuantCo 2024-2024
# SPDX-License-Identifier: BSD-3-Clause

from sklearn.linear_model import LinearRegression, LogisticRegression

from metalearners.drlearner import DRLearner


def test_adaptive_clipping_smoke(dummy_dataset):
    """Smoke test: a DRLearner with ``adaptive_clipping=True`` fits without error.

    Uses keyword arguments throughout so the meaning of each constructor
    argument is visible at the call site, and asserts on the fitted learner so
    a silently ignored ``adaptive_clipping`` flag would be caught.
    """
    X, y, w = dummy_dataset
    ml = DRLearner(
        is_classification=False,
        n_variants=2,
        nuisance_model_factory=LinearRegression,
        treatment_model_factory=LinearRegression,
        propensity_model_factory=LogisticRegression,
        n_folds=2,
        adaptive_clipping=True,
    )
    ml.fit(X, y, w)
    # The flag must survive construction and fitting unchanged.
    assert ml.adaptive_clipping is True

0 comments on commit af0b641

Please sign in to comment.