Skip to content

Commit

Permalink
Pass feature set to _evaluate_model_kind
Browse files — browse the repository at this point in the history
  • Branch information
FrancescMartiEscofetQC committed on Jul 19, 2024
1 parent f66a7f0 commit d81ea7a
Show file tree
Hide file tree
Showing 7 changed files with 17 additions and 3 deletions.
2 changes: 1 addition & 1 deletion metalearners/_typing.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
OosMethod = Literal["overall", "median", "mean"]

Params = Mapping[str, int | float | str]
Features = Collection[str] | Collection[int]
Features = Collection[str] | Collection[int] | None

# ruff is not happy about the usage of Union.
Vector = Union[pd.Series, np.ndarray] # noqa
Expand Down
3 changes: 3 additions & 0 deletions metalearners/drlearner.py
Original file line number Diff line number Diff line change
Expand Up @@ -267,6 +267,7 @@ def evaluate(
is_oos=is_oos,
oos_method=oos_method,
is_treatment_model=False,
feature_set=self.feature_set[VARIANT_OUTCOME_MODEL],
)

propensity_evaluation = _evaluate_model_kind(
Expand All @@ -278,6 +279,7 @@ def evaluate(
is_oos=is_oos,
oos_method=oos_method,
is_treatment_model=False,
feature_set=self.feature_set[PROPENSITY_MODEL],
)

pseudo_outcome: list[np.ndarray] = []
Expand All @@ -301,6 +303,7 @@ def evaluate(
is_oos=is_oos,
oos_method=oos_method,
is_treatment_model=True,
feature_set=self.feature_set[TREATMENT_MODEL],
)

return variant_outcome_evaluation | propensity_evaluation | treatment_evaluation
Expand Down
6 changes: 4 additions & 2 deletions metalearners/metalearner.py
Original file line number Diff line number Diff line change
Expand Up @@ -145,6 +145,7 @@ def _evaluate_model_kind(
model_kind: str,
is_oos: bool,
is_treatment_model: bool,
feature_set: Features,
oos_method: OosMethod = OVERALL,
sample_weights: Sequence[Vector] | None = None,
) -> dict[str, float]:
Expand All @@ -168,14 +169,15 @@ def _evaluate_model_kind(
else:
index_str = f"{i}_"
name = f"{prefix}{index_str}{scorer_name}"
X_filtered = _filter_x_columns(Xs[i], feature_set)
with _PredictContext(cfe, is_oos, oos_method) as modified_cfe:
if sample_weights:
evaluation_metrics[name] = scorer_callable(
modified_cfe, Xs[i], ys[i], sample_weight=sample_weights[i]
modified_cfe, X_filtered, ys[i], sample_weight=sample_weights[i]
)
else:
evaluation_metrics[name] = scorer_callable(
modified_cfe, Xs[i], ys[i]
modified_cfe, X_filtered, ys[i]
)
return evaluation_metrics

Expand Down
3 changes: 3 additions & 0 deletions metalearners/rlearner.py
Original file line number Diff line number Diff line change
Expand Up @@ -352,6 +352,7 @@ def evaluate(
is_oos=is_oos,
oos_method=oos_method,
is_treatment_model=False,
feature_set=self.feature_set[PROPENSITY_MODEL],
)

outcome_evaluation = _evaluate_model_kind(
Expand All @@ -363,6 +364,7 @@ def evaluate(
is_oos=is_oos,
oos_method=oos_method,
is_treatment_model=False,
feature_set=self.feature_set[OUTCOME_MODEL],
)

# TODO: improve this? generalize it to other metalearners?
Expand Down Expand Up @@ -414,6 +416,7 @@ def evaluate(
oos_method=oos_method,
is_treatment_model=True,
sample_weights=sample_weights,
feature_set=self.feature_set[TREATMENT_MODEL],
)

rloss_evaluation = {}
Expand Down
1 change: 1 addition & 0 deletions metalearners/slearner.py
Original file line number Diff line number Diff line change
Expand Up @@ -212,6 +212,7 @@ def evaluate(
is_oos=is_oos,
oos_method=oos_method,
is_treatment_model=False,
feature_set=self.feature_set[_BASE_MODEL],
)

def predict_conditional_average_outcomes(
Expand Down
1 change: 1 addition & 0 deletions metalearners/tlearner.py
Original file line number Diff line number Diff line change
Expand Up @@ -126,4 +126,5 @@ def evaluate(
is_oos=is_oos,
oos_method=oos_method,
is_treatment_model=False,
feature_set=self.feature_set[VARIANT_OUTCOME_MODEL],
)
4 changes: 4 additions & 0 deletions metalearners/xlearner.py
Original file line number Diff line number Diff line change
Expand Up @@ -300,6 +300,7 @@ def evaluate(
is_oos=is_oos,
oos_method=oos_method,
is_treatment_model=False,
feature_set=self.feature_set[VARIANT_OUTCOME_MODEL],
)

propensity_evaluation = _evaluate_model_kind(
Expand All @@ -311,6 +312,7 @@ def evaluate(
is_oos=is_oos,
oos_method=oos_method,
is_treatment_model=False,
feature_set=self.feature_set[PROPENSITY_MODEL],
)

imputed_te_control: list[np.ndarray] = []
Expand All @@ -331,6 +333,7 @@ def evaluate(
is_oos=is_oos,
oos_method=oos_method,
is_treatment_model=True,
feature_set=self.feature_set[TREATMENT_EFFECT_MODEL],
)

te_control_evaluation = _evaluate_model_kind(
Expand All @@ -342,6 +345,7 @@ def evaluate(
is_oos=is_oos,
oos_method=oos_method,
is_treatment_model=True,
feature_set=self.feature_set[CONTROL_EFFECT_MODEL],
)

return (
Expand Down

0 comments on commit d81ea7a

Please sign in to comment.