From c5359af943b1472dfed1b7ff010283f5a8ce4f20 Mon Sep 17 00:00:00 2001
From: Jiaming Yuan
Date: Thu, 2 Mar 2023 09:47:36 +0800
Subject: [PATCH] Support custom metric as well.

---
 python-package/xgboost/sklearn.py | 11 ++++++++---
 tests/python/test_with_sklearn.py | 14 ++++++++++++--
 2 files changed, 20 insertions(+), 5 deletions(-)

diff --git a/python-package/xgboost/sklearn.py b/python-package/xgboost/sklearn.py
index 92ae43509367..ad57aee8214c 100644
--- a/python-package/xgboost/sklearn.py
+++ b/python-package/xgboost/sklearn.py
@@ -2108,7 +2108,7 @@ def apply(
         return super().apply(X, ntree_limit, iteration_range)
 
     def score(self, X: ArrayLike, y: ArrayLike) -> float:
-        """Evaluate score for data using the first evaluation metric.
+        """Evaluate score for data using the last evaluation metric.
 
         Parameters
         ----------
@@ -2126,6 +2126,11 @@ def score(self, X: ArrayLike, y: ArrayLike) -> float:
         """
         X, qid = _get_qid(X, None)
         Xyq = DMatrix(X, y, qid=qid)
-        result_str = self.get_booster().eval(Xyq)
+        if callable(self.eval_metric):
+            metric = ltr_metric_decorator(self.eval_metric, self.n_jobs)
+            result_str = self.get_booster().eval_set([(Xyq, "eval")], feval=metric)
+        else:
+            result_str = self.get_booster().eval(Xyq)
+
         metric_score = _parse_eval_str(result_str)
-        return metric_score[0][1]
+        return metric_score[-1][1]
diff --git a/tests/python/test_with_sklearn.py b/tests/python/test_with_sklearn.py
index 3cbeb2e10c7d..a745431f9276 100644
--- a/tests/python/test_with_sklearn.py
+++ b/tests/python/test_with_sklearn.py
@@ -3,7 +3,7 @@
 import pickle
 import random
 import tempfile
-from typing import Callable, Optional
+from typing import Any, Callable, Optional
 
 import numpy as np
 import pytest
@@ -185,6 +185,7 @@ def test_ranking_qid_df():
     import pandas as pd
     import scipy.sparse
     from sklearn.model_selection import StratifiedGroupKFold, cross_val_score
+    from sklearn.metrics import mean_squared_error
 
     X, y, q, w = tm.make_ltr(n_samples=128, n_features=2, n_query_groups=8, max_rel=3)
 
@@ -212,13 +213,22 @@ def test_ranking_qid_df():
     results = cross_val_score(ranker, df, y, cv=kfold, groups=df.qid)
     assert len(results) == 5
 
+    # Works with custom metric
+    def neg_mse(*args: Any, **kwargs: Any) -> float:
+        return -mean_squared_error(*args, **kwargs)
+
+    ranker = xgb.XGBRanker(n_estimators=3, eval_metric=neg_mse)
+    ranker.fit(df, y, eval_set=[(valid_df, y)])
+    score = ranker.score(valid_df, y)
+    assert np.isclose(score, ranker.evals_result()["validation_0"]["neg_mse"][-1])
+
     # Works with sparse data
     X_csr = scipy.sparse.csr_matrix(X)
     df = pd.DataFrame.sparse.from_spmatrix(
         X_csr, columns=[str(i) for i in range(X.shape[1])]
     )
     df["qid"] = q
-    ranker = xgb.XGBRanker(n_estimators=3)
+    ranker = xgb.XGBRanker(n_estimators=3, eval_metric="ndcg")
     ranker.fit(df, y)
     s2 = ranker.score(df, y)
     assert np.isclose(s2, s)
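
As a usage illustration of what this patch enables (not part of the diff itself): when eval_metric is a callable, XGBRanker.score now wraps it with ltr_metric_decorator, evaluates through Booster.eval_set with feval, and returns the value of the last evaluation metric rather than the first. Below is a minimal sketch, assuming an xgboost build with this patch applied; the neg_mse helper and the data shapes are made up for the example.

    import numpy as np
    import pandas as pd
    import xgboost as xgb
    from sklearn.metrics import mean_squared_error


    def neg_mse(y_true: np.ndarray, y_score: np.ndarray) -> float:
        # Negated so that larger is better, matching scikit-learn's
        # scorer convention.
        return -mean_squared_error(y_true, y_score)


    rng = np.random.default_rng(2023)
    X = pd.DataFrame(rng.normal(size=(64, 2)), columns=["f0", "f1"])
    X["qid"] = np.repeat(np.arange(8), 8)  # 8 query groups of 8 rows each
    y = rng.integers(0, 4, size=64)        # relevance labels in [0, 3]

    # With a callable metric, score() routes through eval_set()/feval
    # instead of Booster.eval(), so custom metrics work here as well.
    ranker = xgb.XGBRanker(n_estimators=3, eval_metric=neg_mse)
    ranker.fit(X, y)
    print(ranker.score(X, y))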