From 9996295b1cb3106663795ae79dcdfc58cdc1a846 Mon Sep 17 00:00:00 2001
From: Madeesh Kannan
Date: Thu, 22 Feb 2024 13:22:07 +0100
Subject: [PATCH] fix: Revert "Allow passing in required project_name (#445)"
 (#467)

* Revert "Allow passing in required project_name (#445)"

This reverts commit ffe86a922549f30af6929187de666e21b1c8daec.

* fix: Fail early when API params are not correctly passed to the evaluator
doc: Update docstring to mention required API parameters

* Add back dependencies required for integration testing
---
 .../evaluators/uptrain/evaluator.py           | 18 ++++++------
 integrations/uptrain/tests/test_evaluator.py  | 29 ++++++++++++-------
 2 files changed, 27 insertions(+), 20 deletions(-)

diff --git a/integrations/uptrain/src/haystack_integrations/components/evaluators/uptrain/evaluator.py b/integrations/uptrain/src/haystack_integrations/components/evaluators/uptrain/evaluator.py
index af543d726..3699d50f6 100644
--- a/integrations/uptrain/src/haystack_integrations/components/evaluators/uptrain/evaluator.py
+++ b/integrations/uptrain/src/haystack_integrations/components/evaluators/uptrain/evaluator.py
@@ -36,7 +36,6 @@ def __init__(
         api: str = "openai",
         api_key: Secret = Secret.from_env_var("OPENAI_API_KEY"),
         api_params: Optional[Dict[str, Any]] = None,
-        project_name: Optional[str] = None,
     ):
         """
         Construct a new UpTrain evaluator.
@@ -53,8 +52,8 @@ def __init__(
             The API key to use.
         :param api_params:
             Additional parameters to pass to the API client.
-        :param project_name:
-            Name of the project required when using UpTrain API.
+
+            Required parameters for the UpTrain API: `project_name`.
         """
         self.metric = metric if isinstance(metric, UpTrainMetric) else UpTrainMetric.from_str(metric)
         self.metric_params = metric_params
@@ -62,7 +61,6 @@ def __init__(
         self.api = api
         self.api_key = api_key
         self.api_params = api_params
-        self.project_name = project_name
 
         self._init_backend()
         expected_inputs = self.descriptor.input_parameters
@@ -94,7 +92,7 @@ def run(self, **inputs) -> Dict[str, Any]:
 
         :param inputs:
             The inputs to evaluate. These are determined by the
-            metric being calculated. See :class:`UpTrainMetric` for more
+            metric being calculated. See `UpTrainMetric` for more
             information.
         :returns:
             A nested list of metric results. Each input can have one or more
@@ -116,7 +114,7 @@ def run(self, **inputs) -> Dict[str, Any]:
         if isinstance(self._backend_client, EvalLLM):
             results = self._backend_client.evaluate(**eval_args)
         else:
-            results = self._backend_client.log_and_evaluate(**eval_args, project_name=self.project_name)
+            results = self._backend_client.log_and_evaluate(**eval_args)
 
         OutputConverters.validate_outputs(results)
         converted_results = [
@@ -148,7 +146,6 @@ def check_serializable(obj: Any):
             api=self.api,
             api_key=self.api_key.to_dict(),
             api_params=self.api_params,
-            project_name=self.project_name,
         )
 
     @classmethod
@@ -197,9 +194,12 @@ def _init_backend(self):
         assert api_key is not None
 
         if self.api == "openai":
             backend_client = EvalLLM(openai_api_key=api_key)
+            if self.api_params is not None:
+                msg = "OpenAI API does not support additional parameters"
+                raise ValueError(msg)
         elif self.api == "uptrain":
-            if not self.project_name:
-                msg = "project_name not provided. UpTrain API requires a project name."
+            if self.api_params is None or "project_name" not in self.api_params:
+                msg = "UpTrain API requires a 'project_name' API parameter"
                 raise ValueError(msg)
             backend_client = APIClient(uptrain_api_key=api_key)
diff --git a/integrations/uptrain/tests/test_evaluator.py b/integrations/uptrain/tests/test_evaluator.py
index 1f9b245e3..d7566c795 100644
--- a/integrations/uptrain/tests/test_evaluator.py
+++ b/integrations/uptrain/tests/test_evaluator.py
@@ -115,10 +115,11 @@ def test_evaluator_api(monkeypatch):
         UpTrainMetric.RESPONSE_COMPLETENESS,
         api="uptrain",
         api_key=Secret.from_env_var("UPTRAIN_API_KEY"),
-        project_name="test",
+        api_params={"project_name": "test"},
     )
     assert eval.api == "uptrain"
     assert eval.api_key == Secret.from_env_var("UPTRAIN_API_KEY")
+    assert eval.api_params == {"project_name": "test"}
 
     with pytest.raises(ValueError, match="Unsupported API"):
         UpTrainEvaluator(UpTrainMetric.CONTEXT_RELEVANCE, api="cohere")
@@ -126,6 +127,20 @@ def test_evaluator_api(monkeypatch):
     with pytest.raises(ValueError, match="None of the following authentication environment variables are set"):
         UpTrainEvaluator(UpTrainMetric.CONTEXT_RELEVANCE, api="uptrain", api_key=Secret.from_env_var("asd39920qqq"))
 
+    with pytest.raises(ValueError, match="does not support additional parameters"):
+        UpTrainEvaluator(
+            UpTrainMetric.CONTEXT_RELEVANCE,
+            api_params={"project_name": "test"},
+            api="openai",
+        )
+
+    with pytest.raises(ValueError, match="requires .* API parameter"):
+        UpTrainEvaluator(
+            UpTrainMetric.CONTEXT_RELEVANCE,
+            api_params=None,
+            api="uptrain",
+        )
+
 
 def test_evaluator_metric_init_params():
     eval = UpTrainEvaluator(
@@ -158,8 +173,7 @@ def test_evaluator_serde(os_environ_get):
         "metric_params": {"method": "rouge"},
         "api": "uptrain",
         "api_key": Secret.from_env_var("ENV_VAR", strict=False),
-        "api_params": {"eval_name": "test"},
-        "project_name": "test",
+        "api_params": {"project_name": "test"},
     }
     eval = UpTrainEvaluator(**init_params)
     serde_data = eval.to_dict()
@@ -170,13 +184,12 @@ def test_evaluator_serde(os_environ_get):
     assert eval.api_key == new_eval.api_key
     assert eval.metric_params == new_eval.metric_params
     assert eval.api_params == new_eval.api_params
-    assert eval.project_name == new_eval.project_name
     assert type(new_eval._backend_client) == type(eval._backend_client)
     assert type(new_eval._backend_metric) == type(eval._backend_metric)
 
     with pytest.raises(DeserializationError, match=r"cannot serialize the API/metric parameters"):
         init_params3 = copy.deepcopy(init_params)
-        init_params3["api_params"] = {"arg": Unserializable("")}
+        init_params3["api_params"] = {"arg": Unserializable(""), "project_name": "test"}
         eval = UpTrainEvaluator(**init_params3)
         eval.to_dict()
 
@@ -205,10 +218,8 @@ def test_evaluator_valid_inputs(metric, inputs, params):
     init_params = {
         "metric": metric,
         "metric_params": params,
-        "api": "uptrain",
         "api_key": Secret.from_token("Aaa"),
         "api_params": None,
-        "project_name": "test",
     }
     eval = UpTrainEvaluator(**init_params)
     eval._backend_client = MockBackend([metric])
@@ -234,10 +245,8 @@ def test_evaluator_invalid_inputs(metric, inputs, error_string, params):
     init_params = {
         "metric": metric,
         "metric_params": params,
-        "api": "uptrain",
         "api_key": Secret.from_token("Aaa"),
         "api_params": None,
-        "project_name": "test",
     }
     eval = UpTrainEvaluator(**init_params)
     eval._backend_client = MockBackend([metric])
@@ -311,10 +320,8 @@ def test_evaluator_outputs(metric, inputs, expected_outputs, metric_params):
     init_params = {
         "metric": metric,
         "metric_params": metric_params,
-        "api": "uptrain",
         "api_key": Secret.from_token("Aaa"),
         "api_params": None,
-        "project_name": "test",
     }
     eval = UpTrainEvaluator(**init_params)
     eval._backend_client = MockBackend([metric])