From 0fa27a17545f0f5573294bf078d284318484287b Mon Sep 17 00:00:00 2001
From: shadeMe
Date: Thu, 22 Feb 2024 10:44:09 +0100
Subject: [PATCH 1/3] Revert "Allow passing in required project_name (#445)"

This reverts commit ffe86a922549f30af6929187de666e21b1c8daec.
---
 integrations/uptrain/pyproject.toml            |  7 +------
 .../components/evaluators/uptrain/evaluator.py | 10 +---------
 integrations/uptrain/tests/test_evaluator.py   | 11 +----------
 3 files changed, 3 insertions(+), 25 deletions(-)

diff --git a/integrations/uptrain/pyproject.toml b/integrations/uptrain/pyproject.toml
index 0e9166adc..4bec65e2e 100644
--- a/integrations/uptrain/pyproject.toml
+++ b/integrations/uptrain/pyproject.toml
@@ -21,12 +21,7 @@ classifiers = [
   "Programming Language :: Python :: Implementation :: CPython",
   "Programming Language :: Python :: Implementation :: PyPy",
 ]
-dependencies = [
-  "haystack-ai>=2.0.0b6",
-  "uptrain==0.5.0",
-  "nest_asyncio",
-  "litellm",
-]
+dependencies = ["haystack-ai>=2.0.0b6", "uptrain==0.5.0"]
 
 [project.urls]
 Source = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/uptrain"
diff --git a/integrations/uptrain/src/haystack_integrations/components/evaluators/uptrain/evaluator.py b/integrations/uptrain/src/haystack_integrations/components/evaluators/uptrain/evaluator.py
index af543d726..4e7307602 100644
--- a/integrations/uptrain/src/haystack_integrations/components/evaluators/uptrain/evaluator.py
+++ b/integrations/uptrain/src/haystack_integrations/components/evaluators/uptrain/evaluator.py
@@ -36,7 +36,6 @@ def __init__(
         api: str = "openai",
         api_key: Secret = Secret.from_env_var("OPENAI_API_KEY"),
         api_params: Optional[Dict[str, Any]] = None,
-        project_name: Optional[str] = None,
     ):
         """
         Construct a new UpTrain evaluator.
@@ -53,8 +52,6 @@
             The API key to use.
         :param api_params:
             Additional parameters to pass to the API client.
-        :param project_name:
-            Name of the project required when using UpTrain API.
         """
         self.metric = metric if isinstance(metric, UpTrainMetric) else UpTrainMetric.from_str(metric)
         self.metric_params = metric_params
@@ -62,7 +59,6 @@
         self.api = api
         self.api_key = api_key
         self.api_params = api_params
-        self.project_name = project_name
 
         self._init_backend()
         expected_inputs = self.descriptor.input_parameters
@@ -116,7 +112,7 @@ def run(self, **inputs) -> Dict[str, Any]:
         if isinstance(self._backend_client, EvalLLM):
             results = self._backend_client.evaluate(**eval_args)
         else:
-            results = self._backend_client.log_and_evaluate(**eval_args, project_name=self.project_name)
+            results = self._backend_client.log_and_evaluate(**eval_args)
 
         OutputConverters.validate_outputs(results)
         converted_results = [
@@ -148,7 +144,6 @@ def check_serializable(obj: Any):
             api=self.api,
             api_key=self.api_key.to_dict(),
             api_params=self.api_params,
-            project_name=self.project_name,
         )
 
     @classmethod
@@ -198,9 +193,6 @@
         if self.api == "openai":
             backend_client = EvalLLM(openai_api_key=api_key)
         elif self.api == "uptrain":
-            if not self.project_name:
-                msg = "project_name not provided. UpTrain API requires a project name."
-                raise ValueError(msg)
             backend_client = APIClient(uptrain_api_key=api_key)
 
         self._backend_metric = backend_metric
diff --git a/integrations/uptrain/tests/test_evaluator.py b/integrations/uptrain/tests/test_evaluator.py
index 1f9b245e3..0f2eaba4d 100644
--- a/integrations/uptrain/tests/test_evaluator.py
+++ b/integrations/uptrain/tests/test_evaluator.py
@@ -112,10 +112,7 @@ def test_evaluator_api(monkeypatch):
     assert eval.api_key == Secret.from_env_var("OPENAI_API_KEY")
 
     eval = UpTrainEvaluator(
-        UpTrainMetric.RESPONSE_COMPLETENESS,
-        api="uptrain",
-        api_key=Secret.from_env_var("UPTRAIN_API_KEY"),
-        project_name="test",
+        UpTrainMetric.RESPONSE_COMPLETENESS, api="uptrain", api_key=Secret.from_env_var("UPTRAIN_API_KEY")
     )
     assert eval.api == "uptrain"
     assert eval.api_key == Secret.from_env_var("UPTRAIN_API_KEY")
@@ -159,7 +156,6 @@ def test_evaluator_serde(os_environ_get):
         "api": "uptrain",
         "api_key": Secret.from_env_var("ENV_VAR", strict=False),
         "api_params": {"eval_name": "test"},
-        "project_name": "test",
     }
     eval = UpTrainEvaluator(**init_params)
     serde_data = eval.to_dict()
@@ -170,7 +166,6 @@
     assert eval.api_key == new_eval.api_key
     assert eval.metric_params == new_eval.metric_params
     assert eval.api_params == new_eval.api_params
-    assert eval.project_name == new_eval.project_name
     assert type(new_eval._backend_client) == type(eval._backend_client)
     assert type(new_eval._backend_metric) == type(eval._backend_metric)
 
@@ -208,7 +203,6 @@ def test_evaluator_valid_inputs(metric, inputs, params):
         "api": "uptrain",
         "api_key": Secret.from_token("Aaa"),
         "api_params": None,
-        "project_name": "test",
     }
     eval = UpTrainEvaluator(**init_params)
     eval._backend_client = MockBackend([metric])
@@ -237,7 +231,6 @@ def test_evaluator_invalid_inputs(metric, inputs, error_string, params):
         "api": "uptrain",
         "api_key": Secret.from_token("Aaa"),
         "api_params": None,
-        "project_name": "test",
     }
     eval = UpTrainEvaluator(**init_params)
     eval._backend_client = MockBackend([metric])
@@ -314,7 +307,6 @@ def test_evaluator_outputs(metric, inputs, expected_outputs, metric_params):
         "api": "uptrain",
         "api_key": Secret.from_token("Aaa"),
         "api_params": None,
-        "project_name": "test",
     }
     eval = UpTrainEvaluator(**init_params)
     eval._backend_client = MockBackend([metric])
@@ -334,7 +326,6 @@
 # This integration test validates the evaluator by running it against the
 # OpenAI API. It is parameterized by the metric, the inputs to the evalutor
 # and the metric parameters.
-@pytest.mark.integration
 @pytest.mark.skipif("OPENAI_API_KEY" not in os.environ, reason="OPENAI_API_KEY not set")
 @pytest.mark.parametrize(
     "metric, inputs, metric_params",

From d65bd6ceaf424404c3d6c28fc7c08475fc400cd2 Mon Sep 17 00:00:00 2001
From: shadeMe
Date: Thu, 22 Feb 2024 10:50:18 +0100
Subject: [PATCH 2/3] fix: Fail early when API params are not correctly passed
 to the evaluator

doc: Update docstring to mention required API parameters
---
 .../evaluators/uptrain/evaluator.py          | 10 ++++++-
 integrations/uptrain/tests/test_evaluator.py | 28 +++++++++++++++----
 2 files changed, 31 insertions(+), 7 deletions(-)

diff --git a/integrations/uptrain/src/haystack_integrations/components/evaluators/uptrain/evaluator.py b/integrations/uptrain/src/haystack_integrations/components/evaluators/uptrain/evaluator.py
index 4e7307602..3699d50f6 100644
--- a/integrations/uptrain/src/haystack_integrations/components/evaluators/uptrain/evaluator.py
+++ b/integrations/uptrain/src/haystack_integrations/components/evaluators/uptrain/evaluator.py
@@ -52,6 +52,8 @@
             The API key to use.
         :param api_params:
             Additional parameters to pass to the API client.
+
+            Required parameters for the UpTrain API: `project_name`.
         """
         self.metric = metric if isinstance(metric, UpTrainMetric) else UpTrainMetric.from_str(metric)
         self.metric_params = metric_params
@@ -90,7 +92,7 @@ def run(self, **inputs) -> Dict[str, Any]:
 
         :param inputs:
             The inputs to evaluate. These are determined by the
-            metric being calculated. See :class:`UpTrainMetric` for more
+            metric being calculated. See `UpTrainMetric` for more
             information.
         :returns:
             A nested list of metric results. Each input can have one or more
@@ -192,7 +194,13 @@
         assert api_key is not None
         if self.api == "openai":
             backend_client = EvalLLM(openai_api_key=api_key)
+            if self.api_params is not None:
+                msg = "OpenAI API does not support additional parameters"
+                raise ValueError(msg)
         elif self.api == "uptrain":
+            if self.api_params is None or "project_name" not in self.api_params:
+                msg = "UpTrain API requires a 'project_name' API parameter"
+                raise ValueError(msg)
             backend_client = APIClient(uptrain_api_key=api_key)
 
         self._backend_metric = backend_metric
diff --git a/integrations/uptrain/tests/test_evaluator.py b/integrations/uptrain/tests/test_evaluator.py
index 0f2eaba4d..d7566c795 100644
--- a/integrations/uptrain/tests/test_evaluator.py
+++ b/integrations/uptrain/tests/test_evaluator.py
@@ -112,10 +112,14 @@ def test_evaluator_api(monkeypatch):
     assert eval.api_key == Secret.from_env_var("OPENAI_API_KEY")
 
     eval = UpTrainEvaluator(
-        UpTrainMetric.RESPONSE_COMPLETENESS, api="uptrain", api_key=Secret.from_env_var("UPTRAIN_API_KEY")
+        UpTrainMetric.RESPONSE_COMPLETENESS,
+        api="uptrain",
+        api_key=Secret.from_env_var("UPTRAIN_API_KEY"),
+        api_params={"project_name": "test"},
     )
     assert eval.api == "uptrain"
     assert eval.api_key == Secret.from_env_var("UPTRAIN_API_KEY")
+    assert eval.api_params == {"project_name": "test"}
 
     with pytest.raises(ValueError, match="Unsupported API"):
         UpTrainEvaluator(UpTrainMetric.CONTEXT_RELEVANCE, api="cohere")
@@ -123,6 +127,20 @@
     with pytest.raises(ValueError, match="None of the following authentication environment variables are set"):
         UpTrainEvaluator(UpTrainMetric.CONTEXT_RELEVANCE, api="uptrain", api_key=Secret.from_env_var("asd39920qqq"))
 
+    with pytest.raises(ValueError, match="does not support additional parameters"):
+        UpTrainEvaluator(
+            UpTrainMetric.CONTEXT_RELEVANCE,
api_params={"project_name": "test"}, + api="openai", + ) + + with pytest.raises(ValueError, match="requires .* API parameter"): + UpTrainEvaluator( + UpTrainMetric.CONTEXT_RELEVANCE, + api_params=None, + api="uptrain", + ) + def test_evaluator_metric_init_params(): eval = UpTrainEvaluator( @@ -155,7 +173,7 @@ def test_evaluator_serde(os_environ_get): "metric_params": {"method": "rouge"}, "api": "uptrain", "api_key": Secret.from_env_var("ENV_VAR", strict=False), - "api_params": {"eval_name": "test"}, + "api_params": {"project_name": "test"}, } eval = UpTrainEvaluator(**init_params) serde_data = eval.to_dict() @@ -171,7 +189,7 @@ def test_evaluator_serde(os_environ_get): with pytest.raises(DeserializationError, match=r"cannot serialize the API/metric parameters"): init_params3 = copy.deepcopy(init_params) - init_params3["api_params"] = {"arg": Unserializable("")} + init_params3["api_params"] = {"arg": Unserializable(""), "project_name": "test"} eval = UpTrainEvaluator(**init_params3) eval.to_dict() @@ -200,7 +218,6 @@ def test_evaluator_valid_inputs(metric, inputs, params): init_params = { "metric": metric, "metric_params": params, - "api": "uptrain", "api_key": Secret.from_token("Aaa"), "api_params": None, } @@ -228,7 +245,6 @@ def test_evaluator_invalid_inputs(metric, inputs, error_string, params): init_params = { "metric": metric, "metric_params": params, - "api": "uptrain", "api_key": Secret.from_token("Aaa"), "api_params": None, } @@ -304,7 +320,6 @@ def test_evaluator_outputs(metric, inputs, expected_outputs, metric_params): init_params = { "metric": metric, "metric_params": metric_params, - "api": "uptrain", "api_key": Secret.from_token("Aaa"), "api_params": None, } @@ -326,6 +341,7 @@ def test_evaluator_outputs(metric, inputs, expected_outputs, metric_params): # This integration test validates the evaluator by running it against the # OpenAI API. It is parameterized by the metric, the inputs to the evalutor # and the metric parameters. +@pytest.mark.integration @pytest.mark.skipif("OPENAI_API_KEY" not in os.environ, reason="OPENAI_API_KEY not set") @pytest.mark.parametrize( "metric, inputs, metric_params", From 3ce16ab0c10d578e85d45afd43f4223c770ba153 Mon Sep 17 00:00:00 2001 From: shadeMe Date: Thu, 22 Feb 2024 11:44:30 +0100 Subject: [PATCH 3/3] Add back dependencies required for integration testing --- integrations/uptrain/pyproject.toml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/integrations/uptrain/pyproject.toml b/integrations/uptrain/pyproject.toml index 4bec65e2e..0e9166adc 100644 --- a/integrations/uptrain/pyproject.toml +++ b/integrations/uptrain/pyproject.toml @@ -21,7 +21,12 @@ classifiers = [ "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", ] -dependencies = ["haystack-ai>=2.0.0b6", "uptrain==0.5.0"] +dependencies = [ + "haystack-ai>=2.0.0b6", + "uptrain==0.5.0", + "nest_asyncio", + "litellm", +] [project.urls] Source = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/uptrain"