Allow passing in required project_name (#445)
* Allow passing in required project_name

UpTrain requires a project_name when using its API, whereas the openai API does not. Without one, the integration crashes.

This change allows for an evaluator declaration like so:

evaluator = UpTrainEvaluator(
    metric=UpTrainMetric.METRIC,
    api="uptrain",
    project_name="uptrain-project",
)
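
For context, the guard added in this commit means that omitting project_name while api="uptrain" now fails at construction time with a descriptive error instead of crashing later inside the UpTrain client. A minimal sketch of that behavior, not part of the commit itself (it assumes the package exports UpTrainEvaluator and UpTrainMetric from haystack_integrations.components.evaluators.uptrain; the metric and dummy token are illustrative only):

import pytest
from haystack.utils import Secret
from haystack_integrations.components.evaluators.uptrain import UpTrainEvaluator, UpTrainMetric

# Omitting project_name with api="uptrain" raises a ValueError
# ("project_name not provided. UpTrain API requires a project name.").
with pytest.raises(ValueError, match="project_name not provided"):
    UpTrainEvaluator(
        metric=UpTrainMetric.RESPONSE_COMPLETENESS,  # illustrative metric choice
        api="uptrain",
        api_key=Secret.from_token("dummy-key"),  # placeholder token for illustration
    )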

* comments + assert test

* Update integrations/uptrain/src/haystack_integrations/components/evaluators/uptrain/evaluator.py

* fix tests

---------

Co-authored-by: Massimiliano Pippi <[email protected]>
lbux and masci authored Feb 22, 2024
1 parent 0ccf07a commit ffe86a9
Showing 3 changed files with 25 additions and 3 deletions.
7 changes: 6 additions & 1 deletion integrations/uptrain/pyproject.toml
@@ -21,7 +21,12 @@ classifiers = [
     "Programming Language :: Python :: Implementation :: CPython",
     "Programming Language :: Python :: Implementation :: PyPy",
 ]
-dependencies = ["haystack-ai>=2.0.0b6", "uptrain>=0.5"]
+dependencies = [
+    "haystack-ai>=2.0.0b6",
+    "uptrain>=0.5",
+    "nest_asyncio",
+    "litellm",
+]
 
 [project.urls]
 Source = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/uptrain"
10 changes: 9 additions & 1 deletion integrations/uptrain/src/haystack_integrations/components/evaluators/uptrain/evaluator.py
@@ -36,6 +36,7 @@ def __init__(
         api: str = "openai",
         api_key: Secret = Secret.from_env_var("OPENAI_API_KEY"),
         api_params: Optional[Dict[str, Any]] = None,
+        project_name: Optional[str] = None,
     ):
         """
         Construct a new UpTrain evaluator.
@@ -52,13 +53,16 @@
             The API key to use.
         :param api_params:
             Additional parameters to pass to the API client.
+        :param project_name:
+            Name of the project required when using UpTrain API.
         """
         self.metric = metric if isinstance(metric, UpTrainMetric) else UpTrainMetric.from_str(metric)
         self.metric_params = metric_params
         self.descriptor = METRIC_DESCRIPTORS[self.metric]
         self.api = api
         self.api_key = api_key
         self.api_params = api_params
+        self.project_name = project_name
 
         self._init_backend()
         expected_inputs = self.descriptor.input_parameters
@@ -112,7 +116,7 @@ def run(self, **inputs) -> Dict[str, Any]:
         if isinstance(self._backend_client, EvalLLM):
             results = self._backend_client.evaluate(**eval_args)
         else:
-            results = self._backend_client.log_and_evaluate(**eval_args)
+            results = self._backend_client.log_and_evaluate(**eval_args, project_name=self.project_name)
 
         OutputConverters.validate_outputs(results)
         converted_results = [
@@ -144,6 +148,7 @@ def check_serializable(obj: Any):
             api=self.api,
             api_key=self.api_key.to_dict(),
             api_params=self.api_params,
+            project_name=self.project_name,
         )
 
     @classmethod
@@ -193,6 +198,9 @@ def _init_backend(self):
         if self.api == "openai":
             backend_client = EvalLLM(openai_api_key=api_key)
         elif self.api == "uptrain":
+            if not self.project_name:
+                msg = "project_name not provided. UpTrain API requires a project name."
+                raise ValueError(msg)
             backend_client = APIClient(uptrain_api_key=api_key)
 
         self._backend_metric = backend_metric
11 changes: 10 additions & 1 deletion integrations/uptrain/tests/test_evaluator.py
@@ -112,7 +112,10 @@ def test_evaluator_api(monkeypatch):
     assert eval.api_key == Secret.from_env_var("OPENAI_API_KEY")
 
     eval = UpTrainEvaluator(
-        UpTrainMetric.RESPONSE_COMPLETENESS, api="uptrain", api_key=Secret.from_env_var("UPTRAIN_API_KEY")
+        UpTrainMetric.RESPONSE_COMPLETENESS,
+        api="uptrain",
+        api_key=Secret.from_env_var("UPTRAIN_API_KEY"),
+        project_name="test",
     )
     assert eval.api == "uptrain"
     assert eval.api_key == Secret.from_env_var("UPTRAIN_API_KEY")
@@ -156,6 +159,7 @@ def test_evaluator_serde(os_environ_get):
         "api": "uptrain",
         "api_key": Secret.from_env_var("ENV_VAR", strict=False),
         "api_params": {"eval_name": "test"},
+        "project_name": "test",
     }
     eval = UpTrainEvaluator(**init_params)
     serde_data = eval.to_dict()
@@ -166,6 +170,7 @@
     assert eval.api_key == new_eval.api_key
     assert eval.metric_params == new_eval.metric_params
     assert eval.api_params == new_eval.api_params
+    assert eval.project_name == new_eval.project_name
     assert type(new_eval._backend_client) == type(eval._backend_client)
     assert type(new_eval._backend_metric) == type(eval._backend_metric)

@@ -203,6 +208,7 @@ def test_evaluator_valid_inputs(metric, inputs, params):
         "api": "uptrain",
         "api_key": Secret.from_token("Aaa"),
         "api_params": None,
+        "project_name": "test",
     }
     eval = UpTrainEvaluator(**init_params)
     eval._backend_client = MockBackend([metric])
@@ -231,6 +237,7 @@ def test_evaluator_invalid_inputs(metric, inputs, error_string, params):
         "api": "uptrain",
         "api_key": Secret.from_token("Aaa"),
         "api_params": None,
+        "project_name": "test",
     }
     eval = UpTrainEvaluator(**init_params)
     eval._backend_client = MockBackend([metric])
@@ -307,6 +314,7 @@ def test_evaluator_outputs(metric, inputs, expected_outputs, metric_params):
         "api": "uptrain",
         "api_key": Secret.from_token("Aaa"),
         "api_params": None,
+        "project_name": "test",
     }
     eval = UpTrainEvaluator(**init_params)
     eval._backend_client = MockBackend([metric])
@@ -326,6 +334,7 @@ def test_evaluator_outputs(metric, inputs, expected_outputs, metric_params):
 # This integration test validates the evaluator by running it against the
 # OpenAI API. It is parameterized by the metric, the inputs to the evaluator
 # and the metric parameters.
+@pytest.mark.integration
 @pytest.mark.skipif("OPENAI_API_KEY" not in os.environ, reason="OPENAI_API_KEY not set")
 @pytest.mark.parametrize(
     "metric, inputs, metric_params",
