From 68e7338ae161ac57ff1ff66976eaa12edc3052df Mon Sep 17 00:00:00 2001 From: shadeMe Date: Wed, 24 Jan 2024 17:22:30 +0100 Subject: [PATCH 1/4] feat: Implement `UpTrainEvaluator` and co. --- .github/labeler.yml | 5 + .github/workflows/uptrain.yml | 56 +++ integrations/uptrain/LICENSE.txt | 73 ++++ integrations/uptrain/README.md | 29 ++ integrations/uptrain/pyproject.toml | 147 +++++++ .../uptrain/src/uptrain_haystack/__init__.py | 3 + .../uptrain/src/uptrain_haystack/evaluator.py | 194 +++++++++ .../uptrain/src/uptrain_haystack/metrics.py | 336 ++++++++++++++++ integrations/uptrain/tests/__init__.py | 0 integrations/uptrain/tests/test_evaluator.py | 372 ++++++++++++++++++ integrations/uptrain/tests/test_metrics.py | 11 + 11 files changed, 1226 insertions(+) create mode 100644 .github/workflows/uptrain.yml create mode 100644 integrations/uptrain/LICENSE.txt create mode 100644 integrations/uptrain/README.md create mode 100644 integrations/uptrain/pyproject.toml create mode 100644 integrations/uptrain/src/uptrain_haystack/__init__.py create mode 100644 integrations/uptrain/src/uptrain_haystack/evaluator.py create mode 100644 integrations/uptrain/src/uptrain_haystack/metrics.py create mode 100644 integrations/uptrain/tests/__init__.py create mode 100644 integrations/uptrain/tests/test_evaluator.py create mode 100644 integrations/uptrain/tests/test_metrics.py diff --git a/.github/labeler.yml b/.github/labeler.yml index 93eba1d82..ba74c43a2 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -79,6 +79,11 @@ integration:unstructured-fileconverter: - any-glob-to-any-file: "integrations/unstructured/fileconverter/**/*" - any-glob-to-any-file: ".github/workflows/unstructured_fileconverter.yml" +integration:uptrain: + - changed-files: + - any-glob-to-any-file: "integrations/uptrain/**/*" + - any-glob-to-any-file: ".github/workflows/uptrain.yml" + integration:weaviate: - changed-files: - any-glob-to-any-file: "integrations/weaviate/**/*" diff --git a/.github/workflows/uptrain.yml b/.github/workflows/uptrain.yml new file mode 100644 index 000000000..bacfa27fb --- /dev/null +++ b/.github/workflows/uptrain.yml @@ -0,0 +1,56 @@ +# This workflow comes from https://github.com/ofek/hatch-mypyc +# https://github.com/ofek/hatch-mypyc/blob/5a198c0ba8660494d02716cfc9d79ce4adfb1442/.github/workflows/test.yml +name: Test / uptrain + +on: + schedule: + - cron: "0 0 * * *" + pull_request: + paths: + - "integrations/uptrain/**" + - ".github/workflows/uptrain.yml" + +defaults: + run: + working-directory: integrations/uptrain + +concurrency: + group: uptrain-${{ github.head_ref }} + cancel-in-progress: true + +env: + PYTHONUNBUFFERED: "1" + FORCE_COLOR: "1" + +jobs: + run: + name: Python ${{ matrix.python-version }} on ${{ startsWith(matrix.os, 'macos-') && 'macOS' || startsWith(matrix.os, 'windows-') && 'Windows' || 'Linux' }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + python-version: ["3.9", "3.10"] + + steps: + - name: Support longpaths + if: matrix.os == 'windows-latest' + working-directory: . 
+ run: git config --system core.longpaths true + + - uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Install Hatch + run: pip install --upgrade hatch + + - name: Lint + if: matrix.python-version == '3.9' && runner.os == 'Linux' + run: hatch run lint:all + + - name: Run tests + run: hatch run cov diff --git a/integrations/uptrain/LICENSE.txt b/integrations/uptrain/LICENSE.txt new file mode 100644 index 000000000..137069b82 --- /dev/null +++ b/integrations/uptrain/LICENSE.txt @@ -0,0 +1,73 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + + (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. + + You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. 
(Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/integrations/uptrain/README.md b/integrations/uptrain/README.md new file mode 100644 index 000000000..96caded09 --- /dev/null +++ b/integrations/uptrain/README.md @@ -0,0 +1,29 @@ +# uptrain-haystack + +[![PyPI - Version](https://img.shields.io/pypi/v/uptrain-haystack.svg)](https://pypi.org/project/uptrain-haystack) +[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/uptrain-haystack.svg)](https://pypi.org/project/uptrain-haystack) + +--- + +**Table of Contents** + +- [uptrain-haystack](#uptrain-haystack) + - [Installation](#installation) + - [Testing](#testing) + - [License](#license) + +## Installation + +```console +pip install uptrain-haystack +``` + +## Testing + +```console +hatch run test +``` + +## License + +`uptrain-haystack` is distributed under the terms of the [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html) license. diff --git a/integrations/uptrain/pyproject.toml b/integrations/uptrain/pyproject.toml new file mode 100644 index 000000000..7f4c4fd98 --- /dev/null +++ b/integrations/uptrain/pyproject.toml @@ -0,0 +1,147 @@ +[build-system] +requires = ["hatchling", "hatch-vcs"] +build-backend = "hatchling.build" + +[project] +name = "uptrain-haystack" +dynamic = ["version"] +description = 'An integration of UpTrain LLM evaluation framework with Haystack' +readme = "README.md" +requires-python = ">=3.7" +license = "Apache-2.0" +keywords = [] +authors = [{ name = "deepset GmbH", email = "info@deepset.ai" }] +classifiers = [ + "Development Status :: 4 - Beta", + "Programming Language :: Python", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: Implementation :: CPython", + "Programming Language :: Python :: Implementation :: PyPy", +] +dependencies = ["haystack-ai", "uptrain>=0.5"] + +[project.urls] +Source = "https://github.com/deepset-ai/haystack-core-integrations" +Documentation = "https://github.com/deepset-ai/haystack-core-integrations/blob/main/integrations/uptrain/README.md" +Issues = "https://github.com/deepset-ai/haystack-core-integrations/issues" + +[tool.hatch.version] +source = "vcs" +tag-pattern = 'integrations\/uptrain(?P.*)' + +[tool.hatch.version.raw-options] +root = "../.." 
+git_describe_command = 'git describe --tags --match="integrations/uptrain[0-9]*"' + +[tool.hatch.envs.default] +dependencies = ["coverage[toml]>=6.5", "pytest"] +[tool.hatch.envs.default.scripts] +test = "pytest {args:tests}" +test-cov = "coverage run -m pytest {args:tests}" +cov-report = ["- coverage combine", "coverage report"] +cov = ["test-cov", "cov-report"] + +[[tool.hatch.envs.all.matrix]] +python = ["3.7", "3.8", "3.9", "3.10", "3.11"] + +[tool.hatch.envs.lint] +detached = true +dependencies = ["black>=23.1.0", "mypy>=1.0.0", "ruff>=0.0.243"] +[tool.hatch.envs.lint.scripts] +typing = "mypy --install-types --non-interactive {args:src/uptrain_haystack tests}" +style = ["ruff {args:.}", "black --check --diff {args:.}"] +fmt = ["black {args:.}", "ruff --fix {args:.}", "style"] +all = ["style", "typing"] + +[tool.black] +target-version = ["py37"] +line-length = 120 +skip-string-normalization = true + +[tool.ruff] +target-version = "py37" +line-length = 120 +select = [ + "A", + "ARG", + "B", + "C", + "DTZ", + "E", + "EM", + "F", + "FBT", + "I", + "ICN", + "ISC", + "N", + "PLC", + "PLE", + "PLR", + "PLW", + "Q", + "RUF", + "S", + "T", + "TID", + "UP", + "W", + "YTT", +] +ignore = [ + # Allow non-abstract empty methods in abstract base classes + "B027", + # Allow boolean positional values in function calls, like `dict.get(... True)` + "FBT003", + # Ignore checks for possible passwords + "S105", + "S106", + "S107", + # Ignore complexity + "C901", + "PLR0911", + "PLR0912", + "PLR0913", + "PLR0915", + # Misc + "S101", +] +unfixable = [ + # Don't touch unused imports + "F401", +] +extend-exclude = ["tests"] + +[tool.ruff.isort] +known-first-party = ["uptrain_haystack"] + +[tool.ruff.flake8-tidy-imports] +ban-relative-imports = "all" + +[tool.ruff.per-file-ignores] +# Tests can use magic values, assertions, and relative imports +"tests/**/*" = ["PLR2004", "S101", "TID252"] + +[tool.coverage.run] +source_pkgs = ["uptrain_haystack", "tests"] +branch = true +parallel = true +omit = ["src/uptrain_haystack/__about__.py"] + +[tool.coverage.paths] +uptrain_haystack = [ + "src/uptrain_haystack", + "*/uptrain-haystack/src/uptrain_haystack", +] +tests = ["tests", "*uptrain-haystack/tests"] + +[tool.coverage.report] +exclude_lines = ["no cov", "if __name__ == .__main__.:", "if TYPE_CHECKING:"] + +[[tool.mypy.overrides]] +module = ["haystack.*", "pytest.*", "uptrain.*", "numpy", "grpc"] +ignore_missing_imports = true diff --git a/integrations/uptrain/src/uptrain_haystack/__init__.py b/integrations/uptrain/src/uptrain_haystack/__init__.py new file mode 100644 index 000000000..14d8700fe --- /dev/null +++ b/integrations/uptrain/src/uptrain_haystack/__init__.py @@ -0,0 +1,3 @@ +from uptrain_haystack.evaluator import UpTrainEvaluator, UpTrainEvaluatorOutput, UpTrainMetric + +__all__ = ("UpTrainEvaluator", "UpTrainMetric", "UpTrainEvaluatorOutput") diff --git a/integrations/uptrain/src/uptrain_haystack/evaluator.py b/integrations/uptrain/src/uptrain_haystack/evaluator.py new file mode 100644 index 000000000..a0e4f7f45 --- /dev/null +++ b/integrations/uptrain/src/uptrain_haystack/evaluator.py @@ -0,0 +1,194 @@ +import json +import os +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Union + +from haystack import DeserializationError, component, default_from_dict, default_to_dict +from uptrain import APIClient, EvalLLM, Evals +from uptrain.framework.evals import ParametricEval + +from uptrain_haystack.metrics import ( + METRIC_DESCRIPTORS, + InputConverters, + OutputConverters, + 
UpTrainMetric, + UpTrainMetricResult, +) + + +@component +class UpTrainEvaluator: + """ + A component that uses the UpTrain framework to evaluate inputs against a specific metric. + + The supported metrics are defined by :class:`UpTrainMetric`. The inputs of the component + metric-dependent. The output is a list of :class:`UpTrainEvaluatorOutput` objects, each + containing a single input and the result of the evaluation performed on it. + """ + + _backend_metric: Union[Evals, ParametricEval] + _backend_client: Union[APIClient, EvalLLM] + + def __init__( + self, + metric: Union[str, UpTrainMetric], + metric_params: Optional[Dict[str, Any]] = None, + *, + api: str = "openai", + api_key_env_var: Optional[str] = "OPENAI_API_KEY", + api_params: Optional[Dict[str, Any]] = None, + ): + """ + Construct a new UpTrain evaluator. + + :param metric: + The metric to use for evaluation. + :param metric_params: + Parameters to pass to the metric's constructor. + :param api: + The API to use for evaluation. + + Supported APIs: "openai", "uptrain". + :param api_key_env_var: + The name of the environment variable containing the API key. + :param api_params: + Additional parameters to pass to the API client. + """ + self.metric = metric if isinstance(metric, UpTrainMetric) else UpTrainMetric.from_str(metric) + self.metric_params = metric_params + self.descriptor = METRIC_DESCRIPTORS[self.metric] + self.api = api + self.api_key_env_var = api_key_env_var + self.api_params = api_params + + self._init_backend() + expected_inputs = self.descriptor.input_parameters + component.set_input_types(self, **expected_inputs) + + @component.output_types(output=List["UpTrainEvaluatorOutput"]) + def run(self, **inputs) -> Dict[str, Any]: + """ + Run the UpTrain evaluator. + + :param inputs: + The inputs to evaluate. Match the input parameters of the metric. + :returns: + A list of :class:`UpTrainEvaluatorOutput` objects, each containing + a single input and the result of the evaluation performed on it. + """ + # The backend requires random access to the data, so we can't stream it. + InputConverters.validate_input_parameters(self.metric, self.descriptor.input_parameters, inputs) + converted_inputs: List[Dict[str, str]] = list(self.descriptor.input_converter(**inputs)) # type: ignore + + eval_args = {"data": converted_inputs, "checks": [self._backend_metric]} + if self.api_params is not None: + eval_args.update({k: v for k, v in self.api_params.items() if k not in eval_args}) + + if isinstance(self._backend_client, EvalLLM): + results = self._backend_client.evaluate(**eval_args) + else: + results = self._backend_client.log_and_evaluate(**eval_args) + + OutputConverters.validate_outputs(results) + converted_results = [self.descriptor.output_converter(x, self.metric_params) for x in results] + output = [ + UpTrainEvaluatorOutput(input_item, result_item) + for input_item, result_item in zip(converted_inputs, converted_results) # type: ignore + ] + + return {"output": output} + + def to_dict(self) -> Dict[str, Any]: + """ + Serialize this component to a dictionary. 
+ """ + + def check_serializable(obj: Any): + try: + json.dumps(obj) + return True + except (TypeError, OverflowError): + return False + + if not check_serializable(self.api_params) or not check_serializable(self.metric_params): + msg = "UpTrain evaluator cannot serialize the API/metric parameters" + raise DeserializationError(msg) + + return default_to_dict( + self, + metric=self.metric, + metric_params=self.metric_params, + api=self.api, + api_key_env_var=self.api_key_env_var, + api_params=self.api_params, + ) + + @classmethod + def from_dict(cls, data: Dict[str, Any]) -> "UpTrainEvaluator": + """ + Deserialize a component from a dictionary. + + :param data: + The dictionary to deserialize from. + """ + return default_from_dict(cls, data) + + def _init_backend(self): + """ + Initialize the UpTrain backend. + """ + if isinstance(self.descriptor.backend, Evals): + if self.metric_params is not None: + msg = ( + f"Uptrain metric '{self.metric}' received the following unexpected init parameters:" + f"{self.metric_params}" + ) + raise ValueError(msg) + backend_metric = self.descriptor.backend + else: + assert issubclass(self.descriptor.backend, ParametricEval) + if self.metric_params is None: + msg = f"Uptrain metric '{self.metric}' expected init parameters but got none" + raise ValueError(msg) + elif not all(k in self.descriptor.init_parameters for k in self.metric_params.keys()): + msg = ( + f"Invalid init parameters for UpTrain metric '{self.metric}'. " + f"Expected: {list(self.descriptor.init_parameters.keys())}" + ) + + raise ValueError(msg) + backend_metric = self.descriptor.backend(**self.metric_params) + + supported_apis = ("openai", "uptrain") + if self.api not in supported_apis: + msg = f"Unsupported API '{self.api}' for UpTrain evaluator. Supported APIs: {supported_apis}" + raise ValueError(msg) + + api_key = os.environ.get(self.api_key_env_var) + if api_key is None: + msg = f"Missing API key environment variable '{self.api_key_env_var}' for UpTrain evaluator" + raise ValueError(msg) + + if self.api == "openai": + backend_client = EvalLLM(openai_api_key=api_key) + elif self.api == "uptrain": + backend_client = APIClient(uptrain_api_key=api_key) + + self._backend_metric = backend_metric + self._backend_client = backend_client + + +@dataclass(frozen=True) +class UpTrainEvaluatorOutput: + """ + Output of the UpTrain evaluator component. + + :param input: + The input that was evaluated. + :param result: + The result of the evaluation. Can contain + multiple results depending on the metric. + """ + + input: Dict[str, Any] + result: List[UpTrainMetricResult] diff --git a/integrations/uptrain/src/uptrain_haystack/metrics.py b/integrations/uptrain/src/uptrain_haystack/metrics.py new file mode 100644 index 000000000..ac1f6b2ad --- /dev/null +++ b/integrations/uptrain/src/uptrain_haystack/metrics.py @@ -0,0 +1,336 @@ +import inspect +from dataclasses import dataclass +from enum import Enum +from functools import partial +from typing import Any, Callable, Dict, Iterable, List, Optional, Type, Union + +from uptrain import CritiqueTone, Evals, GuidelineAdherence, ResponseMatching +from uptrain.framework.evals import ParametricEval + + +class UpTrainMetric(Enum): + """ + Metrics supported by UpTrain. 
+ """ + + CONTEXT_RELEVANCE = "context_relevance" + FACTUAL_ACCURACY = "factual_accuracy" + RESPONSE_RELEVANCE = "response_relevance" + RESPONSE_COMPLETENESS = "response_completeness" + RESPONSE_COMPLETENESS_WRT_CONTEXT = "response_completeness_wrt_context" + RESPONSE_CONSISTENCY = "response_consistency" + RESPONSE_CONCISENESS = "response_conciseness" + CRITIQUE_LANGUAGE = "critique_language" + CRITIQUE_TONE = "critique_tone" + GUIDELINE_ADHERENCE = "guideline_adherence" + RESPONSE_MATCHING = "response_matching" + + def __str__(self): + return self.value + + @classmethod + def from_str(cls, string: str) -> "UpTrainMetric": + """ + Create a metric type from a string. + + :param string: + The string to convert. + :returns: + The metric. + """ + enum_map = {e.value: e for e in UpTrainMetric} + metric = enum_map.get(string) + if metric is None: + msg = f"Unknown UpTrain metric '{string}'. Supported metrics: {list(enum_map.keys())}" + raise ValueError(msg) + return metric + + +@dataclass(frozen=True) +class UpTrainMetricResult: + """ + Result of a metric evaluation. + + :param name: + The name of the metric. + :param score: + The score of the metric. + :param explanation: + An optional explanation of the metric. + """ + + name: str + score: float + explanation: Optional[str] = None + + +@dataclass(frozen=True) +class MetricDescriptor: + """ + Descriptor for a metric. + + :param metric: + The metric. + :param backend: + The associated UpTrain metric class. + :param input_parameters: + Parameters accepted by the metric. This is used + to set the input types of the evaluator component. + :param input_converter: + Callable that converts input parameters to the UpTrain input format. + :param output_converter: + Callable that converts the UpTrain output format to our output format. + :param init_parameters: + Additional parameters that need to be passed to the metric class during initialization. + """ + + metric: UpTrainMetric + backend: Union[Evals, Type[ParametricEval]] + input_parameters: Dict[str, Type] + input_converter: Callable[[Any], Iterable[Dict[str, str]]] + output_converter: Callable[[Dict[str, Any], Optional[Dict[str, Any]]], List[UpTrainMetricResult]] + init_parameters: Optional[Dict[str, Type[Any]]] = None + + @classmethod + def new( + cls, + metric: UpTrainMetric, + backend: Union[Evals, Type[ParametricEval]], + input_converter: Callable[[Any], Iterable[Dict[str, str]]], + output_converter: Optional[ + Callable[[Dict[str, Any], Optional[Dict[str, Any]]], List[UpTrainMetricResult]] + ] = None, + *, + init_parameters: Optional[Dict[str, Type]] = None, + ) -> "MetricDescriptor": + input_converter_signature = inspect.signature(input_converter) + input_parameters = {} + for name, param in input_converter_signature.parameters.items(): + if name in ("cls", "self"): + continue + elif param.kind not in (inspect.Parameter.KEYWORD_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD): + continue + input_parameters[name] = param.annotation + + return cls( + metric=metric, + backend=backend, + input_parameters=input_parameters, + input_converter=input_converter, + output_converter=output_converter if output_converter is not None else OutputConverters.default(metric), + init_parameters=init_parameters, + ) + + +class InputConverters: + """ + Converters for input parameters. + + The signature of the converter functions serves as the ground-truth of the + expected input parameters of a given metric. 
They are also responsible for validating + the input parameters and converting them to the format expected by UpTrain. + """ + + @staticmethod + def _validate_input_elements(**kwargs): + for k, collection in kwargs.items(): + if not isinstance(collection, list): + msg = ( + f"UpTrain evaluator expected input '{k}' to be a collection of type 'list', " + f"got '{type(collection).__name__}' instead" + ) + raise ValueError(msg) + elif not all(isinstance(x, str) for x in collection): + msg = f"UpTrain evaluator expects inputs to be of type 'str' in '{k}'" + raise ValueError(msg) + + same_length = len({len(x) for x in kwargs.values()}) == 1 + if not same_length: + msg = f"Mismatching counts in the following inputs: {({k: len(v) for k, v in kwargs.items()})}" + raise ValueError(msg) + + @staticmethod + def validate_input_parameters(metric: UpTrainMetric, expected: Dict[str, Any], received: Dict[str, Any]): + for param, _ in expected.items(): + if param not in received: + msg = f"UpTrain evaluator expected input parameter '{param}' for metric '{metric}'" + raise ValueError(msg) + + @staticmethod + def question_context_response( + questions: List[str], contexts: List[str], responses: List[str] + ) -> Iterable[Dict[str, str]]: + InputConverters._validate_input_elements(questions=questions, contexts=contexts, responses=responses) + for q, c, r in zip(questions, contexts, responses): # type: ignore + yield {"question": q, "context": c, "response": r} + + @staticmethod + def question_context( + questions: List[str], + contexts: List[str], + ) -> Iterable[Dict[str, str]]: + InputConverters._validate_input_elements(questions=questions, contexts=contexts) + for q, c in zip(questions, contexts): # type: ignore + yield {"question": q, "context": c} + + @staticmethod + def question_response( + questions: List[str], + responses: List[str], + ) -> Iterable[Dict[str, str]]: + InputConverters._validate_input_elements(questions=questions, responses=responses) + for q, r in zip(questions, responses): # type: ignore + yield {"question": q, "response": r} + + @staticmethod + def response( + responses: List[str], + ) -> Iterable[Dict[str, str]]: + InputConverters._validate_input_elements(responses=responses) + for r in responses: + yield {"response": r} + + @staticmethod + def response_ground_truth( + responses: List[str], + ground_truths: List[str], + ) -> Iterable[Dict[str, str]]: + InputConverters._validate_input_elements(ground_truths=ground_truths, responses=responses) + for r, gt in zip(responses, ground_truths): # type: ignore + yield {"response": r, "ground_truth": gt} + + +class OutputConverters: + """ + Converters for results returned by UpTrain. + + They are responsible for converting the results to our output format. 
+ """ + + @staticmethod + def validate_outputs(outputs: List[Dict[str, Any]]): + msg = None + if not isinstance(outputs, list): + msg = f"Expected response from UpTrain evaluator to be a 'list', got '{type(outputs).__name__}'" + elif not all(isinstance(x, dict) for x in outputs): + msg = "UpTrain evaluator expects outputs to be a list of `dict`s" + elif not all(isinstance(y, str) for x in outputs for y in x.keys()): + msg = "UpTrain evaluator expects keys in the output dicts to be `str`" + elif not all(isinstance(y, (float, str)) for x in outputs for y in x.values()): + msg = "UpTrain evaluator expects values in the output dicts to be either `str` or `float`" + + if msg is not None: + raise ValueError(msg) + + @staticmethod + def _extract_default_results(output: Dict[str, Any], metric_name: str) -> UpTrainMetricResult: + try: + score_key = f"score_{metric_name}" + explanation_key = f"explanation_{metric_name}" + return UpTrainMetricResult( + name=metric_name, score=output[score_key], explanation=output.get(explanation_key) + ) + except KeyError as e: + msg = f"UpTrain evaluator did not return an expected output for metric '{metric_name}'" + raise ValueError(msg) from e + + @staticmethod + def default( + metric: UpTrainMetric, + ) -> Callable[[Dict[str, Any], Optional[Dict[str, Any]]], List[UpTrainMetricResult]]: + def inner( + output: Dict[str, Any], metric_params: Optional[Dict[str, Any]], metric: UpTrainMetric # noqa: ARG001 + ) -> List[UpTrainMetricResult]: + return [OutputConverters._extract_default_results(output, str(metric))] + + return partial(inner, metric=metric) + + @staticmethod + def critique_language( + output: Dict[str, Any], metric_params: Optional[Dict[str, Any]] # noqa: ARG004 + ) -> List[UpTrainMetricResult]: + out = [] + for expected_key in ("fluency", "coherence", "grammar", "politeness"): + out.append(OutputConverters._extract_default_results(output, expected_key)) + return out + + @staticmethod + def critique_tone( + output: Dict[str, Any], metric_params: Optional[Dict[str, Any]] # noqa: ARG004 + ) -> List[UpTrainMetricResult]: + return [OutputConverters._extract_default_results(output, "tone")] + + @staticmethod + def guideline_adherence( + output: Dict[str, Any], metric_params: Optional[Dict[str, Any]] + ) -> List[UpTrainMetricResult]: + assert metric_params is not None + return [OutputConverters._extract_default_results(output, f'{metric_params["guideline_name"]}_adherence')] + + @staticmethod + def response_matching( + output: Dict[str, Any], metric_params: Optional[Dict[str, Any]] # noqa: ARG004 + ) -> List[UpTrainMetricResult]: + metric_str = "response_match" + out = [OutputConverters._extract_default_results(output, metric_str)] + + # Enumerate other relevant keys. 
+ score_key = f"score_{metric_str}" + for k, v in output.items(): + if k != score_key and metric_str in k and isinstance(v, float): + out.append(UpTrainMetricResult(name=k, score=v)) + return out + + +METRIC_DESCRIPTORS = { + UpTrainMetric.CONTEXT_RELEVANCE: MetricDescriptor.new( + UpTrainMetric.CONTEXT_RELEVANCE, Evals.CONTEXT_RELEVANCE, InputConverters.question_context # type: ignore + ), + UpTrainMetric.FACTUAL_ACCURACY: MetricDescriptor.new( + UpTrainMetric.FACTUAL_ACCURACY, Evals.FACTUAL_ACCURACY, InputConverters.question_context_response # type: ignore + ), + UpTrainMetric.RESPONSE_RELEVANCE: MetricDescriptor.new( + UpTrainMetric.RESPONSE_RELEVANCE, Evals.RESPONSE_RELEVANCE, InputConverters.question_response # type: ignore + ), + UpTrainMetric.RESPONSE_COMPLETENESS: MetricDescriptor.new( + UpTrainMetric.RESPONSE_COMPLETENESS, Evals.RESPONSE_COMPLETENESS, InputConverters.question_response # type: ignore + ), + UpTrainMetric.RESPONSE_COMPLETENESS_WRT_CONTEXT: MetricDescriptor.new( + UpTrainMetric.RESPONSE_COMPLETENESS_WRT_CONTEXT, + Evals.RESPONSE_COMPLETENESS_WRT_CONTEXT, + InputConverters.question_context_response, # type: ignore + ), + UpTrainMetric.RESPONSE_CONSISTENCY: MetricDescriptor.new( + UpTrainMetric.RESPONSE_CONSISTENCY, Evals.RESPONSE_CONSISTENCY, InputConverters.question_context_response # type: ignore + ), + UpTrainMetric.RESPONSE_CONCISENESS: MetricDescriptor.new( + UpTrainMetric.RESPONSE_CONCISENESS, Evals.RESPONSE_CONCISENESS, InputConverters.question_response # type: ignore + ), + UpTrainMetric.CRITIQUE_LANGUAGE: MetricDescriptor.new( + UpTrainMetric.CRITIQUE_LANGUAGE, + Evals.CRITIQUE_LANGUAGE, + InputConverters.response, + OutputConverters.critique_language, + ), + UpTrainMetric.CRITIQUE_TONE: MetricDescriptor.new( + UpTrainMetric.CRITIQUE_TONE, + CritiqueTone, + InputConverters.response, + OutputConverters.critique_tone, + init_parameters={"llm_persona": str}, + ), + UpTrainMetric.GUIDELINE_ADHERENCE: MetricDescriptor.new( + UpTrainMetric.GUIDELINE_ADHERENCE, + GuidelineAdherence, + InputConverters.question_response, # type: ignore + OutputConverters.guideline_adherence, + init_parameters={"guideline": str, "guideline_name": str, "response_schema": Optional[str]}, # type: ignore + ), + UpTrainMetric.RESPONSE_MATCHING: MetricDescriptor.new( + UpTrainMetric.RESPONSE_MATCHING, + ResponseMatching, + InputConverters.response_ground_truth, # type: ignore + OutputConverters.response_matching, + init_parameters={"method": Optional[str]}, # type: ignore + ), +} diff --git a/integrations/uptrain/tests/__init__.py b/integrations/uptrain/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/integrations/uptrain/tests/test_evaluator.py b/integrations/uptrain/tests/test_evaluator.py new file mode 100644 index 000000000..14101c5f4 --- /dev/null +++ b/integrations/uptrain/tests/test_evaluator.py @@ -0,0 +1,372 @@ +import copy +import os +from dataclasses import dataclass +from typing import List +from unittest.mock import patch + +import pytest +from haystack import DeserializationError + +from uptrain_haystack import UpTrainEvaluator, UpTrainMetric + +DEFAULT_QUESTIONS = [ + "Which is the most popular global sport?", + "Who created the Python language?", +] +DEFAULT_CONTEXTS = [ + "The popularity of sports can be measured in various ways, including TV viewership, social media presence, number of participants, and economic impact. 
Football is undoubtedly the world's most popular sport with major events like the FIFA World Cup and sports personalities like Ronaldo and Messi, drawing a followership of more than 4 billion people.", + "Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming language. Its design philosophy emphasizes code readability, and its language constructs aim to help programmers write clear, logical code for both small and large-scale software projects.", +] +DEFAULT_RESPONSES = [ + "Football is the most popular sport with around 4 billion followers worldwide", + "Python language was created by Guido van Rossum.", +] + + +@dataclass(frozen=True) +class Unserializable: + something: str + + +class MockBackend: + def __init__(self, metric_outputs: List[UpTrainMetric]) -> None: + self.metrics = metric_outputs + if not self.metrics: + self.metrics = [e for e in UpTrainMetric] + + def log_and_evaluate(self, data, checks, **kwargs): + output_map = { + UpTrainMetric.CONTEXT_RELEVANCE: { + "score_context_relevance": 0.5, + "explanation_context_relevance": "1", + }, + UpTrainMetric.FACTUAL_ACCURACY: { + "score_factual_accuracy": 1.0, + "explanation_factual_accuracy": "2", + }, + UpTrainMetric.RESPONSE_RELEVANCE: { + "score_response_relevance": 1.0, + "explanation_response_relevance": "3", + }, + UpTrainMetric.RESPONSE_COMPLETENESS: { + "score_response_completeness": 0.5, + "explanation_response_completeness": "4", + }, + UpTrainMetric.RESPONSE_COMPLETENESS_WRT_CONTEXT: { + "score_response_completeness_wrt_context": 1.0, + "explanation_response_completeness_wrt_context": "5", + }, + UpTrainMetric.RESPONSE_CONSISTENCY: { + "score_response_consistency": 0.9, + "explanation_response_consistency": "6", + }, + UpTrainMetric.RESPONSE_CONCISENESS: { + "score_response_conciseness": 1.0, + "explanation_response_conciseness": "7", + }, + UpTrainMetric.CRITIQUE_LANGUAGE: { + "score_fluency": 1.0, + "score_coherence": 1.0, + "score_grammar": 1.0, + "score_politeness": 1.0, + "explanation_fluency": "8", + "explanation_coherence": "9", + "explanation_grammar": "10", + "explanation_politeness": "11", + }, + UpTrainMetric.CRITIQUE_TONE: { + "score_tone": 0.4, + "explanation_tone": "12", + }, + UpTrainMetric.GUIDELINE_ADHERENCE: { + "score_guideline_adherence": 1.0, + "explanation_guideline_adherence": "13", + }, + UpTrainMetric.RESPONSE_MATCHING: { + "response_match_precision": 1.0, + "response_match_recall": 0.6666666666666666, + "score_response_match": 0.7272727272727273, + }, + } + + data = copy.deepcopy(data) + for x in data: + for m in self.metrics: + x.update(output_map[m]) + return data + + +@patch("os.environ.get") +def test_evaluator_api(os_environ_get): + api_key_var = "test-api-key" + os_environ_get.return_value = api_key_var + + eval = UpTrainEvaluator(UpTrainMetric.RESPONSE_COMPLETENESS) + assert eval.api == "openai" + assert eval.api_key_env_var == "OPENAI_API_KEY" + + eval = UpTrainEvaluator(UpTrainMetric.RESPONSE_COMPLETENESS, api="uptrain", api_key_env_var="UPTRAIN_API_KEY") + assert eval.api == "uptrain" + assert eval.api_key_env_var == "UPTRAIN_API_KEY" + + with pytest.raises(ValueError, match="Unsupported API"): + UpTrainEvaluator(UpTrainMetric.CONTEXT_RELEVANCE, api="cohere") + + os_environ_get.return_value = None + with pytest.raises(ValueError, match="Missing API key"): + UpTrainEvaluator(UpTrainMetric.CONTEXT_RELEVANCE, api="uptrain") + + +@patch("os.environ.get") +def test_evaluator_metric_init_params(os_environ_get): + api_key = "test-api-key" + 
os_environ_get.return_value = api_key + + eval = UpTrainEvaluator(UpTrainMetric.CRITIQUE_TONE, metric_params={"llm_persona": "village idiot"}) + assert eval._backend_metric.llm_persona == "village idiot" + + with pytest.raises(ValueError, match="Invalid init parameters"): + UpTrainEvaluator(UpTrainMetric.CRITIQUE_TONE, metric_params={"role": "village idiot"}) + + with pytest.raises(ValueError, match="unexpected init parameters"): + UpTrainEvaluator(UpTrainMetric.FACTUAL_ACCURACY, metric_params={"check_numbers": True}) + + with pytest.raises(ValueError, match="expected init parameters"): + UpTrainEvaluator(UpTrainMetric.RESPONSE_MATCHING) + + +@patch("os.environ.get") +def test_evaluator_serde(os_environ_get): + os_environ_get.return_value = "abacab" + + init_params = { + "metric": UpTrainMetric.RESPONSE_MATCHING, + "metric_params": {"method": "rouge"}, + "api": "uptrain", + "api_key_env_var": "abacab", + "api_params": {"eval_name": "test"}, + } + eval = UpTrainEvaluator(**init_params) + serde_data = eval.to_dict() + new_eval = UpTrainEvaluator.from_dict(serde_data) + + assert eval.metric == new_eval.metric + assert eval.api == new_eval.api + assert eval.api_key_env_var == new_eval.api_key_env_var + assert eval.metric_params == new_eval.metric_params + assert eval.api_params == new_eval.api_params + assert type(new_eval._backend_client) == type(eval._backend_client) + assert type(new_eval._backend_metric) == type(eval._backend_metric) + + with pytest.raises(DeserializationError, match=r"cannot serialize the API/metric parameters"): + init_params3 = copy.deepcopy(init_params) + init_params3["api_params"] = {"arg": Unserializable("")} + eval = UpTrainEvaluator(**init_params3) + eval.to_dict() + + +@pytest.mark.parametrize( + "metric, inputs, params", + [ + (UpTrainMetric.CONTEXT_RELEVANCE, {"questions": [], "contexts": []}, None), + (UpTrainMetric.FACTUAL_ACCURACY, {"questions": [], "contexts": [], "responses": []}, None), + (UpTrainMetric.RESPONSE_RELEVANCE, {"questions": [], "responses": []}, None), + (UpTrainMetric.RESPONSE_COMPLETENESS, {"questions": [], "responses": []}, None), + (UpTrainMetric.RESPONSE_COMPLETENESS_WRT_CONTEXT, {"questions": [], "contexts": [], "responses": []}, None), + (UpTrainMetric.RESPONSE_CONSISTENCY, {"questions": [], "contexts": [], "responses": []}, None), + (UpTrainMetric.RESPONSE_CONCISENESS, {"questions": [], "responses": []}, None), + (UpTrainMetric.CRITIQUE_LANGUAGE, {"responses": []}, None), + (UpTrainMetric.CRITIQUE_TONE, {"responses": []}, {"llm_persona": "idiot"}), + ( + UpTrainMetric.GUIDELINE_ADHERENCE, + {"questions": [], "responses": []}, + {"guideline": "Do nothing", "guideline_name": "somename", "response_schema": None}, + ), + (UpTrainMetric.RESPONSE_MATCHING, {"ground_truths": [], "responses": []}, {"method": "llm"}), + ], +) +@patch("os.environ.get") +def test_evaluator_valid_inputs(os_environ_get, metric, inputs, params): + os_environ_get.return_value = "abacab" + init_params = { + "metric": metric, + "metric_params": params, + "api": "uptrain", + "api_key_env_var": "abacab", + "api_params": None, + } + eval = UpTrainEvaluator(**init_params) + eval._backend_client = MockBackend([metric]) + output = eval.run(**inputs) + + +@pytest.mark.parametrize( + "metric, inputs, error_string, params", + [ + (UpTrainMetric.CONTEXT_RELEVANCE, {"questions": {}, "contexts": []}, "to be a collection of type 'list'", None), + ( + UpTrainMetric.FACTUAL_ACCURACY, + {"questions": [1], "contexts": [2], "responses": [3]}, + "expects inputs to be of type 'str'", + 
None, + ), + (UpTrainMetric.RESPONSE_RELEVANCE, {"questions": [""], "responses": []}, "Mismatching counts ", None), + (UpTrainMetric.RESPONSE_RELEVANCE, {"responses": []}, "expected input parameter ", None), + ], +) +@patch("os.environ.get") +def test_evaluator_invalid_inputs(os_environ_get, metric, inputs, error_string, params): + os_environ_get.return_value = "abacab" + with pytest.raises(ValueError, match=error_string): + init_params = { + "metric": metric, + "metric_params": params, + "api": "uptrain", + "api_key_env_var": "abacab", + "api_params": None, + } + eval = UpTrainEvaluator(**init_params) + eval._backend_client = MockBackend([metric]) + output = eval.run(**inputs) + + +@pytest.mark.parametrize( + "metric, inputs, outputs, params", + [ + (UpTrainMetric.CONTEXT_RELEVANCE, {"questions": ["q1"], "contexts": ["c1"]}, [[(None, 0.5, "1")]], None), + ( + UpTrainMetric.FACTUAL_ACCURACY, + {"questions": ["q2"], "contexts": ["c2"], "responses": ["r2"]}, + [[(None, 1.0, "2")]], + None, + ), + (UpTrainMetric.RESPONSE_RELEVANCE, {"questions": ["q3"], "responses": ["r3"]}, [[(None, 1.0, "3")]], None), + (UpTrainMetric.RESPONSE_COMPLETENESS, {"questions": ["q4"], "responses": ["r4"]}, [[(None, 0.5, "4")]], None), + ( + UpTrainMetric.RESPONSE_COMPLETENESS_WRT_CONTEXT, + {"questions": ["q5"], "contexts": ["c5"], "responses": ["r5"]}, + [[(None, 1.0, "5")]], + None, + ), + ( + UpTrainMetric.RESPONSE_CONSISTENCY, + {"questions": ["q6"], "contexts": ["c6"], "responses": ["r6"]}, + [[(None, 0.9, "6")]], + None, + ), + (UpTrainMetric.RESPONSE_CONCISENESS, {"questions": ["q7"], "responses": ["r7"]}, [[(None, 1.0, "7")]], None), + ( + UpTrainMetric.CRITIQUE_LANGUAGE, + {"responses": ["r8"]}, + [ + [ + ("fluency", 1.0, "8"), + ("coherence", 1.0, "9"), + ("grammar", 1.0, "10"), + ("politeness", 1.0, "11"), + ] + ], + None, + ), + (UpTrainMetric.CRITIQUE_TONE, {"responses": ["r9"]}, [[("tone", 0.4, "12")]], {"llm_persona": "idiot"}), + ( + UpTrainMetric.GUIDELINE_ADHERENCE, + {"questions": ["q10"], "responses": ["r10"]}, + [[(None, 1.0, "13")]], + {"guideline": "Do nothing", "guideline_name": "guideline", "response_schema": None}, + ), + ( + UpTrainMetric.RESPONSE_MATCHING, + {"ground_truths": ["g11"], "responses": ["r11"]}, + [ + [ + ("response_match_precision", 1.0, None), + ("response_match_recall", 0.6666666666666666, None), + ("response_match", 0.7272727272727273, None), + ] + ], + {"method": "llm"}, + ), + ], +) +@patch("os.environ.get") +def test_evaluator_outputs(os_environ_get, metric, inputs, outputs, params): + os_environ_get.return_value = "abacab" + init_params = { + "metric": metric, + "metric_params": params, + "api": "uptrain", + "api_key_env_var": "abacab", + "api_params": None, + } + eval = UpTrainEvaluator(**init_params) + eval._backend_client = MockBackend([metric]) + results = eval.run(**inputs)["output"] + + assert type(results) == type(outputs) + assert len(results) == len(outputs) + + for r, o in zip(results, outputs): + assert len(r.result) == len(o) + + expected = {(name if name is not None else str(metric), score, exp) for name, score, exp in o} + got = {(x.name, x.score, x.explanation) for x in r.result} + assert got == expected + + +@pytest.mark.skipif("OPENAI_API_KEY" not in os.environ, reason="OPENAI_API_KEY not set") +@pytest.mark.parametrize( + "metric, inputs, params", + [ + (UpTrainMetric.CONTEXT_RELEVANCE, {"questions": DEFAULT_QUESTIONS, "contexts": DEFAULT_CONTEXTS}, None), + ( + UpTrainMetric.FACTUAL_ACCURACY, + {"questions": DEFAULT_QUESTIONS, "contexts": 
DEFAULT_CONTEXTS, "responses": DEFAULT_RESPONSES}, + None, + ), + (UpTrainMetric.RESPONSE_RELEVANCE, {"questions": DEFAULT_QUESTIONS, "responses": DEFAULT_RESPONSES}, None), + (UpTrainMetric.RESPONSE_COMPLETENESS, {"questions": DEFAULT_QUESTIONS, "responses": DEFAULT_RESPONSES}, None), + ( + UpTrainMetric.RESPONSE_COMPLETENESS_WRT_CONTEXT, + {"questions": DEFAULT_QUESTIONS, "contexts": DEFAULT_CONTEXTS, "responses": DEFAULT_RESPONSES}, + None, + ), + ( + UpTrainMetric.RESPONSE_CONSISTENCY, + {"questions": DEFAULT_QUESTIONS, "contexts": DEFAULT_CONTEXTS, "responses": DEFAULT_RESPONSES}, + None, + ), + (UpTrainMetric.RESPONSE_CONCISENESS, {"questions": DEFAULT_QUESTIONS, "responses": DEFAULT_RESPONSES}, None), + (UpTrainMetric.CRITIQUE_LANGUAGE, {"responses": DEFAULT_RESPONSES}, None), + (UpTrainMetric.CRITIQUE_TONE, {"responses": DEFAULT_RESPONSES}, {"llm_persona": "idiot"}), + ( + UpTrainMetric.GUIDELINE_ADHERENCE, + {"questions": DEFAULT_QUESTIONS, "responses": DEFAULT_RESPONSES}, + {"guideline": "Do nothing", "guideline_name": "somename", "response_schema": None}, + ), + ( + UpTrainMetric.RESPONSE_MATCHING, + { + "ground_truths": [ + "Consumerism is the most popular sport in the world", + "Python language was created by some dude.", + ], + "responses": DEFAULT_RESPONSES, + }, + {"method": "llm"}, + ), + ], +) +def test_integration_run(metric, inputs, params): + init_params = { + "metric": metric, + "metric_params": params, + "api": "openai", + } + eval = UpTrainEvaluator(**init_params) + output = eval.run(**inputs) + + assert type(output) == dict + assert len(output) == 1 + assert "output" in output + assert len(output["output"]) == len(next(iter(inputs.values()))) diff --git a/integrations/uptrain/tests/test_metrics.py b/integrations/uptrain/tests/test_metrics.py new file mode 100644 index 000000000..1dddb651e --- /dev/null +++ b/integrations/uptrain/tests/test_metrics.py @@ -0,0 +1,11 @@ +import pytest + +from uptrain_haystack import UpTrainMetric + + +def test_uptrain_metric(): + for e in UpTrainMetric: + assert e == UpTrainMetric.from_str(e.value) + + with pytest.raises(ValueError, match="Unknown UpTrain metric"): + UpTrainMetric.from_str("smugness") From 554bd1884b92c8d1714d010aea7b6e09c02fc967 Mon Sep 17 00:00:00 2001 From: shadeMe Date: Fri, 26 Jan 2024 14:24:44 +0100 Subject: [PATCH 2/4] Address review comments Update project structure to use the `haystack_integrations` namespace --- integrations/uptrain/README.md | 7 ++ integrations/uptrain/example/example.py | 32 +++++++++ integrations/uptrain/pyproject.toml | 28 +++++--- .../components/evaluators/__init__.py | 7 ++ .../components/evaluators}/evaluator.py | 67 ++++++++++--------- .../components/evaluators}/metrics.py | 66 +++++++++++++----- .../uptrain/src/uptrain_haystack/__init__.py | 3 - integrations/uptrain/tests/test_evaluator.py | 38 ++++++----- integrations/uptrain/tests/test_metrics.py | 2 +- 9 files changed, 173 insertions(+), 77 deletions(-) create mode 100644 integrations/uptrain/example/example.py create mode 100644 integrations/uptrain/src/haystack_integrations/components/evaluators/__init__.py rename integrations/uptrain/src/{uptrain_haystack => haystack_integrations/components/evaluators}/evaluator.py (79%) rename integrations/uptrain/src/{uptrain_haystack => haystack_integrations/components/evaluators}/metrics.py (87%) delete mode 100644 integrations/uptrain/src/uptrain_haystack/__init__.py diff --git a/integrations/uptrain/README.md b/integrations/uptrain/README.md index 96caded09..6d7605306 100644 --- 
a/integrations/uptrain/README.md +++ b/integrations/uptrain/README.md @@ -10,6 +10,7 @@ - [uptrain-haystack](#uptrain-haystack) - [Installation](#installation) - [Testing](#testing) + - [Examples](#examples) - [License](#license) ## Installation @@ -18,12 +19,18 @@ pip install uptrain-haystack ``` +For more information about the UpTrain evaluation framework, please refer to their [documentation](https://docs.uptrain.ai/getting-started/introduction). + ## Testing ```console hatch run test ``` +## Examples + +You can find a code example showing how to use the Evaluator under the `example/` folder of this repo. + ## License `uptrain-haystack` is distributed under the terms of the [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html) license. diff --git a/integrations/uptrain/example/example.py b/integrations/uptrain/example/example.py new file mode 100644 index 000000000..b029b9a65 --- /dev/null +++ b/integrations/uptrain/example/example.py @@ -0,0 +1,32 @@ +# A valid OpenAI API key is required to run this example. + +from haystack import Pipeline +from haystack_integrations.components.evaluators import UpTrainEvaluator, UpTrainMetric + +QUESTIONS = [ + "Which is the most popular global sport?", + "Who created the Python language?", +] +CONTEXTS = [ + "The popularity of sports can be measured in various ways, including TV viewership, social media presence, number of participants, and economic impact. Football is undoubtedly the world's most popular sport with major events like the FIFA World Cup and sports personalities like Ronaldo and Messi, drawing a followership of more than 4 billion people.", + "Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming language. Its design philosophy emphasizes code readability, and its language constructs aim to help programmers write clear, logical code for both small and large-scale software projects.", +] +RESPONSES = [ + "Football is the most popular sport with around 4 billion followers worldwide", + "Python language was created by Guido van Rossum.", +] + +pipeline = Pipeline() +evaluator = UpTrainEvaluator( + metric=UpTrainMetric.FACTUAL_ACCURACY, + api="openai", + api_key_env_var="OPENAI_API_KEY", +) +pipeline.add_component("evaluator", evaluator) + +# Each metric expects a specific set of parameters as input. Refer to the +# UpTrainMetric class' documentation for more details. 
+output = pipeline.run({"evaluator": {"questions": QUESTIONS, "contexts": CONTEXTS, "responses": RESPONSES}})
+
+for result in output["evaluator"]["results"]:
+    print(result)
diff --git a/integrations/uptrain/pyproject.toml b/integrations/uptrain/pyproject.toml
index 7f4c4fd98..631b7dab8 100644
--- a/integrations/uptrain/pyproject.toml
+++ b/integrations/uptrain/pyproject.toml
@@ -25,10 +25,13 @@ classifiers = [
 dependencies = ["haystack-ai", "uptrain>=0.5"]

 [project.urls]
-Source = "https://github.com/deepset-ai/haystack-core-integrations"
+Source = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/uptrain"
 Documentation = "https://github.com/deepset-ai/haystack-core-integrations/blob/main/integrations/uptrain/README.md"
 Issues = "https://github.com/deepset-ai/haystack-core-integrations/issues"

+[tool.hatch.build.targets.wheel]
+packages = ["src/haystack_integrations"]
+
 [tool.hatch.version]
 source = "vcs"
 tag-pattern = 'integrations\/uptrain(?P<version>.*)'
@@ -52,7 +55,7 @@ python = ["3.7", "3.8", "3.9", "3.10", "3.11"]
 detached = true
 dependencies = ["black>=23.1.0", "mypy>=1.0.0", "ruff>=0.0.243"]
 [tool.hatch.envs.lint.scripts]
-typing = "mypy --install-types --non-interactive {args:src/uptrain_haystack tests}"
+typing = "mypy --install-types --non-interactive {args:src/}"
 style = ["ruff {args:.}", "black --check --diff {args:.}"]
 fmt = ["black {args:.}", "ruff --fix {args:.}", "style"]
 all = ["style", "typing"]
@@ -109,15 +112,16 @@ ignore = [
   "PLR0915",
   # Misc
   "S101",
+  "TID252",
 ]
 unfixable = [
   # Don't touch unused imports
   "F401",
 ]
-extend-exclude = ["tests"]
+extend-exclude = ["tests", "example"]

 [tool.ruff.isort]
-known-first-party = ["uptrain_haystack"]
+known-first-party = ["src"]

 [tool.ruff.flake8-tidy-imports]
 ban-relative-imports = "all"
@@ -127,21 +131,27 @@ ban-relative-imports = "all"
 "tests/**/*" = ["PLR2004", "S101", "TID252"]

 [tool.coverage.run]
-source_pkgs = ["uptrain_haystack", "tests"]
+source_pkgs = ["src", "tests"]
 branch = true
 parallel = true
-omit = ["src/uptrain_haystack/__about__.py"]

 [tool.coverage.paths]
 uptrain_haystack = [
-  "src/uptrain_haystack",
+  "src/haystack_integrations",
   "*/uptrain-haystack/src/uptrain_haystack",
 ]
-tests = ["tests", "*uptrain-haystack/tests"]
+tests = ["tests"]

 [tool.coverage.report]
 exclude_lines = ["no cov", "if __name__ == .__main__.:", "if TYPE_CHECKING:"]

 [[tool.mypy.overrides]]
-module = ["haystack.*", "pytest.*", "uptrain.*", "numpy", "grpc"]
+module = [
+  "haystack.*",
+  "pytest.*",
+  "uptrain.*",
+  "numpy",
+  "grpc",
+  "haystack_integrations.*",
+]
 ignore_missing_imports = true
diff --git a/integrations/uptrain/src/haystack_integrations/components/evaluators/__init__.py b/integrations/uptrain/src/haystack_integrations/components/evaluators/__init__.py
new file mode 100644
index 000000000..e8366dfc0
--- /dev/null
+++ b/integrations/uptrain/src/haystack_integrations/components/evaluators/__init__.py
@@ -0,0 +1,7 @@
+from .evaluator import UpTrainEvaluator
+from .metrics import UpTrainMetric
+
+__all__ = (
+    "UpTrainEvaluator",
+    "UpTrainMetric",
+)
diff --git a/integrations/uptrain/src/uptrain_haystack/evaluator.py b/integrations/uptrain/src/haystack_integrations/components/evaluators/evaluator.py
similarity index 79%
rename from integrations/uptrain/src/uptrain_haystack/evaluator.py
rename to integrations/uptrain/src/haystack_integrations/components/evaluators/evaluator.py
index a0e4f7f45..f99ec8105 100644
--- a/integrations/uptrain/src/uptrain_haystack/evaluator.py
+++ b/integrations/uptrain/src/haystack_integrations/components/evaluators/evaluator.py
@@ -1,19 +1,16 @@
 import json
 import os
-from dataclasses import dataclass
 from typing import Any, Dict, List, Optional, Union

 from haystack import DeserializationError, component, default_from_dict, default_to_dict
-from uptrain import APIClient, EvalLLM, Evals
-from uptrain.framework.evals import ParametricEval
-
-from uptrain_haystack.metrics import (
+from haystack_integrations.components.evaluators.metrics import (
     METRIC_DESCRIPTORS,
     InputConverters,
     OutputConverters,
     UpTrainMetric,
-    UpTrainMetricResult,
 )
+from uptrain import APIClient, EvalLLM, Evals
+from uptrain.framework.evals import ParametricEval


 @component
@@ -65,16 +62,41 @@ def __init__(
         expected_inputs = self.descriptor.input_parameters
         component.set_input_types(self, **expected_inputs)

-    @component.output_types(output=List["UpTrainEvaluatorOutput"])
+    @component.output_types(results=List[List[Dict[str, Any]]])
     def run(self, **inputs) -> Dict[str, Any]:
         """
         Run the UpTrain evaluator.

+        Example:
+        ```python
+        pipeline = Pipeline()
+        evaluator = UpTrainEvaluator(
+            metric=UpTrainMetric.FACTUAL_ACCURACY,
+            api="openai",
+            api_key_env_var="OPENAI_API_KEY",
+        )
+        pipeline.add_component("evaluator", evaluator)
+
+        # Each metric expects a specific set of parameters as input. Refer to the
+        # UpTrainMetric class' documentation for more details.
+        output = pipeline.run({"evaluator": {
+            "questions": ["question"],
+            "contexts": ["context"],
+            "responses": ["response"]
+        }})
+        ```
+
         :param inputs:
-            The inputs to evaluate. Match the input parameters of the metric.
+            The inputs to evaluate. These are determined by the
+            metric being calculated. See :class:`UpTrainMetric` for more
+            information.
         :returns:
-            A list of :class:`UpTrainEvaluatorOutput` objects, each containing
-            a single input and the result of the evaluation performed on it.
+            A nested list of metric results. Each input can have one or more
+            results, depending on the metric. Each result is a dictionary
+            containing the following keys and values:
+                * `name` - The name of the metric.
+                * `score` - The score of the metric.
+                * `explanation` - An optional explanation of the score.
         """
         # The backend requires random access to the data, so we can't stream it.
         InputConverters.validate_input_parameters(self.metric, self.descriptor.input_parameters, inputs)
@@ -84,19 +106,18 @@ def run(self, **inputs) -> Dict[str, Any]:
         if self.api_params is not None:
             eval_args.update({k: v for k, v in self.api_params.items() if k not in eval_args})

+        results: List[Dict[str, Any]]
         if isinstance(self._backend_client, EvalLLM):
             results = self._backend_client.evaluate(**eval_args)
         else:
             results = self._backend_client.log_and_evaluate(**eval_args)

         OutputConverters.validate_outputs(results)
-        converted_results = [self.descriptor.output_converter(x, self.metric_params) for x in results]
-        output = [
-            UpTrainEvaluatorOutput(input_item, result_item)
-            for input_item, result_item in zip(converted_inputs, converted_results)  # type: ignore
+        converted_results = [
+            [result.to_dict() for result in self.descriptor.output_converter(x, self.metric_params)] for x in results
         ]
-        return {"output": output}
+        return {"results": converted_results}

     def to_dict(self) -> Dict[str, Any]:
         """
@@ -176,19 +197,3 @@ def _init_backend(self):

         self._backend_metric = backend_metric
         self._backend_client = backend_client
-
-
-@dataclass(frozen=True)
-class UpTrainEvaluatorOutput:
-    """
-    Output of the UpTrain evaluator component.
-
-    :param input:
-        The input that was evaluated.
-    :param result:
-        The result of the evaluation. Can contain
-        multiple results depending on the metric.
-    """
-
-    input: Dict[str, Any]
-    result: List[UpTrainMetricResult]
diff --git a/integrations/uptrain/src/uptrain_haystack/metrics.py b/integrations/uptrain/src/haystack_integrations/components/evaluators/metrics.py
similarity index 87%
rename from integrations/uptrain/src/uptrain_haystack/metrics.py
rename to integrations/uptrain/src/haystack_integrations/components/evaluators/metrics.py
index ac1f6b2ad..e42b63e21 100644
--- a/integrations/uptrain/src/uptrain_haystack/metrics.py
+++ b/integrations/uptrain/src/haystack_integrations/components/evaluators/metrics.py
@@ -1,3 +1,4 @@
+import dataclasses
 import inspect
 from dataclasses import dataclass
 from enum import Enum
@@ -13,16 +14,48 @@ class UpTrainMetric(Enum):
     Metrics supported by UpTrain.
     """

+    #: Context relevance.
+    #: Inputs - `questions: List[str], contexts: List[str]`
     CONTEXT_RELEVANCE = "context_relevance"
+
+    #: Factual accuracy.
+    #: Inputs - `questions: List[str], contexts: List[str], responses: List[str]`
     FACTUAL_ACCURACY = "factual_accuracy"
+
+    #: Response relevance.
+    #: Inputs - `questions: List[str], responses: List[str]`
     RESPONSE_RELEVANCE = "response_relevance"
+
+    #: Response completeness.
+    #: Inputs - `questions: List[str], responses: List[str]`
     RESPONSE_COMPLETENESS = "response_completeness"
+
+    #: Response completeness with respect to context.
+    #: Inputs - `questions: List[str], contexts: List[str], responses: List[str]`
     RESPONSE_COMPLETENESS_WRT_CONTEXT = "response_completeness_wrt_context"
+
+    #: Response consistency.
+    #: Inputs - `questions: List[str], contexts: List[str], responses: List[str]`
     RESPONSE_CONSISTENCY = "response_consistency"
+
+    #: Response conciseness.
+    #: Inputs - `questions: List[str], responses: List[str]`
     RESPONSE_CONCISENESS = "response_conciseness"
+
+    #: Language critique.
+    #: Inputs - `responses: List[str]`
     CRITIQUE_LANGUAGE = "critique_language"
+
+    #: Tone critique.
+    #: Inputs - `responses: List[str]`
     CRITIQUE_TONE = "critique_tone"
+
+    #: Guideline adherence.
+    #: Inputs - `questions: List[str], responses: List[str]`
     GUIDELINE_ADHERENCE = "guideline_adherence"
+
+    #: Response matching.
+    #: Inputs - `responses: List[str], ground_truths: List[str]`
     RESPONSE_MATCHING = "response_matching"

     def __str__(self):
@@ -47,7 +80,7 @@ def from_str(cls, string: str) -> "UpTrainMetric":


 @dataclass(frozen=True)
-class UpTrainMetricResult:
+class MetricResult:
     """
     Result of a metric evaluation.
@@ -63,6 +96,9 @@ class UpTrainMetricResult:
     score: float
     explanation: Optional[str] = None

+    def to_dict(self):
+        return dataclasses.asdict(self)
+

 @dataclass(frozen=True)
 class MetricDescriptor:
@@ -88,7 +124,7 @@ class MetricDescriptor:
     backend: Union[Evals, Type[ParametricEval]]
     input_parameters: Dict[str, Type]
     input_converter: Callable[[Any], Iterable[Dict[str, str]]]
-    output_converter: Callable[[Dict[str, Any], Optional[Dict[str, Any]]], List[UpTrainMetricResult]]
+    output_converter: Callable[[Dict[str, Any], Optional[Dict[str, Any]]], List[MetricResult]]
     init_parameters: Optional[Dict[str, Type[Any]]] = None

     @classmethod
@@ -97,9 +133,7 @@ def new(
         metric: UpTrainMetric,
         backend: Union[Evals, Type[ParametricEval]],
         input_converter: Callable[[Any], Iterable[Dict[str, str]]],
-        output_converter: Optional[
-            Callable[[Dict[str, Any], Optional[Dict[str, Any]]], List[UpTrainMetricResult]]
-        ] = None,
+        output_converter: Optional[Callable[[Dict[str, Any], Optional[Dict[str, Any]]], List[MetricResult]]] = None,
         *,
         init_parameters: Optional[Dict[str, Type]] = None,
     ) -> "MetricDescriptor":
@@ -223,13 +257,11 @@ def validate_outputs(outputs: List[Dict[str, Any]]):
             raise ValueError(msg)

     @staticmethod
-    def _extract_default_results(output: Dict[str, Any], metric_name: str) -> UpTrainMetricResult:
+    def _extract_default_results(output: Dict[str, Any], metric_name: str) -> MetricResult:
         try:
             score_key = f"score_{metric_name}"
             explanation_key = f"explanation_{metric_name}"
-            return UpTrainMetricResult(
-                name=metric_name, score=output[score_key], explanation=output.get(explanation_key)
-            )
+            return MetricResult(name=metric_name, score=output[score_key], explanation=output.get(explanation_key))
         except KeyError as e:
             msg = f"UpTrain evaluator did not return an expected output for metric '{metric_name}'"
             raise ValueError(msg) from e
@@ -237,10 +269,10 @@ def _extract_default_results(output: Dict[str, Any], metric_name: str) -> UpTrai
     @staticmethod
     def default(
         metric: UpTrainMetric,
-    ) -> Callable[[Dict[str, Any], Optional[Dict[str, Any]]], List[UpTrainMetricResult]]:
+    ) -> Callable[[Dict[str, Any], Optional[Dict[str, Any]]], List[MetricResult]]:
         def inner(
             output: Dict[str, Any], metric_params: Optional[Dict[str, Any]], metric: UpTrainMetric  # noqa: ARG001
-        ) -> List[UpTrainMetricResult]:
+        ) -> List[MetricResult]:
             return [OutputConverters._extract_default_results(output, str(metric))]

         return partial(inner, metric=metric)
@@ -248,7 +280,7 @@ def inner(
     @staticmethod
     def critique_language(
         output: Dict[str, Any], metric_params: Optional[Dict[str, Any]]  # noqa: ARG004
-    ) -> List[UpTrainMetricResult]:
+    ) -> List[MetricResult]:
         out = []
         for expected_key in ("fluency", "coherence", "grammar", "politeness"):
             out.append(OutputConverters._extract_default_results(output, expected_key))
@@ -257,20 +289,18 @@ def critique_language(
     @staticmethod
     def critique_tone(
         output: Dict[str, Any], metric_params: Optional[Dict[str, Any]]  # noqa: ARG004
-    ) -> List[UpTrainMetricResult]:
+    ) -> List[MetricResult]:
         return [OutputConverters._extract_default_results(output, "tone")]

     @staticmethod
-    def guideline_adherence(
-        output: Dict[str, Any], metric_params: Optional[Dict[str, Any]]
-    ) -> List[UpTrainMetricResult]:
+    def guideline_adherence(output: Dict[str, Any], metric_params: Optional[Dict[str, Any]]) -> List[MetricResult]:
         assert metric_params is not None
         return [OutputConverters._extract_default_results(output, f'{metric_params["guideline_name"]}_adherence')]

     @staticmethod
     def response_matching(
         output: Dict[str, Any], metric_params: Optional[Dict[str, Any]]  # noqa: ARG004
-    ) -> List[UpTrainMetricResult]:
+    ) -> List[MetricResult]:
         metric_str = "response_match"
         out = [OutputConverters._extract_default_results(output, metric_str)]

@@ -278,7 +308,7 @@ def response_matching(
         score_key = f"score_{metric_str}"
         for k, v in output.items():
             if k != score_key and metric_str in k and isinstance(v, float):
-                out.append(UpTrainMetricResult(name=k, score=v))
+                out.append(MetricResult(name=k, score=v))

         return out
diff --git a/integrations/uptrain/src/uptrain_haystack/__init__.py b/integrations/uptrain/src/uptrain_haystack/__init__.py
deleted file mode 100644
index 14d8700fe..000000000
--- a/integrations/uptrain/src/uptrain_haystack/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from uptrain_haystack.evaluator import UpTrainEvaluator, UpTrainEvaluatorOutput, UpTrainMetric
-
-__all__ = ("UpTrainEvaluator", "UpTrainMetric", "UpTrainEvaluatorOutput")
diff --git a/integrations/uptrain/tests/test_evaluator.py b/integrations/uptrain/tests/test_evaluator.py
index 14101c5f4..2128e0634 100644
--- a/integrations/uptrain/tests/test_evaluator.py
+++ b/integrations/uptrain/tests/test_evaluator.py
@@ -7,7 +7,7 @@
 import pytest
 from haystack import DeserializationError

-from uptrain_haystack import UpTrainEvaluator, UpTrainMetric
+from haystack_integrations.components.evaluators import UpTrainEvaluator, UpTrainMetric

 DEFAULT_QUESTIONS = [
     "Which is the most popular global sport?",
@@ -28,6 +28,7 @@ class Unserializable:
     something: str


+# Only returns results for the passed metrics.
 class MockBackend:
     def __init__(self, metric_outputs: List[UpTrainMetric]) -> None:
         self.metrics = metric_outputs
@@ -230,8 +231,12 @@ def test_evaluator_invalid_inputs(os_environ_get, metric, inputs, error_string,
         output = eval.run(**inputs)


+# This test validates the expected outputs of the evaluator.
+# Each output is parameterized as a list of tuples, where each tuple is
+# (name, score, explanation). The name and explanation are optional. If
+# the name is None, then the metric name is used.
 @pytest.mark.parametrize(
-    "metric, inputs, outputs, params",
+    "metric, inputs, expected_outputs, metric_params",
     [
         (UpTrainMetric.CONTEXT_RELEVANCE, {"questions": ["q1"], "contexts": ["c1"]}, [[(None, 0.5, "1")]], None),
         (
@@ -290,33 +295,36 @@ def test_evaluator_invalid_inputs(os_environ_get, metric, inputs, error_string,
     ],
 )
 @patch("os.environ.get")
-def test_evaluator_outputs(os_environ_get, metric, inputs, outputs, params):
+def test_evaluator_outputs(os_environ_get, metric, inputs, expected_outputs, metric_params):
     os_environ_get.return_value = "abacab"
     init_params = {
         "metric": metric,
-        "metric_params": params,
+        "metric_params": metric_params,
         "api": "uptrain",
         "api_key_env_var": "abacab",
         "api_params": None,
     }
     eval = UpTrainEvaluator(**init_params)
     eval._backend_client = MockBackend([metric])
-    results = eval.run(**inputs)["output"]
+    results = eval.run(**inputs)["results"]

-    assert type(results) == type(outputs)
-    assert len(results) == len(outputs)
+    assert type(results) == type(expected_outputs)
+    assert len(results) == len(expected_outputs)

-    for r, o in zip(results, outputs):
-        assert len(r.result) == len(o)
+    for r, o in zip(results, expected_outputs):
+        assert len(r) == len(o)
         expected = {(name if name is not None else str(metric), score, exp) for name, score, exp in o}
-        got = {(x.name, x.score, x.explanation) for x in r.result}
+        got = {(x["name"], x["score"], x["explanation"]) for x in r}
         assert got == expected


+# This integration test validates the evaluator by running it against the
+# OpenAI API. It is parameterized by the metric, the inputs to the evaluator
+# and the metric parameters.
 @pytest.mark.skipif("OPENAI_API_KEY" not in os.environ, reason="OPENAI_API_KEY not set")
 @pytest.mark.parametrize(
-    "metric, inputs, params",
+    "metric, inputs, metric_params",
     [
         (UpTrainMetric.CONTEXT_RELEVANCE, {"questions": DEFAULT_QUESTIONS, "contexts": DEFAULT_CONTEXTS}, None),
         (
@@ -357,10 +365,10 @@ def test_evaluator_outputs(os_environ_get, metric, inputs, outputs, params):
         ),
     ],
 )
-def test_integration_run(metric, inputs, params):
+def test_integration_run(metric, inputs, metric_params):
     init_params = {
         "metric": metric,
-        "metric_params": params,
+        "metric_params": metric_params,
         "api": "openai",
     }
     eval = UpTrainEvaluator(**init_params)
@@ -368,5 +376,5 @@ def test_integration_run(metric, inputs, params):

     assert type(output) == dict
     assert len(output) == 1
-    assert "output" in output
-    assert len(output["output"]) == len(next(iter(inputs.values())))
+    assert "results" in output
+    assert len(output["results"]) == len(next(iter(inputs.values())))
diff --git a/integrations/uptrain/tests/test_metrics.py b/integrations/uptrain/tests/test_metrics.py
index 1dddb651e..b73b2aa92 100644
--- a/integrations/uptrain/tests/test_metrics.py
+++ b/integrations/uptrain/tests/test_metrics.py
@@ -1,6 +1,6 @@
 import pytest

-from uptrain_haystack import UpTrainMetric
+from haystack_integrations.components.evaluators import UpTrainMetric


 def test_uptrain_metric():

From ff3173325df665f26a974925449b777f4e9a87be Mon Sep 17 00:00:00 2001
From: shadeMe
Date: Fri, 26 Jan 2024 14:31:35 +0100
Subject: [PATCH 3/4] Update README

---
 README.md | 37 +++++++++++++++++++------------------
 1 file changed, 19 insertions(+), 18 deletions(-)

diff --git a/README.md b/README.md
index d4d34fd7d..da04de618 100644
--- a/README.md
+++ b/README.md
@@ -60,21 +60,22 @@ deepset-haystack

 ## Inventory

-| Package | Type | PyPi Package | Status |
-| 
------------------------------------------------------------------------------- | ------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| [astra-haystack](integrations/astra/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/astra-haystack.svg)](https://pypi.org/project/astra-haystack) | [![Test / astra](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/astra.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/astra.yml) | -| [amazon-bedrock-haystack](integrations/amazon-bedrock/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/amazon-bedrock-haystack.svg)](https://pypi.org/project/amazon-bedrock-haystack) | [![Test / amazon_bedrock](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/amazon_bedrock.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/amazon_bedrock.yml) | -| [chroma-haystack](integrations/chroma/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/chroma-haystack.svg)](https://pypi.org/project/chroma-haystack) | [![Test / chroma](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/chroma.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/chroma.yml) | -| [cohere-haystack](integrations/cohere/) | Embedder, Generator | [![PyPI - Version](https://img.shields.io/pypi/v/cohere-haystack.svg)](https://pypi.org/project/cohere-haystack) | [![Test / cohere](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/cohere.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/cohere.yml) | -| [elasticsearch-haystack](integrations/elasticsearch/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/elasticsearch-haystack.svg)](https://pypi.org/project/elasticsearch-haystack) | [![Test / elasticsearch](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/elasticsearch.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/elasticsearch.yml) | -| [google-ai-haystack](integrations/google_ai/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/google-ai-haystack.svg)](https://pypi.org/project/google-ai-haystack) | [![Test / google-ai](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_ai.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_ai.yml) | -| [google-vertex-haystack](integrations/google_vertex/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/google-vertex-haystack.svg)](https://pypi.org/project/google-vertex-haystack) | [![Test / google-vertex](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_vertex.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_vertex.yml) | -| [gradient-haystack](integrations/gradient/) | Embedder, Generator | [![PyPI - Version](https://img.shields.io/pypi/v/gradient-haystack.svg)](https://pypi.org/project/gradient-haystack) | 
[![Test / gradient](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/gradient.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/gradient.yml) | -| [instructor-embedders-haystack](integrations/instructor_embedders/) | Embedder | [![PyPI - Version](https://img.shields.io/pypi/v/instructor-embedders-haystack.svg)](https://pypi.org/project/instructor-embedders-haystack) | [![Test / instructor-embedders](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/instructor_embedders.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/instructor_embedders.yml) | -| [jina-haystack](integrations/jina/) | Embedder | [![PyPI - Version](https://img.shields.io/pypi/v/jina-haystack.svg)](https://pypi.org/project/jina-haystack) | [![Test / jina](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/jina.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/jina.yml) | -| [llama-cpp-haystack](integrations/llama_cpp/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/ollama-haystack.svg?color=orange)](https://pypi.org/project/llama-cpp-haystack) | [![Test / llama-cpp](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/llama_cpp.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/llama_cpp.yml) | -| [ollama-haystack](integrations/ollama/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/ollama-haystack.svg?color=orange)](https://pypi.org/project/ollama-haystack) | [![Test / ollama](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/ollama.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/ollama.yml) | -| [opensearch-haystack](integrations/opensearch/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/opensearch-haystack.svg)](https://pypi.org/project/opensearch-haystack) | [![Test / opensearch](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/opensearch.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/opensearch.yml) | -| [pinecone-haystack](integrations/pinecone/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/pinecone-haystack.svg?color=orange)](https://pypi.org/project/pinecone-haystack) | [![Test / pinecone](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pinecone.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pinecone.yml) | -| [qdrant-haystack](integrations/qdrant/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/qdrant-haystack.svg?color=orange)](https://pypi.org/project/qdrant-haystack) | [![Test / qdrant](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/qdrant.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/qdrant.yml) | -| [unstructured-fileconverter-haystack](integrations/unstructured/) | File converter | [![PyPI - Version](https://img.shields.io/pypi/v/unstructured-fileconverter-haystack.svg)](https://pypi.org/project/unstructured-fileconverter-haystack) | [![Test / unstructured](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/unstructured.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/unstructured.yml) | +| Package | Type | PyPi 
Package | Status | +| ------------------------------------------------------------------- | ------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [astra-haystack](integrations/astra/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/astra-haystack.svg)](https://pypi.org/project/astra-haystack) | [![Test / astra](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/astra.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/astra.yml) | +| [amazon-bedrock-haystack](integrations/amazon-bedrock/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/amazon-bedrock-haystack.svg)](https://pypi.org/project/amazon-bedrock-haystack) | [![Test / amazon_bedrock](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/amazon_bedrock.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/amazon_bedrock.yml) | +| [chroma-haystack](integrations/chroma/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/chroma-haystack.svg)](https://pypi.org/project/chroma-haystack) | [![Test / chroma](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/chroma.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/chroma.yml) | +| [cohere-haystack](integrations/cohere/) | Embedder, Generator | [![PyPI - Version](https://img.shields.io/pypi/v/cohere-haystack.svg)](https://pypi.org/project/cohere-haystack) | [![Test / cohere](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/cohere.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/cohere.yml) | +| [elasticsearch-haystack](integrations/elasticsearch/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/elasticsearch-haystack.svg)](https://pypi.org/project/elasticsearch-haystack) | [![Test / elasticsearch](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/elasticsearch.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/elasticsearch.yml) | +| [google-ai-haystack](integrations/google_ai/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/google-ai-haystack.svg)](https://pypi.org/project/google-ai-haystack) | [![Test / google-ai](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_ai.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_ai.yml) | +| [google-vertex-haystack](integrations/google_vertex/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/google-vertex-haystack.svg)](https://pypi.org/project/google-vertex-haystack) | [![Test / google-vertex](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_vertex.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_vertex.yml) | +| [gradient-haystack](integrations/gradient/) | Embedder, Generator | [![PyPI - Version](https://img.shields.io/pypi/v/gradient-haystack.svg)](https://pypi.org/project/gradient-haystack) | [![Test / 
gradient](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/gradient.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/gradient.yml) | +| [instructor-embedders-haystack](integrations/instructor_embedders/) | Embedder | [![PyPI - Version](https://img.shields.io/pypi/v/instructor-embedders-haystack.svg)](https://pypi.org/project/instructor-embedders-haystack) | [![Test / instructor-embedders](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/instructor_embedders.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/instructor_embedders.yml) | +| [jina-haystack](integrations/jina/) | Embedder | [![PyPI - Version](https://img.shields.io/pypi/v/jina-haystack.svg)](https://pypi.org/project/jina-haystack) | [![Test / jina](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/jina.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/jina.yml) | +| [llama-cpp-haystack](integrations/llama_cpp/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/ollama-haystack.svg?color=orange)](https://pypi.org/project/llama-cpp-haystack) | [![Test / llama-cpp](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/llama_cpp.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/llama_cpp.yml) | +| [ollama-haystack](integrations/ollama/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/ollama-haystack.svg?color=orange)](https://pypi.org/project/ollama-haystack) | [![Test / ollama](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/ollama.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/ollama.yml) | +| [opensearch-haystack](integrations/opensearch/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/opensearch-haystack.svg)](https://pypi.org/project/opensearch-haystack) | [![Test / opensearch](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/opensearch.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/opensearch.yml) | +| [pinecone-haystack](integrations/pinecone/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/pinecone-haystack.svg?color=orange)](https://pypi.org/project/pinecone-haystack) | [![Test / pinecone](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pinecone.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pinecone.yml) | +| [qdrant-haystack](integrations/qdrant/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/qdrant-haystack.svg?color=orange)](https://pypi.org/project/qdrant-haystack) | [![Test / qdrant](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/qdrant.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/qdrant.yml) | +| [unstructured-fileconverter-haystack](integrations/unstructured/) | File converter | [![PyPI - Version](https://img.shields.io/pypi/v/unstructured-fileconverter-haystack.svg)](https://pypi.org/project/unstructured-fileconverter-haystack) | [![Test / unstructured](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/unstructured.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/unstructured.yml) | +| 
[uptrainr-haystack](integrations/uptrain/) | Evaluator | [![PyPI - Version](https://img.shields.io/pypi/v/uptrain-haystack.svg)](https://pypi.org/project/uptrain-haystack) | [![Test / uptrain](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/uptrain.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/uptrain.yml) | From 84958f5f5bcb557d31f4da554e2ca67a407125da Mon Sep 17 00:00:00 2001 From: shadeMe Date: Fri, 26 Jan 2024 14:33:57 +0100 Subject: [PATCH 4/4] Fix typo --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 260bd3d8c..20b17b377 100644 --- a/README.md +++ b/README.md @@ -79,4 +79,4 @@ deepset-haystack | [pgvector-haystack](integrations/pgvector/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/pgvector-haystack.svg?color=orange)](https://pypi.org/project/pgvector-haystack) | [![Test / pgvector](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pgvector.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pgvector.yml) | | [qdrant-haystack](integrations/qdrant/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/qdrant-haystack.svg?color=orange)](https://pypi.org/project/qdrant-haystack) | [![Test / qdrant](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/qdrant.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/qdrant.yml) | | [unstructured-fileconverter-haystack](integrations/unstructured/) | File converter | [![PyPI - Version](https://img.shields.io/pypi/v/unstructured-fileconverter-haystack.svg)](https://pypi.org/project/unstructured-fileconverter-haystack) | [![Test / unstructured](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/unstructured.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/unstructured.yml) | -| [uptrainr-haystack](integrations/uptrain/) | Evaluator | [![PyPI - Version](https://img.shields.io/pypi/v/uptrain-haystack.svg)](https://pypi.org/project/uptrain-haystack) | [![Test / uptrain](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/uptrain.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/uptrain.yml) | +| [uptrain-haystack](integrations/uptrain/) | Evaluator | [![PyPI - Version](https://img.shields.io/pypi/v/uptrain-haystack.svg)](https://pypi.org/project/uptrain-haystack) | [![Test / uptrain](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/uptrain.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/uptrain.yml) |
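
As a quick illustration of the output format documented in `UpTrainEvaluator.run`, here is a minimal sketch (built on the patch set above, not code taken from it). It uses `UpTrainMetric.CRITIQUE_LANGUAGE`, which only needs `responses` as input and yields one result each for fluency, coherence, grammar and politeness per response; every result is a dictionary with `name`, `score` and an optional `explanation`. A valid `OPENAI_API_KEY` is assumed, as in `example/example.py`.

```python
# Minimal sketch: assumes `uptrain-haystack` is installed and OPENAI_API_KEY is set.
from haystack import Pipeline
from haystack_integrations.components.evaluators import UpTrainEvaluator, UpTrainMetric

pipeline = Pipeline()
evaluator = UpTrainEvaluator(
    metric=UpTrainMetric.CRITIQUE_LANGUAGE,  # expects only `responses` as input
    api="openai",
    api_key_env_var="OPENAI_API_KEY",
)
pipeline.add_component("evaluator", evaluator)

output = pipeline.run({"evaluator": {"responses": ["Football is the most popular sport."]}})

# `results` is a nested list: one inner list per evaluated input, one dict per metric result.
for response_results in output["evaluator"]["results"]:
    for metric_result in response_results:
        print(metric_result["name"], metric_result["score"], metric_result.get("explanation"))
```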