diff --git a/.github/labeler.yml b/.github/labeler.yml index 0c97df7e2..cbfe6567e 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -119,11 +119,6 @@ integration:unstructured-fileconverter: - any-glob-to-any-file: "integrations/unstructured/**/*" - any-glob-to-any-file: ".github/workflows/unstructured.yml" -integration:uptrain: - - changed-files: - - any-glob-to-any-file: "integrations/uptrain/**/*" - - any-glob-to-any-file: ".github/workflows/uptrain.yml" - integration:weaviate: - changed-files: - any-glob-to-any-file: "integrations/weaviate/**/*" diff --git a/.github/workflows/uptrain.yml b/.github/workflows/uptrain.yml deleted file mode 100644 index 0525dbc55..000000000 --- a/.github/workflows/uptrain.yml +++ /dev/null @@ -1,76 +0,0 @@ -# This workflow comes from https://github.com/ofek/hatch-mypyc -# https://github.com/ofek/hatch-mypyc/blob/5a198c0ba8660494d02716cfc9d79ce4adfb1442/.github/workflows/test.yml -name: Test / uptrain - -on: - # Temporarily disable nightly tests as failures are currently expected - # schedule: - # - cron: "0 0 * * *" - pull_request: - paths: - - "integrations/uptrain/**" - - ".github/workflows/uptrain.yml" - -defaults: - run: - working-directory: integrations/uptrain - -concurrency: - group: uptrain-${{ github.head_ref }} - cancel-in-progress: true - -env: - PYTHONUNBUFFERED: "1" - FORCE_COLOR: "1" - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - -jobs: - run: - name: Python ${{ matrix.python-version }} on ${{ startsWith(matrix.os, 'macos-') && 'macOS' || startsWith(matrix.os, 'windows-') && 'Windows' || 'Linux' }} - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - os: [ubuntu-latest, windows-latest, macos-latest] - python-version: ["3.9", "3.10"] - - steps: - - name: Support longpaths - if: matrix.os == 'windows-latest' - working-directory: . 
- run: git config --system core.longpaths true - - - uses: actions/checkout@v4 - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python-version }} - - - name: Install Hatch - run: pip install --upgrade hatch - - - name: Lint - if: matrix.python-version == '3.9' && runner.os == 'Linux' - run: hatch run lint:all - - - name: Run tests - id: tests - run: hatch run cov - - - name: Nightly - run unit tests with Haystack main branch - if: github.event_name == 'schedule' - id: nightly-haystack-main - run: | - hatch run pip install git+https://github.com/deepset-ai/haystack.git - hatch run test -m "not integration" - - - name: Send event to Datadog for nightly failures - if: failure() && github.event_name == 'schedule' - uses: ./.github/actions/send_failure - with: - title: | - core-integrations failure: - ${{ (steps.tests.conclusion == 'nightly-haystack-main') && 'nightly-haystack-main' || 'tests' }} - - ${{ github.workflow }} - api-key: ${{ secrets.CORE_DATADOG_API_KEY }} \ No newline at end of file diff --git a/README.md b/README.md index 50a4bebc4..734672371 100644 --- a/README.md +++ b/README.md @@ -22,33 +22,33 @@ Please check out our [Contribution Guidelines](CONTRIBUTING.md) for all the deta ## Inventory -| Package | Type | PyPi Package | Status | -| ------------------------------------------------------------------- | ------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [amazon-bedrock-haystack](integrations/amazon-bedrock/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/amazon-bedrock-haystack.svg)](https://pypi.org/project/amazon-bedrock-haystack) | [![Test / amazon_bedrock](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/amazon_bedrock.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/amazon_bedrock.yml) | -| [amazon-sagemaker-haystack](integrations/amazon_sagemaker/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/amazon-sagemaker-haystack.svg)](https://pypi.org/project/amazon-sagemaker-haystack) | [![Test / amazon_sagemaker](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/amazon_sagemaker.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/amazon_sagemaker.yml) | -| [anthropic-haystack](integrations/anthropic/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/anthropic-haystack.svg)](https://pypi.org/project/anthropic-haystack) | [![Test / anthropic](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/anthropic.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/anthropic.yml) | -| [astra-haystack](integrations/astra/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/astra-haystack.svg)](https://pypi.org/project/astra-haystack) | [![Test / astra](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/astra.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/astra.yml) | -| [chroma-haystack](integrations/chroma/) | Document Store | 
[![PyPI - Version](https://img.shields.io/pypi/v/chroma-haystack.svg)](https://pypi.org/project/chroma-haystack) | [![Test / chroma](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/chroma.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/chroma.yml) | -| [cohere-haystack](integrations/cohere/) | Embedder, Generator | [![PyPI - Version](https://img.shields.io/pypi/v/cohere-haystack.svg)](https://pypi.org/project/cohere-haystack) | [![Test / cohere](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/cohere.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/cohere.yml) | -| [deepeval-haystack](integrations/deepeval/) | Evaluator | [![PyPI - Version](https://img.shields.io/pypi/v/deepeval-haystack.svg)](https://pypi.org/project/deepeval-haystack) | [![Test / deepeval](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/deepeval.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/deepeval.yml) | -| [elasticsearch-haystack](integrations/elasticsearch/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/elasticsearch-haystack.svg)](https://pypi.org/project/elasticsearch-haystack) | [![Test / elasticsearch](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/elasticsearch.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/elasticsearch.yml) | -| [fastembed-haystack](integrations/fastembed/) | Embedder | [![PyPI - Version](https://img.shields.io/pypi/v/fastembed-haystack.svg)](https://pypi.org/project/fastembed-haystack/) | [![Test / fastembed](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/fastembed.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/fastembed.yml) | -| [google-ai-haystack](integrations/google_ai/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/google-ai-haystack.svg)](https://pypi.org/project/google-ai-haystack) | [![Test / google-ai](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_ai.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_ai.yml) | -| [google-vertex-haystack](integrations/google_vertex/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/google-vertex-haystack.svg)](https://pypi.org/project/google-vertex-haystack) | [![Test / google-vertex](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_vertex.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_vertex.yml) | -| [gradient-haystack](integrations/gradient/) | Embedder, Generator | [![PyPI - Version](https://img.shields.io/pypi/v/gradient-haystack.svg)](https://pypi.org/project/gradient-haystack) | [![Test / gradient](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/gradient.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/gradient.yml) | -| [instructor-embedders-haystack](integrations/instructor_embedders/) | Embedder | [![PyPI - Version](https://img.shields.io/pypi/v/instructor-embedders-haystack.svg)](https://pypi.org/project/instructor-embedders-haystack) | [![Test / 
instructor-embedders](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/instructor_embedders.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/instructor_embedders.yml) | -| [jina-haystack](integrations/jina/) | Embedder | [![PyPI - Version](https://img.shields.io/pypi/v/jina-haystack.svg)](https://pypi.org/project/jina-haystack) | [![Test / jina](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/jina.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/jina.yml) | -| [llama-cpp-haystack](integrations/llama_cpp/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/llama-cpp-haystack.svg?color=orange)](https://pypi.org/project/llama-cpp-haystack) | [![Test / llama-cpp](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/llama_cpp.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/llama_cpp.yml) | -| [mistral-haystack](integrations/mistral/) | Embedder, Generator | [![PyPI - Version](https://img.shields.io/pypi/v/mistral-haystack.svg)](https://pypi.org/project/mistral-haystack) | [![Test / mistral](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/mistral.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/mistral.yml) | -| [mongodb-atlas-haystack](integrations/mongodb_atlas/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/mongodb-atlas-haystack.svg?color=orange)](https://pypi.org/project/mongodb-atlas-haystack) | [![Test / mongodb-atlas](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/mongodb_atlas.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/mongodb_atlas.yml) | -| [nvidia-haystack](integrations/nvidia/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/nvidia-haystack.svg?color=orange)](https://pypi.org/project/nvidia-haystack) | [![Test / nvidia](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/nvidia.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/nvidia.yml) | -| [ollama-haystack](integrations/ollama/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/ollama-haystack.svg?color=orange)](https://pypi.org/project/ollama-haystack) | [![Test / ollama](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/ollama.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/ollama.yml) | -| [opensearch-haystack](integrations/opensearch/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/opensearch-haystack.svg)](https://pypi.org/project/opensearch-haystack) | [![Test / opensearch](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/opensearch.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/opensearch.yml) | -| [optimum-haystack](integrations/optimum/) | Embedder | [![PyPI - Version](https://img.shields.io/pypi/v/optimum-haystack.svg)](https://pypi.org/project/optimum-haystack) | [![Test / optimum](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/optimum.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/optimum.yml) | -| [pinecone-haystack](integrations/pinecone/) | Document Store | [![PyPI - 
Version](https://img.shields.io/pypi/v/pinecone-haystack.svg?color=orange)](https://pypi.org/project/pinecone-haystack) | [![Test / pinecone](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pinecone.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pinecone.yml) | -| [pgvector-haystack](integrations/pgvector/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/pgvector-haystack.svg?color=orange)](https://pypi.org/project/pgvector-haystack) | [![Test / pgvector](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pgvector.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pgvector.yml) | -| [qdrant-haystack](integrations/qdrant/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/qdrant-haystack.svg?color=orange)](https://pypi.org/project/qdrant-haystack) | [![Test / qdrant](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/qdrant.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/qdrant.yml) | -| [ragas-haystack](integrations/ragas/) | Evaluator | [![PyPI - Version](https://img.shields.io/pypi/v/ragas-haystack.svg)](https://pypi.org/project/ragas-haystack) | [![Test / ragas](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/ragas.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/ragas.yml) | -| [unstructured-fileconverter-haystack](integrations/unstructured/) | File converter | [![PyPI - Version](https://img.shields.io/pypi/v/unstructured-fileconverter-haystack.svg)](https://pypi.org/project/unstructured-fileconverter-haystack) | [![Test / unstructured](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/unstructured.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/unstructured.yml) | -| [uptrain-haystack](integrations/uptrain/) | Evaluator | [![PyPI - Version](https://img.shields.io/pypi/v/uptrain-haystack.svg)](https://pypi.org/project/uptrain-haystack) | [![Test / uptrain](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/uptrain.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/uptrain.yml) | -| [weaviate-haystack](integrations/weaviate/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/weaviate-haystack.svg)](https://pypi.org/project/weaviate-haystack) | [![Test / weaviate](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/weaviate.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/weaviate.yml) | +| Package | Type | PyPi Package | Status | +| -------------------------------------------------------------------------------------------------------------- | ------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [amazon-bedrock-haystack](integrations/amazon-bedrock/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/amazon-bedrock-haystack.svg)](https://pypi.org/project/amazon-bedrock-haystack) | [![Test / 
amazon_bedrock](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/amazon_bedrock.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/amazon_bedrock.yml) | +| [amazon-sagemaker-haystack](integrations/amazon_sagemaker/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/amazon-sagemaker-haystack.svg)](https://pypi.org/project/amazon-sagemaker-haystack) | [![Test / amazon_sagemaker](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/amazon_sagemaker.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/amazon_sagemaker.yml) | +| [anthropic-haystack](integrations/anthropic/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/anthropic-haystack.svg)](https://pypi.org/project/anthropic-haystack) | [![Test / anthropic](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/anthropic.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/anthropic.yml) | +| [astra-haystack](integrations/astra/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/astra-haystack.svg)](https://pypi.org/project/astra-haystack) | [![Test / astra](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/astra.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/astra.yml) | +| [chroma-haystack](integrations/chroma/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/chroma-haystack.svg)](https://pypi.org/project/chroma-haystack) | [![Test / chroma](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/chroma.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/chroma.yml) | +| [cohere-haystack](integrations/cohere/) | Embedder, Generator | [![PyPI - Version](https://img.shields.io/pypi/v/cohere-haystack.svg)](https://pypi.org/project/cohere-haystack) | [![Test / cohere](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/cohere.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/cohere.yml) | +| [deepeval-haystack](integrations/deepeval/) | Evaluator | [![PyPI - Version](https://img.shields.io/pypi/v/deepeval-haystack.svg)](https://pypi.org/project/deepeval-haystack) | [![Test / deepeval](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/deepeval.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/deepeval.yml) | +| [elasticsearch-haystack](integrations/elasticsearch/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/elasticsearch-haystack.svg)](https://pypi.org/project/elasticsearch-haystack) | [![Test / elasticsearch](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/elasticsearch.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/elasticsearch.yml) | +| [fastembed-haystack](integrations/fastembed/) | Embedder | [![PyPI - Version](https://img.shields.io/pypi/v/fastembed-haystack.svg)](https://pypi.org/project/fastembed-haystack/) | [![Test / fastembed](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/fastembed.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/fastembed.yml) | +| [google-ai-haystack](integrations/google_ai/) | Generator | [![PyPI - 
Version](https://img.shields.io/pypi/v/google-ai-haystack.svg)](https://pypi.org/project/google-ai-haystack) | [![Test / google-ai](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_ai.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_ai.yml) | +| [google-vertex-haystack](integrations/google_vertex/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/google-vertex-haystack.svg)](https://pypi.org/project/google-vertex-haystack) | [![Test / google-vertex](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_vertex.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_vertex.yml) | +| [gradient-haystack](integrations/gradient/) | Embedder, Generator | [![PyPI - Version](https://img.shields.io/pypi/v/gradient-haystack.svg)](https://pypi.org/project/gradient-haystack) | [![Test / gradient](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/gradient.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/gradient.yml) | +| [instructor-embedders-haystack](integrations/instructor_embedders/) | Embedder | [![PyPI - Version](https://img.shields.io/pypi/v/instructor-embedders-haystack.svg)](https://pypi.org/project/instructor-embedders-haystack) | [![Test / instructor-embedders](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/instructor_embedders.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/instructor_embedders.yml) | +| [jina-haystack](integrations/jina/) | Embedder | [![PyPI - Version](https://img.shields.io/pypi/v/jina-haystack.svg)](https://pypi.org/project/jina-haystack) | [![Test / jina](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/jina.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/jina.yml) | +| [llama-cpp-haystack](integrations/llama_cpp/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/llama-cpp-haystack.svg?color=orange)](https://pypi.org/project/llama-cpp-haystack) | [![Test / llama-cpp](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/llama_cpp.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/llama_cpp.yml) | +| [mistral-haystack](integrations/mistral/) | Embedder, Generator | [![PyPI - Version](https://img.shields.io/pypi/v/mistral-haystack.svg)](https://pypi.org/project/mistral-haystack) | [![Test / mistral](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/mistral.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/mistral.yml) | +| [mongodb-atlas-haystack](integrations/mongodb_atlas/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/mongodb-atlas-haystack.svg?color=orange)](https://pypi.org/project/mongodb-atlas-haystack) | [![Test / mongodb-atlas](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/mongodb_atlas.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/mongodb_atlas.yml) | +| [nvidia-haystack](integrations/nvidia/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/nvidia-haystack.svg?color=orange)](https://pypi.org/project/nvidia-haystack) | [![Test / 
nvidia](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/nvidia.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/nvidia.yml) | +| [ollama-haystack](integrations/ollama/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/ollama-haystack.svg?color=orange)](https://pypi.org/project/ollama-haystack) | [![Test / ollama](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/ollama.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/ollama.yml) | +| [opensearch-haystack](integrations/opensearch/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/opensearch-haystack.svg)](https://pypi.org/project/opensearch-haystack) | [![Test / opensearch](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/opensearch.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/opensearch.yml) | +| [optimum-haystack](integrations/optimum/) | Embedder | [![PyPI - Version](https://img.shields.io/pypi/v/optimum-haystack.svg)](https://pypi.org/project/optimum-haystack) | [![Test / optimum](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/optimum.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/optimum.yml) | +| [pinecone-haystack](integrations/pinecone/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/pinecone-haystack.svg?color=orange)](https://pypi.org/project/pinecone-haystack) | [![Test / pinecone](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pinecone.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pinecone.yml) | +| [pgvector-haystack](integrations/pgvector/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/pgvector-haystack.svg?color=orange)](https://pypi.org/project/pgvector-haystack) | [![Test / pgvector](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pgvector.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pgvector.yml) | +| [qdrant-haystack](integrations/qdrant/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/qdrant-haystack.svg?color=orange)](https://pypi.org/project/qdrant-haystack) | [![Test / qdrant](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/qdrant.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/qdrant.yml) | +| [ragas-haystack](integrations/ragas/) | Evaluator | [![PyPI - Version](https://img.shields.io/pypi/v/ragas-haystack.svg)](https://pypi.org/project/ragas-haystack) | [![Test / ragas](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/ragas.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/ragas.yml) | +| [unstructured-fileconverter-haystack](integrations/unstructured/) | File converter | [![PyPI - Version](https://img.shields.io/pypi/v/unstructured-fileconverter-haystack.svg)](https://pypi.org/project/unstructured-fileconverter-haystack) | [![Test / unstructured](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/unstructured.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/unstructured.yml) | +| [uptrain-haystack](https://github.com/deepset-ai/haystack-core-integrations/tree/staging/integrations/uptrain) | Evaluator | 
[![PyPI - Version](https://img.shields.io/pypi/v/uptrain-haystack.svg)](https://pypi.org/project/uptrain-haystack) | Staged |
+| [weaviate-haystack](integrations/weaviate/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/weaviate-haystack.svg)](https://pypi.org/project/weaviate-haystack) | [![Test / weaviate](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/weaviate.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/weaviate.yml) |
diff --git a/integrations/qdrant/src/haystack_integrations/components/retrievers/qdrant/__init__.py b/integrations/qdrant/src/haystack_integrations/components/retrievers/qdrant/__init__.py
index c410d369e..ed6422bfe 100644
--- a/integrations/qdrant/src/haystack_integrations/components/retrievers/qdrant/__init__.py
+++ b/integrations/qdrant/src/haystack_integrations/components/retrievers/qdrant/__init__.py
@@ -2,6 +2,6 @@
 #
 # SPDX-License-Identifier: Apache-2.0
-from .retriever import QdrantEmbeddingRetriever, QdrantHybridRetriever, QdrantSparseRetriever
+from .retriever import QdrantEmbeddingRetriever, QdrantHybridRetriever, QdrantSparseEmbeddingRetriever

-__all__ = ("QdrantEmbeddingRetriever", "QdrantSparseRetriever", "QdrantHybridRetriever")
+__all__ = ("QdrantEmbeddingRetriever", "QdrantSparseEmbeddingRetriever", "QdrantHybridRetriever")
diff --git a/integrations/qdrant/src/haystack_integrations/components/retrievers/qdrant/retriever.py b/integrations/qdrant/src/haystack_integrations/components/retrievers/qdrant/retriever.py
index e18c0eedf..6eac7bc50 100644
--- a/integrations/qdrant/src/haystack_integrations/components/retrievers/qdrant/retriever.py
+++ b/integrations/qdrant/src/haystack_integrations/components/retrievers/qdrant/retriever.py
@@ -126,13 +126,13 @@ def run(


 @component
-class QdrantSparseRetriever:
+class QdrantSparseEmbeddingRetriever:
     """
     A component for retrieving documents from a QdrantDocumentStore using sparse vectors.

     Usage example:
     ```python
-    from haystack_integrations.components.retrievers.qdrant import QdrantSparseRetriever
+    from haystack_integrations.components.retrievers.qdrant import QdrantSparseEmbeddingRetriever
     from haystack_integrations.document_stores.qdrant import QdrantDocumentStore
     from haystack.dataclasses.sparse_embedding import SparseEmbedding
@@ -146,7 +146,7 @@ class QdrantSparseRetriever:
     doc = Document(content="test", sparse_embedding=SparseEmbedding(indices=[0, 3, 5], values=[0.1, 0.5, 0.12]))
     document_store.write_documents([doc])

-    retriever = QdrantSparseRetriever(document_store=document_store)
+    retriever = QdrantSparseEmbeddingRetriever(document_store=document_store)
     sparse_embedding = SparseEmbedding(indices=[0, 1, 2, 3], values=[0.1, 0.8, 0.05, 0.33])
     retriever.run(query_sparse_embedding=sparse_embedding)
     ```
@@ -161,7 +161,7 @@ def __init__(
         return_embedding: bool = False,
     ):
         """
-        Create a QdrantSparseRetriever component.
+        Create a QdrantSparseEmbeddingRetriever component.

         :param document_store: An instance of QdrantDocumentStore.
         :param filters: A dictionary with filters to narrow down the search space. Default is None.
@@ -202,7 +202,7 @@ def to_dict(self) -> Dict[str, Any]:
         return d

     @classmethod
-    def from_dict(cls, data: Dict[str, Any]) -> "QdrantSparseRetriever":
+    def from_dict(cls, data: Dict[str, Any]) -> "QdrantSparseEmbeddingRetriever":
         """
         Deserializes the component from a dictionary.
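The sparse retriever rename above (`QdrantSparseRetriever` → `QdrantSparseEmbeddingRetriever`) is a breaking change for any code importing the old name. A minimal sketch of the renamed component in use — assembled from the docstring and tests in this diff, with the in-memory store settings and embedding values purely illustrative:

```python
from haystack import Document
from haystack.dataclasses.sparse_embedding import SparseEmbedding
from haystack_integrations.components.retrievers.qdrant import QdrantSparseEmbeddingRetriever
from haystack_integrations.document_stores.qdrant import QdrantDocumentStore

# Sparse retrieval needs a store with sparse embeddings enabled.
document_store = QdrantDocumentStore(location=":memory:", index="test", use_sparse_embeddings=True)

# Index a document that carries a sparse embedding alongside its content.
doc = Document(content="test", sparse_embedding=SparseEmbedding(indices=[0, 3, 5], values=[0.1, 0.5, 0.12]))
document_store.write_documents([doc])

# Only the class name changed; run() still takes a query_sparse_embedding.
retriever = QdrantSparseEmbeddingRetriever(document_store=document_store)
query = SparseEmbedding(indices=[0, 1, 2, 3], values=[0.1, 0.8, 0.05, 0.33])
documents = retriever.run(query_sparse_embedding=query)["documents"]
```

Serialized pipelines are affected as well: the `type` entry emitted by `to_dict()` now reads `...retriever.QdrantSparseEmbeddingRetriever`, which is what the updated tests below assert.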
diff --git a/integrations/qdrant/tests/test_retriever.py b/integrations/qdrant/tests/test_retriever.py
index 687531ba5..a2aa2b090 100644
--- a/integrations/qdrant/tests/test_retriever.py
+++ b/integrations/qdrant/tests/test_retriever.py
@@ -9,7 +9,7 @@
 from haystack_integrations.components.retrievers.qdrant import (
     QdrantEmbeddingRetriever,
     QdrantHybridRetriever,
-    QdrantSparseRetriever,
+    QdrantSparseEmbeddingRetriever,
 )
 from haystack_integrations.document_stores.qdrant import QdrantDocumentStore
@@ -138,10 +138,10 @@ def test_run_with_sparse_activated(self, filterable_docs: List[Document]):
         assert document.embedding is None


-class TestQdrantSparseRetriever(FilterableDocsFixtureMixin):
+class TestQdrantSparseEmbeddingRetriever(FilterableDocsFixtureMixin):
     def test_init_default(self):
         document_store = QdrantDocumentStore(location=":memory:", index="test")
-        retriever = QdrantSparseRetriever(document_store=document_store)
+        retriever = QdrantSparseEmbeddingRetriever(document_store=document_store)
         assert retriever._document_store == document_store
         assert retriever._filters is None
         assert retriever._top_k == 10
@@ -149,10 +149,10 @@ def test_init_default(self):

     def test_to_dict(self):
         document_store = QdrantDocumentStore(location=":memory:", index="test")
-        retriever = QdrantSparseRetriever(document_store=document_store)
+        retriever = QdrantSparseEmbeddingRetriever(document_store=document_store)
         res = retriever.to_dict()
         assert res == {
-            "type": "haystack_integrations.components.retrievers.qdrant.retriever.QdrantSparseRetriever",
+            "type": "haystack_integrations.components.retrievers.qdrant.retriever.QdrantSparseEmbeddingRetriever",
             "init_parameters": {
                 "document_store": {
                     "type": "haystack_integrations.document_stores.qdrant.document_store.QdrantDocumentStore",
@@ -205,7 +205,7 @@ def test_to_dict(self):

     def test_from_dict(self):
         data = {
-            "type": "haystack_integrations.components.retrievers.qdrant.retriever.QdrantSparseRetriever",
+            "type": "haystack_integrations.components.retrievers.qdrant.retriever.QdrantSparseEmbeddingRetriever",
             "init_parameters": {
                 "document_store": {
                     "init_parameters": {"location": ":memory:", "index": "test"},
@@ -217,7 +217,7 @@ def test_from_dict(self):
                 "return_embedding": True,
             },
         }
-        retriever = QdrantSparseRetriever.from_dict(data)
+        retriever = QdrantSparseEmbeddingRetriever.from_dict(data)
         assert isinstance(retriever._document_store, QdrantDocumentStore)
         assert retriever._document_store.index == "test"
         assert retriever._filters is None
@@ -233,7 +233,7 @@ def test_run(self, filterable_docs: List[Document]):
             doc.sparse_embedding = SparseEmbedding.from_dict(_generate_mocked_sparse_embedding(1)[0])
         document_store.write_documents(filterable_docs)

-        retriever = QdrantSparseRetriever(document_store=document_store)
+        retriever = QdrantSparseEmbeddingRetriever(document_store=document_store)
         sparse_embedding = SparseEmbedding(indices=[0, 1, 2, 3], values=[0.1, 0.8, 0.05, 0.33])
         results: List[Document] = retriever.run(query_sparse_embedding=sparse_embedding)["documents"]
diff --git a/integrations/ragas/pyproject.toml b/integrations/ragas/pyproject.toml
index 2db2770bd..f8d55023f 100644
--- a/integrations/ragas/pyproject.toml
+++ b/integrations/ragas/pyproject.toml
@@ -21,7 +21,7 @@ classifiers = [
   "Programming Language :: Python :: Implementation :: CPython",
   "Programming Language :: Python :: Implementation :: PyPy",
 ]
-dependencies = ["haystack-ai", "ragas==0.1.1"]
+dependencies = ["haystack-ai", "ragas"]

 [project.urls]
 Source = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/ragas"
@@ -152,6 +152,7 @@ module = [
   "haystack.*",
   "pytest.*",
   "ragas.*",
+  "datasets.*",
   "numpy",
   "grpc",
   "haystack_integrations.*",
diff --git a/integrations/ragas/src/haystack_integrations/components/evaluators/ragas/evaluator.py b/integrations/ragas/src/haystack_integrations/components/evaluators/ragas/evaluator.py
index 5c8613553..c44c446e6 100644
--- a/integrations/ragas/src/haystack_integrations/components/evaluators/ragas/evaluator.py
+++ b/integrations/ragas/src/haystack_integrations/components/evaluators/ragas/evaluator.py
@@ -1,12 +1,12 @@
 import json
 from typing import Any, Callable, Dict, List, Optional, Union

-from datasets import Dataset  # type: ignore
+from datasets import Dataset
 from haystack import DeserializationError, component, default_from_dict, default_to_dict
 from ragas import evaluate  # type: ignore
-from ragas.evaluation import Result  # type: ignore
-from ragas.metrics.base import Metric  # type: ignore
+from ragas.evaluation import Result
+from ragas.metrics.base import Metric

 from .metrics import (
     METRIC_DESCRIPTORS,
diff --git a/integrations/ragas/src/haystack_integrations/components/evaluators/ragas/metrics.py b/integrations/ragas/src/haystack_integrations/components/evaluators/ragas/metrics.py
index ed807aa81..06b29bedf 100644
--- a/integrations/ragas/src/haystack_integrations/components/evaluators/ragas/metrics.py
+++ b/integrations/ragas/src/haystack_integrations/components/evaluators/ragas/metrics.py
@@ -16,7 +16,7 @@
     ContextUtilization,  # type: ignore
     Faithfulness,  # type: ignore
 )
-from ragas.metrics.base import Metric  # type: ignore
+from ragas.metrics.base import Metric


 class RagasBaseEnum(Enum):
diff --git a/integrations/uptrain/LICENSE.txt b/integrations/uptrain/LICENSE.txt
deleted file mode 100644
index 137069b82..000000000
--- a/integrations/uptrain/LICENSE.txt
+++ /dev/null
@@ -1,73 +0,0 @@
-Apache License
-Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
-
-"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
- -"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: - - (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. - - You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - -To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/integrations/uptrain/README.md b/integrations/uptrain/README.md deleted file mode 100644 index 6d7605306..000000000 --- a/integrations/uptrain/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# uptrain-haystack - -[![PyPI - Version](https://img.shields.io/pypi/v/uptrain-haystack.svg)](https://pypi.org/project/uptrain-haystack) -[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/uptrain-haystack.svg)](https://pypi.org/project/uptrain-haystack) - ---- - -**Table of Contents** - -- [uptrain-haystack](#uptrain-haystack) - - [Installation](#installation) - - [Testing](#testing) - - [Examples](#examples) - - [License](#license) - -## Installation - -```console -pip install uptrain-haystack -``` - -For more information about the UpTrain evaluation framework, please refer to their [documentation](https://docs.uptrain.ai/getting-started/introduction). - -## Testing - -```console -hatch run test -``` - -## Examples - -You can find a code example showing how to use the Evaluator under the `example/` folder of this repo. 
- -## License - -`uptrain-haystack` is distributed under the terms of the [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html) license. diff --git a/integrations/uptrain/example/example.py b/integrations/uptrain/example/example.py deleted file mode 100644 index 36387b06c..000000000 --- a/integrations/uptrain/example/example.py +++ /dev/null @@ -1,37 +0,0 @@ -# A valid OpenAI API key is required to run this example. - -from haystack import Pipeline -from haystack_integrations.components.evaluators.uptrain import UpTrainEvaluator, UpTrainMetric -from haystack.utils import Secret - -QUESTIONS = [ - "Which is the most popular global sport?", - "Who created the Python language?", -] -CONTEXTS = [ - [ - "The popularity of sports can be measured in various ways, including TV viewership, social media presence, number of participants, and economic impact. Football is undoubtedly the world's most popular sport with major events like the FIFA World Cup and sports personalities like Ronaldo and Messi, drawing a followership of more than 4 billion people." - ], - [ - "Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming language. Its design philosophy emphasizes code readability, and its language constructs aim to help programmers write clear, logical code for both small and large-scale software projects." - ], -] -RESPONSES = [ - "Football is the most popular sport with around 4 billion followers worldwide", - "Python language was created by Guido van Rossum.", -] - -pipeline = Pipeline() -evaluator = UpTrainEvaluator( - metric=UpTrainMetric.FACTUAL_ACCURACY, - api="openai", - api_key=Secret.from_env_var("OPENAI_API_KEY"), -) -pipeline.add_component("evaluator", evaluator) - -# Each metric expects a specific set of parameters as input. Refer to the -# UpTrainMetric class' documentation for more details. 
-results = pipeline.run({"evaluator": {"questions": QUESTIONS, "contexts": CONTEXTS, "responses": RESPONSES}}) - -for output in results["evaluator"]["results"]: - print(output) diff --git a/integrations/uptrain/pydoc/config.yml b/integrations/uptrain/pydoc/config.yml deleted file mode 100644 index 6e5c9a2b8..000000000 --- a/integrations/uptrain/pydoc/config.yml +++ /dev/null @@ -1,33 +0,0 @@ -loaders: - - type: haystack_pydoc_tools.loaders.CustomPythonLoader - search_path: [../src] - modules: - [ - "haystack_integrations.components.evaluators.uptrain.evaluator", - "haystack_integrations.components.evaluators.uptrain.metrics", - ] - ignore_when_discovered: ["__init__"] -processors: - - type: filter - expression: - documented_only: true - do_not_filter_modules: false - skip_empty_modules: true - - type: filter - expression: "name not in ['MetricResult', 'MetricDescriptor', 'OutputConverters', 'InputConverters', 'METRIC_DESCRIPTORS']" - - type: smart - - type: crossref -renderer: - type: haystack_pydoc_tools.renderers.ReadmePreviewRenderer - excerpt: UpTrain integration for Haystack - category_slug: integrations-api - title: UpTrain - slug: integrations-uptrain - order: 240 - markdown: - descriptive_class_title: false - classdef_code_block: false - descriptive_module_title: true - add_method_class_prefix: true - add_member_class_prefix: false - filename: _readme_uptrain.md diff --git a/integrations/uptrain/pyproject.toml b/integrations/uptrain/pyproject.toml deleted file mode 100644 index d43c77f28..000000000 --- a/integrations/uptrain/pyproject.toml +++ /dev/null @@ -1,164 +0,0 @@ -[build-system] -requires = ["hatchling", "hatch-vcs"] -build-backend = "hatchling.build" - -[project] -name = "uptrain-haystack" -dynamic = ["version"] -description = 'An integration of UpTrain LLM evaluation framework with Haystack' -readme = "README.md" -requires-python = ">=3.8" -license = "Apache-2.0" -keywords = [] -authors = [{ name = "deepset GmbH", email = "info@deepset.ai" }] -classifiers = [ - "Development Status :: 4 - Beta", - "Programming Language :: Python", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: Implementation :: CPython", - "Programming Language :: Python :: Implementation :: PyPy", -] -dependencies = [ - "haystack-ai", - "uptrain==0.5.0", - "nest_asyncio", - "litellm", -] - -[project.urls] -Source = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/uptrain" -Documentation = "https://github.com/deepset-ai/haystack-core-integrations/blob/main/integrations/uptrain/README.md" -Issues = "https://github.com/deepset-ai/haystack-core-integrations/issues" - -[tool.hatch.build.targets.wheel] -packages = ["src/haystack_integrations"] - -[tool.hatch.version] -source = "vcs" -tag-pattern = 'integrations\/uptrain-v(?P.*)' - -[tool.hatch.version.raw-options] -root = "../.." 
-git_describe_command = 'git describe --tags --match="integrations/uptrain-v[0-9]*"' - -[tool.hatch.envs.default] -dependencies = ["coverage[toml]>=6.5", "pytest", "haystack-pydoc-tools"] -[tool.hatch.envs.default.scripts] -test = "pytest {args:tests}" -test-cov = "coverage run -m pytest {args:tests}" -cov-report = ["- coverage combine", "coverage report"] -cov = ["test-cov", "cov-report"] -docs = ["pydoc-markdown pydoc/config.yml"] - -[[tool.hatch.envs.all.matrix]] -python = ["3.8", "3.9", "3.10", "3.11"] - -[tool.hatch.envs.lint] -detached = true -dependencies = ["black>=23.1.0", "mypy>=1.0.0", "ruff>=0.0.243"] -[tool.hatch.envs.lint.scripts] -typing = "mypy --install-types --non-interactive {args:src/}" -style = ["ruff {args:.}", "black --check --diff {args:.}"] -fmt = ["black {args:.}", "ruff --fix {args:.}", "style"] -all = ["style", "typing"] - -[tool.black] -target-version = ["py38"] -line-length = 120 -skip-string-normalization = true - -[tool.ruff] -target-version = "py38" -line-length = 120 -select = [ - "A", - "ARG", - "B", - "C", - "DTZ", - "E", - "EM", - "F", - "FBT", - "I", - "ICN", - "ISC", - "N", - "PLC", - "PLE", - "PLR", - "PLW", - "Q", - "RUF", - "S", - "T", - "TID", - "UP", - "W", - "YTT", -] -ignore = [ - # Allow non-abstract empty methods in abstract base classes - "B027", - # Allow boolean positional values in function calls, like `dict.get(... True)` - "FBT003", - # Ignore checks for possible passwords - "S105", - "S106", - "S107", - # Ignore complexity - "C901", - "PLR0911", - "PLR0912", - "PLR0913", - "PLR0915", - # Misc - "S101", - "TID252", - "B008", -] -unfixable = [ - # Don't touch unused imports - "F401", -] -extend-exclude = ["tests", "example"] - -[tool.ruff.isort] -known-first-party = ["src"] - -[tool.ruff.flake8-tidy-imports] -ban-relative-imports = "all" - -[tool.ruff.per-file-ignores] -# Tests can use magic values, assertions, and relative imports -"tests/**/*" = ["PLR2004", "S101", "TID252"] - -[tool.coverage.run] -source = ["haystack_integrations"] -branch = true -parallel = false - - -[tool.coverage.report] -omit = ["*/tests/*", "*/__init__.py"] -show_missing=true -exclude_lines = [ - "no cov", - "if __name__ == .__main__.:", - "if TYPE_CHECKING:", -] - - -[[tool.mypy.overrides]] -module = [ - "haystack.*", - "pytest.*", - "uptrain.*", - "numpy", - "grpc", - "haystack_integrations.*", -] -ignore_missing_imports = true diff --git a/integrations/uptrain/src/haystack_integrations/components/evaluators/uptrain/__init__.py b/integrations/uptrain/src/haystack_integrations/components/evaluators/uptrain/__init__.py deleted file mode 100644 index e8366dfc0..000000000 --- a/integrations/uptrain/src/haystack_integrations/components/evaluators/uptrain/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .evaluator import UpTrainEvaluator -from .metrics import UpTrainMetric - -__all__ = ( - "UpTrainEvaluator", - "UpTrainMetric", -) diff --git a/integrations/uptrain/src/haystack_integrations/components/evaluators/uptrain/evaluator.py b/integrations/uptrain/src/haystack_integrations/components/evaluators/uptrain/evaluator.py deleted file mode 100644 index 877706786..000000000 --- a/integrations/uptrain/src/haystack_integrations/components/evaluators/uptrain/evaluator.py +++ /dev/null @@ -1,215 +0,0 @@ -import json -from typing import Any, Dict, List, Optional, Union - -from haystack import DeserializationError, component, default_from_dict, default_to_dict -from haystack.utils import Secret, deserialize_secrets_inplace - -from uptrain import APIClient, EvalLLM, Evals # 
type: ignore -from uptrain.framework.evals import ParametricEval - -from .metrics import ( - METRIC_DESCRIPTORS, - InputConverters, - OutputConverters, - UpTrainMetric, -) - - -@component -class UpTrainEvaluator: - """ - A component that uses the [UpTrain framework](https://docs.uptrain.ai/getting-started/introduction) - to evaluate inputs against a specific metric. Supported metrics are defined by `UpTrainMetric`. - - Usage example: - ```python - from haystack_integrations.components.evaluators.uptrain import UpTrainEvaluator, UpTrainMetric - from haystack.utils import Secret - - evaluator = UpTrainEvaluator( - metric=UpTrainMetric.FACTUAL_ACCURACY, - api="openai", - api_key=Secret.from_env_var("OPENAI_API_KEY"), - ) - output = evaluator.run( - questions=["Which is the most popular global sport?"], - contexts=[ - [ - "Football is undoubtedly the world's most popular sport with" - "major events like the FIFA World Cup and sports personalities" - "like Ronaldo and Messi, drawing a followership of more than 4" - "billion people." - ] - ], - responses=["Football is the most popular sport with around 4 billion" "followers worldwide"], - ) - print(output["results"]) - ``` - """ - - _backend_metric: Union[Evals, ParametricEval] - _backend_client: Union[APIClient, EvalLLM] - - def __init__( - self, - metric: Union[str, UpTrainMetric], - metric_params: Optional[Dict[str, Any]] = None, - *, - api: str = "openai", - api_key: Secret = Secret.from_env_var("OPENAI_API_KEY"), - api_params: Optional[Dict[str, Any]] = None, - ): - """ - Construct a new UpTrain evaluator. - - :param metric: - The metric to use for evaluation. - :param metric_params: - Parameters to pass to the metric's constructor. - Refer to the `UpTrainMetric` class for more details - on required parameters. - :param api: - The API to use for evaluation. Supported APIs: - `openai`, `uptrain`. - :param api_key: - The API key to use. - :param api_params: - Additional parameters to pass to the API client. - Required parameters for the UpTrain API: `project_name`. - """ - self.metric = metric if isinstance(metric, UpTrainMetric) else UpTrainMetric.from_str(metric) - self.metric_params = metric_params - self.descriptor = METRIC_DESCRIPTORS[self.metric] - self.api = api - self.api_key = api_key - self.api_params = api_params - - self._init_backend() - expected_inputs = self.descriptor.input_parameters - component.set_input_types(self, **expected_inputs) - - @component.output_types(results=List[List[Dict[str, Any]]]) - def run(self, **inputs) -> Dict[str, Any]: - """ - Run the UpTrain evaluator on the provided inputs. - - :param inputs: - The inputs to evaluate. These are determined by the - metric being calculated. See `UpTrainMetric` for more - information. - :returns: - A dictionary with a single `results` entry that contains - a nested list of metric results. Each input can have one or more - results, depending on the metric. Each result is a dictionary - containing the following keys and values: - - `name` - The name of the metric. - - `score` - The score of the metric. - - `explanation` - An optional explanation of the score. - """ - # The backend requires random access to the data, so we can't stream it. 
- InputConverters.validate_input_parameters(self.metric, self.descriptor.input_parameters, inputs) - converted_inputs: List[Dict[str, str]] = list(self.descriptor.input_converter(**inputs)) # type: ignore - - eval_args = {"data": converted_inputs, "checks": [self._backend_metric]} - if self.api_params is not None: - eval_args.update({k: v for k, v in self.api_params.items() if k not in eval_args}) - - results: List[Dict[str, Any]] - if isinstance(self._backend_client, EvalLLM): - results = self._backend_client.evaluate(**eval_args) - else: - results = self._backend_client.log_and_evaluate(**eval_args) - - OutputConverters.validate_outputs(results) - converted_results = [ - [result.to_dict() for result in self.descriptor.output_converter(x, self.metric_params)] for x in results - ] - - return {"results": converted_results} - - def to_dict(self) -> Dict[str, Any]: - """ - Serializes the component to a dictionary. - - :returns: - Dictionary with serialized data. - :raises DeserializationError: - If the component cannot be serialized. - """ - - def check_serializable(obj: Any): - try: - json.dumps(obj) - return True - except (TypeError, OverflowError): - return False - - if not check_serializable(self.api_params) or not check_serializable(self.metric_params): - msg = "UpTrain evaluator cannot serialize the API/metric parameters" - raise DeserializationError(msg) - - return default_to_dict( - self, - metric=self.metric, - metric_params=self.metric_params, - api=self.api, - api_key=self.api_key.to_dict(), - api_params=self.api_params, - ) - - @classmethod - def from_dict(cls, data: Dict[str, Any]) -> "UpTrainEvaluator": - """ - Deserializes the component from a dictionary. - - :param data: - Dictionary to deserialize from. - :returns: - Deserialized component. - """ - deserialize_secrets_inplace(data["init_parameters"], ["api_key"]) - return default_from_dict(cls, data) - - def _init_backend(self): - if isinstance(self.descriptor.backend, Evals): - if self.metric_params is not None: - msg = ( - f"Uptrain metric '{self.metric}' received the following unexpected init parameters:" - f"{self.metric_params}" - ) - raise ValueError(msg) - backend_metric = self.descriptor.backend - else: - assert issubclass(self.descriptor.backend, ParametricEval) - if self.metric_params is None: - msg = f"Uptrain metric '{self.metric}' expected init parameters but got none" - raise ValueError(msg) - elif not all(k in self.descriptor.init_parameters for k in self.metric_params.keys()): - msg = ( - f"Invalid init parameters for UpTrain metric '{self.metric}'. " - f"Expected: {list(self.descriptor.init_parameters.keys())}" - ) - - raise ValueError(msg) - backend_metric = self.descriptor.backend(**self.metric_params) - - supported_apis = ("openai", "uptrain") - if self.api not in supported_apis: - msg = f"Unsupported API '{self.api}' for UpTrain evaluator. 
Supported APIs: {supported_apis}" - raise ValueError(msg) - - api_key = self.api_key.resolve_value() - assert api_key is not None - if self.api == "openai": - backend_client = EvalLLM(openai_api_key=api_key) - if self.api_params is not None: - msg = "OpenAI API does not support additional parameters" - raise ValueError(msg) - elif self.api == "uptrain": - if self.api_params is None or "project_name" not in self.api_params: - msg = "UpTrain API requires a 'project_name' API parameter" - raise ValueError(msg) - backend_client = APIClient(uptrain_api_key=api_key) - - self._backend_metric = backend_metric - self._backend_client = backend_client diff --git a/integrations/uptrain/src/haystack_integrations/components/evaluators/uptrain/metrics.py b/integrations/uptrain/src/haystack_integrations/components/evaluators/uptrain/metrics.py deleted file mode 100644 index a13843d4a..000000000 --- a/integrations/uptrain/src/haystack_integrations/components/evaluators/uptrain/metrics.py +++ /dev/null @@ -1,387 +0,0 @@ -import dataclasses -import inspect -from dataclasses import dataclass -from enum import Enum -from functools import partial -from typing import Any, Callable, Dict, Iterable, List, Optional, Type, Union - -from uptrain import CritiqueTone, Evals, GuidelineAdherence, ResponseMatching # type: ignore -from uptrain.framework.evals import ParametricEval - - -class UpTrainMetric(Enum): - """ - Metrics supported by UpTrain. - """ - - #: Context relevance.\ - #: Inputs - `questions: List[str], contexts: List[List[str]]` - CONTEXT_RELEVANCE = "context_relevance" - - #: Factual accuracy.\ - #: Inputs - `questions: List[str], contexts: List[List[str]], responses: List[str]` - FACTUAL_ACCURACY = "factual_accuracy" - - #: Response relevance.\ - #: Inputs - `questions: List[str], responses: List[str]` - RESPONSE_RELEVANCE = "response_relevance" - - #: Response completeness.\ - #: Inputs - `questions: List[str], responses: List[str]` - RESPONSE_COMPLETENESS = "response_completeness" - - #: Response completeness with respect to context.\ - #: Inputs - `questions: List[str], contexts: List[List[str]], responses: List[str]` - RESPONSE_COMPLETENESS_WRT_CONTEXT = "response_completeness_wrt_context" - - #: Response consistency.\ - #: Inputs - `questions: List[str], contexts: List[List[str]], responses: List[str]` - RESPONSE_CONSISTENCY = "response_consistency" - - #: Response conciseness.\ - #: Inputs - `questions: List[str], responses: List[str]` - RESPONSE_CONCISENESS = "response_conciseness" - - #: Language critique.\ - #: Inputs - `responses: List[str]` - CRITIQUE_LANGUAGE = "critique_language" - - #: Tone critique.\ - #: Inputs - `responses: List[str]`\ - #: Parameters - `llm_persona: str` - CRITIQUE_TONE = "critique_tone" - - #: Guideline adherence.\ - #: Inputs - `questions: List[str], responses: List[str]`\ - #: Parameters - `guideline: str`, `guideline_name: str`, `response_schema: Optional[str]` - GUIDELINE_ADHERENCE = "guideline_adherence" - - #: Response matching.\ - #: Inputs - `responses: List[str], ground_truths: List[str]`\ - #: Parameters - `method: str` - RESPONSE_MATCHING = "response_matching" - - def __str__(self): - return self.value - - @classmethod - def from_str(cls, string: str) -> "UpTrainMetric": - """ - Create a metric type from a string. - - :param string: - The string to convert. - :returns: - The metric. - """ - enum_map = {e.value: e for e in UpTrainMetric} - metric = enum_map.get(string) - if metric is None: - msg = f"Unknown UpTrain metric '{string}'. 
Supported metrics: {list(enum_map.keys())}" - raise ValueError(msg) - return metric - - -@dataclass(frozen=True) -class MetricResult: - """ - Result of a metric evaluation. - - :param name: - The name of the metric. - :param score: - The score of the metric. - :param explanation: - An optional explanation of the metric. - """ - - name: str - score: float - explanation: Optional[str] = None - - def to_dict(self): - return dataclasses.asdict(self) - - -@dataclass(frozen=True) -class MetricDescriptor: - """ - Descriptor for a metric. - - :param metric: - The metric. - :param backend: - The associated UpTrain metric class. - :param input_parameters: - Parameters accepted by the metric. This is used - to set the input types of the evaluator component. - :param input_converter: - Callable that converts input parameters to the UpTrain input format. - :param output_converter: - Callable that converts the UpTrain output format to our output format. - :param init_parameters: - Additional parameters that need to be passed to the metric class during initialization. - """ - - metric: UpTrainMetric - backend: Union[Evals, Type[ParametricEval]] - input_parameters: Dict[str, Type] - input_converter: Callable[[Any], Iterable[Dict[str, str]]] - output_converter: Callable[[Dict[str, Any], Optional[Dict[str, Any]]], List[MetricResult]] - init_parameters: Optional[Dict[str, Type[Any]]] = None - - @classmethod - def new( - cls, - metric: UpTrainMetric, - backend: Union[Evals, Type[ParametricEval]], - input_converter: Callable[[Any], Iterable[Dict[str, str]]], - output_converter: Optional[Callable[[Dict[str, Any], Optional[Dict[str, Any]]], List[MetricResult]]] = None, - *, - init_parameters: Optional[Dict[str, Type]] = None, - ) -> "MetricDescriptor": - input_converter_signature = inspect.signature(input_converter) - input_parameters = {} - for name, param in input_converter_signature.parameters.items(): - if name in ("cls", "self"): - continue - elif param.kind not in (inspect.Parameter.KEYWORD_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD): - continue - input_parameters[name] = param.annotation - - return cls( - metric=metric, - backend=backend, - input_parameters=input_parameters, - input_converter=input_converter, - output_converter=output_converter if output_converter is not None else OutputConverters.default(metric), - init_parameters=init_parameters, - ) - - -class InputConverters: - """ - Converters for input parameters. - - The signature of the converter functions serves as the ground-truth of the - expected input parameters of a given metric. They are also responsible for validating - the input parameters and converting them to the format expected by UpTrain. 
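-
-    Usage example (a sketch based on the converters defined below):
-
-    ```python
-    rows = list(InputConverters.question_context(questions=["q1"], contexts=[["c1", "c2"]]))
-    assert rows == [{"question": "q1", "context": "c1\n\nc2"}]
-    ```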
- """ - - @staticmethod - def _validate_input_elements(**kwargs): - for k, collection in kwargs.items(): - if not isinstance(collection, list): - msg = ( - f"UpTrain evaluator expected input '{k}' to be a collection of type 'list', " - f"got '{type(collection).__name__}' instead" - ) - raise ValueError(msg) - elif not all(isinstance(x, str) for x in collection) and not all(isinstance(x, list) for x in collection): - msg = f"UpTrain evaluator expects inputs to be of type 'str' or 'list' in '{k}'" - raise ValueError(msg) - - same_length = len({len(x) for x in kwargs.values()}) == 1 - if not same_length: - msg = f"Mismatching counts in the following inputs: {({k: len(v) for k, v in kwargs.items()})}" - raise ValueError(msg) - - @staticmethod - def validate_input_parameters(metric: UpTrainMetric, expected: Dict[str, Any], received: Dict[str, Any]): - for param, _ in expected.items(): - if param not in received: - msg = f"UpTrain evaluator expected input parameter '{param}' for metric '{metric}'" - raise ValueError(msg) - - @staticmethod - def _convert_contexts(contexts: List[List[str]]) -> List[str]: - if not all(isinstance(x, list) for x in contexts): - msg = "UpTrain evaluator expected 'contexts' to be a nested list of strings" - raise ValueError(msg) - return ["\n\n".join(c) for c in contexts] - - @staticmethod - def question_context_response( - questions: List[str], contexts: List[List[str]], responses: List[str] - ) -> Iterable[Dict[str, str]]: - InputConverters._validate_input_elements(questions=questions, contexts=contexts, responses=responses) - for q, c, r in zip(questions, InputConverters._convert_contexts(contexts), responses): # type: ignore - yield {"question": q, "context": c, "response": r} - - @staticmethod - def question_context( - questions: List[str], - contexts: List[List[str]], - ) -> Iterable[Dict[str, str]]: - InputConverters._validate_input_elements(questions=questions, contexts=contexts) - for q, c in zip(questions, InputConverters._convert_contexts(contexts)): # type: ignore - yield {"question": q, "context": c} - - @staticmethod - def question_response( - questions: List[str], - responses: List[str], - ) -> Iterable[Dict[str, str]]: - InputConverters._validate_input_elements(questions=questions, responses=responses) - for q, r in zip(questions, responses): # type: ignore - yield {"question": q, "response": r} - - @staticmethod - def response( - responses: List[str], - ) -> Iterable[Dict[str, str]]: - InputConverters._validate_input_elements(responses=responses) - for r in responses: - yield {"response": r} - - @staticmethod - def response_ground_truth( - responses: List[str], - ground_truths: List[str], - ) -> Iterable[Dict[str, str]]: - InputConverters._validate_input_elements(ground_truths=ground_truths, responses=responses) - for r, gt in zip(responses, ground_truths): # type: ignore - yield {"response": r, "ground_truth": gt} - - -class OutputConverters: - """ - Converters for results returned by UpTrain. - - They are responsible for converting the results to our output format. 
- """ - - @staticmethod - def validate_outputs(outputs: List[Dict[str, Any]]): - msg = None - if not isinstance(outputs, list): - msg = f"Expected response from UpTrain evaluator to be a 'list', got '{type(outputs).__name__}'" - elif not all(isinstance(x, dict) for x in outputs): - msg = "UpTrain evaluator expects outputs to be a list of `dict`s" - elif not all(isinstance(y, str) for x in outputs for y in x.keys()): - msg = "UpTrain evaluator expects keys in the output dicts to be `str`" - elif not all( - y is None - or isinstance( - y, - ( - float, - str, - ), - ) - for x in outputs - for y in x.values() - ): - msg = "UpTrain evaluator expects values in the output dicts to be either `str` or `float`" - - if msg is not None: - raise ValueError(msg) - - @staticmethod - def _extract_default_results(output: Dict[str, Any], metric_name: str) -> MetricResult: - try: - score_key = f"score_{metric_name}" - explanation_key = f"explanation_{metric_name}" - return MetricResult(name=metric_name, score=output[score_key], explanation=output.get(explanation_key)) - except KeyError as e: - msg = f"UpTrain evaluator did not return an expected output for metric '{metric_name}'" - raise ValueError(msg) from e - - @staticmethod - def default( - metric: UpTrainMetric, - ) -> Callable[[Dict[str, Any], Optional[Dict[str, Any]]], List[MetricResult]]: - def inner( - output: Dict[str, Any], metric_params: Optional[Dict[str, Any]], metric: UpTrainMetric # noqa: ARG001 - ) -> List[MetricResult]: - return [OutputConverters._extract_default_results(output, str(metric))] - - return partial(inner, metric=metric) - - @staticmethod - def critique_language( - output: Dict[str, Any], metric_params: Optional[Dict[str, Any]] # noqa: ARG004 - ) -> List[MetricResult]: - out = [] - for expected_key in ("fluency", "coherence", "grammar", "politeness"): - out.append(OutputConverters._extract_default_results(output, expected_key)) - return out - - @staticmethod - def critique_tone( - output: Dict[str, Any], metric_params: Optional[Dict[str, Any]] # noqa: ARG004 - ) -> List[MetricResult]: - return [OutputConverters._extract_default_results(output, "tone")] - - @staticmethod - def guideline_adherence(output: Dict[str, Any], metric_params: Optional[Dict[str, Any]]) -> List[MetricResult]: - assert metric_params is not None - return [OutputConverters._extract_default_results(output, f'{metric_params["guideline_name"]}_adherence')] - - @staticmethod - def response_matching( - output: Dict[str, Any], metric_params: Optional[Dict[str, Any]] # noqa: ARG004 - ) -> List[MetricResult]: - metric_str = "response_match" - out = [OutputConverters._extract_default_results(output, metric_str)] - - # Enumerate other relevant keys. 
- score_key = f"score_{metric_str}" - for k, v in output.items(): - if k != score_key and metric_str in k and isinstance(v, float): - out.append(MetricResult(name=k, score=v)) - return out - - -METRIC_DESCRIPTORS = { - UpTrainMetric.CONTEXT_RELEVANCE: MetricDescriptor.new( - UpTrainMetric.CONTEXT_RELEVANCE, Evals.CONTEXT_RELEVANCE, InputConverters.question_context # type: ignore - ), - UpTrainMetric.FACTUAL_ACCURACY: MetricDescriptor.new( - UpTrainMetric.FACTUAL_ACCURACY, Evals.FACTUAL_ACCURACY, InputConverters.question_context_response # type: ignore - ), - UpTrainMetric.RESPONSE_RELEVANCE: MetricDescriptor.new( - UpTrainMetric.RESPONSE_RELEVANCE, Evals.RESPONSE_RELEVANCE, InputConverters.question_response # type: ignore - ), - UpTrainMetric.RESPONSE_COMPLETENESS: MetricDescriptor.new( - UpTrainMetric.RESPONSE_COMPLETENESS, Evals.RESPONSE_COMPLETENESS, InputConverters.question_response # type: ignore - ), - UpTrainMetric.RESPONSE_COMPLETENESS_WRT_CONTEXT: MetricDescriptor.new( - UpTrainMetric.RESPONSE_COMPLETENESS_WRT_CONTEXT, - Evals.RESPONSE_COMPLETENESS_WRT_CONTEXT, - InputConverters.question_context_response, # type: ignore - ), - UpTrainMetric.RESPONSE_CONSISTENCY: MetricDescriptor.new( - UpTrainMetric.RESPONSE_CONSISTENCY, Evals.RESPONSE_CONSISTENCY, InputConverters.question_context_response # type: ignore - ), - UpTrainMetric.RESPONSE_CONCISENESS: MetricDescriptor.new( - UpTrainMetric.RESPONSE_CONCISENESS, Evals.RESPONSE_CONCISENESS, InputConverters.question_response # type: ignore - ), - UpTrainMetric.CRITIQUE_LANGUAGE: MetricDescriptor.new( - UpTrainMetric.CRITIQUE_LANGUAGE, - Evals.CRITIQUE_LANGUAGE, - InputConverters.response, - OutputConverters.critique_language, - ), - UpTrainMetric.CRITIQUE_TONE: MetricDescriptor.new( - UpTrainMetric.CRITIQUE_TONE, - CritiqueTone, - InputConverters.response, - OutputConverters.critique_tone, - init_parameters={"llm_persona": str}, - ), - UpTrainMetric.GUIDELINE_ADHERENCE: MetricDescriptor.new( - UpTrainMetric.GUIDELINE_ADHERENCE, - GuidelineAdherence, - InputConverters.question_response, # type: ignore - OutputConverters.guideline_adherence, - init_parameters={"guideline": str, "guideline_name": str, "response_schema": Optional[str]}, # type: ignore - ), - UpTrainMetric.RESPONSE_MATCHING: MetricDescriptor.new( - UpTrainMetric.RESPONSE_MATCHING, - ResponseMatching, - InputConverters.response_ground_truth, # type: ignore - OutputConverters.response_matching, - init_parameters={"method": Optional[str]}, # type: ignore - ), -} diff --git a/integrations/uptrain/tests/__init__.py b/integrations/uptrain/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/integrations/uptrain/tests/test_evaluator.py b/integrations/uptrain/tests/test_evaluator.py deleted file mode 100644 index d7566c795..000000000 --- a/integrations/uptrain/tests/test_evaluator.py +++ /dev/null @@ -1,400 +0,0 @@ -import copy -import os -from dataclasses import dataclass -from typing import List -from unittest.mock import patch - -import pytest -from haystack import DeserializationError - -from haystack_integrations.components.evaluators.uptrain import UpTrainEvaluator, UpTrainMetric -from haystack.utils import Secret - -DEFAULT_QUESTIONS = [ - "Which is the most popular global sport?", - "Who created the Python language?", -] -DEFAULT_CONTEXTS = [ - [ - "The popularity of sports can be measured in various ways, including TV viewership, social media presence, number of participants, and economic impact.", - "Football is undoubtedly the world's most 
popular sport with major events like the FIFA World Cup and sports personalities like Ronaldo and Messi, drawing a followership of more than 4 billion people.", - ], - [ - "Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming language. Its design philosophy emphasizes code readability, and its language constructs aim to help programmers write clear, logical code for both small and large-scale software projects." - ], -] -DEFAULT_RESPONSES = [ - "Football is the most popular sport with around 4 billion followers worldwide", - "Python language was created by Guido van Rossum.", -] - - -@dataclass(frozen=True) -class Unserializable: - something: str - - -# Only returns results for the passed metrics. -class MockBackend: - def __init__(self, metric_outputs: List[UpTrainMetric]) -> None: - self.metrics = metric_outputs - if not self.metrics: - self.metrics = [e for e in UpTrainMetric] - - def log_and_evaluate(self, data, checks, **kwargs): - output_map = { - UpTrainMetric.CONTEXT_RELEVANCE: { - "score_context_relevance": 0.5, - "explanation_context_relevance": "1", - }, - UpTrainMetric.FACTUAL_ACCURACY: { - "score_factual_accuracy": 1.0, - "explanation_factual_accuracy": "2", - }, - UpTrainMetric.RESPONSE_RELEVANCE: { - "score_response_relevance": 1.0, - "explanation_response_relevance": "3", - }, - UpTrainMetric.RESPONSE_COMPLETENESS: { - "score_response_completeness": 0.5, - "explanation_response_completeness": "4", - }, - UpTrainMetric.RESPONSE_COMPLETENESS_WRT_CONTEXT: { - "score_response_completeness_wrt_context": 1.0, - "explanation_response_completeness_wrt_context": "5", - }, - UpTrainMetric.RESPONSE_CONSISTENCY: { - "score_response_consistency": 0.9, - "explanation_response_consistency": "6", - }, - UpTrainMetric.RESPONSE_CONCISENESS: { - "score_response_conciseness": 1.0, - "explanation_response_conciseness": "7", - }, - UpTrainMetric.CRITIQUE_LANGUAGE: { - "score_fluency": 1.0, - "score_coherence": 1.0, - "score_grammar": 1.0, - "score_politeness": 1.0, - "explanation_fluency": "8", - "explanation_coherence": "9", - "explanation_grammar": "10", - "explanation_politeness": "11", - }, - UpTrainMetric.CRITIQUE_TONE: { - "score_tone": 0.4, - "explanation_tone": "12", - }, - UpTrainMetric.GUIDELINE_ADHERENCE: { - "score_guideline_adherence": 1.0, - "explanation_guideline_adherence": "13", - }, - UpTrainMetric.RESPONSE_MATCHING: { - "response_match_precision": 1.0, - "response_match_recall": 0.6666666666666666, - "score_response_match": 0.7272727272727273, - }, - } - - data = copy.deepcopy(data) - for x in data: - for m in self.metrics: - x.update(output_map[m]) - return data - - -def test_evaluator_api(monkeypatch): - monkeypatch.setenv("OPENAI_API_KEY", "test-api-key") - monkeypatch.setenv("UPTRAIN_API_KEY", "test-api-key") - - eval = UpTrainEvaluator(UpTrainMetric.RESPONSE_COMPLETENESS) - assert eval.api == "openai" - assert eval.api_key == Secret.from_env_var("OPENAI_API_KEY") - - eval = UpTrainEvaluator( - UpTrainMetric.RESPONSE_COMPLETENESS, - api="uptrain", - api_key=Secret.from_env_var("UPTRAIN_API_KEY"), - api_params={"project_name": "test"}, - ) - assert eval.api == "uptrain" - assert eval.api_key == Secret.from_env_var("UPTRAIN_API_KEY") - assert eval.api_params == {"project_name": "test"} - - with pytest.raises(ValueError, match="Unsupported API"): - UpTrainEvaluator(UpTrainMetric.CONTEXT_RELEVANCE, api="cohere") - - with pytest.raises(ValueError, match="None of the following authentication environment variables are set"): - 
UpTrainEvaluator(UpTrainMetric.CONTEXT_RELEVANCE, api="uptrain", api_key=Secret.from_env_var("asd39920qqq")) - - with pytest.raises(ValueError, match="does not support additional parameters"): - UpTrainEvaluator( - UpTrainMetric.CONTEXT_RELEVANCE, - api_params={"project_name": "test"}, - api="openai", - ) - - with pytest.raises(ValueError, match="requires .* API parameter"): - UpTrainEvaluator( - UpTrainMetric.CONTEXT_RELEVANCE, - api_params=None, - api="uptrain", - ) - - -def test_evaluator_metric_init_params(): - eval = UpTrainEvaluator( - UpTrainMetric.CRITIQUE_TONE, - metric_params={"llm_persona": "village idiot"}, - api_key=Secret.from_token("Aaa"), - ) - assert eval._backend_metric.llm_persona == "village idiot" - - with pytest.raises(ValueError, match="Invalid init parameters"): - UpTrainEvaluator( - UpTrainMetric.CRITIQUE_TONE, metric_params={"role": "village idiot"}, api_key=Secret.from_token("Aaa") - ) - - with pytest.raises(ValueError, match="unexpected init parameters"): - UpTrainEvaluator( - UpTrainMetric.FACTUAL_ACCURACY, metric_params={"check_numbers": True}, api_key=Secret.from_token("Aaa") - ) - - with pytest.raises(ValueError, match="expected init parameters"): - UpTrainEvaluator(UpTrainMetric.RESPONSE_MATCHING, api_key=Secret.from_token("Aaa")) - - -@patch("os.environ.get") -def test_evaluator_serde(os_environ_get): - os_environ_get.return_value = "abacab" - - init_params = { - "metric": UpTrainMetric.RESPONSE_MATCHING, - "metric_params": {"method": "rouge"}, - "api": "uptrain", - "api_key": Secret.from_env_var("ENV_VAR", strict=False), - "api_params": {"project_name": "test"}, - } - eval = UpTrainEvaluator(**init_params) - serde_data = eval.to_dict() - new_eval = UpTrainEvaluator.from_dict(serde_data) - - assert eval.metric == new_eval.metric - assert eval.api == new_eval.api - assert eval.api_key == new_eval.api_key - assert eval.metric_params == new_eval.metric_params - assert eval.api_params == new_eval.api_params - assert type(new_eval._backend_client) == type(eval._backend_client) - assert type(new_eval._backend_metric) == type(eval._backend_metric) - - with pytest.raises(DeserializationError, match=r"cannot serialize the API/metric parameters"): - init_params3 = copy.deepcopy(init_params) - init_params3["api_params"] = {"arg": Unserializable(""), "project_name": "test"} - eval = UpTrainEvaluator(**init_params3) - eval.to_dict() - - -@pytest.mark.parametrize( - "metric, inputs, params", - [ - (UpTrainMetric.CONTEXT_RELEVANCE, {"questions": [], "contexts": []}, None), - (UpTrainMetric.FACTUAL_ACCURACY, {"questions": [], "contexts": [], "responses": []}, None), - (UpTrainMetric.RESPONSE_RELEVANCE, {"questions": [], "responses": []}, None), - (UpTrainMetric.RESPONSE_COMPLETENESS, {"questions": [], "responses": []}, None), - (UpTrainMetric.RESPONSE_COMPLETENESS_WRT_CONTEXT, {"questions": [], "contexts": [], "responses": []}, None), - (UpTrainMetric.RESPONSE_CONSISTENCY, {"questions": [], "contexts": [], "responses": []}, None), - (UpTrainMetric.RESPONSE_CONCISENESS, {"questions": [], "responses": []}, None), - (UpTrainMetric.CRITIQUE_LANGUAGE, {"responses": []}, None), - (UpTrainMetric.CRITIQUE_TONE, {"responses": []}, {"llm_persona": "idiot"}), - ( - UpTrainMetric.GUIDELINE_ADHERENCE, - {"questions": [], "responses": []}, - {"guideline": "Do nothing", "guideline_name": "somename", "response_schema": None}, - ), - (UpTrainMetric.RESPONSE_MATCHING, {"ground_truths": [], "responses": []}, {"method": "llm"}), - ], -) -def test_evaluator_valid_inputs(metric, inputs, 
params): - init_params = { - "metric": metric, - "metric_params": params, - "api_key": Secret.from_token("Aaa"), - "api_params": None, - } - eval = UpTrainEvaluator(**init_params) - eval._backend_client = MockBackend([metric]) - output = eval.run(**inputs) - - -@pytest.mark.parametrize( - "metric, inputs, error_string, params", - [ - (UpTrainMetric.CONTEXT_RELEVANCE, {"questions": {}, "contexts": []}, "to be a collection of type 'list'", None), - ( - UpTrainMetric.FACTUAL_ACCURACY, - {"questions": [1], "contexts": [2], "responses": [3]}, - "expects inputs to be of type 'str'", - None, - ), - (UpTrainMetric.RESPONSE_RELEVANCE, {"questions": [""], "responses": []}, "Mismatching counts ", None), - (UpTrainMetric.RESPONSE_RELEVANCE, {"responses": []}, "expected input parameter ", None), - ], -) -def test_evaluator_invalid_inputs(metric, inputs, error_string, params): - with pytest.raises(ValueError, match=error_string): - init_params = { - "metric": metric, - "metric_params": params, - "api_key": Secret.from_token("Aaa"), - "api_params": None, - } - eval = UpTrainEvaluator(**init_params) - eval._backend_client = MockBackend([metric]) - output = eval.run(**inputs) - - -# This test validates the expected outputs of the evaluator. -# Each output is parameterized as a list of tuples, where each tuple is -# (name, score, explanation). The name and explanation are optional. If -# the name is None, then the metric name is used. -@pytest.mark.parametrize( - "metric, inputs, expected_outputs, metric_params", - [ - (UpTrainMetric.CONTEXT_RELEVANCE, {"questions": ["q1"], "contexts": [["c1"]]}, [[(None, 0.5, "1")]], None), - ( - UpTrainMetric.FACTUAL_ACCURACY, - {"questions": ["q2"], "contexts": [["c2"]], "responses": ["r2"]}, - [[(None, 1.0, "2")]], - None, - ), - (UpTrainMetric.RESPONSE_RELEVANCE, {"questions": ["q3"], "responses": ["r3"]}, [[(None, 1.0, "3")]], None), - (UpTrainMetric.RESPONSE_COMPLETENESS, {"questions": ["q4"], "responses": ["r4"]}, [[(None, 0.5, "4")]], None), - ( - UpTrainMetric.RESPONSE_COMPLETENESS_WRT_CONTEXT, - {"questions": ["q5"], "contexts": [["c5"]], "responses": ["r5"]}, - [[(None, 1.0, "5")]], - None, - ), - ( - UpTrainMetric.RESPONSE_CONSISTENCY, - {"questions": ["q6"], "contexts": [["c6"]], "responses": ["r6"]}, - [[(None, 0.9, "6")]], - None, - ), - (UpTrainMetric.RESPONSE_CONCISENESS, {"questions": ["q7"], "responses": ["r7"]}, [[(None, 1.0, "7")]], None), - ( - UpTrainMetric.CRITIQUE_LANGUAGE, - {"responses": ["r8"]}, - [ - [ - ("fluency", 1.0, "8"), - ("coherence", 1.0, "9"), - ("grammar", 1.0, "10"), - ("politeness", 1.0, "11"), - ] - ], - None, - ), - (UpTrainMetric.CRITIQUE_TONE, {"responses": ["r9"]}, [[("tone", 0.4, "12")]], {"llm_persona": "idiot"}), - ( - UpTrainMetric.GUIDELINE_ADHERENCE, - {"questions": ["q10"], "responses": ["r10"]}, - [[(None, 1.0, "13")]], - {"guideline": "Do nothing", "guideline_name": "guideline", "response_schema": None}, - ), - ( - UpTrainMetric.RESPONSE_MATCHING, - {"ground_truths": ["g11"], "responses": ["r11"]}, - [ - [ - ("response_match_precision", 1.0, None), - ("response_match_recall", 0.6666666666666666, None), - ("response_match", 0.7272727272727273, None), - ] - ], - {"method": "llm"}, - ), - ], -) -def test_evaluator_outputs(metric, inputs, expected_outputs, metric_params): - init_params = { - "metric": metric, - "metric_params": metric_params, - "api_key": Secret.from_token("Aaa"), - "api_params": None, - } - eval = UpTrainEvaluator(**init_params) - eval._backend_client = MockBackend([metric]) - results = 
eval.run(**inputs)["results"]
-
-    assert type(results) == type(expected_outputs)
-    assert len(results) == len(expected_outputs)
-
-    for r, o in zip(results, expected_outputs):
-        assert len(r) == len(o)
-
-        expected = {(name if name is not None else str(metric), score, exp) for name, score, exp in o}
-        got = {(x["name"], x["score"], x["explanation"]) for x in r}
-        assert got == expected
-
-
-# This integration test validates the evaluator by running it against the
-# OpenAI API. It is parameterized by the metric, the inputs to the evaluator,
-# and the metric parameters.
-@pytest.mark.integration
-@pytest.mark.skipif("OPENAI_API_KEY" not in os.environ, reason="OPENAI_API_KEY not set")
-@pytest.mark.parametrize(
-    "metric, inputs, metric_params",
-    [
-        (UpTrainMetric.CONTEXT_RELEVANCE, {"questions": DEFAULT_QUESTIONS, "contexts": DEFAULT_CONTEXTS}, None),
-        (
-            UpTrainMetric.FACTUAL_ACCURACY,
-            {"questions": DEFAULT_QUESTIONS, "contexts": DEFAULT_CONTEXTS, "responses": DEFAULT_RESPONSES},
-            None,
-        ),
-        (UpTrainMetric.RESPONSE_RELEVANCE, {"questions": DEFAULT_QUESTIONS, "responses": DEFAULT_RESPONSES}, None),
-        (UpTrainMetric.RESPONSE_COMPLETENESS, {"questions": DEFAULT_QUESTIONS, "responses": DEFAULT_RESPONSES}, None),
-        (
-            UpTrainMetric.RESPONSE_COMPLETENESS_WRT_CONTEXT,
-            {"questions": DEFAULT_QUESTIONS, "contexts": DEFAULT_CONTEXTS, "responses": DEFAULT_RESPONSES},
-            None,
-        ),
-        (
-            UpTrainMetric.RESPONSE_CONSISTENCY,
-            {"questions": DEFAULT_QUESTIONS, "contexts": DEFAULT_CONTEXTS, "responses": DEFAULT_RESPONSES},
-            None,
-        ),
-        (UpTrainMetric.RESPONSE_CONCISENESS, {"questions": DEFAULT_QUESTIONS, "responses": DEFAULT_RESPONSES}, None),
-        (UpTrainMetric.CRITIQUE_LANGUAGE, {"responses": DEFAULT_RESPONSES}, None),
-        (UpTrainMetric.CRITIQUE_TONE, {"responses": DEFAULT_RESPONSES}, {"llm_persona": "idiot"}),
-        (
-            UpTrainMetric.GUIDELINE_ADHERENCE,
-            {"questions": DEFAULT_QUESTIONS, "responses": DEFAULT_RESPONSES},
-            {"guideline": "Do nothing", "guideline_name": "somename", "response_schema": None},
-        ),
-        (
-            UpTrainMetric.RESPONSE_MATCHING,
-            {
-                "ground_truths": [
-                    "Consumerism is the most popular sport in the world",
-                    "Python language was created by some dude.",
-                ],
-                "responses": DEFAULT_RESPONSES,
-            },
-            {"method": "llm"},
-        ),
-    ],
-)
-def test_integration_run(metric, inputs, metric_params):
-    init_params = {
-        "metric": metric,
-        "metric_params": metric_params,
-        "api": "openai",
-    }
-    eval = UpTrainEvaluator(**init_params)
-    output = eval.run(**inputs)
-
-    assert type(output) == dict
-    assert len(output) == 1
-    assert "results" in output
-    assert len(output["results"]) == len(next(iter(inputs.values())))
diff --git a/integrations/uptrain/tests/test_metrics.py b/integrations/uptrain/tests/test_metrics.py
deleted file mode 100644
index 805c82fbf..000000000
--- a/integrations/uptrain/tests/test_metrics.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import pytest
-
-from haystack_integrations.components.evaluators.uptrain import UpTrainMetric
-
-
-def test_uptrain_metric():
-    for e in UpTrainMetric:
-        assert e == UpTrainMetric.from_str(e.value)
-
-    with pytest.raises(ValueError, match="Unknown UpTrain metric"):
-        UpTrainMetric.from_str("smugness")
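For reference, a minimal sketch of how the removed evaluator was typically driven end to end inside a Haystack pipeline. It assumes the pre-removal integration package is installed and `OPENAI_API_KEY` is set; the component name and the persona string are illustrative, while the metric, parameter, and input names come from the deleted sources above:

```python
from haystack import Pipeline

from haystack_integrations.components.evaluators.uptrain import UpTrainEvaluator, UpTrainMetric

# CRITIQUE_TONE is a parametric metric: it requires `llm_persona` via `metric_params`
# and expects only `responses` as input (see the UpTrainMetric enum above).
evaluator = UpTrainEvaluator(
    metric=UpTrainMetric.CRITIQUE_TONE,
    metric_params={"llm_persona": "methodical researcher"},
)

pipeline = Pipeline()
pipeline.add_component("evaluator", evaluator)

# Each input produces a nested list of result dicts with `name`, `score`, `explanation`.
results = pipeline.run({"evaluator": {"responses": ["Python was created by Guido van Rossum."]}})
print(results["evaluator"]["results"])
```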