Skip to content

Commit

Permalink
Removed unneeded references to LangChain
Browse files Browse the repository at this point in the history
  • Loading branch information
alexthomas93 committed Oct 16, 2024
1 parent 06d9889 commit 4a0a220
Show file tree
Hide file tree
Showing 11 changed files with 47 additions and 29 deletions.
2 changes: 1 addition & 1 deletion docs/source/index.rst
Original file line number Diff line number Diff line change
Expand Up @@ -152,8 +152,8 @@ While the library has more retrievers than shown here, the following examples sh
.. code:: python
from neo4j import GraphDatabase
from neo4j_graphrag.embeddings.openai import OpenAIEmbeddings
from neo4j_graphrag.retrievers import VectorRetriever
from langchain_openai import OpenAIEmbeddings
URI = "neo4j://localhost:7687"
AUTH = ("neo4j", "password")
Expand Down
5 changes: 2 additions & 3 deletions docs/source/user_guide_rag.rst
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ To perform a GraphRAG query using the `neo4j-graphrag` package, a few components

1. A Neo4j driver: used to query your Neo4j database.
2. A Retriever: the `neo4j-graphrag` package provides some implementations (see the :ref:`dedicated section <retriever-configuration>`) and lets you write your own if none of the provided implementations matches your needs (see :ref:`how to write a custom retriever <custom-retriever>`).
3. An LLM: to generate the answer, we need to call an LLM model. The neo4j-graphrag package currently only provides implementation for the OpenAI LLMs, but its interface is compatible with LangChain and let developers write their own interface if needed.
3. An LLM: to generate the answer, we need to call an LLM model. The neo4j-graphrag package's LLM interface is compatible with LangChain. Developers can also write their own interface if needed.

In practice, it's done with only a few lines of code:

Expand Down Expand Up @@ -223,8 +223,7 @@ Its interface is compatible with our `GraphRAG` interface, facilitating integrat
print(response.answer)
It is however not mandatory to use LangChain. The alternative is to implement
a custom model.
It is however not mandatory to use LangChain.

Using a Custom Model
--------------------
Expand Down
6 changes: 3 additions & 3 deletions examples/graphrag_with_langchain_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,9 +10,9 @@
import logging

import neo4j
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from neo4j_graphrag.embeddings.openai import OpenAIEmbeddings
from neo4j_graphrag.generation import GraphRAG
from neo4j_graphrag.llm import OpenAILLM
from neo4j_graphrag.retrievers import VectorCypherRetriever
from neo4j_graphrag.types import RetrieverResultItem

Expand Down Expand Up @@ -48,7 +48,7 @@ def formatter(record: neo4j.Record) -> RetrieverResultItem:
embedder=embedder, # type: ignore
)

llm = ChatOpenAI(model_name="gpt-4o", temperature=0) # type: ignore
llm = OpenAILLM(model_name="gpt-4o", model_params={"temperature": 0})

rag = GraphRAG(retriever=retriever, llm=llm)

Expand Down
4 changes: 2 additions & 2 deletions examples/openai_search.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
from random import random

from langchain_openai import OpenAIEmbeddings
from neo4j import GraphDatabase
from neo4j_graphrag.embeddings.openai import OpenAIEmbeddings
from neo4j_graphrag.indexes import create_vector_index
from neo4j_graphrag.retrievers import VectorRetriever

Expand All @@ -19,7 +19,7 @@
embedder = OpenAIEmbeddings(model="text-embedding-3-large")

# Initialize the retriever
retriever = VectorRetriever(driver, INDEX_NAME, embedder) # type: ignore
retriever = VectorRetriever(driver, INDEX_NAME, embedder)

# Creating the index
create_vector_index(
Expand Down
9 changes: 5 additions & 4 deletions examples/pinecone/text_search.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
from langchain_huggingface.embeddings import HuggingFaceEmbeddings
from neo4j import GraphDatabase
from neo4j_graphrag.embeddings.sentence_transformers import (
SentenceTransformerEmbeddings,
)
from neo4j_graphrag.retrievers import PineconeNeo4jRetriever
from pinecone import Pinecone

Expand All @@ -11,14 +13,13 @@
def main() -> None:
with GraphDatabase.driver(NEO4J_URL, auth=NEO4J_AUTH) as neo4j_driver:
pc_client = Pinecone(PC_API_KEY)
embedder = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")

embedder = SentenceTransformerEmbeddings(model="all-MiniLM-L6-v2")
retriever = PineconeNeo4jRetriever(
driver=neo4j_driver,
client=pc_client,
index_name="jeopardy",
id_property_neo4j="id",
embedder=embedder, # type: ignore
embedder=embedder,
)

res = retriever.search(query_text="biology", top_k=2)
Expand Down
8 changes: 5 additions & 3 deletions examples/qdrant/text_search.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
from langchain_huggingface.embeddings import HuggingFaceEmbeddings
from neo4j import GraphDatabase
from neo4j_graphrag.embeddings.sentence_transformers import (
SentenceTransformerEmbeddings,
)
from neo4j_graphrag.retrievers import QdrantNeo4jRetriever
from qdrant_client import QdrantClient

Expand All @@ -9,14 +11,14 @@

def main() -> None:
with GraphDatabase.driver(NEO4J_URL, auth=NEO4J_AUTH) as neo4j_driver:
embedder = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
embedder = SentenceTransformerEmbeddings(model="all-MiniLM-L6-v2")
retriever = QdrantNeo4jRetriever(
driver=neo4j_driver,
client=QdrantClient(url="http://localhost:6333"),
collection_name="Jeopardy",
id_property_external="neo4j_id",
id_property_neo4j="id",
embedder=embedder, # type: ignore
embedder=embedder,
)

res = retriever.search(query_text="biology", top_k=2)
Expand Down
8 changes: 5 additions & 3 deletions examples/weaviate/text_search_local_embedder.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
from langchain_huggingface.embeddings import HuggingFaceEmbeddings
from neo4j import GraphDatabase
from neo4j_graphrag.embeddings.sentence_transformers import (
SentenceTransformerEmbeddings,
)
from neo4j_graphrag.retrievers import WeaviateNeo4jRetriever
from weaviate.connect.helpers import connect_to_local

Expand All @@ -10,14 +12,14 @@
def main() -> None:
with GraphDatabase.driver(NEO4J_URL, auth=NEO4J_AUTH) as neo4j_driver:
with connect_to_local() as w_client:
embedder = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
embedder = SentenceTransformerEmbeddings(model="all-MiniLM-L6-v2")
retriever = WeaviateNeo4jRetriever(
driver=neo4j_driver,
client=w_client,
collection="Jeopardy",
id_property_external="neo4j_id",
id_property_neo4j="id",
embedder=embedder, # type: ignore
embedder=embedder,
)

res = retriever.search(query_text="biology", top_k=2)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,9 @@ class LlamaIndexTextSplitterAdapter(TextSplitter):
.. code-block:: python
from llama_index.core.node_parser.text.sentence import SentenceSplitter
from neo4j_graphrag.experimental.components.text_splitters.langchain import LangChainTextSplitterAdapter
from neo4j_graphrag.experimental.components.text_splitters.llamaindex import (
LlamaIndexTextSplitterAdapter,
)
from neo4j_graphrag.experimental.pipeline import Pipeline
pipeline = Pipeline()
Expand Down
10 changes: 7 additions & 3 deletions tests/e2e/pinecone_e2e/test_pinecone_e2e.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,10 @@
from unittest.mock import MagicMock

import pytest
from langchain_huggingface.embeddings import HuggingFaceEmbeddings
from neo4j_graphrag.embeddings.base import Embedder
from neo4j_graphrag.embeddings.sentence_transformers import (
SentenceTransformerEmbeddings,
)
from neo4j_graphrag.retrievers import PineconeNeo4jRetriever
from neo4j_graphrag.types import RetrieverResult, RetrieverResultItem
from pinecone import Pinecone
Expand All @@ -28,8 +30,10 @@


@pytest.fixture(scope="module")
def sentence_transformer_embedder() -> Generator[HuggingFaceEmbeddings, Any, Any]:
embedder = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
def sentence_transformer_embedder() -> (
Generator[SentenceTransformerEmbeddings, Any, Any]
):
embedder = SentenceTransformerEmbeddings(model="all-MiniLM-L6-v2")
yield embedder


Expand Down
10 changes: 7 additions & 3 deletions tests/e2e/qdrant_e2e/test_qdrant_e2e.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,9 +17,11 @@
from typing import Any, Generator

import pytest
from langchain_huggingface.embeddings import HuggingFaceEmbeddings
from neo4j import Driver
from neo4j_graphrag.embeddings.base import Embedder
from neo4j_graphrag.embeddings.sentence_transformers import (
SentenceTransformerEmbeddings,
)
from neo4j_graphrag.retrievers import QdrantNeo4jRetriever
from neo4j_graphrag.types import RetrieverResult, RetrieverResultItem
from qdrant_client import QdrantClient
Expand All @@ -29,8 +31,10 @@


@pytest.fixture(scope="module")
def sentence_transformer_embedder() -> Generator[HuggingFaceEmbeddings, Any, Any]:
embedder = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
def sentence_transformer_embedder() -> (
Generator[SentenceTransformerEmbeddings, Any, Any]
):
embedder = SentenceTransformerEmbeddings(model="all-MiniLM-L6-v2")
yield embedder


Expand Down
10 changes: 7 additions & 3 deletions tests/e2e/weaviate_e2e/test_weaviate_e2e.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,9 +17,11 @@
from typing import Any, Generator

import pytest
from langchain_huggingface.embeddings import HuggingFaceEmbeddings
from neo4j import Driver
from neo4j_graphrag.embeddings.base import Embedder
from neo4j_graphrag.embeddings.sentence_transformers import (
SentenceTransformerEmbeddings,
)
from neo4j_graphrag.retrievers import WeaviateNeo4jRetriever
from neo4j_graphrag.types import RetrieverResult, RetrieverResultItem
from weaviate.client import WeaviateClient
Expand All @@ -30,8 +32,10 @@


@pytest.fixture(scope="module")
def sentence_transformer_embedder() -> Generator[HuggingFaceEmbeddings, Any, Any]:
embedder = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
def sentence_transformer_embedder() -> (
Generator[SentenceTransformerEmbeddings, Any, Any]
):
embedder = SentenceTransformerEmbeddings(model="all-MiniLM-L6-v2")
yield embedder


Expand Down

0 comments on commit 4a0a220

Please sign in to comment.