Add telemetry for all services (opea-project#59)
* add telemetry for all services

Signed-off-by: Spycsh <[email protected]>

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Signed-off-by: Spycsh <[email protected]>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Spycsh and pre-commit-ci[bot] authored May 16, 2024
1 parent a7c5ebd commit 9ddc8ec
Showing 9 changed files with 28 additions and 9 deletions.
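Every changed file follows the same pattern: import opea_telemetry from comps and stack it as a decorator on the service handler. The decorator's implementation ships in the comps package and is not part of this diff; as a rough sketch of what a tracing decorator of this shape could look like (the OpenTelemetry usage and all names below are assumptions for illustration, not the actual OPEA code):

# Hypothetical sketch of a tracing decorator like opea_telemetry.
# The real implementation lives in the comps package and is not shown
# in this diff; the OpenTelemetry usage here is an assumption.
import functools

from opentelemetry import trace

tracer = trace.get_tracer(__name__)


def opea_telemetry(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Record each call as a span named after the wrapped service handler.
        with tracer.start_as_current_span(func.__name__):
            return func(*args, **kwargs)

    return wrapper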
3 changes: 2 additions & 1 deletion comps/dataprep/langchain/qdrant/prepare_doc_qdrant.py
@@ -19,7 +19,7 @@
 from langchain_community.embeddings import HuggingFaceBgeEmbeddings, HuggingFaceEmbeddings, HuggingFaceHubEmbeddings
 from langchain_community.vectorstores import Qdrant

-from comps import DocPath, opea_microservices, register_microservice
+from comps import DocPath, opea_microservices, opea_telemetry, register_microservice
 from comps.dataprep.langchain.utils import docment_loader

 tei_embedding_endpoint = os.getenv("TEI_ENDPOINT")
@@ -33,6 +33,7 @@
     input_datatype=DocPath,
     output_datatype=None,
 )
+@opea_telemetry
 def ingest_documents(doc_path: DocPath):
     """Ingest document to Qdrant."""
     doc_path = doc_path.path
3 changes: 2 additions & 1 deletion comps/dataprep/langchain/redis/prepare_doc_redis.py
@@ -19,7 +19,7 @@
 from langchain_community.embeddings import HuggingFaceBgeEmbeddings, HuggingFaceEmbeddings, HuggingFaceHubEmbeddings
 from langchain_community.vectorstores import Redis

-from comps import DocPath, opea_microservices, register_microservice
+from comps import DocPath, opea_microservices, opea_telemetry, register_microservice
 from comps.dataprep.langchain.utils import docment_loader

 tei_embedding_endpoint = os.getenv("TEI_ENDPOINT")
@@ -33,6 +33,7 @@
     input_datatype=DocPath,
     output_datatype=None,
 )
+@opea_telemetry
 def ingest_documents(doc_path: DocPath):
     """Ingest document to Redis."""
     doc_path = doc_path.path
3 changes: 2 additions & 1 deletion comps/embeddings/langchain/embedding_tei_gaudi.py
@@ -16,7 +16,7 @@

 from langchain_community.embeddings import HuggingFaceHubEmbeddings

-from comps import EmbedDoc768, ServiceType, TextDoc, opea_microservices, register_microservice
+from comps import EmbedDoc768, ServiceType, TextDoc, opea_microservices, opea_telemetry, register_microservice


 @register_microservice(
@@ -28,6 +28,7 @@
     input_datatype=TextDoc,
     output_datatype=EmbedDoc768,
 )
+@opea_telemetry
 def embedding(input: TextDoc) -> EmbedDoc768:
     embed_vector = embeddings.embed_query(input.text)
     embed_vector = embed_vector[:768]  # Keep only the first 768 elements
3 changes: 2 additions & 1 deletion comps/embeddings/langchain/local_embedding.py
@@ -14,7 +14,7 @@

 from langchain_community.embeddings import HuggingFaceBgeEmbeddings

-from comps import EmbedDoc1024, ServiceType, TextDoc, opea_microservices, register_microservice
+from comps import EmbedDoc1024, ServiceType, TextDoc, opea_microservices, opea_telemetry, register_microservice


 @register_microservice(
@@ -26,6 +26,7 @@
     input_datatype=TextDoc,
     output_datatype=EmbedDoc1024,
 )
+@opea_telemetry
 def embedding(input: TextDoc) -> EmbedDoc1024:
     embeddings = HuggingFaceBgeEmbeddings(model_name="BAAI/bge-large-en-v1.5")
     embed_vector = embeddings.embed_query(input.text)
4 changes: 3 additions & 1 deletion comps/guardrails/langchain/guardrails_tgi_gaudi.py
@@ -16,7 +16,7 @@

 from langchain_community.llms import HuggingFaceEndpoint

-from comps import ServiceType, TextDoc, opea_microservices, register_microservice
+from comps import ServiceType, TextDoc, opea_microservices, opea_telemetry, register_microservice

 unsafe_categories = """O1: Violence and Hate.
 Should not
@@ -75,6 +75,7 @@
 }


+@opea_telemetry
 def moderation_prompt_for_chat(chat):
     # For simplicity, we assume the chat dictionary correctly alternates "user" and "assistant" roles
     # Feel free to include error checking if needed
@@ -112,6 +113,7 @@ def moderation_prompt_for_chat(chat):
     input_datatype=TextDoc,
     output_datatype=TextDoc,
 )
+@opea_telemetry
 def safety_guard(input: TextDoc) -> TextDoc:
     # prompt guardrails
     response_input_guard = llm_guard(moderation_prompt_for_chat([{"role": "User", "content": input.text}]))
4 changes: 3 additions & 1 deletion comps/llms/langchain/llm_tgi.py
@@ -17,9 +17,10 @@
 from fastapi.responses import StreamingResponse
 from langchain_community.llms import HuggingFaceEndpoint

-from comps import GeneratedDoc, LLMParamsDoc, ServiceType, opea_microservices, register_microservice
+from comps import GeneratedDoc, LLMParamsDoc, ServiceType, opea_microservices, opea_telemetry, register_microservice


+@opea_telemetry
 def post_process_text(text: str):
     if text == " ":
         return "data: @#$\n\n"
@@ -38,6 +39,7 @@ def post_process_text(text: str):
     host="0.0.0.0",
     port=9000,
 )
+@opea_telemetry
 def llm_generate(input: LLMParamsDoc):
     llm_endpoint = os.getenv("TGI_LLM_ENDPOINT", "http://localhost:8080")
     llm = HuggingFaceEndpoint(
3 changes: 2 additions & 1 deletion comps/reranks/langchain/local_reranking.py
@@ -14,7 +14,7 @@

 from sentence_transformers import CrossEncoder

-from comps import RerankedDoc, SearchedDoc, ServiceType, opea_microservices, register_microservice
+from comps import RerankedDoc, SearchedDoc, ServiceType, opea_microservices, opea_telemetry, register_microservice


 @register_microservice(
@@ -26,6 +26,7 @@
     input_datatype=SearchedDoc,
     output_datatype=RerankedDoc,
 )
+@opea_telemetry
 def reranking(input: SearchedDoc) -> RerankedDoc:
     query_and_docs = [(input.initial_query, doc.text) for doc in input.retrieved_docs]
     scores = reranker_model.predict(query_and_docs)
3 changes: 2 additions & 1 deletion comps/reranks/langchain/reranking_tei_xeon.py
@@ -19,7 +19,7 @@
 import requests
 from langchain_core.prompts import ChatPromptTemplate

-from comps import LLMParamsDoc, SearchedDoc, ServiceType, opea_microservices, register_microservice
+from comps import LLMParamsDoc, SearchedDoc, ServiceType, opea_microservices, opea_telemetry, register_microservice


 @register_microservice(
@@ -31,6 +31,7 @@
     input_datatype=SearchedDoc,
     output_datatype=LLMParamsDoc,
 )
+@opea_telemetry
 def reranking(input: SearchedDoc) -> LLMParamsDoc:
     docs = [doc.text for doc in input.retrieved_docs]
     url = tei_reranking_endpoint + "/rerank"
11 changes: 10 additions & 1 deletion comps/retrievers/langchain/retriever_redis.py
@@ -17,7 +17,15 @@
 from langchain_community.vectorstores import Redis
 from redis_config import INDEX_NAME, INDEX_SCHEMA, REDIS_URL

-from comps import EmbedDoc768, SearchedDoc, ServiceType, TextDoc, opea_microservices, register_microservice
+from comps import (
+    EmbedDoc768,
+    SearchedDoc,
+    ServiceType,
+    TextDoc,
+    opea_microservices,
+    opea_telemetry,
+    register_microservice,
+)


 @register_microservice(
@@ -27,6 +35,7 @@
     host="0.0.0.0",
     port=7000,
 )
+@opea_telemetry
 def retrieve(input: EmbedDoc768) -> SearchedDoc:
     embeddings = HuggingFaceBgeEmbeddings(model_name="BAAI/bge-base-en-v1.5")
     vector_db = Redis.from_existing_index(
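One detail worth noting across all nine handlers: @opea_telemetry sits directly above the def, beneath @register_microservice. Python applies decorators bottom-up, so the telemetry wrapper is applied first and the registration step sees the already-instrumented function. A minimal sketch of that ordering, with both decorators stubbed out for illustration only (not the real comps APIs):

# Illustration of the decorator stacking used throughout this commit.
# Both decorators are stubs for demonstration, not the real comps APIs.
import functools


def register_microservice(func):
    # Applied second: receives the already-traced wrapper.
    print(f"registering endpoint for {func.__name__}")
    return func


def opea_telemetry(func):
    # Applied first: wraps the raw handler.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        print(f"traced call: {func.__name__}")
        return func(*args, **kwargs)

    return wrapper


@register_microservice
@opea_telemetry
def embedding(text: str) -> str:
    return text.upper()


# Prints "registering endpoint for embedding" at import time,
# then "traced call: embedding" on each invocation.
print(embedding("hello"))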
