Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Model details capture for Azure OpenAI #67

Merged
merged 2 commits into [target branch] from [source branch]
on Nov 4, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 16 additions & 0 deletions src/monocle_apptrace/metamodel/maps/langchain_methods.json
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,22 @@
"wrapper_method": "allm_wrapper",
"output_processor": ["metamodel/maps/attributes/inference/langchain_entities.json"]
},
{
Hansrajr marked this conversation as resolved.
Show resolved Hide resolved
"package": "langchain_core.language_models.llms",
"object": "BaseLLM",
"method": "invoke",
"wrapper_package": "wrap_common",
"wrapper_method": "llm_wrapper",
"output_processor": ["metamodel/maps/attributes/inference/langchain_entities.json"]
},
{
"package": "langchain_core.language_models.llms",
"object": "BaseLLM",
"method": "ainvoke",
"wrapper_package": "wrap_common",
"wrapper_method": "allm_wrapper",
"output_processor": ["metamodel/maps/attributes/inference/langchain_entities.json"]
},
{
"package": "langchain_core.retrievers",
"object": "BaseRetriever",
Expand Down
69 changes: 42 additions & 27 deletions tests/langchain_custom_output_processor_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,11 @@

import unittest
from unittest.mock import ANY, MagicMock, patch

from urllib.parse import urlparse
import pytest
import requests
from dummy_class import DummyClass
from langchain_openai import AzureOpenAI
from embeddings_wrapper import HuggingFaceEmbeddings
from http_span_exporter import HttpSpanExporter
from langchain.prompts import PromptTemplate
Expand Down Expand Up @@ -78,7 +79,7 @@ class TestHandler(unittest.TestCase):
def __format_docs(self, docs):
return "\n\n ".join(doc.page_content for doc in docs)

def __createChain(self):
def __createChain(self, llm_type):

resource = Resource(attributes={
SERVICE_NAME: "coffee_rag_fake"
Expand All @@ -93,8 +94,20 @@ def __createChain(self):
self.instrumentor.instrument()
self.processor = monocleProcessor
responses =[self.ragText]
llm = FakeListLLM(responses=responses)
llm.api_base = "https://example.com/"
if llm_type == "FakeListLLM":
responses = [self.ragText]
llm = FakeListLLM(responses=responses)
llm.api_base = "https://example.com/"
else:
llm = AzureOpenAI(
azure_deployment=os.environ.get("AZURE_OPENAI_API_DEPLOYMENT"),
api_key=os.environ.get("AZURE_OPENAI_API_KEY"),
api_version=os.environ.get("AZURE_OPENAI_API_VERSION"),
azure_endpoint=os.environ.get("AZURE_OPENAI_ENDPOINT"),
temperature=0.1,
model="gpt-3.5-turbo-0125"
)
llm.azure_endpoint = "https://example.com/"
embeddings = HuggingFaceEmbeddings(model_id = "multi-qa-mpnet-base-dot-v1")
my_path = os.path.abspath(os.path.dirname(__file__))
model_path = os.path.join(my_path, "./vector_data/coffee_embeddings")
Expand All @@ -111,6 +124,10 @@ def __createChain(self):
return rag_chain

def setUp(self):
os.environ["AZURE_OPENAI_API_DEPLOYMENT"] = "AZURE_OPENAI_API_DEPLOYMENT"
os.environ["AZURE_OPENAI_API_KEY"] = "AZURE_OPENAI_API_KEY"
os.environ["AZURE_OPENAI_ENDPOINT"] = "AZURE_OPENAI_ENDPOINT"
os.environ["AZURE_OPENAI_API_VERSION"] = "2024-02-01"
os.environ["HTTP_API_KEY"] = "key1"
os.environ["HTTP_INGESTION_ENDPOINT"] = "https://localhost:3000/api/v1/traces"

Expand All @@ -119,13 +136,11 @@ def tearDown(self) -> None:
return super().tearDown()

@parameterized.expand([
("1", AZURE_ML_ENDPOINT_ENV_NAME, AZURE_ML_SERVICE_NAME),
("2", AZURE_FUNCTION_WORKER_ENV_NAME, AZURE_FUNCTION_NAME),
("3", AZURE_APP_SERVICE_ENV_NAME, AZURE_APP_SERVICE_NAME),
("4", AWS_LAMBDA_ENV_NAME, AWS_LAMBDA_SERVICE_NAME),
("AzureOpenAI", AZURE_ML_ENDPOINT_ENV_NAME, "AzureOpenAI"),
("FakeListLLM", AZURE_ML_ENDPOINT_ENV_NAME, "FakeListLLM"),
])
@patch.object(requests.Session, 'post')
def test_llm_chain(self, test_name, test_input_infra, test_output_infra, mock_post):
def test_llm_chain(self, test_name, test_input_infra, llm_type, mock_post):
app_name = "test"
wrap_method = MagicMock(return_value=3)
setup_monocle_telemetry(
Expand All @@ -134,14 +149,6 @@ def test_llm_chain(self, test_name, test_input_infra, test_output_infra, mock_po
BatchSpanProcessor(HttpSpanExporter("https://localhost:3000/api/v1/traces"))
],
wrapper_methods=[
WrapperMethod(
package="langchain_core.retrievers",
object_name="BaseRetriever",
method="invoke",
wrapper=task_wrapper,
output_processor=["entities.json"]
),

])
try:

Expand All @@ -150,13 +157,13 @@ def test_llm_chain(self, test_name, test_input_infra, test_output_infra, mock_po
context_value = "context_value_1"
set_context_properties({context_key: context_value})

self.chain = self.__createChain()
self.chain = self.__createChain(llm_type)
mock_post.return_value.status_code = 201
mock_post.return_value.json.return_value = 'mock response'

query = "what is latte"
response = self.chain.invoke(query, config={})
assert response == self.ragText
# assert response == self.ragText
time.sleep(5)
mock_post.assert_called_with(
url = 'https://localhost:3000/api/v1/traces',
Expand All @@ -168,14 +175,22 @@ def test_llm_chain(self, test_name, test_input_infra, test_output_infra, mock_po
This can be used to do more asserts'''
dataBodyStr = mock_post.call_args.kwargs['data']
dataJson = json.loads(dataBodyStr) # more asserts can be added on individual fields

llm_vector_store_retriever_span = [x for x in dataJson["batch"] if 'langchain.task.VectorStoreRetriever' in x["name"]][0]
inference_span = [x for x in dataJson["batch"] if 'langchain.task.FakeListLLM' in x["name"]][0]

assert llm_vector_store_retriever_span["attributes"]["span.type"] == "retrieval"
assert llm_vector_store_retriever_span["attributes"]["entity.1.name"] == "FAISS"
assert llm_vector_store_retriever_span["attributes"]["entity.1.type"] == "vectorstore.FAISS"
assert inference_span['attributes']["entity.1.inference_endpoint"] == "https://example.com/"
if llm_type == "FakeListLLM":
llm_vector_store_retriever_span = [x for x in dataJson["batch"] if 'langchain.task.VectorStoreRetriever' in x["name"]][0]
inference_span = [x for x in dataJson["batch"] if 'langchain.task.FakeListLLM' in x["name"]][0]

assert llm_vector_store_retriever_span["attributes"]["span.type"] == "retrieval"
assert llm_vector_store_retriever_span["attributes"]["entity.1.name"] == "FAISS"
assert llm_vector_store_retriever_span["attributes"]["entity.1.type"] == "vectorstore.FAISS"
assert inference_span['attributes']["entity.1.inference_endpoint"] == "https://example.com/"
else:
llm_azure_openai_span = [x for x in dataJson["batch"] if 'langchain.task.AzureOpenAI' in x["name"]][0]
assert llm_azure_openai_span["attributes"]["span.type"] == "inference"
assert llm_azure_openai_span["attributes"]["entity.1.type"] == "inference.azure_oai"
assert llm_azure_openai_span["attributes"]["entity.1.provider_name"] == urlparse(os.environ.get("AZURE_OPENAI_ENDPOINT")).hostname
assert llm_azure_openai_span["attributes"]["entity.1.deployment"] == os.environ.get("AZURE_OPENAI_API_DEPLOYMENT")
assert llm_azure_openai_span["attributes"]["entity.1.inference_endpoint"] == "https://example.com/"
assert llm_azure_openai_span["attributes"]["entity.2.type"] == "model.llm.gpt-3.5-turbo-0125"

finally:
os.environ.pop(test_input_infra)
Expand Down
Loading
Loading