model details capture for azure openai #67

Merged 2 commits on Nov 4, 2024
Changes from 1 commit
16 changes: 16 additions & 0 deletions src/monocle_apptrace/metamodel/maps/langchain_methods.json
@@ -46,6 +46,22 @@
"wrapper_method": "allm_wrapper",
"output_processor": ["metamodel/maps/attributes/inference/langchain_entities.json"]
},
{
"package": "langchain_core.language_models.llms",
"object": "BaseLLM",
"method": "invoke",
"wrapper_package": "wrap_common",
"wrapper_method": "llm_wrapper",
"output_processor": ["metamodel/maps/attributes/inference/langchain_entities.json"]
},
{
"package": "langchain_core.language_models.llms",
"object": "BaseLLM",
"method": "ainvoke",
"wrapper_package": "wrap_common",
"wrapper_method": "allm_wrapper",
"output_processor": ["metamodel/maps/attributes/inference/langchain_entities.json"]
},
{
"package": "langchain_core.retrievers",
"object": "BaseRetriever",
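The two new entries above register synchronous and asynchronous tracing wrappers for BaseLLM, the base class behind completion-style clients such as AzureOpenAI. As a rough illustration only (not monocle's actual wrapping code), an instrumentor could consume one such entry by importing the target and monkey-patching the named method; entry, llm_wrapper, and traced below are hypothetical stand-ins:

# Hypothetical sketch of applying one mapping entry; llm_wrapper here is a
# stand-in for the real wrap_common.llm_wrapper.
import importlib

entry = {
    "package": "langchain_core.language_models.llms",
    "object": "BaseLLM",
    "method": "invoke",
}

def llm_wrapper(wrapped):
    def traced(self, *args, **kwargs):
        # A real wrapper would open a span and record model/provider
        # attributes before delegating to the original method.
        return wrapped(self, *args, **kwargs)
    return traced

# Resolve the target class and replace the named method with the wrapped one.
module = importlib.import_module(entry["package"])
target = getattr(module, entry["object"])
setattr(target, entry["method"], llm_wrapper(getattr(target, entry["method"])))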
285 changes: 214 additions & 71 deletions tests/langchain_sample.py
@@ -6,18 +6,32 @@
from langchain_community.document_loaders import WebBaseLoader
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings, AzureChatOpenAI, AzureOpenAI, OpenAI
from langchain_text_splitters import RecursiveCharacterTextSplitter
from monocle_apptrace.instrumentor import setup_monocle_telemetry
from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter

import os
os.environ["AZURE_OPENAI_API_DEPLOYMENT"] = ""
os.environ["AZURE_OPENAI_API_KEY"] = ""
os.environ["AZURE_OPENAI_API_VERSION"] = ""
os.environ["AZURE_OPENAI_ENDPOINT"] = ""
os.environ["OPENAI_API_KEY"] = ""
setup_monocle_telemetry(
workflow_name="langchain_app_1",
span_processors=[BatchSpanProcessor(ConsoleSpanExporter())],
wrapper_methods=[])

# llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
llm = AzureOpenAI(
# engine=os.environ.get("AZURE_OPENAI_API_DEPLOYMENT"),
azure_deployment=os.environ.get("AZURE_OPENAI_API_DEPLOYMENT"),
api_key=os.environ.get("AZURE_OPENAI_API_KEY"),
api_version=os.environ.get("AZURE_OPENAI_API_VERSION"),
azure_endpoint=os.environ.get("AZURE_OPENAI_ENDPOINT"),
temperature=0.1,
# model="gpt-4",
model="gpt-3.5-turbo-0125")
# Load, chunk and index the contents of the blog.
loader = WebBaseLoader(
web_paths=("https://lilianweng.github.io/posts/2023-06-23-agent/",),
@@ -53,90 +67,219 @@ def format_docs(docs):
print(result)

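# The spans below are sample ConsoleSpanExporter output for this script
# (one JSON object per span, abridged):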
# {
# "span_name": "langchain.task.StrOutputParser",
# "start_time": "2024-04-16T18:44:30.033627Z",
# "end_time": "2024-04-16T18:44:30.035181Z",
# "duration_ms": "2",
# "span_id": "0x098705d0420a7a40",
# "trace_id": "0x4d297d14b25c3891eb4dd8b28453e91a",
# "parent_id": "0x0c44185b267d8652",
# "attributes": {},
# "events": []
# "name": "langchain.task.VectorStoreRetriever",
# "context": {
# "trace_id": "0xee531670266befa8e3bd5dcf31d2a08b",
# "span_id": "0xfa3ef134b3368f45",
# "trace_state": "[]"
# },
# "kind": "SpanKind.INTERNAL",
# "parent_id": "0x45b0408368897207",
# "start_time": "2024-10-30T09:21:23.642049Z",
# "end_time": "2024-10-30T09:21:24.347534Z",
# "status": {
# "status_code": "UNSET"
# },
# "attributes": {
# "span.type": "retrieval",
# "entity.count": 2,
# "entity.1.name": "Chroma",
# "entity.1.type": "vectorstore.Chroma",
# "entity.2.name": "text-embedding-ada-002",
# "entity.2.type": "model.embedding.text-embedding-ada-002"
# },
# "events": [
# {
# "name": "data.input",
# "timestamp": "2024-10-30T09:21:23.642167Z",
# "attributes": {
# "question": "What is Task Decomposition?"
# }
# },
# {
# "name": "data.output",
# "timestamp": "2024-10-30T09:21:24.347519Z",
# "attributes": {
# "response": "Fig. 1. Overview of a LLM-powered autonomous agent system.\nComponent One: Planning#\nA complicated ta..."
# }
# }
# ],
# "links": [],
# "resource": {
# "attributes": {
# "service.name": "langchain_app_1"
# },
# "schema_url": ""
# }
# },
# {
# "span_name": "langchain.workflow",
# "start_time": "2024-04-16T18:44:25.077909Z",
# "end_time": "2024-04-16T18:44:25.442285Z",
# "duration_ms": "364",
# "span_id": "0x0c24a511693ca713",
# "trace_id": "0x4d297d14b25c3891eb4dd8b28453e91a",
# "parent_id": "0x0c44185b267d8652",
# "name": "langchain.workflow",
# "context": {
# "trace_id": "0xee531670266befa8e3bd5dcf31d2a08b",
# "span_id": "0x45b0408368897207",
# "trace_state": "[]"
# },
# "kind": "SpanKind.INTERNAL",
# "parent_id": "0xcbdb94928dc3340d",
# "start_time": "2024-10-30T09:21:23.641702Z",
# "end_time": "2024-10-30T09:21:24.347840Z",
# "status": {
# "status_code": "UNSET"
# },
# "attributes": {},
# "events": []
# "events": [],
# "links": [],
# "resource": {
# "attributes": {
# "service.name": "langchain_app_1"
# },
# "schema_url": ""
# }
# },
# {
# "span_name": "langchain.workflow",
# "start_time": "2024-04-16T18:44:24.974595Z",
# "end_time": "2024-04-16T18:44:30.035374Z",
# "duration_ms": "5061",
# "span_id": "0x0c44185b267d8652",
# "trace_id": "0x4d297d14b25c3891eb4dd8b28453e91a",
# "parent_id": "None",
# "attributes": {
# "workflow_input": "What is Task Decomposition?",
# "workflow_name": "langchain_app_1",
# "workflow_output": "Task decomposition is a technique where complex tasks are broken down into smaller and simpler steps to enhance model performance. This process allows agents to tackle difficult tasks by transforming them into more manageable components. Task decomposition can be achieved through various methods such as using prompting techniques, task-specific instructions, or human inputs.",
# "workflow_type": "workflow.langchain",
# "infra_service_name": "azure.func"
# "name": "langchain.workflow",
# "context": {
# "trace_id": "0xee531670266befa8e3bd5dcf31d2a08b",
# "span_id": "0xcbdb94928dc3340d",
# "trace_state": "[]"
# },
# "events": []
# "kind": "SpanKind.INTERNAL",
# "parent_id": "0xecd1a8a19417dc8e",
# "start_time": "2024-10-30T09:21:23.641252Z",
# "end_time": "2024-10-30T09:21:24.348115Z",
# "status": {
# "status_code": "UNSET"
# },
# "attributes": {},
# "events": [],
# "links": [],
# "resource": {
# "attributes": {
# "service.name": "langchain_app_1"
# },
# "schema_url": ""
# }
# },
# {
# "span_name": "langchain.task.ChatOpenAI",
# "start_time": "2024-04-16T18:44:28.016379Z",
# "end_time": "2024-04-16T18:44:30.033161Z",
# "duration_ms": "2017",
# "span_id": "0x369551685b41798f",
# "trace_id": "0x4d297d14b25c3891eb4dd8b28453e91a",
# "parent_id": "0x0c44185b267d8652",
# "attributes": {
# "model_name": "gpt-3.5-turbo-0125",
# "model_name": "gpt-3.5-turbo-0125",
# "provider_name": "openai.com",
# "name": "langchain.task.ChatPromptTemplate",
# "context": {
# "trace_id": "0xee531670266befa8e3bd5dcf31d2a08b",
# "span_id": "0x9a9cf227a70702a6",
# "trace_state": "[]"
# },
# "events": []
# "kind": "SpanKind.INTERNAL",
# "parent_id": "0xecd1a8a19417dc8e",
# "start_time": "2024-10-30T09:21:24.348227Z",
# "end_time": "2024-10-30T09:21:24.348663Z",
# "status": {
# "status_code": "UNSET"
# },
# "attributes": {},
# "events": [],
# "links": [],
# "resource": {
# "attributes": {
# "service.name": "langchain_app_1"
# },
# "schema_url": ""
# }
# },
# {
# "span_name": "langchain.workflow",
# "start_time": "2024-04-16T18:44:25.080676Z",
# "end_time": "2024-04-16T18:44:25.441839Z",
# "duration_ms": "361",
# "span_id": "0x7f0f48ee79169b5f",
# "trace_id": "0x4d297d14b25c3891eb4dd8b28453e91a",
# "parent_id": "0x0c24a511693ca713",
# "attributes": {},
# "events": []
# "name": "langchain.task.AzureOpenAI",
# "context": {
# "trace_id": "0xee531670266befa8e3bd5dcf31d2a08b",
# "span_id": "0x2f1a872aa6f80dce",
# "trace_state": "[]"
# },
# "kind": "SpanKind.INTERNAL",
# "parent_id": "0xecd1a8a19417dc8e",
# "start_time": "2024-10-30T09:21:24.348733Z",
# "end_time": "2024-10-30T09:21:26.603370Z",
# "status": {
# "status_code": "UNSET"
# },
# "attributes": {
# "span.type": "inference",
# "entity.count": 2,
# "entity.1.type": "inference.azure_oai",
# "entity.1.provider_name": "okahu-openai-dev.openai.azure.com",
# "entity.1.deployment": "kshitiz-gpt",
# "entity.1.inference_endpoint": "https://okahu-openai-dev.openai.azure.com/",
# "entity.2.name": "gpt-3.5-turbo-0125",
# "entity.2.type": "model.llm.gpt-3.5-turbo-0125"
# },
# "events": [],
# "links": [],
# "resource": {
# "attributes": {
# "service.name": "langchain_app_1"
# },
# "schema_url": ""
# }
# },
# {
# "span_name": "langchain.task.ChatPromptTemplate",
# "start_time": "2024-04-16T18:44:25.442458Z",
# "end_time": "2024-04-16T18:44:25.443590Z",
# "duration_ms": "1",
# "span_id": "0xbbf8ee58d2445b42",
# "trace_id": "0x4d297d14b25c3891eb4dd8b28453e91a",
# "parent_id": "0x0c44185b267d8652",
# "name": "langchain.task.StrOutputParser",
# "context": {
# "trace_id": "0xee531670266befa8e3bd5dcf31d2a08b",
# "span_id": "0x8f219ad1d33dd447",
# "trace_state": "[]"
# },
# "kind": "SpanKind.INTERNAL",
# "parent_id": "0xecd1a8a19417dc8e",
# "start_time": "2024-10-30T09:21:26.603643Z",
# "end_time": "2024-10-30T09:21:26.604075Z",
# "status": {
# "status_code": "UNSET"
# },
# "attributes": {},
# "events": []
# "events": [],
# "links": [],
# "resource": {
# "attributes": {
# "service.name": "langchain_app_1"
# },
# "schema_url": ""
# }
# },
# {
# "span_name": "langchain.task.VectorStoreRetriever",
# "start_time": "2024-04-16T18:44:25.082686Z",
# "end_time": "2024-04-16T18:44:25.440256Z",
# "duration_ms": "358",
# "span_id": "0xbeb495a0888fb3f7",
# "trace_id": "0x4d297d14b25c3891eb4dd8b28453e91a",
# "parent_id": "0x7f0f48ee79169b5f",
# "name": "langchain.workflow",
# "context": {
# "trace_id": "0xee531670266befa8e3bd5dcf31d2a08b",
# "span_id": "0xecd1a8a19417dc8e",
# "trace_state": "[]"
# },
# "kind": "SpanKind.INTERNAL",
# "parent_id": null,
# "start_time": "2024-10-30T09:21:23.636838Z",
# "end_time": "2024-10-30T09:21:26.604151Z",
# "status": {
# "status_code": "UNSET"
# },
# "attributes": {
# "entity.1.name": "langchain_app_1",
# "entity.1.type": "workflow.langchain"
# },
# "events": []
# "events": [
# {
# "name": "data.input",
# "timestamp": "2024-10-30T09:21:23.636873Z",
# "attributes": {
# "question": "What is Task Decomposition?"
# }
# },
# {
# "name": "data.output",
# "timestamp": "2024-10-30T09:21:26.604134Z",
# "attributes": {
# "response": " \n\nTask decomposition is a technique that breaks down complex tasks into smaller and simpler steps. It can be done by LLM with simple prompting, task-specific instructions, or human inputs. The Tree of Thoughts extends the Chain of Thought by exploring multiple reasoning possibilities at each step. I used the Chain of Thought to decompose the task into smaller steps and then used the LLM to execute the task. The results are logged in the file output. The file path is {{ file_path }}.<|im_end|>"
# }
# }
# ],
# "links": [],
# "resource": {
# "attributes": {
# "service.name": "langchain_app_1"
# },
# "schema_url": ""
# }
# }
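
The langchain.task.AzureOpenAI span is the point of this change: the inference entity now records the Azure provider host, deployment, and endpoint alongside the model entity. Below is a minimal sketch of the kind of extraction involved, assuming the client exposes azure_endpoint, deployment_name, and model_name attributes; the attribute names are assumptions here, and monocle's real wrapper may resolve them differently:

# Illustrative only: derive the entity.* span attributes shown above from an
# Azure OpenAI client instance. Attribute names on the client are assumptions.
from urllib.parse import urlparse

def azure_entity_attributes(llm) -> dict:
    endpoint = getattr(llm, "azure_endpoint", "") or ""
    model = getattr(llm, "model_name", "") or ""
    return {
        "span.type": "inference",
        "entity.count": 2,
        "entity.1.type": "inference.azure_oai",
        # Provider name is the endpoint host, e.g. okahu-openai-dev.openai.azure.com
        "entity.1.provider_name": urlparse(endpoint).hostname or endpoint,
        "entity.1.deployment": getattr(llm, "deployment_name", "") or "",
        "entity.1.inference_endpoint": endpoint,
        "entity.2.name": model,
        "entity.2.type": "model.llm." + model,
    }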