Skip to content

Commit

Permalink
feat(vercel): add OTEL based LangSmith trace exporter (#1104)
Browse files Browse the repository at this point in the history
Add OTEL-based LangSmith trace exporter, conforming to Vercel AI SDK
Telemetry documentation.

TODO:
- [x] Add unit tests with mock results and a mock language model to assert
the outputs as well
- [x] Verify how wrapAISDKModel would work with LangSmithAISDKExporter 
- [x] Add `first_token_time`
  • Loading branch information
dqbd authored Oct 25, 2024
2 parents b9dc8f2 + 90c75f2 commit 80d7ce6
Show file tree
Hide file tree
Showing 11 changed files with 2,468 additions and 67 deletions.
13 changes: 12 additions & 1 deletion js/.eslintrc.cjs
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,18 @@ module.exports = {
"@typescript-eslint/no-shadow": 0,
"@typescript-eslint/no-empty-interface": 0,
"@typescript-eslint/no-use-before-define": ["error", "nofunc"],
"@typescript-eslint/no-unused-vars": ["warn", { args: "none" }],
"@typescript-eslint/no-unused-vars": [
"warn",
{
args: "none",
argsIgnorePattern: "^_",
caughtErrors: "all",
caughtErrorsIgnorePattern: "^_",
destructuredArrayIgnorePattern: "^_",
varsIgnorePattern: "^_",
ignoreRestSiblings: true,
},
],
"@typescript-eslint/no-floating-promises": "error",
"@typescript-eslint/no-misused-promises": "error",
camelcase: 0,
Expand Down
4 changes: 4 additions & 0 deletions js/.gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,10 @@ Chinook_Sqlite.sql
/langchain.js
/langchain.d.ts
/langchain.d.cts
/vercel.cjs
/vercel.js
/vercel.d.ts
/vercel.d.cts
/wrappers.cjs
/wrappers.js
/wrappers.d.ts
Expand Down
21 changes: 18 additions & 3 deletions js/package.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"name": "langsmith",
"version": "0.2.0",
"version": "0.2.1",
"description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.",
"packageManager": "[email protected]",
"files": [
Expand Down Expand Up @@ -33,6 +33,10 @@
"langchain.js",
"langchain.d.ts",
"langchain.d.cts",
"vercel.cjs",
"vercel.js",
"vercel.d.ts",
"vercel.d.cts",
"wrappers.cjs",
"wrappers.js",
"wrappers.d.ts",
Expand Down Expand Up @@ -105,18 +109,20 @@
"uuid": "^10.0.0"
},
"devDependencies": {
"@ai-sdk/openai": "^0.0.40",
"@ai-sdk/openai": "^0.0.68",
"@babel/preset-env": "^7.22.4",
"@faker-js/faker": "^8.4.1",
"@jest/globals": "^29.5.0",
"@langchain/core": "^0.3.14",
"@langchain/langgraph": "^0.2.18",
"@langchain/openai": "^0.3.11",
"@opentelemetry/sdk-trace-base": "^1.26.0",
"@opentelemetry/sdk-trace-node": "^1.26.0",
"@tsconfig/recommended": "^1.0.2",
"@types/jest": "^29.5.1",
"@typescript-eslint/eslint-plugin": "^5.59.8",
"@typescript-eslint/parser": "^5.59.8",
"ai": "^3.2.37",
"ai": "^3.4.17",
"babel-jest": "^29.5.0",
"cross-env": "^7.0.3",
"dotenv": "^16.1.3",
Expand Down Expand Up @@ -221,6 +227,15 @@
"import": "./langchain.js",
"require": "./langchain.cjs"
},
"./vercel": {
"types": {
"import": "./vercel.d.ts",
"require": "./vercel.d.cts",
"default": "./vercel.d.ts"
},
"import": "./vercel.js",
"require": "./vercel.cjs"
},
"./wrappers": {
"types": {
"import": "./wrappers.d.ts",
Expand Down
1 change: 1 addition & 0 deletions js/scripts/create-entrypoints.js
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ const entrypoints = {
"evaluation/langchain": "evaluation/langchain",
schemas: "schemas",
langchain: "langchain",
vercel: "vercel",
wrappers: "wrappers/index",
anonymizer: "anonymizer/index",
"wrappers/openai": "wrappers/openai",
Expand Down
2 changes: 1 addition & 1 deletion js/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -14,4 +14,4 @@ export { RunTree, type RunTreeConfig } from "./run_trees.js";
export { overrideFetchImplementation } from "./singletons/fetch.js";

// Update using yarn bump-version
export const __version__ = "0.2.0";
export const __version__ = "0.2.1";
253 changes: 253 additions & 0 deletions js/src/tests/vercel.int.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,253 @@
import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node";
import { BatchSpanProcessor } from "@opentelemetry/sdk-trace-base";

import {
generateText,
streamText,
generateObject,
streamObject,
tool,
} from "ai";
import { openai } from "@ai-sdk/openai";

import { v4 as uuid } from "uuid";
import { z } from "zod";
import { AISDKExporter } from "../vercel.js";
import { Client } from "../index.js";
import { traceable } from "../traceable.js";
import { waitUntilRunFound, toArray } from "./utils.js";

const client = new Client();

// Deliberately built on NodeTracerProvider rather than @opentelemetry/sdk-node:
// the tests below must be able to force-flush spans so each run reaches
// LangSmith before its assertions execute.
const exporter = new AISDKExporter({ client });
const provider = new NodeTracerProvider();
provider.addSpanProcessor(new BatchSpanProcessor(exporter));
provider.register();

test("generateText", async () => {
const runId = uuid();

await generateText({
model: openai("gpt-4o-mini"),
messages: [
{
role: "user",
content: "What are my orders and where are they? My user ID is 123",
},
],
tools: {
listOrders: tool({
description: "list all orders",
parameters: z.object({ userId: z.string() }),
execute: async ({ userId }) =>
`User ${userId} has the following orders: 1`,
}),
viewTrackingInformation: tool({
description: "view tracking information for a specific order",
parameters: z.object({ orderId: z.string() }),
execute: async ({ orderId }) =>
`Here is the tracking information for ${orderId}`,
}),
},
experimental_telemetry: AISDKExporter.getSettings({
runId,
functionId: "functionId",
metadata: { userId: "123", language: "english" },
}),
maxSteps: 10,
});

await provider.forceFlush();
await waitUntilRunFound(client, runId, true);

const storedRun = await client.readRun(runId);
expect(storedRun.id).toEqual(runId);
});

test("generateText with image", async () => {
  const runId = uuid();

  // Multi-part user message: one text part plus one remote image part,
  // exercising the exporter's handling of non-text content.
  await generateText({
    model: openai("gpt-4o-mini"),
    messages: [
      {
        role: "user",
        content: [
          { type: "text", text: "What's in this picture?" },
          { type: "image", image: new URL("https://picsum.photos/200/300") },
        ],
      },
    ],
    experimental_telemetry: AISDKExporter.getSettings({
      runId,
      runName: "vercelImageTest",
      functionId: "functionId",
      metadata: { userId: "123", language: "english" },
    }),
  });

  await provider.forceFlush();
  await waitUntilRunFound(client, runId, true);

  const fetched = await client.readRun(runId);
  expect(fetched.id).toEqual(runId);
});

test("streamText", async () => {
const runId = uuid();
const result = await streamText({
model: openai("gpt-4o-mini"),
messages: [
{
role: "user",
content: "What are my orders and where are they? My user ID is 123",
},
],
tools: {
listOrders: tool({
description: "list all orders",
parameters: z.object({ userId: z.string() }),
execute: async ({ userId }) =>
`User ${userId} has the following orders: 1`,
}),
viewTrackingInformation: tool({
description: "view tracking information for a specific order",
parameters: z.object({ orderId: z.string() }),
execute: async ({ orderId }) =>
`Here is the tracking information for ${orderId}`,
}),
},
experimental_telemetry: AISDKExporter.getSettings({
runId,
functionId: "functionId",
metadata: { userId: "123", language: "english" },
}),
maxSteps: 10,
});

await toArray(result.fullStream);
await provider.forceFlush();
await waitUntilRunFound(client, runId, true);

const storedRun = await client.readRun(runId);
expect(storedRun.id).toEqual(runId);
});

test("generateObject", async () => {
  const runId = uuid();

  // Structured-output schema the model must satisfy.
  const weatherSchema = z.object({
    weather: z.object({
      city: z.string(),
      unit: z.union([z.literal("celsius"), z.literal("fahrenheit")]),
    }),
  });

  await generateObject({
    model: openai("gpt-4o-mini", { structuredOutputs: true }),
    schema: weatherSchema,
    prompt: "What's the weather in Prague?",
    experimental_telemetry: AISDKExporter.getSettings({
      runId,
      functionId: "functionId",
      metadata: { userId: "123", language: "english" },
    }),
  });

  await provider.forceFlush();
  await waitUntilRunFound(client, runId, true);

  const fetched = await client.readRun(runId);
  expect(fetched.id).toEqual(runId);
});

test("streamObject", async () => {
  const runId = uuid();

  // Same weather schema, but delivered incrementally via partial objects.
  const weatherSchema = z.object({
    weather: z.object({
      city: z.string(),
      unit: z.union([z.literal("celsius"), z.literal("fahrenheit")]),
    }),
  });

  const streamed = await streamObject({
    model: openai("gpt-4o-mini", { structuredOutputs: true }),
    schema: weatherSchema,
    prompt: "What's the weather in Prague?",
    experimental_telemetry: AISDKExporter.getSettings({
      runId,
      functionId: "functionId",
      metadata: {
        userId: "123",
        language: "english",
      },
    }),
  });

  // Drain the partial-object stream so the span completes.
  await toArray(streamed.partialObjectStream);
  await provider.forceFlush();
  await waitUntilRunFound(client, runId, true);

  const fetched = await client.readRun(runId);
  expect(fetched.id).toEqual(runId);
});

test("traceable", async () => {
const runId = uuid();

const wrappedText = traceable(
async (content: string) => {
const { text } = await generateText({
model: openai("gpt-4o-mini"),
messages: [{ role: "user", content }],
tools: {
listOrders: tool({
description: "list all orders",
parameters: z.object({ userId: z.string() }),
execute: async ({ userId }) =>
`User ${userId} has the following orders: 1`,
}),
viewTrackingInformation: tool({
description: "view tracking information for a specific order",
parameters: z.object({ orderId: z.string() }),
execute: async ({ orderId }) =>
`Here is the tracking information for ${orderId}`,
}),
},
experimental_telemetry: AISDKExporter.getSettings({
functionId: "functionId",
runName: "nestedVercelTrace",
metadata: { userId: "123", language: "english" },
}),
maxSteps: 10,
});

const foo = traceable(
async () => {
return "bar";
},
{
name: "foo",
}
);

await foo();

return { text };
},
{ name: "parentTraceable", id: runId }
);

const result = await wrappedText(
"What are my orders and where are they? My user ID is 123. Use available tools."
);
await waitUntilRunFound(client, runId, true);
const storedRun = await client.readRun(runId);
expect(storedRun.outputs).toEqual(result);
});

// Tear the tracer provider down once every test has finished; jest awaits
// the returned promise, which also flushes any remaining spans.
afterAll(() => provider.shutdown());
Loading

0 comments on commit 80d7ce6

Please sign in to comment.