diff --git a/js/.eslintrc.cjs b/js/.eslintrc.cjs index da4c3ecb4..f54109db0 100644 --- a/js/.eslintrc.cjs +++ b/js/.eslintrc.cjs @@ -30,7 +30,18 @@ module.exports = { "@typescript-eslint/no-shadow": 0, "@typescript-eslint/no-empty-interface": 0, "@typescript-eslint/no-use-before-define": ["error", "nofunc"], - "@typescript-eslint/no-unused-vars": ["warn", { args: "none" }], + "@typescript-eslint/no-unused-vars": [ + "warn", + { + args: "none", + argsIgnorePattern: "^_", + caughtErrors: "all", + caughtErrorsIgnorePattern: "^_", + destructuredArrayIgnorePattern: "^_", + varsIgnorePattern: "^_", + ignoreRestSiblings: true, + }, + ], "@typescript-eslint/no-floating-promises": "error", "@typescript-eslint/no-misused-promises": "error", camelcase: 0, diff --git a/js/.gitignore b/js/.gitignore index e758389d2..4b11d6959 100644 --- a/js/.gitignore +++ b/js/.gitignore @@ -59,6 +59,10 @@ Chinook_Sqlite.sql /langchain.js /langchain.d.ts /langchain.d.cts +/vercel.cjs +/vercel.js +/vercel.d.ts +/vercel.d.cts /wrappers.cjs /wrappers.js /wrappers.d.ts diff --git a/js/package.json b/js/package.json index 3d071f6f4..7e897da13 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "langsmith", - "version": "0.2.0", + "version": "0.2.1", "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.", "packageManager": "yarn@1.22.19", "files": [ @@ -33,6 +33,10 @@ "langchain.js", "langchain.d.ts", "langchain.d.cts", + "vercel.cjs", + "vercel.js", + "vercel.d.ts", + "vercel.d.cts", "wrappers.cjs", "wrappers.js", "wrappers.d.ts", @@ -105,18 +109,20 @@ "uuid": "^10.0.0" }, "devDependencies": { - "@ai-sdk/openai": "^0.0.40", + "@ai-sdk/openai": "^0.0.68", "@babel/preset-env": "^7.22.4", "@faker-js/faker": "^8.4.1", "@jest/globals": "^29.5.0", "@langchain/core": "^0.3.14", "@langchain/langgraph": "^0.2.18", "@langchain/openai": "^0.3.11", + "@opentelemetry/sdk-trace-base": "^1.26.0", + "@opentelemetry/sdk-trace-node": "^1.26.0", "@tsconfig/recommended": "^1.0.2", "@types/jest": "^29.5.1", "@typescript-eslint/eslint-plugin": "^5.59.8", "@typescript-eslint/parser": "^5.59.8", - "ai": "^3.2.37", + "ai": "^3.4.17", "babel-jest": "^29.5.0", "cross-env": "^7.0.3", "dotenv": "^16.1.3", @@ -221,6 +227,15 @@ "import": "./langchain.js", "require": "./langchain.cjs" }, + "./vercel": { + "types": { + "import": "./vercel.d.ts", + "require": "./vercel.d.cts", + "default": "./vercel.d.ts" + }, + "import": "./vercel.js", + "require": "./vercel.cjs" + }, "./wrappers": { "types": { "import": "./wrappers.d.ts", diff --git a/js/scripts/create-entrypoints.js b/js/scripts/create-entrypoints.js index a3487f756..9cce2ab22 100644 --- a/js/scripts/create-entrypoints.js +++ b/js/scripts/create-entrypoints.js @@ -14,6 +14,7 @@ const entrypoints = { "evaluation/langchain": "evaluation/langchain", schemas: "schemas", langchain: "langchain", + vercel: "vercel", wrappers: "wrappers/index", anonymizer: "anonymizer/index", "wrappers/openai": "wrappers/openai", diff --git a/js/src/index.ts b/js/src/index.ts index 77f0939f1..78f45d4d6 100644 --- a/js/src/index.ts +++ b/js/src/index.ts @@ -14,4 +14,4 @@ export { RunTree, type RunTreeConfig } from "./run_trees.js"; export { overrideFetchImplementation } from "./singletons/fetch.js"; // Update using yarn bump-version -export const __version__ = "0.2.0"; +export const __version__ = "0.2.1"; diff --git a/js/src/tests/vercel.int.test.ts b/js/src/tests/vercel.int.test.ts new file mode 100644 index 000000000..968ec4bdd --- /dev/null +++ 
b/js/src/tests/vercel.int.test.ts @@ -0,0 +1,253 @@ +import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node"; +import { BatchSpanProcessor } from "@opentelemetry/sdk-trace-base"; + +import { + generateText, + streamText, + generateObject, + streamObject, + tool, +} from "ai"; +import { openai } from "@ai-sdk/openai"; + +import { v4 as uuid } from "uuid"; +import { z } from "zod"; +import { AISDKExporter } from "../vercel.js"; +import { Client } from "../index.js"; +import { traceable } from "../traceable.js"; +import { waitUntilRunFound, toArray } from "./utils.js"; + +const client = new Client(); +// Not using @opentelemetry/sdk-node because we need to force flush +// the spans to ensure they are sent to LangSmith between tests +const provider = new NodeTracerProvider(); +provider.addSpanProcessor( + new BatchSpanProcessor(new AISDKExporter({ client })) +); +provider.register(); + +test("generateText", async () => { + const runId = uuid(); + + await generateText({ + model: openai("gpt-4o-mini"), + messages: [ + { + role: "user", + content: "What are my orders and where are they? My user ID is 123", + }, + ], + tools: { + listOrders: tool({ + description: "list all orders", + parameters: z.object({ userId: z.string() }), + execute: async ({ userId }) => + `User ${userId} has the following orders: 1`, + }), + viewTrackingInformation: tool({ + description: "view tracking information for a specific order", + parameters: z.object({ orderId: z.string() }), + execute: async ({ orderId }) => + `Here is the tracking information for ${orderId}`, + }), + }, + experimental_telemetry: AISDKExporter.getSettings({ + runId, + functionId: "functionId", + metadata: { userId: "123", language: "english" }, + }), + maxSteps: 10, + }); + + await provider.forceFlush(); + await waitUntilRunFound(client, runId, true); + + const storedRun = await client.readRun(runId); + expect(storedRun.id).toEqual(runId); +}); + +test("generateText with image", async () => { + const runId = uuid(); + await generateText({ + model: openai("gpt-4o-mini"), + messages: [ + { + role: "user", + content: [ + { + type: "text", + text: "What's in this picture?", + }, + { + type: "image", + image: new URL("https://picsum.photos/200/300"), + }, + ], + }, + ], + experimental_telemetry: AISDKExporter.getSettings({ + runId, + runName: "vercelImageTest", + functionId: "functionId", + metadata: { userId: "123", language: "english" }, + }), + }); + + await provider.forceFlush(); + await waitUntilRunFound(client, runId, true); + + const storedRun = await client.readRun(runId); + expect(storedRun.id).toEqual(runId); +}); + +test("streamText", async () => { + const runId = uuid(); + const result = await streamText({ + model: openai("gpt-4o-mini"), + messages: [ + { + role: "user", + content: "What are my orders and where are they? 
My user ID is 123", + }, + ], + tools: { + listOrders: tool({ + description: "list all orders", + parameters: z.object({ userId: z.string() }), + execute: async ({ userId }) => + `User ${userId} has the following orders: 1`, + }), + viewTrackingInformation: tool({ + description: "view tracking information for a specific order", + parameters: z.object({ orderId: z.string() }), + execute: async ({ orderId }) => + `Here is the tracking information for ${orderId}`, + }), + }, + experimental_telemetry: AISDKExporter.getSettings({ + runId, + functionId: "functionId", + metadata: { userId: "123", language: "english" }, + }), + maxSteps: 10, + }); + + await toArray(result.fullStream); + await provider.forceFlush(); + await waitUntilRunFound(client, runId, true); + + const storedRun = await client.readRun(runId); + expect(storedRun.id).toEqual(runId); +}); + +test("generateObject", async () => { + const runId = uuid(); + await generateObject({ + model: openai("gpt-4o-mini", { structuredOutputs: true }), + schema: z.object({ + weather: z.object({ + city: z.string(), + unit: z.union([z.literal("celsius"), z.literal("fahrenheit")]), + }), + }), + prompt: "What's the weather in Prague?", + experimental_telemetry: AISDKExporter.getSettings({ + runId, + functionId: "functionId", + metadata: { userId: "123", language: "english" }, + }), + }); + + await provider.forceFlush(); + await waitUntilRunFound(client, runId, true); + + const storedRun = await client.readRun(runId); + expect(storedRun.id).toEqual(runId); +}); + +test("streamObject", async () => { + const runId = uuid(); + const result = await streamObject({ + model: openai("gpt-4o-mini", { structuredOutputs: true }), + schema: z.object({ + weather: z.object({ + city: z.string(), + unit: z.union([z.literal("celsius"), z.literal("fahrenheit")]), + }), + }), + prompt: "What's the weather in Prague?", + experimental_telemetry: AISDKExporter.getSettings({ + runId, + functionId: "functionId", + metadata: { + userId: "123", + language: "english", + }, + }), + }); + + await toArray(result.partialObjectStream); + await provider.forceFlush(); + await waitUntilRunFound(client, runId, true); + + const storedRun = await client.readRun(runId); + expect(storedRun.id).toEqual(runId); +}); + +test("traceable", async () => { + const runId = uuid(); + + const wrappedText = traceable( + async (content: string) => { + const { text } = await generateText({ + model: openai("gpt-4o-mini"), + messages: [{ role: "user", content }], + tools: { + listOrders: tool({ + description: "list all orders", + parameters: z.object({ userId: z.string() }), + execute: async ({ userId }) => + `User ${userId} has the following orders: 1`, + }), + viewTrackingInformation: tool({ + description: "view tracking information for a specific order", + parameters: z.object({ orderId: z.string() }), + execute: async ({ orderId }) => + `Here is the tracking information for ${orderId}`, + }), + }, + experimental_telemetry: AISDKExporter.getSettings({ + functionId: "functionId", + runName: "nestedVercelTrace", + metadata: { userId: "123", language: "english" }, + }), + maxSteps: 10, + }); + + const foo = traceable( + async () => { + return "bar"; + }, + { + name: "foo", + } + ); + + await foo(); + + return { text }; + }, + { name: "parentTraceable", id: runId } + ); + + const result = await wrappedText( + "What are my orders and where are they? My user ID is 123. Use available tools." 
+  );
+  await waitUntilRunFound(client, runId, true);
+  const storedRun = await client.readRun(runId);
+  expect(storedRun.outputs).toEqual(result);
+});
+
+afterAll(async () => {
+  await provider.shutdown();
+});
diff --git a/js/src/tests/vercel.test.ts b/js/src/tests/vercel.test.ts
new file mode 100644
index 000000000..bc104b9bd
--- /dev/null
+++ b/js/src/tests/vercel.test.ts
@@ -0,0 +1,937 @@
+import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node";
+import { BatchSpanProcessor } from "@opentelemetry/sdk-trace-base";
+
+import {
+  generateText,
+  streamText,
+  generateObject,
+  streamObject,
+  tool,
+  LanguageModelV1StreamPart,
+} from "ai";
+
+import { z } from "zod";
+import { AISDKExporter } from "../vercel.js";
+import { traceable } from "../traceable.js";
+import { toArray } from "./utils.js";
+import { mockClient } from "./utils/mock_client.js";
+import { convertArrayToReadableStream, MockLanguageModelV1 } from "ai/test";
+import { getAssumedTreeFromCalls } from "./utils/tree.js";
+
+const { client, callSpy } = mockClient();
+const provider = new NodeTracerProvider();
+provider.addSpanProcessor(
+  new BatchSpanProcessor(new AISDKExporter({ client }))
+);
+provider.register();
+
+class ExecutionOrderSame {
+  $$typeof = Symbol.for("jest.asymmetricMatcher");
+
+  private expectedNs: string;
+  private expectedDepth: number;
+
+  constructor(depth: number, ns: string) {
+    this.expectedDepth = depth;
+    this.expectedNs = ns;
+  }
+
+  asymmetricMatch(other: unknown) {
+    // eslint-disable-next-line no-instanceof/no-instanceof
+    if (!(typeof other === "string" || other instanceof String)) {
+      return false;
+    }
+
+    const segments = other.split(".");
+    if (segments.length !== this.expectedDepth) return false;
+
+    const last = segments.at(-1);
+    if (!last) return false;
+
+    const nanoseconds = last.split("Z").at(0)?.slice(-3);
+    return nanoseconds === this.expectedNs;
+  }
+
+  toString() {
+    return "ExecutionOrderSame";
+  }
+
+  getExpectedType() {
+    return "string";
+  }
+
+  toAsymmetricMatcher() {
+    return `ExecutionOrderSame<${this.expectedDepth}, ${this.expectedNs}>`;
+  }
+}
+
+class MockMultiStepLanguageModelV1 extends MockLanguageModelV1 {
+  generateStep = -1;
+  streamStep = -1;
+
+  constructor(...args: ConstructorParameters<typeof MockLanguageModelV1>) {
+    super(...args);
+
+    const oldDoGenerate = this.doGenerate;
+    this.doGenerate = async (...args) => {
+      this.generateStep += 1;
+      return await oldDoGenerate(...args);
+    };
+
+    const oldDoStream = this.doStream;
+    this.doStream = async (...args) => {
+      this.streamStep += 1;
+      return await oldDoStream(...args);
+    };
+  }
+}
+
+beforeEach(() => callSpy.mockClear());
+afterAll(async () => await provider.shutdown());
+
+test("generateText", async () => {
+  const model = new MockMultiStepLanguageModelV1({
+    doGenerate: async () => {
+      if (model.generateStep === 0) {
+        return {
+          rawCall: { rawPrompt: null, rawSettings: {} },
+          finishReason: "stop",
+          usage: { promptTokens: 10, completionTokens: 20 },
+          toolCalls: [
+            {
+              toolCallType: "function",
+              toolName: "listOrders",
+              toolCallId: "tool-id",
+              args: JSON.stringify({ userId: "123" }),
+            },
+          ],
+        };
+      }
+
+      return {
+        rawCall: { rawPrompt: null, rawSettings: {} },
+        finishReason: "stop",
+        usage: { promptTokens: 10, completionTokens: 20 },
+        text: `Hello, world!`,
+      };
+    },
+  });
+
+  await generateText({
+    model,
+    messages: [
+      {
+        role: "user",
+        content: "What are my orders? 
My user ID is 123", + }, + ], + tools: { + listOrders: tool({ + description: "list all orders", + parameters: z.object({ userId: z.string() }), + execute: async ({ userId }) => + `User ${userId} has the following orders: 1`, + }), + }, + experimental_telemetry: AISDKExporter.getSettings({ + runName: "generateText", + functionId: "functionId", + metadata: { userId: "123", language: "english" }, + }), + maxSteps: 10, + }); + + await provider.forceFlush(); + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "generateText:0", + "mock-provider:1", + "listOrders:2", + "mock-provider:3", + ], + edges: [ + ["generateText:0", "mock-provider:1"], + ["generateText:0", "listOrders:2"], + ["generateText:0", "mock-provider:3"], + ], + data: { + "generateText:0": { + name: "generateText", + inputs: { + messages: [ + { + type: "human", + data: { content: "What are my orders? My user ID is 123" }, + }, + ], + }, + outputs: { + llm_output: { + type: "ai", + data: { content: "Hello, world!" }, + token_usage: { completion_tokens: 20, prompt_tokens: 10 }, + }, + }, + extra: { + metadata: { + functionId: "functionId", + userId: "123", + language: "english", + }, + }, + dotted_order: new ExecutionOrderSame(1, "000"), + }, + "mock-provider:1": { + inputs: { + messages: [ + { + type: "human", + data: { + content: [ + { + type: "text", + text: "What are my orders? My user ID is 123", + }, + ], + }, + }, + ], + }, + outputs: { + llm_output: { + type: "ai", + data: { + content: [ + { + type: "tool_use", + name: "listOrders", + id: "tool-id", + input: { userId: "123" }, + }, + ], + additional_kwargs: { + tool_calls: [ + { + id: "tool-id", + type: "function", + function: { + name: "listOrders", + id: "tool-id", + arguments: '{"userId":"123"}', + }, + }, + ], + }, + }, + token_usage: { completion_tokens: 20, prompt_tokens: 10 }, + }, + }, + dotted_order: new ExecutionOrderSame(2, "000"), + }, + "listOrders:2": { + inputs: { userId: "123" }, + outputs: { output: "User 123 has the following orders: 1" }, + dotted_order: new ExecutionOrderSame(2, "001"), + }, + "mock-provider:3": { + inputs: { + messages: [ + { + type: "human", + data: { + content: [ + { + type: "text", + text: "What are my orders? My user ID is 123", + }, + ], + }, + }, + { + type: "ai", + data: { + content: [ + { + type: "tool_use", + name: "listOrders", + id: "tool-id", + input: { userId: "123" }, + }, + ], + additional_kwargs: { + tool_calls: [ + { + id: "tool-id", + type: "function", + function: { + name: "listOrders", + id: "tool-id", + arguments: '{"userId":"123"}', + }, + }, + ], + }, + }, + }, + { + type: "tool", + data: { + content: '"User 123 has the following orders: 1"', + name: "listOrders", + tool_call_id: "tool-id", + }, + }, + ], + }, + outputs: { + llm_output: { + type: "ai", + data: { content: "Hello, world!" 
}, + token_usage: { completion_tokens: 20, prompt_tokens: 10 }, + }, + }, + dotted_order: new ExecutionOrderSame(2, "002"), + }, + }, + }); +}); + +test("streamText", async () => { + const model = new MockMultiStepLanguageModelV1({ + doStream: async () => { + if (model.streamStep === 0) { + return { + stream: convertArrayToReadableStream([ + { + type: "tool-call", + toolCallType: "function", + toolName: "listOrders", + toolCallId: "tool-id", + args: JSON.stringify({ userId: "123" }), + }, + { + type: "finish", + finishReason: "stop", + logprobs: undefined, + usage: { completionTokens: 10, promptTokens: 3 }, + }, + ] satisfies LanguageModelV1StreamPart[]), + rawCall: { rawPrompt: null, rawSettings: {} }, + }; + } + + return { + stream: convertArrayToReadableStream([ + { type: "text-delta", textDelta: "Hello" }, + { type: "text-delta", textDelta: ", " }, + { type: "text-delta", textDelta: `world!` }, + { + type: "finish", + finishReason: "stop", + logprobs: undefined, + usage: { completionTokens: 10, promptTokens: 3 }, + }, + ]), + rawCall: { rawPrompt: null, rawSettings: {} }, + }; + }, + }); + + const result = await streamText({ + model, + messages: [ + { + role: "user", + content: "What are my orders? My user ID is 123", + }, + ], + tools: { + listOrders: tool({ + description: "list all orders", + parameters: z.object({ userId: z.string() }), + execute: async ({ userId }) => + `User ${userId} has the following orders: 1`, + }), + }, + experimental_telemetry: AISDKExporter.getSettings({ + functionId: "functionId", + metadata: { userId: "123", language: "english" }, + }), + maxSteps: 10, + }); + + await toArray(result.fullStream); + await provider.forceFlush(); + + const actual = getAssumedTreeFromCalls(callSpy.mock.calls); + expect(actual).toMatchObject({ + nodes: [ + "mock-provider:0", + "mock-provider:1", + "listOrders:2", + "mock-provider:3", + ], + edges: [ + ["mock-provider:0", "mock-provider:1"], + ["mock-provider:0", "listOrders:2"], + ["mock-provider:0", "mock-provider:3"], + ], + data: { + "mock-provider:0": { + inputs: { + messages: [ + { + type: "human", + data: { content: "What are my orders? My user ID is 123" }, + }, + ], + }, + outputs: { + llm_output: { + type: "ai", + data: { content: "Hello, world!" }, + token_usage: { completion_tokens: 20, prompt_tokens: 6 }, + }, + }, + extra: { + metadata: { + functionId: "functionId", + userId: "123", + language: "english", + }, + }, + dotted_order: new ExecutionOrderSame(1, "000"), + }, + "mock-provider:1": { + inputs: { + messages: [ + { + type: "human", + data: { + content: [ + { + type: "text", + text: "What are my orders? My user ID is 123", + }, + ], + }, + }, + ], + }, + outputs: { + llm_output: { + type: "ai", + data: { + content: [ + { + type: "tool_use", + name: "listOrders", + id: "tool-id", + input: { userId: "123" }, + }, + ], + additional_kwargs: { + tool_calls: [ + { + id: "tool-id", + type: "function", + function: { + name: "listOrders", + id: "tool-id", + arguments: '{"userId":"123"}', + }, + }, + ], + }, + }, + token_usage: { completion_tokens: 10, prompt_tokens: 3 }, + }, + }, + dotted_order: new ExecutionOrderSame(2, "000"), + }, + "listOrders:2": { + inputs: { userId: "123" }, + outputs: { output: "User 123 has the following orders: 1" }, + dotted_order: new ExecutionOrderSame(2, "001"), + }, + "mock-provider:3": { + inputs: { + messages: [ + { + type: "human", + data: { + content: [ + { + type: "text", + text: "What are my orders? 
My user ID is 123", + }, + ], + }, + }, + { + type: "ai", + data: { + content: [ + { + type: "tool_use", + name: "listOrders", + id: "tool-id", + input: { userId: "123" }, + }, + ], + additional_kwargs: { + tool_calls: [ + { + id: "tool-id", + type: "function", + function: { + name: "listOrders", + id: "tool-id", + arguments: '{"userId":"123"}', + }, + }, + ], + }, + }, + }, + { + type: "tool", + data: { + content: '"User 123 has the following orders: 1"', + name: "listOrders", + tool_call_id: "tool-id", + }, + }, + ], + }, + outputs: { + llm_output: { + type: "ai", + data: { content: "Hello, world!" }, + token_usage: { completion_tokens: 10, prompt_tokens: 3 }, + }, + }, + dotted_order: new ExecutionOrderSame(2, "002"), + }, + }, + }); +}); + +test("generateObject", async () => { + const model = new MockMultiStepLanguageModelV1({ + doGenerate: async () => ({ + rawCall: { rawPrompt: null, rawSettings: {} }, + finishReason: "stop", + usage: { promptTokens: 10, completionTokens: 20 }, + toolCalls: [ + { + toolCallType: "function", + toolName: "json", + toolCallId: "tool-id", + args: JSON.stringify({ + weather: { city: "Prague", unit: "celsius" }, + }), + }, + ], + }), + defaultObjectGenerationMode: "tool", + }); + + await generateObject({ + model, + schema: z.object({ + weather: z.object({ + city: z.string(), + unit: z.union([z.literal("celsius"), z.literal("fahrenheit")]), + }), + }), + prompt: "What's the weather in Prague?", + experimental_telemetry: AISDKExporter.getSettings({ + functionId: "functionId", + metadata: { userId: "123", language: "english" }, + }), + }); + + await provider.forceFlush(); + const actual = getAssumedTreeFromCalls(callSpy.mock.calls); + + expect(actual).toMatchObject({ + nodes: ["mock-provider:0", "mock-provider:1"], + edges: [["mock-provider:0", "mock-provider:1"]], + data: { + "mock-provider:0": { + inputs: { + input: { prompt: "What's the weather in Prague?" }, + }, + outputs: { + output: { weather: { city: "Prague", unit: "celsius" } }, + llm_output: { + token_usage: { completion_tokens: 20, prompt_tokens: 10 }, + }, + }, + dotted_order: new ExecutionOrderSame(1, "000"), + }, + "mock-provider:1": { + inputs: { + messages: [ + { + type: "human", + data: { + content: [ + { type: "text", text: "What's the weather in Prague?" 
}, + ], + }, + }, + ], + }, + outputs: { + output: { weather: { city: "Prague", unit: "celsius" } }, + llm_output: { + token_usage: { completion_tokens: 20, prompt_tokens: 10 }, + }, + }, + extra: { + metadata: { + functionId: "functionId", + userId: "123", + language: "english", + }, + }, + dotted_order: new ExecutionOrderSame(2, "000"), + }, + }, + }); +}); + +test("streamObject", async () => { + const model = new MockMultiStepLanguageModelV1({ + doGenerate: async () => ({ + rawCall: { rawPrompt: null, rawSettings: {} }, + finishReason: "stop", + usage: { promptTokens: 10, completionTokens: 20 }, + toolCalls: [ + { + toolCallType: "function", + toolName: "json", + toolCallId: "tool-id", + args: JSON.stringify({ + weather: { city: "Prague", unit: "celsius" }, + }), + }, + ], + }), + + doStream: async () => { + return { + stream: convertArrayToReadableStream([ + { + type: "tool-call-delta", + toolCallType: "function", + toolName: "json", + toolCallId: "tool-id", + argsTextDelta: JSON.stringify({ + weather: { city: "Prague", unit: "celsius" }, + }), + }, + { + type: "finish", + finishReason: "stop", + logprobs: undefined, + usage: { completionTokens: 10, promptTokens: 3 }, + }, + ] satisfies LanguageModelV1StreamPart[]), + rawCall: { rawPrompt: null, rawSettings: {} }, + }; + }, + defaultObjectGenerationMode: "tool", + }); + + const result = await streamObject({ + model, + schema: z.object({ + weather: z.object({ + city: z.string(), + unit: z.union([z.literal("celsius"), z.literal("fahrenheit")]), + }), + }), + prompt: "What's the weather in Prague?", + experimental_telemetry: AISDKExporter.getSettings({ + functionId: "functionId", + metadata: { userId: "123", language: "english" }, + }), + }); + + await toArray(result.partialObjectStream); + await provider.forceFlush(); + + const actual = getAssumedTreeFromCalls(callSpy.mock.calls); + expect(actual).toMatchObject({ + nodes: ["mock-provider:0", "mock-provider:1"], + edges: [["mock-provider:0", "mock-provider:1"]], + data: { + "mock-provider:0": { + inputs: { + input: { prompt: "What's the weather in Prague?" }, + }, + outputs: { + output: { weather: { city: "Prague", unit: "celsius" } }, + llm_output: { + token_usage: { completion_tokens: 10, prompt_tokens: 3 }, + }, + }, + extra: { + metadata: { + functionId: "functionId", + userId: "123", + language: "english", + }, + }, + dotted_order: new ExecutionOrderSame(1, "000"), + }, + "mock-provider:1": { + inputs: { + messages: [ + { + type: "human", + data: { + content: [ + { type: "text", text: "What's the weather in Prague?" 
}, + ], + }, + }, + ], + }, + outputs: { + output: { weather: { city: "Prague", unit: "celsius" } }, + llm_output: { + token_usage: { completion_tokens: 10, prompt_tokens: 3 }, + }, + }, + dotted_order: new ExecutionOrderSame(2, "000"), + }, + }, + }); +}); + +test("traceable", async () => { + const model = new MockMultiStepLanguageModelV1({ + doGenerate: async () => { + if (model.generateStep === 0) { + return { + rawCall: { rawPrompt: null, rawSettings: {} }, + finishReason: "stop", + usage: { promptTokens: 10, completionTokens: 20 }, + toolCalls: [ + { + toolCallType: "function", + toolName: "listOrders", + toolCallId: "tool-id", + args: JSON.stringify({ userId: "123" }), + }, + ], + }; + } + + return { + rawCall: { rawPrompt: null, rawSettings: {} }, + finishReason: "stop", + usage: { promptTokens: 10, completionTokens: 20 }, + text: `Hello, world!`, + }; + }, + }); + + const wrappedText = traceable( + async (content: string) => { + const { text } = await generateText({ + model, + messages: [{ role: "user", content }], + tools: { + listOrders: tool({ + description: "list all orders", + parameters: z.object({ userId: z.string() }), + execute: async ({ userId }) => + `User ${userId} has the following orders: 1`, + }), + }, + experimental_telemetry: AISDKExporter.getSettings({ + runName: "generateText", + functionId: "functionId", + metadata: { userId: "123", language: "english" }, + }), + maxSteps: 10, + }); + + return { text }; + }, + { name: "wrappedText", client, tracingEnabled: true } + ); + + await wrappedText("What are my orders? My user ID is 123"); + await provider.forceFlush(); + + const actual = getAssumedTreeFromCalls(callSpy.mock.calls); + expect(actual).toMatchObject({ + nodes: [ + "wrappedText:0", + "generateText:1", + "mock-provider:2", + "listOrders:3", + "mock-provider:4", + ], + edges: [ + ["wrappedText:0", "generateText:1"], + ["generateText:1", "mock-provider:2"], + ["generateText:1", "listOrders:3"], + ["generateText:1", "mock-provider:4"], + ], + data: { + "wrappedText:0": { + inputs: { + input: "What are my orders? My user ID is 123", + }, + outputs: { + text: "Hello, world!", + }, + dotted_order: new ExecutionOrderSame(1, "001"), + }, + "generateText:1": { + name: "generateText", + extra: { + metadata: { + functionId: "functionId", + userId: "123", + language: "english", + }, + }, + inputs: { + messages: [ + { + type: "human", + data: { content: "What are my orders? My user ID is 123" }, + }, + ], + }, + outputs: { + llm_output: { + type: "ai", + data: { content: "Hello, world!" }, + token_usage: { completion_tokens: 20, prompt_tokens: 10 }, + }, + }, + dotted_order: new ExecutionOrderSame(2, "000"), + }, + "mock-provider:2": { + inputs: { + messages: [ + { + type: "human", + data: { + content: [ + { + type: "text", + text: "What are my orders? 
My user ID is 123",
+              },
+            ],
+          },
+        },
+      ],
+    },
+    outputs: {
+      llm_output: {
+        type: "ai",
+        data: {
+          content: [
+            {
+              type: "tool_use",
+              name: "listOrders",
+              id: "tool-id",
+              input: { userId: "123" },
+            },
+          ],
+          additional_kwargs: {
+            tool_calls: [
+              {
+                id: "tool-id",
+                type: "function",
+                function: {
+                  name: "listOrders",
+                  id: "tool-id",
+                  arguments: '{"userId":"123"}',
+                },
+              },
+            ],
+          },
+        },
+        token_usage: { completion_tokens: 20, prompt_tokens: 10 },
+      },
+    },
+    dotted_order: new ExecutionOrderSame(3, "000"),
+  },
+  "listOrders:3": {
+    inputs: { userId: "123" },
+    outputs: { output: "User 123 has the following orders: 1" },
+    dotted_order: new ExecutionOrderSame(3, "001"),
+  },
+  "mock-provider:4": {
+    inputs: {
+      messages: [
+        {
+          type: "human",
+          data: {
+            content: [
+              {
+                type: "text",
+                text: "What are my orders? My user ID is 123",
+              },
+            ],
+          },
+        },
+        {
+          type: "ai",
+          data: {
+            content: [
+              {
+                type: "tool_use",
+                name: "listOrders",
+                id: "tool-id",
+                input: { userId: "123" },
+              },
+            ],
+            additional_kwargs: {
+              tool_calls: [
+                {
+                  id: "tool-id",
+                  type: "function",
+                  function: {
+                    name: "listOrders",
+                    id: "tool-id",
+                    arguments: '{"userId":"123"}',
+                  },
+                },
+              ],
+            },
+          },
+        },
+        {
+          type: "tool",
+          data: {
+            content: '"User 123 has the following orders: 1"',
+            name: "listOrders",
+            tool_call_id: "tool-id",
+          },
+        },
+      ],
+    },
+    outputs: {
+      llm_output: {
+        type: "ai",
+        data: { content: "Hello, world!" },
+        token_usage: { completion_tokens: 20, prompt_tokens: 10 },
+      },
+    },
+    dotted_order: new ExecutionOrderSame(3, "002"),
+  },
+    },
+  });
+});
diff --git a/js/src/vercel.ts b/js/src/vercel.ts
new file mode 100644
index 000000000..2d703ba97
--- /dev/null
+++ b/js/src/vercel.ts
@@ -0,0 +1,884 @@
+import type {
+  CoreAssistantMessage,
+  CoreMessage,
+  ToolCallPart,
+  generateText,
+} from "ai";
+import type { AISDKSpan } from "./vercel.types.js";
+import { Client, RunTree } from "./index.js";
+import { KVMap, RunCreate } from "./schemas.js";
+import { v5 as uuid5, v4 as uuid4 } from "uuid";
+import { getCurrentRunTree } from "./singletons/traceable.js";
+import { getLangSmithEnvironmentVariable } from "./utils/env.js";
+
+// eslint-disable-next-line @typescript-eslint/ban-types
+type AnyString = string & {};
+
+export type AITelemetrySettings = Exclude<
+  Parameters<typeof generateText>[0]["experimental_telemetry"],
+  undefined
+>;
+
+export interface TelemetrySettings extends AITelemetrySettings {
+  /** ID of the run sent to LangSmith */
+  runId?: string;
+  /** Name of the run sent to LangSmith */
+  runName?: string;
+}
+
+type LangChainMessageFields = {
+  content:
+    | string
+    | Array<
+        // eslint-disable-next-line @typescript-eslint/no-explicit-any
+        Record<string, any> & { type?: "text" | "image_url" | AnyString }
+      >;
+  name?: string;
+  id?: string;
+  additional_kwargs?: {
+    tool_calls?: {
+      id: string;
+      function: { arguments: string; name: string };
+      type: "function";
+      index?: number;
+    }[];
+    [key: string]: unknown;
+  };
+  // eslint-disable-next-line @typescript-eslint/no-explicit-any
+  response_metadata?: Record<string, any>;
+};
+type LangChainLikeMessage = { type: string; data: LangChainMessageFields };
+
+// Attempt to convert CoreMessage to a LangChain-compatible format
+// which allows us to render messages more nicely in LangSmith
+function convertCoreToSmith(
+  message: CoreMessage
+):
+  | LangChainLikeMessage
+  | CoreMessage
+  | Array<LangChainLikeMessage | CoreMessage> {
+  if (message.role === "assistant") {
+    const data: LangChainMessageFields = { content: message.content };
+
+    if (Array.isArray(message.content)) {
+      data.content = message.content.map((part) => {
+        if (part.type === "text") {
+          return {
+            type: "text",
+            text: part.text,
+            ...part.experimental_providerMetadata,
+          };
+        }
+
+        if (part.type === "tool-call") {
+          return {
+            type: "tool_use",
+            name: part.toolName,
+            id: part.toolCallId,
+            input: part.args,
+            ...part.experimental_providerMetadata,
+          };
+        }
+
+        return part;
+      });
+
+      const toolCalls = message.content.filter(
+        (part): part is ToolCallPart => part.type === "tool-call"
+      );
+
+      if (toolCalls.length > 0) {
+        data.additional_kwargs ??= {};
+        data.additional_kwargs.tool_calls = toolCalls.map((part) => {
+          return {
+            id: part.toolCallId,
+            type: "function",
+            function: {
+              name: part.toolName,
+              id: part.toolCallId,
+              arguments: JSON.stringify(part.args),
+            },
+          };
+        });
+      }
+    }
+
+    return { type: "ai", data };
+  }
+
+  if (message.role === "user") {
+    const data: LangChainMessageFields = { content: message.content };
+
+    if (Array.isArray(message.content)) {
+      data.content = message.content.map((part) => {
+        if (part.type === "text") {
+          return {
+            type: "text",
+            text: part.text,
+            ...part.experimental_providerMetadata,
+          };
+        }
+
+        if (part.type === "image") {
+          return {
+            type: "image_url",
+            image_url: part.image,
+            ...part.experimental_providerMetadata,
+          };
+        }
+
+        return part;
+      });
+    }
+
+    return { type: "human", data };
+  }
+
+  if (message.role === "system") {
+    return { type: "system", data: { content: message.content } };
+  }
+
+  if (message.role === "tool") {
+    const res = message.content.map((toolCall) => {
+      return {
+        type: "tool",
+        data: {
+          content: JSON.stringify(toolCall.result),
+          name: toolCall.toolName,
+          tool_call_id: toolCall.toolCallId,
+        },
+      };
+    });
+    if (res.length === 1) return res[0];
+    return res;
+  }
+
+  return message;
+}
+
+const tryJson = (
+  str:
+    | string
+    | number
+    | boolean
+    | Array<null | undefined | string>
+    | Array<null | undefined | number>
+    | Array<null | undefined | boolean>
+    | undefined
+) => {
+  try {
+    if (!str) return str;
+    if (typeof str !== "string") return str;
+    return JSON.parse(str);
+  } catch {
+    return str;
+  }
+};
+
+function stripNonAlphanumeric(input: string) {
+  return input.replace(/[-:.]/g, "");
+}
+
+function convertToDottedOrderFormat(
+  [seconds, nanoseconds]: [seconds: number, nanoseconds: number],
+  runId: string,
+  executionOrder: number
+) {
+  // Date only has millisecond precision, so we use the microseconds to break
+  // possible ties, avoiding incorrect run order
+  const ms = Number(String(nanoseconds).slice(0, 3));
+  const ns = String(Number(String(nanoseconds).slice(3, 6)) + executionOrder)
+    .padStart(3, "0")
+    .slice(0, 3);
+
+  return (
+    stripNonAlphanumeric(
+      `${new Date(seconds * 1000 + ms).toISOString().slice(0, -1)}${ns}Z`
+    ) + runId
+  );
+}
+
+function convertToTimestamp([seconds, nanoseconds]: [
+  seconds: number,
+  nanoseconds: number
+]) {
+  const ms = String(nanoseconds).slice(0, 3);
+  return Number(String(seconds) + ms);
+}
+
+function sortByHr(
+  a: [seconds: number, nanoseconds: number],
+  b: [seconds: number, nanoseconds: number]
+): number {
+  if (a[0] !== b[0]) return Math.sign(a[0] - b[0]);
+  return Math.sign(a[1] - b[1]);
+}
+
+const ROOT = "$";
+const RUN_ID_NAMESPACE = "5c718b20-9078-11ef-9a3d-325096b39f47";
+
+const RUN_ID_METADATA_KEY = {
+  input: "langsmith:runId",
+  output: "ai.telemetry.metadata.langsmith:runId",
+};
+
+const RUN_NAME_METADATA_KEY = {
+  input: "langsmith:runName",
+  output: "ai.telemetry.metadata.langsmith:runName",
+};
+
+const TRACE_METADATA_KEY = {
+  input: "langsmith:trace",
+  output: "ai.telemetry.metadata.langsmith:trace",
+};
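The `langsmith:*` constants here (together with the baggage key that follows) are the bridge between `AISDKExporter.getSettings()` and the exporter: settings are written into the AI SDK telemetry metadata under the short `input` keys, the AI SDK re-emits every metadata entry as a span attribute prefixed with `ai.telemetry.metadata.`, and the exporter reads them back under the `output` keys. A minimal sketch of that round trip (illustrative only; the logged shape follows from the `getSettings` implementation further down):

```ts
import { AISDKExporter } from "langsmith/vercel";

// Caller side: runId/runName are folded into the telemetry metadata
// under the "input" keys.
const settings = AISDKExporter.getSettings({
  runId: "123e4567-e89b-12d3-a456-426614174000",
  runName: "myRun",
});
console.log(settings.metadata);
// => { "langsmith:runId": "123e4567-...", "langsmith:runName": "myRun" }

// Exporter side: the AI SDK republishes each metadata entry as a span
// attribute named "ai.telemetry.metadata.<key>" — exactly the "output"
// form that parseInteropFromMetadata() looks up on each exported span.
```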
+
+const BAGGAGE_METADATA_KEY = {
+  input: "langsmith:baggage",
+  output: "ai.telemetry.metadata.langsmith:baggage",
+};
+
+const RESERVED_METADATA_KEYS = [
+  RUN_ID_METADATA_KEY.output,
+  RUN_NAME_METADATA_KEY.output,
+  TRACE_METADATA_KEY.output,
+  BAGGAGE_METADATA_KEY.output,
+];
+
+interface RunTask {
+  id: string;
+  parentId: string | undefined;
+  startTime: [seconds: number, nanoseconds: number];
+  run: RunCreate;
+  sent: boolean;
+  executionOrder: number;
+}
+
+type InteropType =
+  | { type: "traceable"; parentRunTree: RunTree }
+  | { type: "user"; userTraceId?: string }
+  | undefined;
+
+/**
+ * OpenTelemetry trace exporter for Vercel AI SDK.
+ *
+ * @example
+ * ```ts
+ * import { AISDKExporter } from "langsmith/vercel";
+ * import { Client } from "langsmith";
+ *
+ * import { generateText } from "ai";
+ * import { openai } from "@ai-sdk/openai";
+ *
+ * import { NodeSDK } from "@opentelemetry/sdk-node";
+ * import { getNodeAutoInstrumentations } from "@opentelemetry/auto-instrumentations-node";
+ *
+ * const client = new Client();
+ *
+ * const sdk = new NodeSDK({
+ *   traceExporter: new AISDKExporter({ client }),
+ *   instrumentations: [getNodeAutoInstrumentations()],
+ * });
+ *
+ * sdk.start();
+ *
+ * const res = await generateText({
+ *   model: openai("gpt-4o-mini"),
+ *   messages: [
+ *     {
+ *       role: "user",
+ *       content: "What color is the sky?",
+ *     },
+ *   ],
+ *   experimental_telemetry: AISDKExporter.getSettings({
+ *     runName: "langsmith_traced_call",
+ *     metadata: { userId: "123", language: "english" },
+ *   }),
+ * });
+ *
+ * await sdk.shutdown();
+ * ```
+ */
+export class AISDKExporter {
+  private client: Client;
+  private traceByMap: Record<
+    string,
+    {
+      childMap: Record<string, RunTask[]>;
+      nodeMap: Record<string, RunTask>;
+      relativeExecutionOrder: Record<string, number>;
+      interop?: InteropType;
+    }
+  > = {};
+
+  constructor(args?: { client?: Client }) {
+    this.client = args?.client ?? new Client();
+  }
+
+  static getSettings(settings?: TelemetrySettings) {
+    const { runId, runName, ...rest } = settings ?? {};
+    const metadata = { ...rest?.metadata };
+    if (runId != null) metadata[RUN_ID_METADATA_KEY.input] = runId;
+    if (runName != null) metadata[RUN_NAME_METADATA_KEY.input] = runName;
+
+    // attempt to obtain the run tree if used within a traceable function
+    let defaultEnabled = true;
+    try {
+      const runTree = getCurrentRunTree();
+      const headers = runTree.toHeaders();
+      metadata[TRACE_METADATA_KEY.input] = headers["langsmith-trace"];
+      metadata[BAGGAGE_METADATA_KEY.input] = headers["baggage"];
+
+      // honor the tracingEnabled flag if coming from traceable
+      if (runTree.tracingEnabled != null) {
+        defaultEnabled = runTree.tracingEnabled;
+      }
+    } catch {
+      // pass
+    }
+
+    if (
+      metadata[RUN_ID_METADATA_KEY.input] &&
+      metadata[TRACE_METADATA_KEY.input]
+    ) {
+      throw new Error(
+        "Cannot provide `runId` when used within traceable function."
+      );
+    }
+
+    return { ...rest, isEnabled: rest.isEnabled ?? defaultEnabled, metadata };
+  }
+
+  /** @internal */
+  protected getSpanAttributeKey = (
+    span: AISDKSpan,
+    key: string
+  ): string | undefined => {
+    const attributes = span.attributes as Record<string, unknown>;
+
+    return key in attributes && typeof attributes[key] === "string"
+      ? (attributes[key] as string)
+      : undefined;
+  };
+
+  /** @internal */
+  protected parseInteropFromMetadata(span: AISDKSpan): InteropType {
+    const userTraceId = this.getSpanAttributeKey(
+      span,
+      RUN_ID_METADATA_KEY.output
+    );
+    const parentTrace = this.getSpanAttributeKey(
+      span,
+      TRACE_METADATA_KEY.output
+    );
+
+    if (parentTrace && userTraceId) {
+      throw new Error(
+        `Cannot provide both "${RUN_ID_METADATA_KEY.input}" and "${TRACE_METADATA_KEY.input}" metadata keys.`
+      );
+    }
+
+    if (parentTrace) {
+      const parentRunTree = RunTree.fromHeaders({
+        "langsmith-trace": parentTrace,
+        baggage:
+          this.getSpanAttributeKey(span, BAGGAGE_METADATA_KEY.output) || "",
+      });
+
+      if (!parentRunTree)
+        throw new Error("Unreachable code: empty parent run tree");
+      return { type: "traceable", parentRunTree };
+    }
+
+    if (userTraceId) return { type: "user", userTraceId };
+    return undefined;
+  }
+
+  /** @internal */
+  protected getRunCreate(span: AISDKSpan): RunCreate | undefined {
+    const runId = uuid5(span.spanContext().spanId, RUN_ID_NAMESPACE);
+    const parentRunId = span.parentSpanId
+      ? uuid5(span.parentSpanId, RUN_ID_NAMESPACE)
+      : undefined;
+
+    const asRunCreate = (rawConfig: RunCreate) => {
+      const aiMetadata = Object.keys(span.attributes)
+        .filter(
+          (key) =>
+            key.startsWith("ai.telemetry.metadata.") &&
+            !RESERVED_METADATA_KEYS.includes(key)
+        )
+        .reduce((acc, key) => {
+          acc[key.slice("ai.telemetry.metadata.".length)] =
+            span.attributes[key as keyof typeof span.attributes];
+
+          return acc;
+        }, {} as Record<string, unknown>);
+
+      if (
+        ("ai.telemetry.functionId" in span.attributes &&
+          span.attributes["ai.telemetry.functionId"]) ||
+        ("resource.name" in span.attributes && span.attributes["resource.name"])
+      ) {
+        aiMetadata["functionId"] =
+          span.attributes["ai.telemetry.functionId"] ||
+          span.attributes["resource.name"];
+      }
+
+      const parsedStart = convertToTimestamp(span.startTime);
+      const parsedEnd = convertToTimestamp(span.endTime);
+
+      let name = rawConfig.name;
+
+      // if user provided a custom name, only use it if it's the root
+      if (span.parentSpanId == null) {
+        name =
+          this.getSpanAttributeKey(span, RUN_NAME_METADATA_KEY.output) || name;
+      }
+
+      const config: RunCreate = {
+        ...rawConfig,
+        name,
+        id: runId,
+        parent_run_id: parentRunId,
+        extra: {
+          ...rawConfig.extra,
+          metadata: {
+            ...rawConfig.extra?.metadata,
+            ...aiMetadata,
+            "ai.operationId": span.attributes["ai.operationId"],
+          },
+        },
+        session_name:
+          getLangSmithEnvironmentVariable("PROJECT") ?? 
+ getLangSmithEnvironmentVariable("SESSION"), + start_time: Math.min(parsedStart, parsedEnd), + end_time: Math.max(parsedStart, parsedEnd), + }; + + return config; + }; + + switch (span.name) { + case "ai.generateText.doGenerate": + case "ai.generateText": + case "ai.streamText.doStream": + case "ai.streamText": { + const inputs = ((): KVMap => { + if ("ai.prompt.messages" in span.attributes) { + return { + messages: tryJson(span.attributes["ai.prompt.messages"]).flatMap( + (i: CoreMessage) => convertCoreToSmith(i) + ), + }; + } + + if ("ai.prompt" in span.attributes) { + const input = tryJson(span.attributes["ai.prompt"]); + + if ( + typeof input === "object" && + input != null && + "messages" in input && + Array.isArray(input.messages) + ) { + return { + messages: input.messages.flatMap((i: CoreMessage) => + convertCoreToSmith(i) + ), + }; + } + + return { input }; + } + + return {}; + })(); + + const outputs = ((): KVMap | undefined => { + let result: KVMap | undefined = undefined; + + if (span.attributes["ai.response.toolCalls"]) { + let content = tryJson(span.attributes["ai.response.toolCalls"]); + + if (Array.isArray(content)) { + content = content.map((i) => ({ + type: "tool-call", + ...i, + args: tryJson(i.args), + })); + } + + result = { + llm_output: convertCoreToSmith({ + role: "assistant", + content, + } satisfies CoreAssistantMessage), + }; + } else if (span.attributes["ai.response.text"]) { + result = { + llm_output: convertCoreToSmith({ + role: "assistant", + content: span.attributes["ai.response.text"], + }), + }; + } + + if (span.attributes["ai.usage.completionTokens"]) { + result ??= {}; + result.llm_output ??= {}; + result.llm_output.token_usage ??= {}; + result.llm_output.token_usage["completion_tokens"] = + span.attributes["ai.usage.completionTokens"]; + } + + if (span.attributes["ai.usage.promptTokens"]) { + result ??= {}; + result.llm_output ??= {}; + result.llm_output.token_usage ??= {}; + result.llm_output.token_usage["prompt_tokens"] = + span.attributes["ai.usage.promptTokens"]; + } + + return result; + })(); + + const events: KVMap[] = []; + const firstChunkEvent = span.events.find( + (i) => i.name === "ai.stream.firstChunk" + ); + if (firstChunkEvent) { + events.push({ + name: "new_token", + time: convertToTimestamp(firstChunkEvent.time), + }); + } + + // TODO: add first_token_time + return asRunCreate({ + run_type: "llm", + name: span.attributes["ai.model.provider"], + inputs, + outputs, + events, + extra: { + batch_size: 1, + metadata: { + ls_provider: span.attributes["ai.model.provider"] + .split(".") + .at(0), + ls_model_type: span.attributes["ai.model.provider"] + .split(".") + .at(1), + ls_model_name: span.attributes["ai.model.id"], + }, + }, + }); + } + + case "ai.toolCall": { + const args = tryJson(span.attributes["ai.toolCall.args"]); + let inputs: KVMap = { args }; + + if (typeof args === "object" && args != null) { + inputs = args; + } + + const output = tryJson(span.attributes["ai.toolCall.result"]); + let outputs: KVMap = { output }; + + if (typeof output === "object" && output != null) { + outputs = output; + } + + return asRunCreate({ + run_type: "tool", + name: span.attributes["ai.toolCall.name"], + inputs, + outputs, + }); + } + + case "ai.streamObject": + case "ai.streamObject.doStream": + case "ai.generateObject": + case "ai.generateObject.doGenerate": { + const inputs = ((): KVMap => { + if ("ai.prompt.messages" in span.attributes) { + return { + messages: tryJson(span.attributes["ai.prompt.messages"]).flatMap( + (i: CoreMessage) => 
convertCoreToSmith(i) + ), + }; + } + + if ("ai.prompt" in span.attributes) { + return { input: tryJson(span.attributes["ai.prompt"]) }; + } + + return {}; + })(); + + const outputs = ((): KVMap | undefined => { + let result: KVMap | undefined = undefined; + + if (span.attributes["ai.response.object"]) { + result = { + output: tryJson(span.attributes["ai.response.object"]), + }; + } + + if (span.attributes["ai.usage.completionTokens"]) { + result ??= {}; + result.llm_output ??= {}; + result.llm_output.token_usage ??= {}; + result.llm_output.token_usage["completion_tokens"] = + span.attributes["ai.usage.completionTokens"]; + } + + if (span.attributes["ai.usage.promptTokens"]) { + result ??= {}; + result.llm_output ??= {}; + result.llm_output.token_usage ??= {}; + result.llm_output.token_usage["prompt_tokens"] = + +span.attributes["ai.usage.promptTokens"]; + } + + return result; + })(); + + const events: KVMap[] = []; + const firstChunkEvent = span.events.find( + (i) => i.name === "ai.stream.firstChunk" + ); + if (firstChunkEvent) { + events.push({ + name: "new_token", + time: convertToTimestamp(firstChunkEvent.time), + }); + } + + return asRunCreate({ + run_type: "llm", + name: span.attributes["ai.model.provider"], + inputs, + outputs, + events, + extra: { + batch_size: 1, + metadata: { + ls_provider: span.attributes["ai.model.provider"] + .split(".") + .at(0), + ls_model_type: span.attributes["ai.model.provider"] + .split(".") + .at(1), + ls_model_name: span.attributes["ai.model.id"], + }, + }, + }); + } + + case "ai.embed": + case "ai.embed.doEmbed": + case "ai.embedMany": + case "ai.embedMany.doEmbed": + default: + return undefined; + } + } + + /** @internal */ + protected isRootRun(span: AISDKSpan): boolean { + switch (span.name) { + case "ai.generateText": + case "ai.streamText": + case "ai.generateObject": + case "ai.streamObject": + case "ai.embed": + case "ai.embedMany": + return true; + default: + return false; + } + } + + export( + spans: unknown[], + resultCallback: (result: { code: 0 | 1; error?: Error }) => void + ): void { + const typedSpans = (spans as AISDKSpan[]) + .slice() + .sort((a, b) => sortByHr(a.startTime, b.startTime)); + + for (const span of typedSpans) { + const { traceId, spanId } = span.spanContext(); + const parentId = span.parentSpanId ?? undefined; + this.traceByMap[traceId] ??= { + childMap: {}, + nodeMap: {}, + relativeExecutionOrder: {}, + }; + + const runId = uuid5(spanId, RUN_ID_NAMESPACE); + let parentRunId = parentId + ? uuid5(parentId, RUN_ID_NAMESPACE) + : undefined; + + // in LangSmith we currently only support certain spans + // which may be deeply nested within other traces + if (this.isRootRun(span)) parentRunId = undefined; + const traceMap = this.traceByMap[traceId]; + + const run = this.getRunCreate(span); + if (!run) continue; + + traceMap.relativeExecutionOrder[parentRunId ?? ROOT] ??= -1; + traceMap.relativeExecutionOrder[parentRunId ?? ROOT] += 1; + + traceMap.nodeMap[runId] ??= { + id: runId, + parentId: parentRunId, + startTime: span.startTime, + run, + sent: false, + executionOrder: traceMap.relativeExecutionOrder[parentRunId ?? ROOT], + }; + + traceMap.childMap[parentRunId ?? ROOT] ??= []; + traceMap.childMap[parentRunId ?? 
ROOT].push(traceMap.nodeMap[runId]);
+      traceMap.interop = this.parseInteropFromMetadata(span);
+    }
+
+    type OverrideRunCreate = {
+      id: string;
+      trace_id: string;
+      dotted_order: string;
+      parent_run_id: string | undefined;
+    };
+
+    // We separate `id`, `trace_id`, `dotted_order` and `parent_run_id`
+    // so they can be overridden when attaching to an existing trace
+    const sampled: [OverrideRunCreate, RunCreate][] = [];
+
+    for (const traceId of Object.keys(this.traceByMap)) {
+      type QueueItem = { item: RunTask; dottedOrder: string; traceId: string };
+      const traceMap = this.traceByMap[traceId];
+
+      const queue: QueueItem[] =
+        traceMap.childMap[ROOT]?.map((item) => ({
+          item,
+          dottedOrder: convertToDottedOrderFormat(
+            item.startTime,
+            item.id,
+            item.executionOrder
+          ),
+          traceId: item.id,
+        })) ?? [];
+
+      const seen = new Set<string>();
+      while (queue.length) {
+        // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+        const task = queue.shift()!;
+        if (seen.has(task.item.id)) continue;
+
+        if (!task.item.sent) {
+          let override: OverrideRunCreate = {
+            id: task.item.id,
+            parent_run_id: task.item.parentId,
+            dotted_order: task.dottedOrder,
+            trace_id: task.traceId,
+          };
+
+          if (traceMap.interop) {
+            // attach the run to a parent run tree
+            // - id: preserve
+            // - parent_run_id: use existing parent run id or hook to the provided run tree
+            // - dotted_order: append to the dotted_order of the parent run tree
+            // - trace_id: use from the existing run tree
+            if (traceMap.interop.type === "traceable") {
+              override = {
+                id: override.id,
+                parent_run_id:
+                  override.parent_run_id ?? traceMap.interop.parentRunTree.id,
+                dotted_order: [
+                  traceMap.interop.parentRunTree.dotted_order,
+                  override.dotted_order,
+                ]
+                  .filter(Boolean)
+                  .join("."),
+                trace_id: traceMap.interop.parentRunTree.trace_id,
+              };
+            } else if (traceMap.interop.type === "user") {
+              // Allow user to specify custom trace ID = run ID of the root run
+              // - id: use user provided run ID if root run, otherwise preserve
+              // - parent_run_id: use user provided run ID if root run, otherwise preserve
+              // - dotted_order: replace the trace_id with the user provided run ID
+              // - trace_id: use user provided run ID
+              const userTraceId = traceMap.interop.userTraceId ?? uuid4();
+              override = {
+                id:
+                  override.id === override.trace_id ? userTraceId : override.id,
+                parent_run_id:
+                  override.parent_run_id === override.trace_id
+                    ? userTraceId
+                    : override.parent_run_id,
+                dotted_order: override.dotted_order.replace(
+                  override.trace_id,
+                  userTraceId
+                ),
+                trace_id: userTraceId,
+              };
+            }
+          }
+
+          sampled.push([override, task.item.run]);
+          task.item.sent = true;
+        }
+
+        const children = traceMap.childMap[task.item.id] ?? [];
+        queue.push(
+          ...children.map((child) => {
+            return {
+              item: child,
+              dottedOrder: [
+                task.dottedOrder,
+                convertToDottedOrderFormat(
+                  child.startTime,
+                  child.id,
+                  child.executionOrder
+                ),
+              ].join("."),
+              traceId: task.traceId,
+            };
+          })
+        );
+      }
+    }
+
+    Promise.all(
+      sampled.map(([override, value]) =>
+        this.client.createRun({ ...value, ...override })
+      )
+    ).then(
+      () => resultCallback({ code: 0 }),
+      (error) => resultCallback({ code: 1, error })
+    );
+  }
+
+  async shutdown(): Promise<void> {
+    // find nodes which are incomplete
+    const incompleteNodes = Object.values(this.traceByMap).flatMap((trace) =>
+      Object.values(trace.nodeMap).filter((i) => !i.sent)
+    );
+
+    if (incompleteNodes.length > 0) {
+      console.warn(
+        "Some incomplete nodes were found before shutdown and not sent to LangSmith."
+      );
+    }
+
+    await this.client?.awaitPendingTraceBatches();
+  }
+  async forceFlush?(): Promise<void> {
+    await this.client?.awaitPendingTraceBatches();
+  }
+}
diff --git a/js/src/vercel.types.ts b/js/src/vercel.types.ts
new file mode 100644
index 000000000..165611454
--- /dev/null
+++ b/js/src/vercel.types.ts
@@ -0,0 +1,231 @@
+import type { ReadableSpan } from "@opentelemetry/sdk-trace-base";
+
+// eslint-disable-next-line @typescript-eslint/ban-types
+type AnyString = string & {};
+
+interface TypedReadableSpan<Name, Attributes>
+  extends Omit<ReadableSpan, "name" | "attributes"> {
+  name: Name;
+  attributes: Attributes;
+}
+
+interface BaseLLMSpanAttributes {
+  "ai.model.id": string;
+  "ai.model.provider": string;
+
+  "ai.usage.promptTokens": number;
+  "ai.usage.completionTokens": number;
+
+  "ai.telemetry.functionId"?: string;
+  "resource.name"?: string;
+}
+
+interface CallLLMSpanAttributes extends BaseLLMSpanAttributes {
+  "ai.response.model": string;
+  "ai.response.id": string;
+  "ai.response.timestamp": number;
+}
+
+interface BaseEmbedSpanAttributes {
+  "ai.model.id": string;
+  "ai.model.provider": string;
+  "ai.usage.tokens": number;
+
+  "ai.telemetry.functionId"?: string;
+  "resource.name"?: string;
+}
+
+type ToolCallSpan = TypedReadableSpan<
+  "ai.toolCall",
+  {
+    "operation.name": "ai.toolCall";
+    "ai.operationId": "ai.toolCall";
+    "ai.toolCall.name": string;
+    "ai.toolCall.id": string;
+    "ai.toolCall.args": string;
+    "ai.toolCall.result"?: string;
+  }
+>;
+
+type GenerateTextSpan = TypedReadableSpan<
+  "ai.generateText",
+  BaseLLMSpanAttributes & {
+    "operation.name": "ai.generateText";
+    "ai.operationId": "ai.generateText";
+    "ai.prompt": string;
+    "ai.response.text": string;
+    "ai.response.toolCalls": string;
+    "ai.response.finishReason": string;
+    "ai.settings.maxSteps": number;
+  }
+>;
+
+type DoGenerateTextSpan = TypedReadableSpan<
+  "ai.generateText.doGenerate",
+  CallLLMSpanAttributes & {
+    "operation.name": "ai.generateText.doGenerate";
+    "ai.operationId": "ai.generateText.doGenerate";
+    "ai.prompt.format": string;
+    "ai.prompt.messages": string;
+    "ai.response.text": string;
+    "ai.response.toolCalls": string;
+    "ai.response.finishReason": string;
+  }
+>;
+
+type StreamTextSpan = TypedReadableSpan<
+  "ai.streamText",
+  BaseLLMSpanAttributes & {
+    "operation.name": "ai.streamText";
+    "ai.operationId": "ai.streamText";
+    "ai.prompt": string;
+    "ai.response.text": string;
+    "ai.response.toolCalls": string;
+    "ai.response.finishReason": string;
+    "ai.settings.maxSteps": number;
+  }
+>;
+
+type DoStreamTextSpan = TypedReadableSpan<
+  "ai.streamText.doStream",
+  CallLLMSpanAttributes & {
+    "operation.name": "ai.streamText.doStream";
+    "ai.operationId": "ai.streamText.doStream";
+    "ai.prompt.format": string;
+    "ai.prompt.messages": string;
+    "ai.response.text": string;
+    "ai.response.toolCalls": string;
+    "ai.response.msToFirstChunk": number;
+    "ai.response.msToFinish": number;
+    "ai.response.avgCompletionTokensPerSecond": number;
+    "ai.response.finishReason": string;
+  }
+>;
+
+type GenerateObjectSpan = TypedReadableSpan<
+  "ai.generateObject",
+  BaseLLMSpanAttributes & {
+    "operation.name": "ai.generateObject";
+    "ai.operationId": "ai.generateObject";
+    "ai.prompt": string;
+
+    "ai.schema": string;
+    "ai.schema.name": string;
+    "ai.schema.description": string;
+
+    "ai.response.object": string;
+
+    "ai.settings.mode": "json" | AnyString;
+    "ai.settings.output": "object" | "no-schema" | AnyString;
+  }
+>;
+type DoGenerateObjectSpan = TypedReadableSpan<
+  "ai.generateObject.doGenerate",
+  CallLLMSpanAttributes & {
+    "operation.name": 
"ai.generateObject.doGenerate"; + "ai.operationId": "ai.generateObject.doGenerate"; + + "ai.prompt.format": string; + "ai.prompt.messages": string; + + "ai.response.object": string; + "ai.response.finishReason": string; + + "ai.settings.mode": "json" | AnyString; + "ai.settings.output": "object" | "no-schema" | AnyString; + } +>; + +type StreamObjectSpan = TypedReadableSpan< + "ai.streamObject", + BaseLLMSpanAttributes & { + "operation.name": "ai.streamObject"; + "ai.operationId": "ai.streamObject"; + "ai.prompt": string; + + "ai.schema": string; + "ai.schema.name": string; + "ai.schema.description": string; + + "ai.response.object": string; + + "ai.settings.mode": "json" | AnyString; + "ai.settings.output": "object" | "no-schema" | AnyString; + } +>; +type DoStreamObjectSpan = TypedReadableSpan< + "ai.streamObject.doStream", + CallLLMSpanAttributes & { + "operation.name": "ai.streamObject.doStream"; + "ai.operationId": "ai.streamObject.doStream"; + + "ai.prompt.format": string; + "ai.prompt.messages": string; + + "ai.response.object": string; + "ai.response.finishReason": string; + "ai.response.msToFirstChunk": number; + + "ai.settings.mode": "json" | AnyString; + } +>; + +type EmbedSpan = TypedReadableSpan< + "ai.embed", + BaseEmbedSpanAttributes & { + "operation.name": "ai.embed"; + "ai.operationId": "ai.embed"; + + "ai.value": string; + "ai.embedding": string; + } +>; + +type DoEmbedSpan = TypedReadableSpan< + "ai.embed.doEmbed", + BaseEmbedSpanAttributes & { + "operation.name": "ai.embed.doEmbed"; + "ai.operationId": "ai.embed.doEmbed"; + + "ai.values": string[]; + "ai.embeddings": string[]; + } +>; + +type EmbedManySpan = TypedReadableSpan< + "ai.embedMany", + BaseEmbedSpanAttributes & { + "operation.name": "ai.embedMany"; + "ai.operationId": "ai.embedMany"; + + "ai.values": string[]; + "ai.embeddings": string[]; + } +>; + +type DoEmbedManySpan = TypedReadableSpan< + "ai.embedMany.doEmbed", + BaseEmbedSpanAttributes & { + "operation.name": "ai.embedMany.doEmbed"; + "ai.operationId": "ai.embedMany.doEmbed"; + + "ai.values": string[]; + "ai.embeddings": string[]; + } +>; + +/** @internal */ +export type AISDKSpan = + | ToolCallSpan + | GenerateTextSpan + | DoGenerateTextSpan + | StreamTextSpan + | DoStreamTextSpan + | GenerateObjectSpan + | DoGenerateObjectSpan + | StreamObjectSpan + | DoStreamObjectSpan + | EmbedSpan + | DoEmbedSpan + | EmbedManySpan + | DoEmbedManySpan; diff --git a/js/tsconfig.json b/js/tsconfig.json index ab24d6247..b778ed83f 100644 --- a/js/tsconfig.json +++ b/js/tsconfig.json @@ -17,6 +17,7 @@ "noUnusedParameters": true, "useDefineForClassFields": true, "strictPropertyInitialization": false, + "stripInternal": true, "allowJs": true, "strict": true, "outDir": "dist" @@ -39,6 +40,7 @@ "src/evaluation/langchain.ts", "src/schemas.ts", "src/langchain.ts", + "src/vercel.ts", "src/wrappers/index.ts", "src/anonymizer/index.ts", "src/wrappers/openai.ts", diff --git a/js/yarn.lock b/js/yarn.lock index e07004f28..eed0130ac 100644 --- a/js/yarn.lock +++ b/js/yarn.lock @@ -2,72 +2,75 @@ # yarn lockfile v1 -"@ai-sdk/openai@^0.0.40": - version "0.0.40" - resolved "https://registry.yarnpkg.com/@ai-sdk/openai/-/openai-0.0.40.tgz#227df69c8edf8b26b17f78ae55daa03e58a58870" - integrity sha512-9Iq1UaBHA5ZzNv6j3govuKGXrbrjuWvZIgWNJv4xzXlDMHu9P9hnqlBr/Aiay54WwCuTVNhTzAUTfFgnTs2kbQ== +"@ai-sdk/openai@^0.0.68": + version "0.0.68" + resolved "https://registry.yarnpkg.com/@ai-sdk/openai/-/openai-0.0.68.tgz#7507534a217355273651ad2ea0fffd6e208587ea" + integrity 
sha512-WSzB7qpBTrnYvFbnBBmIsw1G8GM04JRMr+I7B5T7msgZfleG4cTvVrn9A1HeHHw9TmbKiaCKJrEZH4V0lb7jNQ== dependencies: - "@ai-sdk/provider" "0.0.14" - "@ai-sdk/provider-utils" "1.0.5" + "@ai-sdk/provider" "0.0.24" + "@ai-sdk/provider-utils" "1.0.20" -"@ai-sdk/provider-utils@1.0.5": - version "1.0.5" - resolved "https://registry.yarnpkg.com/@ai-sdk/provider-utils/-/provider-utils-1.0.5.tgz#765c60871019ded104d79b4cea0805ba563bb5aa" - integrity sha512-XfOawxk95X3S43arn2iQIFyWGMi0DTxsf9ETc6t7bh91RPWOOPYN1tsmS5MTKD33OGJeaDQ/gnVRzXUCRBrckQ== +"@ai-sdk/provider-utils@1.0.20": + version "1.0.20" + resolved "https://registry.yarnpkg.com/@ai-sdk/provider-utils/-/provider-utils-1.0.20.tgz#46175945dc32ad2d76cb5447738bcac3ad59dbcb" + integrity sha512-ngg/RGpnA00eNOWEtXHenpX1MsM2QshQh4QJFjUfwcqHpM5kTfG7je7Rc3HcEDP+OkRVv2GF+X4fC1Vfcnl8Ow== dependencies: - "@ai-sdk/provider" "0.0.14" + "@ai-sdk/provider" "0.0.24" eventsource-parser "1.1.2" nanoid "3.3.6" secure-json-parse "2.7.0" -"@ai-sdk/provider@0.0.14": - version "0.0.14" - resolved "https://registry.yarnpkg.com/@ai-sdk/provider/-/provider-0.0.14.tgz#a07569c39a8828aa8312cf1ac6f35ce6ee1b2fce" - integrity sha512-gaQ5Y033nro9iX1YUjEDFDRhmMcEiCk56LJdIUbX5ozEiCNCfpiBpEqrjSp/Gp5RzBS2W0BVxfG7UGW6Ezcrzg== +"@ai-sdk/provider@0.0.24": + version "0.0.24" + resolved "https://registry.yarnpkg.com/@ai-sdk/provider/-/provider-0.0.24.tgz#e794f4255a833c47aeffcd8f6808a79b2a6b1f06" + integrity sha512-XMsNGJdGO+L0cxhhegtqZ8+T6nn4EoShS819OvCgI2kLbYTIvk0GWFGD0AXJmxkxs3DrpsJxKAFukFR7bvTkgQ== dependencies: json-schema "0.4.0" -"@ai-sdk/react@0.0.30": - version "0.0.30" - resolved "https://registry.yarnpkg.com/@ai-sdk/react/-/react-0.0.30.tgz#51d586141a81d7f9b76798922b206e8c6faf04dc" - integrity sha512-VnHYRzwhiM4bZdL9DXwJltN8Qnz1MkFdRTa1y7KdmHSJ18ebCNWmPO5XJhnZiQdEXHYmrzZ3WiVt2X6pxK07FA== +"@ai-sdk/react@0.0.64": + version "0.0.64" + resolved "https://registry.yarnpkg.com/@ai-sdk/react/-/react-0.0.64.tgz#921d1dc53c98b7c3488a2099d2b67f6573c83e92" + integrity sha512-4LN2vleyA6rYHZ4Rk9CdxnJgaVkNPJDD4Wx1brUhc5RvUxj3TODcm2UwGOR/mxv4pcydtZGELfQQs/i/tkAUCw== dependencies: - "@ai-sdk/provider-utils" "1.0.5" - "@ai-sdk/ui-utils" "0.0.20" + "@ai-sdk/provider-utils" "1.0.20" + "@ai-sdk/ui-utils" "0.0.46" swr "2.2.5" -"@ai-sdk/solid@0.0.23": - version "0.0.23" - resolved "https://registry.yarnpkg.com/@ai-sdk/solid/-/solid-0.0.23.tgz#712cf1a02bfc337806c5c1b486d16252bec57a15" - integrity sha512-GMojG2PsqwnOGfx7C1MyQPzPBIlC44qn3ykjp9OVnN2Fu47mcFp3QM6gwWoHwNqi7FQDjRy+s/p+8EqYIQcAwg== +"@ai-sdk/solid@0.0.50": + version "0.0.50" + resolved "https://registry.yarnpkg.com/@ai-sdk/solid/-/solid-0.0.50.tgz#a7a30959a97c472a7bae38986958c5164aa2c487" + integrity sha512-JF+KKOgGAgcROgae6FU+hAtxMRhR896SzwI3H1h5hFOZrjqYeYzemJoKzA5MR5IBnPSK4FzEjunc8G5L67TyzQ== dependencies: - "@ai-sdk/provider-utils" "1.0.5" - "@ai-sdk/ui-utils" "0.0.20" + "@ai-sdk/provider-utils" "1.0.20" + "@ai-sdk/ui-utils" "0.0.46" -"@ai-sdk/svelte@0.0.24": - version "0.0.24" - resolved "https://registry.yarnpkg.com/@ai-sdk/svelte/-/svelte-0.0.24.tgz#2519b84a0c104c82d5e48d3b8e9350e9dd4af6cf" - integrity sha512-ZjzzvfYLE01VTO0rOZf6z9sTGhJhe6IYZMxQiM3P+zemufRYe57NDcLYEb6h+2qhvU6Z+k/Q+Nh/spAt0JzGUg== +"@ai-sdk/svelte@0.0.52": + version "0.0.52" + resolved "https://registry.yarnpkg.com/@ai-sdk/svelte/-/svelte-0.0.52.tgz#3b1ee970ce870a5b565807d88b701185afabcd4b" + integrity sha512-ZGd81ruVuqpOh1Suma+HwBMBywcOV0IUzi96Q3knIoZIz99sVwebSKH8ExMofXm49bQdCTRa73Wn8sTs6QDIYg== dependencies: - "@ai-sdk/provider-utils" "1.0.5" - "@ai-sdk/ui-utils" "0.0.20" + 
"@ai-sdk/provider-utils" "1.0.20" + "@ai-sdk/ui-utils" "0.0.46" sswr "2.1.0" -"@ai-sdk/ui-utils@0.0.20": - version "0.0.20" - resolved "https://registry.yarnpkg.com/@ai-sdk/ui-utils/-/ui-utils-0.0.20.tgz#c68968185a7cc33f7d98d13999731e1c7b672cbb" - integrity sha512-6MRWigzXfuxUcAYEFMLP6cLbALJkg12Iz1Sl+wuPMpB6aw7di2ePiTuNakFUYjgP7TNsW4UxzpypBqqJ1KNB0A== +"@ai-sdk/ui-utils@0.0.46": + version "0.0.46" + resolved "https://registry.yarnpkg.com/@ai-sdk/ui-utils/-/ui-utils-0.0.46.tgz#72311a1917a370074089cc6dd8c982d272f6b836" + integrity sha512-ZG/wneyJG+6w5Nm/hy1AKMuRgjPQToAxBsTk61c9sVPUTaxo+NNjM2MhXQMtmsja2N5evs8NmHie+ExEgpL3cA== dependencies: - "@ai-sdk/provider-utils" "1.0.5" + "@ai-sdk/provider" "0.0.24" + "@ai-sdk/provider-utils" "1.0.20" + json-schema "0.4.0" secure-json-parse "2.7.0" + zod-to-json-schema "3.23.2" -"@ai-sdk/vue@0.0.24": - version "0.0.24" - resolved "https://registry.yarnpkg.com/@ai-sdk/vue/-/vue-0.0.24.tgz#2e72f7e755850ed51540f9a7b25dc6b228a8647a" - integrity sha512-0S+2dVSui6LFgaWoFx+3h5R7GIP9MxdJo63tFuLvgyKr2jmpo5S5kGcWl95vNdzKDqaesAXfOnky+tn5A2d49A== +"@ai-sdk/vue@0.0.55": + version "0.0.55" + resolved "https://registry.yarnpkg.com/@ai-sdk/vue/-/vue-0.0.55.tgz#3da3466418a3e105dd96bdee7217bd2d94a5cb61" + integrity sha512-NZ89CeRPO3D9GjI7GmK3vC+YXjsaWi3iCIvxlGqfQYt0JFKcjgM6dfeq8Nkk+qWI9OoxoOhV/yQdqWQKPv3RRg== dependencies: - "@ai-sdk/provider-utils" "1.0.5" - "@ai-sdk/ui-utils" "0.0.20" + "@ai-sdk/provider-utils" "1.0.20" + "@ai-sdk/ui-utils" "0.0.46" swrv "1.0.4" "@ampproject/remapping@^2.2.0": @@ -1455,6 +1458,66 @@ resolved "https://registry.yarnpkg.com/@opentelemetry/api/-/api-1.9.0.tgz#d03eba68273dc0f7509e2a3d5cba21eae10379fe" integrity sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg== +"@opentelemetry/context-async-hooks@1.26.0": + version "1.26.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/context-async-hooks/-/context-async-hooks-1.26.0.tgz#fa92f722cf685685334bba95f258d3ef9fce60f6" + integrity sha512-HedpXXYzzbaoutw6DFLWLDket2FwLkLpil4hGCZ1xYEIMTcivdfwEOISgdbLEWyG3HW52gTq2V9mOVJrONgiwg== + +"@opentelemetry/core@1.26.0": + version "1.26.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/core/-/core-1.26.0.tgz#7d84265aaa850ed0ca5813f97d831155be42b328" + integrity sha512-1iKxXXE8415Cdv0yjG3G6hQnB5eVEsJce3QaawX8SjDn0mAS0ZM8fAbZZJD4ajvhC15cePvosSCut404KrIIvQ== + dependencies: + "@opentelemetry/semantic-conventions" "1.27.0" + +"@opentelemetry/propagator-b3@1.26.0": + version "1.26.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/propagator-b3/-/propagator-b3-1.26.0.tgz#3ebbeff26a3fb81e8be011666ea6d07ff3e4fba7" + integrity sha512-vvVkQLQ/lGGyEy9GT8uFnI047pajSOVnZI2poJqVGD3nJ+B9sFGdlHNnQKophE3lHfnIH0pw2ubrCTjZCgIj+Q== + dependencies: + "@opentelemetry/core" "1.26.0" + +"@opentelemetry/propagator-jaeger@1.26.0": + version "1.26.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/propagator-jaeger/-/propagator-jaeger-1.26.0.tgz#096ac03d754204921cd5a886c77b5c9bd4677cd7" + integrity sha512-DelFGkCdaxA1C/QA0Xilszfr0t4YbGd3DjxiCDPh34lfnFr+VkkrjV9S8ZTJvAzfdKERXhfOxIKBoGPJwoSz7Q== + dependencies: + "@opentelemetry/core" "1.26.0" + +"@opentelemetry/resources@1.26.0": + version "1.26.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/resources/-/resources-1.26.0.tgz#da4c7366018bd8add1f3aa9c91c6ac59fd503cef" + integrity sha512-CPNYchBE7MBecCSVy0HKpUISEeJOniWqcHaAHpmasZ3j9o6V3AyBzhRc90jdmemq0HOxDr6ylhUbDhBqqPpeNw== + dependencies: + "@opentelemetry/core" "1.26.0" + 
"@opentelemetry/semantic-conventions" "1.27.0" + +"@opentelemetry/sdk-trace-base@1.26.0", "@opentelemetry/sdk-trace-base@^1.26.0": + version "1.26.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/sdk-trace-base/-/sdk-trace-base-1.26.0.tgz#0c913bc6d2cfafd901de330e4540952269ae579c" + integrity sha512-olWQldtvbK4v22ymrKLbIcBi9L2SpMO84sCPY54IVsJhP9fRsxJT194C/AVaAuJzLE30EdhhM1VmvVYR7az+cw== + dependencies: + "@opentelemetry/core" "1.26.0" + "@opentelemetry/resources" "1.26.0" + "@opentelemetry/semantic-conventions" "1.27.0" + +"@opentelemetry/sdk-trace-node@^1.26.0": + version "1.26.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/sdk-trace-node/-/sdk-trace-node-1.26.0.tgz#169ef4fc058e82a12460da18cedaf6e4615fc617" + integrity sha512-Fj5IVKrj0yeUwlewCRwzOVcr5avTuNnMHWf7GPc1t6WaT78J6CJyF3saZ/0RkZfdeNO8IcBl/bNcWMVZBMRW8Q== + dependencies: + "@opentelemetry/context-async-hooks" "1.26.0" + "@opentelemetry/core" "1.26.0" + "@opentelemetry/propagator-b3" "1.26.0" + "@opentelemetry/propagator-jaeger" "1.26.0" + "@opentelemetry/sdk-trace-base" "1.26.0" + semver "^7.5.2" + +"@opentelemetry/semantic-conventions@1.27.0": + version "1.27.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/semantic-conventions/-/semantic-conventions-1.27.0.tgz#1a857dcc95a5ab30122e04417148211e6f945e6c" + integrity sha512-sAay1RrB+ONOem0OZanAR1ZI/k7yDpnOQSQmTMuGImUQb2y8EbSaCJ94FQluM74xoU03vlb2d2U90hZluL6nQg== + "@sinclair/typebox@^0.25.16": version "0.25.24" resolved "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.25.24.tgz" @@ -1756,25 +1819,25 @@ agentkeepalive@^4.2.1: dependencies: humanize-ms "^1.2.1" -ai@^3.2.37: - version "3.2.37" - resolved "https://registry.yarnpkg.com/ai/-/ai-3.2.37.tgz#148ed3124e6b0a01c703597471718520ef1c498d" - integrity sha512-waqKYZOE1zJwKEHx69R4v/xNG0a1o0He8TDgX29hUu36Zk0yrBJoVSlXbC9KoFuxW4eRpt+gZv1kqd1nVc1CGg== - dependencies: - "@ai-sdk/provider" "0.0.14" - "@ai-sdk/provider-utils" "1.0.5" - "@ai-sdk/react" "0.0.30" - "@ai-sdk/solid" "0.0.23" - "@ai-sdk/svelte" "0.0.24" - "@ai-sdk/ui-utils" "0.0.20" - "@ai-sdk/vue" "0.0.24" +ai@^3.4.17: + version "3.4.17" + resolved "https://registry.yarnpkg.com/ai/-/ai-3.4.17.tgz#9c5bbce9a2a2fdb49058ded31f0d5ba9f8531bfd" + integrity sha512-QZc+NgNlzPT34ZTHaCGGXVJ+stbMLj98hwq+vJaIzD1lns6HlDatrmlFjJsYYf8FtnfqGV7yPNu8DrH8a274vA== + dependencies: + "@ai-sdk/provider" "0.0.24" + "@ai-sdk/provider-utils" "1.0.20" + "@ai-sdk/react" "0.0.64" + "@ai-sdk/solid" "0.0.50" + "@ai-sdk/svelte" "0.0.52" + "@ai-sdk/ui-utils" "0.0.46" + "@ai-sdk/vue" "0.0.55" "@opentelemetry/api" "1.9.0" eventsource-parser "1.1.2" json-schema "0.4.0" jsondiffpatch "0.6.0" nanoid "3.3.6" secure-json-parse "2.7.0" - zod-to-json-schema "3.22.5" + zod-to-json-schema "3.23.2" ajv@^6.10.0, ajv@^6.12.4: version "6.12.6" @@ -4344,7 +4407,7 @@ semver@^6.0.0, semver@^6.1.1, semver@^6.1.2, semver@^6.3.0: resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== -semver@^7.6.3: +semver@^7.5.2, semver@^7.6.3: version "7.6.3" resolved "https://registry.yarnpkg.com/semver/-/semver-7.6.3.tgz#980f7b5550bc175fb4dc09403085627f9eb33143" integrity sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A== @@ -4888,10 +4951,10 @@ yocto-queue@^0.1.0: resolved "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz" integrity 
sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== -zod-to-json-schema@3.22.5: - version "3.22.5" - resolved "https://registry.yarnpkg.com/zod-to-json-schema/-/zod-to-json-schema-3.22.5.tgz#3646e81cfc318dbad2a22519e5ce661615418673" - integrity sha512-+akaPo6a0zpVCCseDed504KBJUQpEW5QZw7RMneNmKw+fGaML1Z9tUNLnHHAC8x6dzVRO1eB2oEMyZRnuBZg7Q== +zod-to-json-schema@3.23.2: + version "3.23.2" + resolved "https://registry.yarnpkg.com/zod-to-json-schema/-/zod-to-json-schema-3.23.2.tgz#bc7e379c8050462538383e382964c03d8fe008f9" + integrity sha512-uSt90Gzc/tUfyNqxnjlfBs8W6WSGpNBv0rVsNxP/BVSMHMKGdthPYff4xtCHYloJGM0CFxFsb3NbC0eqPhfImw== zod-to-json-schema@^3.22.3: version "3.22.4"
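
Note on the `vercel.types.ts` addition above: `AISDKSpan` is a discriminated union keyed on the span's `name`, so narrowing on `span.name` also narrows `span.attributes` to the matching shape. Below is a minimal sketch of how consuming code can rely on this; the `summarizeSpan` helper is hypothetical and for illustration only, and note that `AISDKSpan` is tagged `@internal`, so the new `stripInternal` flag in `tsconfig.json` removes it from the published typings.

```ts
import type { AISDKSpan } from "./vercel.types.js";

// Hypothetical helper: switching on the `name` discriminant narrows
// `attributes` to the corresponding member of the AISDKSpan union.
function summarizeSpan(span: AISDKSpan): string {
  switch (span.name) {
    case "ai.toolCall":
      // Narrowed to ToolCallSpan: tool-call attributes are available.
      return `tool ${span.attributes["ai.toolCall.name"]}(${span.attributes["ai.toolCall.args"]})`;
    case "ai.generateText.doGenerate":
    case "ai.streamText.doStream":
      // Both members carry CallLLMSpanAttributes, so the concrete
      // response model and token usage can be read here.
      return `${span.attributes["ai.response.model"]}: ${span.attributes["ai.usage.completionTokens"]} completion tokens`;
    default:
      return span.name;
  }
}
```

The optional `forceFlush?()` on the exporter mirrors OpenTelemetry's `SpanExporter` interface, where `forceFlush` is an optional method; both it and `shutdown()` drain any queued runs via `client.awaitPendingTraceBatches()`.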