diff --git a/bun.lockb b/bun.lockb
index e60034b..c5d03ad 100755
Binary files a/bun.lockb and b/bun.lockb differ
diff --git a/public-packages/llm-client/examples/google.function.ts b/public-packages/llm-client/examples/google.function.ts
new file mode 100644
index 0000000..dff1137
--- /dev/null
+++ b/public-packages/llm-client/examples/google.function.ts
@@ -0,0 +1,146 @@
+import { createLLMClient } from "@/index"
+
+const googleClient = createLLMClient({
+  provider: "google"
+})
+
+//
+// Simple Chat
+//
+
+// const completion = await googleClient.chat.completions.create({
+//   model: "gemini-1.5-flash-latest",
+//   messages: [
+//     {
+//       role: "user",
+//       content: "How much does a soul weigh?"
+//     }
+//   ],
+//   max_tokens: 1000
+// })
+
+// console.log(JSON.stringify(completion, null, 2))
+
+//
+// Function calling
+//
+
+// const completion2 = await googleClient.chat.completions.create({
+//   model: "gemini-1.5-flash-latest",
+//   max_tokens: 1000,
+//   messages: [
+//     {
+//       role: "user",
+//       content: "My name is Spartacus."
+//     }
+//   ],
+//   tool_choice: {
+//     type: "function",
+//     function: {
+//       name: "say_hello"
+//     }
+//   },
+//   tools: [
+//     {
+//       type: "function",
+//       function: {
+//         name: "say_hello",
+//         description: "Say hello",
+//         parameters: {
+//           type: "object",
+//           properties: {
+//             name: {
+//               type: "string"
+//             }
+//           },
+//           required: ["name"]
+//           //additionalProperties: false
+//         }
+//       }
+//     }
+//   ]
+// })
+
+// console.log(JSON.stringify(completion2, null, 2))
+
+//
+// Streaming chat
+//
+
+// const completion3 = await googleClient.chat.completions.create({
+//   model: "gemini-1.5-flash-latest",
+//   messages: [
+//     {
+//       role: "user",
+//       //content: "Write a soliloquy about the humidity."
+//       content: "Write an essay about the chemical composition of dirt."
+//     }
+//   ],
+//   max_tokens: 1000,
+//   stream: true
+// })
+
+//expect(completion).toBeTruthy
+//let final = ""
+// console.log({ completion3 })
+// for await (const message of completion3) {
+//   console.log({ message })
+//   //final += message.choices?.[0].delta?.content ?? ""
+// }
+
+////////////////////////////////////////
+// content caching
+// note: need pay-as-you-go account - not available on free tier
+////////////////////////////////////////
+
+// Generate a very long string (content caching requires a large minimum input size)
+let longContentString = ""
+for (let i = 0; i < 32001; i++) {
+  longContentString += "Purple cats drink gatorade."
+  longContentString += i % 8 === 7 ? "\n" : " "
+}
+
+// Add content to cache
+const cacheResult = await googleClient.cacheManager.create({
+  //const cacheResult = await googleClient.createCacheManager({
+  ttlSeconds: 600,
+  model: "models/gemini-1.5-pro-001",
+  messages: [{ role: "user", content: longContentString }],
+  max_tokens: 1000
+})
+
+// Get name from cache result
+const cacheName = cacheResult?.name ?? ""
+console.log("Cache name: ", cacheName)
+
+// List caches
+const cacheListResult = await googleClient.cacheManager.list()
+console.log("cacheListResult: ", JSON.stringify(cacheListResult, null, 2))
+
+// Delete cache
+// await googleClient.cacheManager.delete(cacheName)
+// cacheListResult = await googleClient.cacheManager.list()
+// console.log("cacheListResult after delete: ", JSON.stringify(cacheListResult, null, 2))
+
+// Delete all caches
+// cacheListResult?.cachedContents?.forEach(async cache => {
+//   if (cache.name) await googleClient.cacheManager.delete(cache.name)
+// })
+
+// Pass name into additionalProperties
+const completion4 = await googleClient.chat.completions.create({
+  // model: "gemini-1.5-flash-latest",
+  model: "models/gemini-1.5-pro-001",
+  messages: [
+    {
+      role: "user",
+      content: "What do purple cats drink?"
+    }
+  ],
+  max_tokens: 10000,
+  additionalProperties: {
+    cacheName
+  }
+})
+
+console.log("Completion: ", JSON.stringify(completion4, null, 2))
diff --git a/public-packages/llm-client/package.json b/public-packages/llm-client/package.json
index 80f0418..6400a74 100644
--- a/public-packages/llm-client/package.json
+++ b/public-packages/llm-client/package.json
@@ -46,6 +46,7 @@
   "author": "Dimitri Kennedy (https://hack.dance)",
   "homepage": "https://island.novy.work",
   "dependencies": {
+    "@google/generative-ai": "^0.14.1",
     "json-schema": "^0.4.0"
   },
   "peerDependencies": {
diff --git a/public-packages/llm-client/src/index.ts b/public-packages/llm-client/src/index.ts
index 8e7b15b..29a4065 100644
--- a/public-packages/llm-client/src/index.ts
+++ b/public-packages/llm-client/src/index.ts
@@ -1,8 +1,14 @@
 import { AnthropicProvider } from "@/providers/anthropic"
+import { GoogleProvider } from "@/providers/google"
 import { OpenAIProvider } from "@/providers/openai"
 import { OpenAILikeClient, Providers } from "@/types"
+import { TextDecoderStream, TextEncoderStream } from "@/utils/polyfills"
 import { ClientOptions } from "openai"
 
+// polyfills
+globalThis.TextEncoderStream ||= TextEncoderStream
+globalThis.TextDecoderStream ||= TextDecoderStream
+
 export class LLMClient<P extends Providers> {
   private providerInstance: OpenAILikeClient<P>
 
@@ -11,10 +17,16 @@ export class LLMClient<P extends Providers> {
       provider: P
     }
   ) {
-    if (opts?.provider === "openai") {
-      this.providerInstance = new OpenAIProvider(opts) as OpenAILikeClient<P>
-    } else {
-      this.providerInstance = new AnthropicProvider(opts) as unknown as OpenAILikeClient<P>
+    switch (opts?.provider) {
+      case "anthropic":
+        this.providerInstance = new AnthropicProvider(opts) as unknown as OpenAILikeClient<P>
+        break
+      case "google":
+        this.providerInstance = new GoogleProvider(opts) as unknown as OpenAILikeClient<P>
+        break
+      case "openai":
+      default:
+        this.providerInstance = new OpenAIProvider(opts) as OpenAILikeClient<P>
     }
 
     const proxyHandler: ProxyHandler<LLMClient<P>> = {
diff --git a/public-packages/llm-client/src/providers/google/index.ts b/public-packages/llm-client/src/providers/google/index.ts
new file mode 100644
index 0000000..a7eb0ea
--- /dev/null
+++ b/public-packages/llm-client/src/providers/google/index.ts
@@ -0,0 +1,333 @@
+import {
+  ExtendedCompletionChunkGoogle,
+  ExtendedCompletionGoogle,
+  GooggleCacheCreateParams,
+  GoogleChatCompletionParams,
+  GoogleChatCompletionParamsNonStream,
+  GoogleChatCompletionParamsStream,
+  LogLevel,
+  OpenAILikeClient,
+  Role
+} from "@/types"
+import {
+  Content,
+  EnhancedGenerateContentResponse,
+  FunctionCallingMode,
+  FunctionDeclarationsTool,
+  GenerateContentRequest,
+  GoogleGenerativeAI,
+  TextPart,
+  Tool
+} from "@google/generative-ai"
+import { CachedContentUpdateParams, GoogleAICacheManager } from "@google/generative-ai/server"
+import { ClientOptions } from "openai"
+
+export class GoogleProvider extends GoogleGenerativeAI implements OpenAILikeClient<"google"> {
+  public apiKey: string
+  public logLevel: LogLevel = (process.env?.["LOG_LEVEL"] as LogLevel) ?? "info"
+  private googleCacheManager
+
+  private log<T extends unknown[]>(level: LogLevel, ...args: T) {
+    const timestamp = new Date().toISOString()
+    switch (level) {
+      case "debug":
+        if (this.logLevel === "debug") {
+          console.debug(`[LLM-CLIENT--GOOGLE-CLIENT:DEBUG] ${timestamp}:`, ...args)
+        }
+        break
+      case "info":
+        if (this.logLevel === "debug" || this.logLevel === "info") {
+          console.info(`[LLM-CLIENT--GOOGLE-CLIENT:INFO] ${timestamp}:`, ...args)
+        }
+        break
+      case "warn":
+        if (this.logLevel === "debug" || this.logLevel === "info" || this.logLevel === "warn") {
+          console.warn(`[LLM-CLIENT--GOOGLE-CLIENT:WARN] ${timestamp}:`, ...args)
+        }
+        break
+      case "error":
+        console.error(`[LLM-CLIENT--GOOGLE-CLIENT:ERROR] ${timestamp}:`, ...args)
+        break
+    }
+  }
+
+  constructor(
+    opts?: ClientOptions & {
+      logLevel?: LogLevel
+    }
+  ) {
+    const apiKey = opts?.apiKey ?? process.env?.["GOOGLE_API_KEY"] ?? null
+
+    if (!apiKey) {
+      throw new Error(
+        "API key is required for GoogleProvider - please provide it in the constructor or set it as an environment variable named GOOGLE_API_KEY."
+      )
+    }
+
+    super(apiKey)
+
+    this.logLevel = opts?.logLevel ?? this.logLevel
+    this.apiKey = apiKey
+    this.googleCacheManager = new GoogleAICacheManager(apiKey)
+  }
+
+  /**
+   * Transforms the OpenAI chat completion parameters into Google chat completion parameters.
+   * @param params - The OpenAI chat completion parameters.
+   * @returns The transformed Google chat completion parameters.
+   */
+  private transformParams(params: GoogleChatCompletionParams): GenerateContentRequest {
+    let function_declarations: FunctionDeclarationsTool[] = []
+    const allowedFunctionNames: string[] = []
+
+    // conform messages to Google's Content[] type:
+    // Google uses "model" where OpenAI uses "assistant", and has no "system" role
+    const contents: Content[] = params.messages
+      .filter(message => message.role !== "system")
+      .map(message => {
+        const textPart: TextPart = {
+          text: message.content.toString()
+        }
+        return {
+          role: message.role === "assistant" ? "model" : "user",
+          parts: [textPart]
+        }
+      })
+
+    // cached content doesn't support tools or system messages (yet)
+    if (params.additionalProperties?.["cacheName"]) {
+      return {
+        contents
+      }
+    }
+
+    if ("tools" in params && Array.isArray(params.tools) && params.tools.length > 0) {
+      function_declarations = params.tools.map(tool => {
+        allowedFunctionNames.push(tool.function.name)
+        return {
+          name: tool.function.name ?? "",
+          description: tool.function.description ?? "",
+          parameters: tool.function.parameters
+        } as FunctionDeclarationsTool
+      })
+    }
+
+    const systemMessages = params.messages.filter(message => message.role === "system")
+    // the type of systemInstruction is string | Part | Content - but this structure seems to be the only one that works
+    const systemInstruction =
+      systemMessages.length > 0
+        ? {
+            parts: systemMessages.map(message => ({ text: message.content.toString() })),
+            role: "system"
+          }
+        : undefined
+
+    return {
+      contents,
+      ...(function_declarations?.length
+        ? {
+            tools: [{ function_declarations } as Tool],
+            toolConfig: {
+              functionCallingConfig: {
+                mode: FunctionCallingMode.ANY,
+                allowedFunctionNames
+              }
+            }
+          }
+        : {}),
+      systemInstruction
+    }
+  }
+
+  /**
+   * Transforms the Google API response into an ExtendedCompletionGoogle or ExtendedCompletionChunkGoogle object.
+   * @param response - The Google API response.
+   * @param stream - An optional parameter indicating whether the response is a stream.
+   * @returns A Promise that resolves to an ExtendedCompletionGoogle or ExtendedCompletionChunkGoogle object.
+   */
+  private async transformResponse(
+    response: EnhancedGenerateContentResponse,
+    { stream }: { stream?: boolean } = {}
+  ): Promise<ExtendedCompletionGoogle | ExtendedCompletionChunkGoogle> {
+    const responseText = response.text()
+    const functionCalls = response.functionCalls()
+
+    const tool_calls = functionCalls?.map((block, index) => ({
+      index,
+      id: "",
+      type: "function",
+      function: {
+        name: block.name,
+        arguments: JSON.stringify(block.args)
+      }
+    }))
+
+    const transformedResponse = {
+      id: "",
+      originResponse: response,
+      usage: {
+        prompt_tokens: response.usageMetadata?.promptTokenCount ?? 0,
+        completion_tokens: response.usageMetadata?.candidatesTokenCount ?? 0,
+        total_tokens:
+          (response.usageMetadata?.promptTokenCount ?? 0) +
+          (response.usageMetadata?.candidatesTokenCount ?? 0)
+      }
+    }
+
+    const responseDataChunk = {
+      role: "assistant" as Role,
+      content: responseText,
+      ...(tool_calls?.length ? { tool_calls } : {})
+    }
+
+    const finish_reason = tool_calls?.length ? "tool_calls" : "stop"
+
+    if (stream) {
+      return {
+        ...transformedResponse,
+        object: "chat.completion.chunk",
+        choices: [
+          {
+            delta: responseDataChunk,
+            finish_reason,
+            index: 0
+          }
+        ]
+      } as ExtendedCompletionChunkGoogle
+    } else {
+      return {
+        ...transformedResponse,
+        object: "chat.completion",
+        choices: [
+          {
+            message: responseDataChunk,
+            finish_reason,
+            index: 0,
+            logprobs: null
+          }
+        ]
+      } as ExtendedCompletionGoogle
+    }
+  }
+
+  /**
+   * Streams the chat completion response from the Google API.
+   * @param stream - The async iterable of response chunks from the Google API.
+   * @returns An asynchronous iterable of ExtendedCompletionChunkGoogle objects.
+   */
+  private async *streamChatCompletion(
+    stream: AsyncIterable<EnhancedGenerateContentResponse>
+  ): AsyncIterable<ExtendedCompletionChunkGoogle> {
+    for await (const chunk of stream) {
+      yield (await this.transformResponse(chunk, { stream: true })) as ExtendedCompletionChunkGoogle
+    }
+  }
+
+  /**
+   * Creates a chat completion using the Google AI API.
+   * @param params - The chat completion parameters.
+   * @returns A Promise that resolves to an ExtendedCompletionGoogle object or an asynchronous iterable of ExtendedCompletionChunkGoogle objects if streaming is enabled.
+   */
+  public async create(
+    params: GoogleChatCompletionParamsStream
+  ): Promise<AsyncIterable<ExtendedCompletionChunkGoogle>>
+
+  public async create(
+    params: GoogleChatCompletionParamsNonStream
+  ): Promise<ExtendedCompletionGoogle>
+
+  public async create(
+    params: GoogleChatCompletionParams
+  ): Promise<ExtendedCompletionGoogle | AsyncIterable<ExtendedCompletionChunkGoogle>> {
+    try {
+      if (!params?.model || !params?.messages?.length) {
+        throw new Error("model and messages are required")
+      }
+
+      const googleParams = this.transformParams(params)
+
+      let generativeModel
+      if (params.additionalProperties?.["cacheName"]) {
+        // if there's a cacheName, get model using cached content
+        // note: need pay-as-you-go account - caching not available on free tier
+        const cache = await this.googleCacheManager.get(
+          params.additionalProperties?.["cacheName"]?.toString()
+        )
+
+        generativeModel = this.getGenerativeModelFromCachedContent(cache)
+      } else {
+        // regular, non-cached model
+        generativeModel = this.getGenerativeModel({ model: params?.model })
+      }
+
+      if (params?.stream) {
+        this.log("debug", "Starting streaming completion response")
+
+        const result = await generativeModel.generateContentStream(googleParams)
+
+        if (!result?.stream) {
+          throw new Error("generateContentStream failed")
+        }
+
+        return this.streamChatCompletion(result.stream)
+      } else {
+        const result = await generativeModel.generateContent(googleParams)
+
+        if (!result?.response) {
+          throw new Error("generateContent failed")
+        }
+
+        const transformedResult = await this.transformResponse(result.response, {
+          stream: false
+        })
+        transformedResult.model = params.model
+
+        return transformedResult as ExtendedCompletionGoogle
+      }
+    } catch (error) {
+      console.error("Error in Google API request:", error)
+      throw error
+    }
+  }
+
+  /**
+   * Add content to the Google AI cache manager
+   * @param params - the same params as used in chat.completions.create plus ttlSeconds
+   * @returns the cache manager create response (which includes the cache name to use later)
+   */
+  public async createCacheManager(params: GooggleCacheCreateParams) {
+    const googleParams = this.transformParams(params)
+    return await this.googleCacheManager.create({
+      ttlSeconds: params.ttlSeconds,
+      model: params.model,
+      ...googleParams
+    })
+  }
+
+  public chat = {
+    completions: {
+      create: this.create.bind(this)
+    }
+  }
+
+  /** Interface for Google AI Cache Manager */
+  public cacheManager = {
+    create: this.createCacheManager.bind(this),
+    get: async (cacheName: string) => {
+      return await this.googleCacheManager.get(cacheName)
+    },
+    list: async () => {
+      return await this.googleCacheManager.list()
+    },
+    update: async (cacheName: string, params: GooggleCacheCreateParams) => {
+      const googleParams = this.transformParams(params)
+      return await this.googleCacheManager.update(
+        cacheName,
+        googleParams as CachedContentUpdateParams
+      )
+    },
+    delete: async (cacheName: string) => {
+      return await this.googleCacheManager.delete(cacheName)
+    }
+  }
+}
diff --git a/public-packages/llm-client/src/types/index.ts b/public-packages/llm-client/src/types/index.ts
index b126368..faf3481 100644
--- a/public-packages/llm-client/src/types/index.ts
+++ b/public-packages/llm-client/src/types/index.ts
@@ -1,9 +1,16 @@
 import Anthropic from "@anthropic-ai/sdk"
+import {
+  CachedContent,
+  EnhancedGenerateContentResponse,
+  GoogleGenerativeAI
+} from "@google/generative-ai"
 import OpenAI from "openai"
 
-export type Providers = "openai" | "anthropic"
+export type Providers = "openai" | "anthropic" | "google"
+export type LogLevel = "debug" | "info" | "warn" | "error"
+export type Role = "system" | "user" | "assistant" | "tool"
 
-type SupportedChatCompletionMessageParam = Omit<
+export type SupportedChatCompletionMessageParam = Omit<
   OpenAI.ChatCompletionCreateParams["messages"][number],
   "content"
 > & {
@@ -49,19 +56,80 @@ export type AnthropicChatCompletionParams =
   | AnthropicChatCompletionParamsStream
   | AnthropicChatCompletionParamsNonStream
 
-export type OpenAILikeClient<P> = P extends "openai"
+/** Google types */
+export type GoogleChatCompletionParamsStream = Omit<
+  Partial<OpenAI.ChatCompletionCreateParams>,
+  "messages"
+> & {
+  messages: SupportedChatCompletionMessageParam[]
+  stream: true
+  max_tokens: number
+  additionalProperties?: {
+    cacheName?: string
+  }
+}
+
+export type GoogleChatCompletionParamsNonStream = Omit<
+  Partial<OpenAI.ChatCompletionCreateParams>,
+  "messages"
+> & {
+  messages: SupportedChatCompletionMessageParam[]
+  stream?: false | undefined
+  max_tokens: number
+  additionalProperties?: {
+    cacheName?: string
+  }
+}
+
+export type GoogleChatCompletionParams =
+  | GoogleChatCompletionParamsStream
+  | GoogleChatCompletionParamsNonStream
+
+export type ExtendedCompletionGoogle = Partial<OpenAI.ChatCompletion> & {
+  originResponse: EnhancedGenerateContentResponse
+}
+
+export type ExtendedCompletionChunkGoogle = Partial<OpenAI.ChatCompletionChunk> & {
+  originResponse: EnhancedGenerateContentResponse
+}
+
+export type GooggleCacheCreateParams = GoogleChatCompletionParams & {
+  ttlSeconds: number
+}
+
+/** General type for providers */
+export type OpenAILikeClient<P> = P extends "openai" | "azure"
   ? OpenAI
-  : P extends "anthropic"
-    ? Anthropic & {
-        [key: string]: unknown
+  : P extends "google"
+    ? GoogleGenerativeAI & {
         chat: {
           completions: {
-            create: <P extends AnthropicChatCompletionParams>(
+            create: <P extends GoogleChatCompletionParams>(
               params: P
             ) => P extends { stream: true }
-              ? Promise<AsyncIterable<ExtendedCompletionChunkAnthropic>>
-              : Promise<ExtendedCompletionAnthropic>
+              ? Promise<AsyncIterable<ExtendedCompletionChunkGoogle>>
+              : Promise<ExtendedCompletionGoogle>
           }
         }
+        cacheManager: {
+          create: (params: GooggleCacheCreateParams) => Promise<CachedContent>
+          get: (cacheName: string) => Promise<CachedContent>
+          list: () => Promise<{ cachedContents: CachedContent[] }>
+          delete: (cacheName: string) => Promise<void>
+          update: (cacheName: string, params: GooggleCacheCreateParams) => Promise<CachedContent>
+        }
+      }
-    : never
+    : P extends "anthropic"
+      ? Anthropic & {
+          [key: string]: unknown
+          chat: {
+            completions: {
+              create: <P extends AnthropicChatCompletionParams>(
+                params: P
+              ) => P extends { stream: true }
+                ? Promise<AsyncIterable<ExtendedCompletionChunkAnthropic>>
+                : Promise<ExtendedCompletionAnthropic>
+            }
+          }
+        }
+      : never
diff --git a/public-packages/llm-client/src/utils/polyfills.ts b/public-packages/llm-client/src/utils/polyfills.ts
new file mode 100644
index 0000000..f02febf
--- /dev/null
+++ b/public-packages/llm-client/src/utils/polyfills.ts
@@ -0,0 +1,124 @@
+/**
+ * TextEncoderStream polyfill based on Node.js' implementation https://github.com/nodejs/node/blob/3f3226c8e363a5f06c1e6a37abd59b6b8c1923f1/lib/internal/webstreams/encoding.js#L38-L119 (MIT License)
+ */
+export class TextEncoderStream {
+  #pendingHighSurrogate: string | null = null
+
+  #handle = new TextEncoder()
+
+  #transform = new TransformStream({
+    transform: (chunk, controller) => {
+      // https://encoding.spec.whatwg.org/#encode-and-enqueue-a-chunk
+      chunk = String(chunk)
+
+      let finalChunk = ""
+      for (let i = 0; i < chunk.length; i++) {
+        const item = chunk[i]
+        const codeUnit = item.charCodeAt(0)
+        if (this.#pendingHighSurrogate !== null) {
+          const highSurrogate = this.#pendingHighSurrogate
+
+          this.#pendingHighSurrogate = null
+          if (0xdc00 <= codeUnit && codeUnit <= 0xdfff) {
+            finalChunk += highSurrogate + item
+            continue
+          }
+
+          finalChunk += "\uFFFD"
+        }
+
+        if (0xd800 <= codeUnit && codeUnit <= 0xdbff) {
+          this.#pendingHighSurrogate = item
+          continue
+        }
+
+        if (0xdc00 <= codeUnit && codeUnit <= 0xdfff) {
+          finalChunk += "\uFFFD"
+          continue
+        }
+
+        finalChunk += item
+      }
+
+      if (finalChunk) {
+        controller.enqueue(this.#handle.encode(finalChunk))
+      }
+    },
+
+    flush: controller => {
+      // https://encoding.spec.whatwg.org/#encode-and-flush
+      if (this.#pendingHighSurrogate !== null) {
+        controller.enqueue(new Uint8Array([0xef, 0xbf, 0xbd]))
+      }
+    }
+  })
+
+  get encoding() {
+    return this.#handle.encoding
+  }
+
+  get readable() {
+    return this.#transform.readable
+  }
+
+  get writable() {
+    return this.#transform.writable
+  }
+
+  get [Symbol.toStringTag]() {
+    return "TextEncoderStream"
+  }
+}
+
+/**
+ * TextDecoderStream polyfill based on Node.js' implementation https://github.com/nodejs/node/blob/3f3226c8e363a5f06c1e6a37abd59b6b8c1923f1/lib/internal/webstreams/encoding.js#L121-L200 (MIT License)
+ */
+export class TextDecoderStream {
+  #handle: TextDecoder
+
+  #transform = new TransformStream({
+    transform: (chunk, controller) => {
+      const value = this.#handle.decode(chunk, { stream: true })
+
+      if (value) {
+        controller.enqueue(value)
+      }
+    },
+    flush: controller => {
+      const value = this.#handle.decode()
+      if (value) {
+        controller.enqueue(value)
+      }
+
+      controller.terminate()
+    }
+  })
+
+  constructor(encoding = "utf-8", options: TextDecoderOptions = {}) {
+    this.#handle = new TextDecoder(encoding, options)
+  }
+
+  get encoding() {
+    return this.#handle.encoding
+  }
+
+  get fatal() {
+    return this.#handle.fatal
+  }
+
+  get ignoreBOM() {
+    return this.#handle.ignoreBOM
+  }
+
+  get readable() {
+    return this.#transform.readable
+  }
+
+  get writable() {
+    return this.#transform.writable
+  }
+
+  get [Symbol.toStringTag]() {
+    return "TextDecoderStream"
+  }
+}
diff --git a/public-packages/llm-client/tests/google.test.ts b/public-packages/llm-client/tests/google.test.ts
new file mode 100644
index 0000000..c45c3fb
--- /dev/null
+++ b/public-packages/llm-client/tests/google.test.ts
@@ -0,0 +1,167 @@
+import { createLLMClient } from "@/index"
+import { describe, expect, test } from "bun:test"
+
+const googleClient = createLLMClient({
+  provider: "google",
+  logLevel: "error"
+})
+
+describe(`LLMClient Gemini Provider`, () => {
+  test("Simple Chat", async () => {
Chat", async () => { + const completion = await googleClient.chat.completions.create({ + model: "gemini-1.5-flash-latest", + messages: [ + { + role: "user", + content: "What is the capital of Montana?" + } + ], + max_tokens: 1000 + }) + + console.log(completion?.choices?.[0].message.content) + expect(completion?.choices?.[0].message.content).toMatch(/Helena/i) + }) + + test("Chat completion with context", async () => { + const completion = await googleClient.chat.completions.create({ + model: "gemini-1.5-flash-latest", + messages: [ + { role: "system", content: "You are a helpful assistant." }, + { role: "user", content: "Who won the world series in 2020?" }, + { role: "assistant", content: "The Los Angeles Dodgers won the World Series in 2020." }, + { role: "user", content: "Where was it played?" } + ], + max_tokens: 1000 + }) + + console.log(completion?.choices?.[0].message.content) + expect(completion?.choices?.[0].message.content).toMatch(/Arlington/i) + }) + + test("Streaming Chat", async () => { + const completion = await googleClient.chat.completions.create({ + model: "gemini-1.5-flash-latest", + messages: [ + { + role: "user", + //content: "Write a soliloquy about the humidity." + content: "Write an essay about the chemical composition of dirt." + } + ], + max_tokens: 1000, + stream: true + }) + + let final = "" + for await (const message of completion) { + console.log("choice: ", message.choices?.[0]) + final += message.choices?.[0].delta?.content ?? "" + } + console.log(final) + }) + + test("Function calling", async () => { + const completion = await googleClient.chat.completions.create({ + model: "gemini-1.5-flash-latest", + max_tokens: 1000, + messages: [ + { + role: "user", + content: "My name is Spartacus." + } + ], + tool_choice: { + type: "function", + function: { + name: "say_hello" + } + }, + tools: [ + { + type: "function", + function: { + name: "say_hello", + description: "Say hello", + parameters: { + type: "object", + properties: { + name: { + type: "string" + } + }, + required: ["name"] + //additionalProperties: false + } + } + } + ] + }) + + const responseFunction = completion?.choices?.[0]?.message.tool_calls?.[0].function + console.log({ responseFunction }) + expect(responseFunction?.name).toMatch(/say_hello/i) + expect(responseFunction?.arguments).toMatch(/Spartacus/i) + }) + + test("Function calling - streaming", async () => { + const completion = await googleClient.chat.completions.create({ + model: "gemini-1.5-flash-latest", + max_tokens: 1000, + stream: true, + messages: [ + { + role: "user", + content: "My name is Spartacus." + } + ], + tool_choice: { + type: "function", + function: { + name: "say_hello" + } + }, + tools: [ + { + type: "function", + function: { + name: "say_hello", + description: "Say hello", + parameters: { + type: "object", + properties: { + name: { + type: "string" + } + }, + required: ["name"] + //additionalProperties: false + } + } + } + ] + }) + + for await (const message of completion) { + console.log("choice: ", message.choices?.[0]) + } + }) + + test("Chat with system messages", async () => { + const completion = await googleClient.chat.completions.create({ + model: "gemini-1.5-flash-latest", + messages: [ + { role: "system", content: "You only speak French." }, + { role: "system", content: "You only speak in rhymes." }, + { + role: "user", + content: "What is the capital of Montana?" 
+ } + ], + max_tokens: 1000 + }) + + console.log(completion?.choices?.[0].message.content) + expect(completion?.choices?.[0].message.content).toMatch(/capitale/i) + }) +})
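
Reviewer note: a minimal usage sketch (not part of the diff) of the cacheManager flow this PR adds, since the test suite above does not exercise caching. It assumes a pay-as-you-go GOOGLE_API_KEY (caching is unavailable on the free tier) and the models/gemini-1.5-pro-001 model; veryLongContext is a hypothetical placeholder for input large enough to meet the cache minimum (roughly 32k tokens at the time of writing). Names mirror examples/google.function.ts.

import { createLLMClient } from "@/index"

const client = createLLMClient({ provider: "google" })

// Hypothetical stand-in: must be large enough to satisfy the cache's minimum input size
const veryLongContext = "..."

// 1) Cache the large prompt once; ttlSeconds controls how long Google retains it
const cache = await client.cacheManager.create({
  model: "models/gemini-1.5-pro-001",
  ttlSeconds: 600,
  messages: [{ role: "user", content: veryLongContext }],
  max_tokens: 1000
})

// 2) Reference the cached content by name on later completions via additionalProperties
const completion = await client.chat.completions.create({
  model: "models/gemini-1.5-pro-001",
  messages: [{ role: "user", content: "Answer using the cached context." }],
  max_tokens: 1000,
  additionalProperties: { cacheName: cache?.name ?? "" }
})

// 3) Delete the cache entry once it is no longer needed
if (cache?.name) await client.cacheManager.delete(cache.name)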