From 57f00c2bafdc975f36701ce4581201046224ec8c Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Tue, 4 Mar 2025 18:52:37 +0000 Subject: [PATCH 01/73] chore(types): improved go to definition on fetchOptions --- src/internal/types.ts | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/src/internal/types.ts b/src/internal/types.ts index 50c16e9d2..c3bce5a21 100644 --- a/src/internal/types.ts +++ b/src/internal/types.ts @@ -6,14 +6,6 @@ export type HTTPMethod = 'get' | 'post' | 'put' | 'patch' | 'delete'; export type KeysEnum = { [P in keyof Required]: true }; type NotAny = [unknown] extends [T] ? never : T; -type Literal = PropertyKey extends T ? never : T; -type MappedLiteralKeys = T extends any ? Literal : never; -type MappedIndex = - T extends any ? - K extends keyof T ? - T[K] - : never - : never; /** * Some environments overload the global fetch function, and Parameters only gets the last signature. @@ -93,6 +85,6 @@ type RequestInits = * This type contains `RequestInit` options that may be available on the current runtime, * including per-platform extensions like `dispatcher`, `agent`, `client`, etc. */ -export type MergedRequestInit = { - [K in MappedLiteralKeys]?: MappedIndex | undefined; -}; +export type MergedRequestInit = RequestInits & + /** We don't include these in the types as they'll be overridden for every request. */ + Partial>; From 8f51562fbac6343a41d5f04e93b4e26c173ed602 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Tue, 4 Mar 2025 23:52:32 +0000 Subject: [PATCH 02/73] chore(docs): improve docs for withResponse/asResponse --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 08226e928..76aa800f2 100644 --- a/README.md +++ b/README.md @@ -505,8 +505,10 @@ while (page.hasNextPage()) { ### Accessing raw Response data (e.g., headers) The "raw" `Response` returned by `fetch()` can be accessed through the `.asResponse()` method on the `APIPromise` type that all methods return. +This method returns as soon as the headers for a successful response are received and does not consume the response body, so you are free to write custom parsing or streaming logic. You can also use the `.withResponse()` method to get the raw `Response` along with the parsed data. +Unlike `.asResponse()` this method consumes the body, returning once it is parsed. 
```ts From 979725ff3df5ac745754dcf6d595972f5c080379 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Wed, 5 Mar 2025 21:21:53 +0000 Subject: [PATCH 03/73] fix(api): add missing file rank enum + more metadata --- .stats.yml | 2 +- src/resources/beta/threads/runs/steps.ts | 5 ++-- src/resources/fine-tuning/jobs/jobs.ts | 29 ++++++++++++++++++- .../fine-tuning/jobs/jobs.test.ts | 6 +++- 4 files changed, 37 insertions(+), 5 deletions(-) diff --git a/.stats.yml b/.stats.yml index 163146e38..0d7e83be4 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 74 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-5d30684c3118d049682ea30cdb4dbef39b97d51667da484689193dc40162af32.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b524aed1c2c5c928aa4e2c546f5dbb364e7b4d5027daf05e42e210b05a97c3c6.yml diff --git a/src/resources/beta/threads/runs/steps.ts b/src/resources/beta/threads/runs/steps.ts index adaa4c9a1..918cdde37 100644 --- a/src/resources/beta/threads/runs/steps.ts +++ b/src/resources/beta/threads/runs/steps.ts @@ -242,9 +242,10 @@ export namespace FileSearchToolCall { */ export interface RankingOptions { /** - * The ranker used for the file search. + * The ranker to use for the file search. If not specified will use the `auto` + * ranker. */ - ranker: 'default_2024_08_21'; + ranker: 'auto' | 'default_2024_08_21'; /** * The score threshold for the file search. All values must be a floating point diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index 51cac957c..8a70f4e5a 100644 --- a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -1,6 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import { APIResource } from '../../../resource'; +import * as Shared from '../../shared'; import * as CheckpointsAPI from './checkpoints'; import { CheckpointListParams, @@ -177,6 +178,16 @@ export interface FineTuningJob { */ integrations?: Array | null; + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + /** * The method used for fine-tuning. */ @@ -458,6 +469,16 @@ export interface JobCreateParams { */ integrations?: Array | null; + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + /** * The method used for fine-tuning. */ @@ -672,7 +693,13 @@ export namespace JobCreateParams { } } -export interface JobListParams extends CursorPageParams {} +export interface JobListParams extends CursorPageParams { + /** + * Optional metadata filter. To filter, use the syntax `metadata[k]=v`. + * Alternatively, set `metadata=null` to indicate no metadata. 
+ */ + metadata?: Record | null; +} export interface JobListEventsParams extends CursorPageParams {} diff --git a/tests/api-resources/fine-tuning/jobs/jobs.test.ts b/tests/api-resources/fine-tuning/jobs/jobs.test.ts index b194ac234..0875598cf 100644 --- a/tests/api-resources/fine-tuning/jobs/jobs.test.ts +++ b/tests/api-resources/fine-tuning/jobs/jobs.test.ts @@ -33,6 +33,7 @@ describe('resource jobs', () => { wandb: { project: 'my-wandb-project', entity: 'entity', name: 'name', tags: ['custom-tag'] }, }, ], + metadata: { foo: 'string' }, method: { dpo: { hyperparameters: { @@ -78,7 +79,10 @@ describe('resource jobs', () => { test('list: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.fineTuning.jobs.list({ after: 'after', limit: 0 }, { path: '/_stainless_unknown_path' }), + client.fineTuning.jobs.list( + { after: 'after', limit: 0, metadata: { foo: 'string' } }, + { path: '/_stainless_unknown_path' }, + ), ).rejects.toThrow(OpenAI.NotFoundError); }); From dee1e4af4e20246f47ba93e61569e3a576c63049 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Thu, 6 Mar 2025 14:49:45 +0000 Subject: [PATCH 04/73] chore: move ChatModel type to shared --- api.md | 3 +- src/client.ts | 4 +- src/resources/beta/assistants.ts | 3 +- src/resources/beta/threads/runs/runs.ts | 3 +- src/resources/beta/threads/threads.ts | 3 +- src/resources/chat/chat.ts | 44 +------------------ src/resources/chat/completions/completions.ts | 3 +- src/resources/chat/index.ts | 2 +- src/resources/fine-tuning/fine-tuning.ts | 2 +- src/resources/fine-tuning/index.ts | 2 +- src/resources/fine-tuning/jobs/index.ts | 2 +- src/resources/fine-tuning/jobs/jobs.ts | 6 +-- src/resources/shared.ts | 43 ++++++++++++++++++ 13 files changed, 60 insertions(+), 60 deletions(-) diff --git a/api.md b/api.md index 978bff992..a7f030b19 100644 --- a/api.md +++ b/api.md @@ -2,6 +2,7 @@ Types: +- ChatModel - ErrorObject - FunctionDefinition - FunctionParameters @@ -200,9 +201,9 @@ Types: - FineTuningJob - FineTuningJobEvent -- FineTuningJobIntegration - FineTuningJobWandbIntegration - FineTuningJobWandbIntegrationObject +- FineTuningJobIntegration Methods: diff --git a/src/client.ts b/src/client.ts index 9154dc496..165a97484 100644 --- a/src/client.ts +++ b/src/client.ts @@ -82,7 +82,7 @@ import { formatRequestDetails, loggerFor } from './internal/utils/log'; import { isEmptyObj } from './internal/utils/values'; import { Audio, AudioModel, AudioResponseFormat } from './resources/audio/audio'; import { Beta } from './resources/beta/beta'; -import { Chat, ChatModel } from './resources/chat/chat'; +import { Chat } from './resources/chat/chat'; import { FineTuning } from './resources/fine-tuning/fine-tuning'; import { Upload, @@ -910,7 +910,6 @@ export declare namespace OpenAI { export { Chat as Chat, - type ChatModel as ChatModel, type ChatCompletion as ChatCompletion, type ChatCompletionAssistantMessageParam as ChatCompletionAssistantMessageParam, type ChatCompletionAudio as ChatCompletionAudio, @@ -1019,6 +1018,7 @@ export declare namespace OpenAI { type UploadCompleteParams as UploadCompleteParams, }; + export type ChatModel = API.ChatModel; export type ErrorObject = API.ErrorObject; export type FunctionDefinition = API.FunctionDefinition; export type FunctionParameters = API.FunctionParameters; diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 
0e109deed..0a32be46d 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -2,7 +2,6 @@ import { APIResource } from '../../resource'; import * as Shared from '../shared'; -import * as ChatAPI from '../chat/chat'; import * as MessagesAPI from './threads/messages'; import * as ThreadsAPI from './threads/threads'; import * as VectorStoresAPI from './vector-stores/vector-stores'; @@ -1095,7 +1094,7 @@ export interface AssistantCreateParams { * [Model overview](https://platform.openai.com/docs/models) for descriptions of * them. */ - model: (string & {}) | ChatAPI.ChatModel; + model: (string & {}) | Shared.ChatModel; /** * The description of the assistant. The maximum length is 512 characters. diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 1aafffc92..4a75b5466 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -4,7 +4,6 @@ import { APIResource } from '../../../../resource'; import * as RunsAPI from './runs'; import * as Shared from '../../../shared'; import * as AssistantsAPI from '../../assistants'; -import * as ChatAPI from '../../../chat/chat'; import * as MessagesAPI from '../messages'; import * as ThreadsAPI from '../threads'; import * as StepsAPI from './steps'; @@ -699,7 +698,7 @@ export interface RunCreateParamsBase { * associated with the assistant. If not, the model associated with the assistant * will be used. */ - model?: (string & {}) | ChatAPI.ChatModel | null; + model?: (string & {}) | Shared.ChatModel | null; /** * Body param: Whether to enable diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index be1014f68..036ed680e 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -4,7 +4,6 @@ import { APIResource } from '../../../resource'; import * as ThreadsAPI from './threads'; import * as Shared from '../../shared'; import * as AssistantsAPI from '../assistants'; -import * as ChatAPI from '../../chat/chat'; import * as MessagesAPI from './messages'; import { Annotation, @@ -564,7 +563,7 @@ export interface ThreadCreateAndRunParamsBase { * model associated with the assistant. If not, the model associated with the * assistant will be used. */ - model?: (string & {}) | ChatAPI.ChatModel | null; + model?: (string & {}) | Shared.ChatModel | null; /** * Whether to enable diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index 40358c331..51a3606fd 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -1,6 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
import { APIResource } from '../../resource'; +import * as Shared from '../shared'; import * as CompletionsAPI from './completions/completions'; import { ChatCompletion, @@ -46,48 +47,7 @@ export class Chat extends APIResource { completions: CompletionsAPI.Completions = new CompletionsAPI.Completions(this._client); } -export type ChatModel = - | 'o3-mini' - | 'o3-mini-2025-01-31' - | 'o1' - | 'o1-2024-12-17' - | 'o1-preview' - | 'o1-preview-2024-09-12' - | 'o1-mini' - | 'o1-mini-2024-09-12' - | 'gpt-4.5-preview' - | 'gpt-4.5-preview-2025-02-27' - | 'gpt-4o' - | 'gpt-4o-2024-11-20' - | 'gpt-4o-2024-08-06' - | 'gpt-4o-2024-05-13' - | 'gpt-4o-audio-preview' - | 'gpt-4o-audio-preview-2024-10-01' - | 'gpt-4o-audio-preview-2024-12-17' - | 'gpt-4o-mini-audio-preview' - | 'gpt-4o-mini-audio-preview-2024-12-17' - | 'chatgpt-4o-latest' - | 'gpt-4o-mini' - | 'gpt-4o-mini-2024-07-18' - | 'gpt-4-turbo' - | 'gpt-4-turbo-2024-04-09' - | 'gpt-4-0125-preview' - | 'gpt-4-turbo-preview' - | 'gpt-4-1106-preview' - | 'gpt-4-vision-preview' - | 'gpt-4' - | 'gpt-4-0314' - | 'gpt-4-0613' - | 'gpt-4-32k' - | 'gpt-4-32k-0314' - | 'gpt-4-32k-0613' - | 'gpt-3.5-turbo' - | 'gpt-3.5-turbo-16k' - | 'gpt-3.5-turbo-0301' - | 'gpt-3.5-turbo-0613' - | 'gpt-3.5-turbo-1106' - | 'gpt-3.5-turbo-0125' - | 'gpt-3.5-turbo-16k-0613'; +export type ChatModel = Shared.ChatModel; Chat.Completions = Completions; diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts index 29c9ad390..5a9e497a1 100644 --- a/src/resources/chat/completions/completions.ts +++ b/src/resources/chat/completions/completions.ts @@ -4,7 +4,6 @@ import { APIResource } from '../../../resource'; import * as CompletionsCompletionsAPI from './completions'; import * as CompletionsAPI from '../../completions'; import * as Shared from '../../shared'; -import * as ChatAPI from '../chat'; import * as MessagesAPI from './messages'; import { MessageListParams, Messages } from './messages'; import { APIPromise } from '../../../api-promise'; @@ -1002,7 +1001,7 @@ export interface ChatCompletionCreateParamsBase { * [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) * table for details on which models work with the Chat API. */ - model: (string & {}) | ChatAPI.ChatModel; + model: (string & {}) | Shared.ChatModel; /** * Parameters for audio output. Required when audio output is requested with diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts index f098e5ce7..8eb8cbed6 100644 --- a/src/resources/chat/index.ts +++ b/src/resources/chat/index.ts @@ -1,6 +1,6 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-export { Chat, type ChatModel } from './chat'; +export { Chat } from './chat'; export { Completions, type ChatCompletion, diff --git a/src/resources/fine-tuning/fine-tuning.ts b/src/resources/fine-tuning/fine-tuning.ts index 9d027b72d..593a4a89e 100644 --- a/src/resources/fine-tuning/fine-tuning.ts +++ b/src/resources/fine-tuning/fine-tuning.ts @@ -27,9 +27,9 @@ export declare namespace FineTuning { Jobs as Jobs, type FineTuningJob as FineTuningJob, type FineTuningJobEvent as FineTuningJobEvent, - type FineTuningJobIntegration as FineTuningJobIntegration, type FineTuningJobWandbIntegration as FineTuningJobWandbIntegration, type FineTuningJobWandbIntegrationObject as FineTuningJobWandbIntegrationObject, + type FineTuningJobIntegration as FineTuningJobIntegration, type FineTuningJobsPage as FineTuningJobsPage, type FineTuningJobEventsPage as FineTuningJobEventsPage, type JobCreateParams as JobCreateParams, diff --git a/src/resources/fine-tuning/index.ts b/src/resources/fine-tuning/index.ts index 898f2fc89..52ef721b8 100644 --- a/src/resources/fine-tuning/index.ts +++ b/src/resources/fine-tuning/index.ts @@ -5,9 +5,9 @@ export { Jobs, type FineTuningJob, type FineTuningJobEvent, - type FineTuningJobIntegration, type FineTuningJobWandbIntegration, type FineTuningJobWandbIntegrationObject, + type FineTuningJobIntegration, type JobCreateParams, type JobListParams, type JobListEventsParams, diff --git a/src/resources/fine-tuning/jobs/index.ts b/src/resources/fine-tuning/jobs/index.ts index 4e397aea7..18a2b1a93 100644 --- a/src/resources/fine-tuning/jobs/index.ts +++ b/src/resources/fine-tuning/jobs/index.ts @@ -10,9 +10,9 @@ export { Jobs, type FineTuningJob, type FineTuningJobEvent, - type FineTuningJobIntegration, type FineTuningJobWandbIntegration, type FineTuningJobWandbIntegrationObject, + type FineTuningJobIntegration, type JobCreateParams, type JobListParams, type JobListEventsParams, diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index 8a70f4e5a..4531ec138 100644 --- a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -380,8 +380,6 @@ export interface FineTuningJobEvent { type?: 'message' | 'metrics'; } -export type FineTuningJobIntegration = FineTuningJobWandbIntegrationObject; - /** * The settings for your integration with Weights and Biases. This payload * specifies the project that metrics will be sent to. Optionally, you can set an @@ -430,6 +428,8 @@ export interface FineTuningJobWandbIntegrationObject { wandb: FineTuningJobWandbIntegration; } +export type FineTuningJobIntegration = FineTuningJobWandbIntegrationObject; + export interface JobCreateParams { /** * The name of the model to fine-tune. You can select one of the @@ -709,9 +709,9 @@ export declare namespace Jobs { export { type FineTuningJob as FineTuningJob, type FineTuningJobEvent as FineTuningJobEvent, - type FineTuningJobIntegration as FineTuningJobIntegration, type FineTuningJobWandbIntegration as FineTuningJobWandbIntegration, type FineTuningJobWandbIntegrationObject as FineTuningJobWandbIntegrationObject, + type FineTuningJobIntegration as FineTuningJobIntegration, type FineTuningJobsPage as FineTuningJobsPage, type FineTuningJobEventsPage as FineTuningJobEventsPage, type JobCreateParams as JobCreateParams, diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 3bb11582f..18e2ecddc 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -1,5 +1,48 @@ // File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. +export type ChatModel = + | 'o3-mini' + | 'o3-mini-2025-01-31' + | 'o1' + | 'o1-2024-12-17' + | 'o1-preview' + | 'o1-preview-2024-09-12' + | 'o1-mini' + | 'o1-mini-2024-09-12' + | 'gpt-4.5-preview' + | 'gpt-4.5-preview-2025-02-27' + | 'gpt-4o' + | 'gpt-4o-2024-11-20' + | 'gpt-4o-2024-08-06' + | 'gpt-4o-2024-05-13' + | 'gpt-4o-audio-preview' + | 'gpt-4o-audio-preview-2024-10-01' + | 'gpt-4o-audio-preview-2024-12-17' + | 'gpt-4o-mini-audio-preview' + | 'gpt-4o-mini-audio-preview-2024-12-17' + | 'chatgpt-4o-latest' + | 'gpt-4o-mini' + | 'gpt-4o-mini-2024-07-18' + | 'gpt-4-turbo' + | 'gpt-4-turbo-2024-04-09' + | 'gpt-4-0125-preview' + | 'gpt-4-turbo-preview' + | 'gpt-4-1106-preview' + | 'gpt-4-vision-preview' + | 'gpt-4' + | 'gpt-4-0314' + | 'gpt-4-0613' + | 'gpt-4-32k' + | 'gpt-4-32k-0314' + | 'gpt-4-32k-0613' + | 'gpt-3.5-turbo' + | 'gpt-3.5-turbo-16k' + | 'gpt-3.5-turbo-0301' + | 'gpt-3.5-turbo-0613' + | 'gpt-3.5-turbo-1106' + | 'gpt-3.5-turbo-0125' + | 'gpt-3.5-turbo-16k-0613'; + export interface ErrorObject { code: string | null; From 453b58cf9211b9a2a7b344c19d2f2425dcedb299 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Sat, 8 Mar 2025 18:19:49 +0000 Subject: [PATCH 05/73] feat: add SKIP_BREW env var to ./scripts/bootstrap --- scripts/bootstrap | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/bootstrap b/scripts/bootstrap index 033156d3a..f107c3a24 100755 --- a/scripts/bootstrap +++ b/scripts/bootstrap @@ -4,7 +4,7 @@ set -e cd "$(dirname "$0")/.." -if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ]; then +if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ] && [ "$SKIP_BREW" != "1" ]; then brew bundle check >/dev/null 2>&1 || { echo "==> Installing Homebrew dependencies…" brew bundle From c17d57d5a4671e69de2acf4dd044199689319f6a Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Mon, 10 Mar 2025 20:04:45 +0000 Subject: [PATCH 06/73] feat(client): accept RFC6838 JSON content types --- src/internal/parse.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/internal/parse.ts b/src/internal/parse.ts index 173991a55..14147338d 100644 --- a/src/internal/parse.ts +++ b/src/internal/parse.ts @@ -44,8 +44,8 @@ export async function defaultParseResponse( } const contentType = response.headers.get('content-type'); - const isJSON = - contentType?.includes('application/json') || contentType?.includes('application/vnd.api+json'); + const mediaType = contentType?.split(';')[0]?.trim(); + const isJSON = mediaType?.includes('application/json') || mediaType?.endsWith('+json'); if (isJSON) { const json = await response.json(); return addRequestID(json as T, response); From 87d9e8272826368aa89a2e2455ce648b345cb896 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Tue, 11 Mar 2025 16:37:56 +0000 Subject: [PATCH 07/73] feat(api): add /v1/responses and built-in tools [platform.openai.com/docs/changelog](http://platform.openai.com/docs/changelog) --- .stats.yml | 4 +- MIGRATION.md | 28 +- api.md | 206 +- src/client.ts | 52 +- src/resources/beta/assistants.ts | 52 +- src/resources/beta/beta.ts | 36 - src/resources/beta/index.ts | 16 - src/resources/beta/threads/runs/runs.ts | 4 +- src/resources/beta/threads/threads.ts | 87 +- src/resources/chat/chat.ts | 2 +- src/resources/chat/completions/completions.ts | 287 +- src/resources/chat/completions/index.ts | 1 - src/resources/chat/completions/messages.ts | 2 +- src/resources/chat/index.ts | 1 - src/resources/files.ts | 26 +- src/resources/index.ts | 20 + 
src/resources/responses/index.ts | 9 + src/resources/responses/input-items.ts | 252 ++ src/resources/responses/responses.ts | 2688 +++++++++++++++++ src/resources/shared.ts | 113 +- src/resources/uploads/uploads.ts | 7 +- .../{beta => }/vector-stores/file-batches.ts | 24 +- .../{beta => }/vector-stores/files.ts | 181 +- .../{beta => }/vector-stores/index.ts | 7 + .../{beta => }/vector-stores/vector-stores.ts | 136 +- .../chat/completions/completions.test.ts | 11 +- .../responses/input-items.test.ts | 32 + .../api-resources/responses/responses.test.ts | 85 + .../vector-stores/file-batches.test.ts | 19 +- .../{beta => }/vector-stores/files.test.ts | 55 +- .../vector-stores/vector-stores.test.ts | 33 +- 31 files changed, 4111 insertions(+), 365 deletions(-) create mode 100644 src/resources/responses/index.ts create mode 100644 src/resources/responses/input-items.ts create mode 100644 src/resources/responses/responses.ts rename src/resources/{beta => }/vector-stores/file-batches.ts (92%) rename src/resources/{beta => }/vector-stores/files.ts (64%) rename src/resources/{beta => }/vector-stores/index.ts (82%) rename src/resources/{beta => }/vector-stores/vector-stores.ts (76%) create mode 100644 tests/api-resources/responses/input-items.test.ts create mode 100644 tests/api-resources/responses/responses.test.ts rename tests/api-resources/{beta => }/vector-stores/file-batches.test.ts (79%) rename tests/api-resources/{beta => }/vector-stores/files.test.ts (57%) rename tests/api-resources/{beta => }/vector-stores/vector-stores.test.ts (68%) diff --git a/.stats.yml b/.stats.yml index 0d7e83be4..455874212 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ -configured_endpoints: 74 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b524aed1c2c5c928aa4e2c546f5dbb364e7b4d5027daf05e42e210b05a97c3c6.yml +configured_endpoints: 81 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-be834d63e326a82494e819085137f5eb15866f3fc787db1f3afe7168d419e18a.yml diff --git a/MIGRATION.md b/MIGRATION.md index c9c07887c..b84a1d6f9 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -110,11 +110,13 @@ client.example.create('c_456', { parent_id: 'p_123' }); This affects the following methods: -- `client.beta.vectorStores.files.retrieve()` -- `client.beta.vectorStores.files.delete()` -- `client.beta.vectorStores.fileBatches.retrieve()` -- `client.beta.vectorStores.fileBatches.cancel()` -- `client.beta.vectorStores.fileBatches.listFiles()` +- `client.vectorStores.files.retrieve()` +- `client.vectorStores.files.update()` +- `client.vectorStores.files.delete()` +- `client.vectorStores.files.content()` +- `client.vectorStores.fileBatches.retrieve()` +- `client.vectorStores.fileBatches.cancel()` +- `client.vectorStores.fileBatches.listFiles()` - `client.beta.threads.runs.retrieve()` - `client.beta.threads.runs.update()` - `client.beta.threads.runs.cancel()` @@ -182,21 +184,23 @@ Some methods could not be named intuitively due to an internal naming conflict. 
client.chat.completions.del(); client.files.del(); client.models.del(); -client.beta.vectorStores.del(); -client.beta.vectorStores.files.del(); +client.vectorStores.del(); +client.vectorStores.files.del(); client.beta.assistants.del(); client.beta.threads.del(); client.beta.threads.messages.del(); +client.responses.del(); // After client.chat.completions.delete(); client.files.delete(); client.models.delete(); -client.beta.vectorStores.delete(); -client.beta.vectorStores.files.delete(); +client.vectorStores.delete(); +client.vectorStores.files.delete(); client.beta.assistants.delete(); client.beta.threads.delete(); client.beta.threads.messages.delete(); +client.responses.delete(); ``` ### Removed request options overloads @@ -220,13 +224,15 @@ This affects the following methods: - `client.fineTuning.jobs.list()` - `client.fineTuning.jobs.listEvents()` - `client.fineTuning.jobs.checkpoints.list()` -- `client.beta.vectorStores.list()` -- `client.beta.vectorStores.files.list()` +- `client.vectorStores.list()` +- `client.vectorStores.files.list()` - `client.beta.assistants.list()` - `client.beta.threads.create()` - `client.beta.threads.runs.list()` - `client.beta.threads.messages.list()` - `client.batches.list()` +- `client.responses.retrieve()` +- `client.responses.inputItems.list()` ### Pagination changes diff --git a/api.md b/api.md index a7f030b19..786bd3841 100644 --- a/api.md +++ b/api.md @@ -3,10 +3,14 @@ Types: - ChatModel +- ComparisonFilter +- CompoundFilter - ErrorObject - FunctionDefinition - FunctionParameters - Metadata +- Reasoning +- ReasoningEffort - ResponseFormatJSONObject - ResponseFormatJSONSchema - ResponseFormatText @@ -53,7 +57,6 @@ Types: - ChatCompletionModality - ChatCompletionNamedToolChoice - ChatCompletionPredictionContent -- ChatCompletionReasoningEffort - ChatCompletionRole - ChatCompletionStoreMessage - ChatCompletionStreamOptions @@ -63,6 +66,7 @@ Types: - ChatCompletionToolChoiceOption - ChatCompletionToolMessageParam - ChatCompletionUserMessageParam +- ChatCompletionReasoningEffort Methods: @@ -223,6 +227,60 @@ Methods: - client.fineTuning.jobs.checkpoints.list(fineTuningJobID, { ...params }) -> FineTuningJobCheckpointsPage +# VectorStores + +Types: + +- AutoFileChunkingStrategyParam +- FileChunkingStrategy +- FileChunkingStrategyParam +- OtherFileChunkingStrategyObject +- StaticFileChunkingStrategy +- StaticFileChunkingStrategyObject +- StaticFileChunkingStrategyObjectParam +- VectorStore +- VectorStoreDeleted +- VectorStoreSearchResponse + +Methods: + +- client.vectorStores.create({ ...params }) -> VectorStore +- client.vectorStores.retrieve(vectorStoreID) -> VectorStore +- client.vectorStores.update(vectorStoreID, { ...params }) -> VectorStore +- client.vectorStores.list({ ...params }) -> VectorStoresPage +- client.vectorStores.delete(vectorStoreID) -> VectorStoreDeleted +- client.vectorStores.search(vectorStoreID, { ...params }) -> VectorStoreSearchResponsesPage + +## Files + +Types: + +- VectorStoreFile +- VectorStoreFileDeleted +- FileContentResponse + +Methods: + +- client.vectorStores.files.create(vectorStoreID, { ...params }) -> VectorStoreFile +- client.vectorStores.files.retrieve(fileID, { ...params }) -> VectorStoreFile +- client.vectorStores.files.update(fileID, { ...params }) -> VectorStoreFile +- client.vectorStores.files.list(vectorStoreID, { ...params }) -> VectorStoreFilesPage +- client.vectorStores.files.delete(fileID, { ...params }) -> VectorStoreFileDeleted +- client.vectorStores.files.content(fileID, { ...params }) -> 
FileContentResponsesPage + +## FileBatches + +Types: + +- VectorStoreFileBatch + +Methods: + +- client.vectorStores.fileBatches.create(vectorStoreID, { ...params }) -> VectorStoreFileBatch +- client.vectorStores.fileBatches.retrieve(batchID, { ...params }) -> VectorStoreFileBatch +- client.vectorStores.fileBatches.cancel(batchID, { ...params }) -> VectorStoreFileBatch +- client.vectorStores.fileBatches.listFiles(batchID, { ...params }) -> VectorStoreFilesPage + # Beta ## Realtime @@ -286,62 +344,6 @@ Methods: - client.beta.realtime.sessions.create({ ...params }) -> SessionCreateResponse -## VectorStores - -Types: - -- AutoFileChunkingStrategyParam -- FileChunkingStrategy -- FileChunkingStrategyParam -- OtherFileChunkingStrategyObject -- StaticFileChunkingStrategy -- StaticFileChunkingStrategyObject -- StaticFileChunkingStrategyObjectParam -- VectorStore -- VectorStoreDeleted - -Methods: - -- client.beta.vectorStores.create({ ...params }) -> VectorStore -- client.beta.vectorStores.retrieve(vectorStoreID) -> VectorStore -- client.beta.vectorStores.update(vectorStoreID, { ...params }) -> VectorStore -- client.beta.vectorStores.list({ ...params }) -> VectorStoresPage -- client.beta.vectorStores.delete(vectorStoreID) -> VectorStoreDeleted - -### Files - -Types: - -- VectorStoreFile -- VectorStoreFileDeleted - -Methods: - -- client.beta.vectorStores.files.create(vectorStoreID, { ...params }) -> VectorStoreFile -- client.beta.vectorStores.files.retrieve(fileID, { ...params }) -> VectorStoreFile -- client.beta.vectorStores.files.list(vectorStoreID, { ...params }) -> VectorStoreFilesPage -- client.beta.vectorStores.files.delete(fileID, { ...params }) -> VectorStoreFileDeleted -- client.beta.vectorStores.files.createAndPoll(vectorStoreId, body, options?) -> Promise<VectorStoreFile> -- client.beta.vectorStores.files.poll(vectorStoreId, fileId, options?) -> Promise<VectorStoreFile> -- client.beta.vectorStores.files.upload(vectorStoreId, file, options?) -> Promise<VectorStoreFile> -- client.beta.vectorStores.files.uploadAndPoll(vectorStoreId, file, options?) -> Promise<VectorStoreFile> - -### FileBatches - -Types: - -- VectorStoreFileBatch - -Methods: - -- client.beta.vectorStores.fileBatches.create(vectorStoreID, { ...params }) -> VectorStoreFileBatch -- client.beta.vectorStores.fileBatches.retrieve(batchID, { ...params }) -> VectorStoreFileBatch -- client.beta.vectorStores.fileBatches.cancel(batchID, { ...params }) -> VectorStoreFileBatch -- client.beta.vectorStores.fileBatches.listFiles(batchID, { ...params }) -> VectorStoreFilesPage -- client.beta.vectorStores.fileBatches.createAndPoll(vectorStoreId, body, options?) -> Promise<VectorStoreFileBatch> -- client.beta.vectorStores.fileBatches.poll(vectorStoreId, batchId, options?) -> Promise<VectorStoreFileBatch> -- client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, { files, fileIds = [] }, options?) 
-> Promise<VectorStoreFileBatch> - ## Chat ### Completions @@ -524,3 +526,93 @@ Types: Methods: - client.uploads.parts.create(uploadID, { ...params }) -> UploadPart + +# Responses + +Types: + +- ComputerTool +- EasyInputMessage +- FileSearchTool +- FunctionTool +- Response +- ResponseAudioDeltaEvent +- ResponseAudioDoneEvent +- ResponseAudioTranscriptDeltaEvent +- ResponseAudioTranscriptDoneEvent +- ResponseCodeInterpreterCallCodeDeltaEvent +- ResponseCodeInterpreterCallCodeDoneEvent +- ResponseCodeInterpreterCallCompletedEvent +- ResponseCodeInterpreterCallInProgressEvent +- ResponseCodeInterpreterCallInterpretingEvent +- ResponseCodeInterpreterToolCall +- ResponseCompletedEvent +- ResponseComputerToolCall +- ResponseContent +- ResponseContentPartAddedEvent +- ResponseContentPartDoneEvent +- ResponseCreatedEvent +- ResponseError +- ResponseErrorEvent +- ResponseFailedEvent +- ResponseFileSearchCallCompletedEvent +- ResponseFileSearchCallInProgressEvent +- ResponseFileSearchCallSearchingEvent +- ResponseFileSearchToolCall +- ResponseFormatTextConfig +- ResponseFormatTextJSONSchemaConfig +- ResponseFunctionCallArgumentsDeltaEvent +- ResponseFunctionCallArgumentsDoneEvent +- ResponseFunctionToolCall +- ResponseFunctionWebSearch +- ResponseInProgressEvent +- ResponseIncludable +- ResponseIncompleteEvent +- ResponseInput +- ResponseInputAudio +- ResponseInputContent +- ResponseInputFile +- ResponseInputImage +- ResponseInputItem +- ResponseInputMessageContentList +- ResponseInputText +- ResponseOutputAudio +- ResponseOutputItem +- ResponseOutputItemAddedEvent +- ResponseOutputItemDoneEvent +- ResponseOutputMessage +- ResponseOutputRefusal +- ResponseOutputText +- ResponseRefusalDeltaEvent +- ResponseRefusalDoneEvent +- ResponseStatus +- ResponseStreamEvent +- ResponseTextAnnotationDeltaEvent +- ResponseTextConfig +- ResponseTextDeltaEvent +- ResponseTextDoneEvent +- ResponseUsage +- ResponseWebSearchCallCompletedEvent +- ResponseWebSearchCallInProgressEvent +- ResponseWebSearchCallSearchingEvent +- Tool +- ToolChoiceFunction +- ToolChoiceOptions +- ToolChoiceTypes +- WebSearchTool + +Methods: + +- client.responses.create({ ...params }) -> Response +- client.responses.retrieve(responseID, { ...params }) -> Response +- client.responses.delete(responseID) -> void + +## InputItems + +Types: + +- ResponseItemList + +Methods: + +- client.responses.inputItems.list(responseID, { ...params }) -> ResponseItemListDataPage diff --git a/src/client.ts b/src/client.ts index 165a97484..d7d77fc06 100644 --- a/src/client.ts +++ b/src/client.ts @@ -84,12 +84,32 @@ import { Audio, AudioModel, AudioResponseFormat } from './resources/audio/audio' import { Beta } from './resources/beta/beta'; import { Chat } from './resources/chat/chat'; import { FineTuning } from './resources/fine-tuning/fine-tuning'; +import { Responses } from './resources/responses/responses'; import { Upload, UploadCompleteParams, UploadCreateParams, Uploads as UploadsAPIUploads, } from './resources/uploads/uploads'; +import { + AutoFileChunkingStrategyParam, + FileChunkingStrategy, + FileChunkingStrategyParam, + OtherFileChunkingStrategyObject, + StaticFileChunkingStrategy, + StaticFileChunkingStrategyObject, + StaticFileChunkingStrategyObjectParam, + VectorStore, + VectorStoreCreateParams, + VectorStoreDeleted, + VectorStoreListParams, + VectorStoreSearchParams, + VectorStoreSearchResponse, + VectorStoreSearchResponsesPage, + VectorStoreUpdateParams, + VectorStores, + VectorStoresPage, +} from './resources/vector-stores/vector-stores'; import 
{ ChatCompletion, ChatCompletionAssistantMessageParam, @@ -115,7 +135,6 @@ import { ChatCompletionModality, ChatCompletionNamedToolChoice, ChatCompletionPredictionContent, - ChatCompletionReasoningEffort, ChatCompletionRole, ChatCompletionStoreMessage, ChatCompletionStreamOptions, @@ -873,9 +892,11 @@ export class OpenAI { moderations: API.Moderations = new API.Moderations(this); models: API.Models = new API.Models(this); fineTuning: API.FineTuning = new API.FineTuning(this); + vectorStores: API.VectorStores = new API.VectorStores(this); beta: API.Beta = new API.Beta(this); batches: API.Batches = new API.Batches(this); uploads: API.Uploads = new API.Uploads(this); + responses: API.Responses = new API.Responses(this); } OpenAI.Completions = Completions; OpenAI.Chat = Chat; @@ -886,9 +907,11 @@ OpenAI.Audio = Audio; OpenAI.Moderations = Moderations; OpenAI.Models = Models; OpenAI.FineTuning = FineTuning; +OpenAI.VectorStores = VectorStores; OpenAI.Beta = Beta; OpenAI.Batches = Batches; OpenAI.Uploads = UploadsAPIUploads; +OpenAI.Responses = Responses; export declare namespace OpenAI { export type RequestOptions = Opts.RequestOptions; @@ -930,7 +953,6 @@ export declare namespace OpenAI { type ChatCompletionModality as ChatCompletionModality, type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent as ChatCompletionPredictionContent, - type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, type ChatCompletionRole as ChatCompletionRole, type ChatCompletionStoreMessage as ChatCompletionStoreMessage, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, @@ -999,6 +1021,26 @@ export declare namespace OpenAI { export { FineTuning as FineTuning }; + export { + VectorStores as VectorStores, + type AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam, + type FileChunkingStrategy as FileChunkingStrategy, + type FileChunkingStrategyParam as FileChunkingStrategyParam, + type OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject, + type StaticFileChunkingStrategy as StaticFileChunkingStrategy, + type StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject, + type StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, + type VectorStore as VectorStore, + type VectorStoreDeleted as VectorStoreDeleted, + type VectorStoreSearchResponse as VectorStoreSearchResponse, + type VectorStoresPage as VectorStoresPage, + type VectorStoreSearchResponsesPage as VectorStoreSearchResponsesPage, + type VectorStoreCreateParams as VectorStoreCreateParams, + type VectorStoreUpdateParams as VectorStoreUpdateParams, + type VectorStoreListParams as VectorStoreListParams, + type VectorStoreSearchParams as VectorStoreSearchParams, + }; + export { Beta as Beta }; export { @@ -1018,11 +1060,17 @@ export declare namespace OpenAI { type UploadCompleteParams as UploadCompleteParams, }; + export { Responses as Responses }; + export type ChatModel = API.ChatModel; + export type ComparisonFilter = API.ComparisonFilter; + export type CompoundFilter = API.CompoundFilter; export type ErrorObject = API.ErrorObject; export type FunctionDefinition = API.FunctionDefinition; export type FunctionParameters = API.FunctionParameters; export type Metadata = API.Metadata; + export type Reasoning = API.Reasoning; + export type ReasoningEffort = API.ReasoningEffort; export type ResponseFormatJSONObject = API.ResponseFormatJSONObject; export type ResponseFormatJSONSchema = API.ResponseFormatJSONSchema; export type 
ResponseFormatText = API.ResponseFormatText; diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 0a32be46d..3753ee9c8 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -4,7 +4,6 @@ import { APIResource } from '../../resource'; import * as Shared from '../shared'; import * as MessagesAPI from './threads/messages'; import * as ThreadsAPI from './threads/threads'; -import * as VectorStoresAPI from './vector-stores/vector-stores'; import * as RunsAPI from './threads/runs/runs'; import * as StepsAPI from './threads/runs/steps'; import { APIPromise } from '../../api-promise'; @@ -1123,14 +1122,14 @@ export interface AssistantCreateParams { name?: string | null; /** - * **o1 and o3-mini models only** + * **o-series models only** * * Constrains effort on reasoning for * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can * result in faster responses and fewer tokens used on reasoning in a response. */ - reasoning_effort?: 'low' | 'medium' | 'high' | null; + reasoning_effort?: Shared.ReasoningEffort | null; /** * Specifies the format that the model must output. Compatible with @@ -1233,9 +1232,9 @@ export namespace AssistantCreateParams { export interface VectorStore { /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` - * strategy. Only applicable if `file_ids` is non-empty. + * strategy. */ - chunking_strategy?: VectorStoresAPI.FileChunkingStrategyParam; + chunking_strategy?: VectorStore.Auto | VectorStore.Static; /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to @@ -1254,6 +1253,45 @@ export namespace AssistantCreateParams { */ metadata?: Shared.Metadata | null; } + + export namespace VectorStore { + /** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. + */ + export interface Auto { + /** + * Always `auto`. + */ + type: 'auto'; + } + + export interface Static { + static: Static.Static; + + /** + * Always `static`. + */ + type: 'static'; + } + + export namespace Static { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. + */ + max_chunk_size_tokens: number; + } + } + } } } } @@ -1326,14 +1364,14 @@ export interface AssistantUpdateParams { name?: string | null; /** - * **o1 and o3-mini models only** + * **o-series models only** * * Constrains effort on reasoning for * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can * result in faster responses and fewer tokens used on reasoning in a response. */ - reasoning_effort?: 'low' | 'medium' | 'high' | null; + reasoning_effort?: Shared.ReasoningEffort | null; /** * Specifies the format that the model must output. 
Compatible with diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts index beab540c5..4218200d8 100644 --- a/src/resources/beta/beta.ts +++ b/src/resources/beta/beta.ts @@ -40,58 +40,22 @@ import { ThreadUpdateParams, Threads, } from './threads/threads'; -import * as VectorStoresAPI from './vector-stores/vector-stores'; -import { - AutoFileChunkingStrategyParam, - FileChunkingStrategy, - FileChunkingStrategyParam, - OtherFileChunkingStrategyObject, - StaticFileChunkingStrategy, - StaticFileChunkingStrategyObject, - StaticFileChunkingStrategyObjectParam, - VectorStore, - VectorStoreCreateParams, - VectorStoreDeleted, - VectorStoreListParams, - VectorStoreUpdateParams, - VectorStores, - VectorStoresPage, -} from './vector-stores/vector-stores'; import { Chat } from './chat/chat'; export class Beta extends APIResource { realtime: RealtimeAPI.Realtime = new RealtimeAPI.Realtime(this._client); - vectorStores: VectorStoresAPI.VectorStores = new VectorStoresAPI.VectorStores(this._client); chat: ChatAPI.Chat = new ChatAPI.Chat(this._client); assistants: AssistantsAPI.Assistants = new AssistantsAPI.Assistants(this._client); threads: ThreadsAPI.Threads = new ThreadsAPI.Threads(this._client); } Beta.Realtime = Realtime; -Beta.VectorStores = VectorStores; Beta.Assistants = Assistants; Beta.Threads = Threads; export declare namespace Beta { export { Realtime as Realtime }; - export { - VectorStores as VectorStores, - type AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam, - type FileChunkingStrategy as FileChunkingStrategy, - type FileChunkingStrategyParam as FileChunkingStrategyParam, - type OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject, - type StaticFileChunkingStrategy as StaticFileChunkingStrategy, - type StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject, - type StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, - type VectorStore as VectorStore, - type VectorStoreDeleted as VectorStoreDeleted, - type VectorStoresPage as VectorStoresPage, - type VectorStoreCreateParams as VectorStoreCreateParams, - type VectorStoreUpdateParams as VectorStoreUpdateParams, - type VectorStoreListParams as VectorStoreListParams, - }; - export { Chat }; export { diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts index cedb1791a..18746c1d4 100644 --- a/src/resources/beta/index.ts +++ b/src/resources/beta/index.ts @@ -37,19 +37,3 @@ export { type ThreadCreateAndRunPollParams, type ThreadCreateAndRunStreamParams, } from './threads/index'; -export { - VectorStores, - type AutoFileChunkingStrategyParam, - type FileChunkingStrategy, - type FileChunkingStrategyParam, - type OtherFileChunkingStrategyObject, - type StaticFileChunkingStrategy, - type StaticFileChunkingStrategyObject, - type StaticFileChunkingStrategyObjectParam, - type VectorStore, - type VectorStoreDeleted, - type VectorStoreCreateParams, - type VectorStoreUpdateParams, - type VectorStoreListParams, - type VectorStoresPage, -} from './vector-stores/index'; diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 4a75b5466..401b3ec59 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -708,14 +708,14 @@ export interface RunCreateParamsBase { parallel_tool_calls?: boolean; /** - * Body param: **o1 and o3-mini models only** + * Body param: **o-series models only** * * Constrains effort on reasoning for * [reasoning 
models](https://platform.openai.com/docs/guides/reasoning). Currently * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can * result in faster responses and fewer tokens used on reasoning in a response. */ - reasoning_effort?: 'low' | 'medium' | 'high' | null; + reasoning_effort?: Shared.ReasoningEffort | null; /** * Body param: Specifies the format that the model must output. Compatible with diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 036ed680e..22ccd7f02 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -42,7 +42,6 @@ import { TextDelta, TextDeltaBlock, } from './messages'; -import * as VectorStoresAPI from '../vector-stores/vector-stores'; import * as RunsAPI from './runs/runs'; import { RequiredActionFunctionToolCall, @@ -431,9 +430,9 @@ export namespace ThreadCreateParams { export interface VectorStore { /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` - * strategy. Only applicable if `file_ids` is non-empty. + * strategy. */ - chunking_strategy?: VectorStoresAPI.FileChunkingStrategyParam; + chunking_strategy?: VectorStore.Auto | VectorStore.Static; /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to @@ -452,6 +451,45 @@ export namespace ThreadCreateParams { */ metadata?: Shared.Metadata | null; } + + export namespace VectorStore { + /** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. + */ + export interface Auto { + /** + * Always `auto`. + */ + type: 'auto'; + } + + export interface Static { + static: Static.Static; + + /** + * Always `static`. + */ + type: 'static'; + } + + export namespace Static { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. + */ + max_chunk_size_tokens: number; + } + } + } } } } @@ -790,9 +828,9 @@ export namespace ThreadCreateAndRunParams { export interface VectorStore { /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` - * strategy. Only applicable if `file_ids` is non-empty. + * strategy. */ - chunking_strategy?: VectorStoresAPI.FileChunkingStrategyParam; + chunking_strategy?: VectorStore.Auto | VectorStore.Static; /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to @@ -811,6 +849,45 @@ export namespace ThreadCreateAndRunParams { */ metadata?: Shared.Metadata | null; } + + export namespace VectorStore { + /** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. + */ + export interface Auto { + /** + * Always `auto`. + */ + type: 'auto'; + } + + export interface Static { + static: Static.Static; + + /** + * Always `static`. + */ + type: 'static'; + } + + export namespace Static { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. 
The + * minimum value is `100` and the maximum value is `4096`. + */ + max_chunk_size_tokens: number; + } + } + } } } } diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index 51a3606fd..19ddd9c2e 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -76,7 +76,6 @@ export declare namespace Chat { type ChatCompletionModality as ChatCompletionModality, type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent as ChatCompletionPredictionContent, - type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, type ChatCompletionRole as ChatCompletionRole, type ChatCompletionStoreMessage as ChatCompletionStoreMessage, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, @@ -86,6 +85,7 @@ export declare namespace Chat { type ChatCompletionToolChoiceOption as ChatCompletionToolChoiceOption, type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam, type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam, + type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, type ChatCompletionsPage as ChatCompletionsPage, type ChatCompletionCreateParams as ChatCompletionCreateParams, type ChatCompletionCreateParamsNonStreaming as ChatCompletionCreateParamsNonStreaming, diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts index 5a9e497a1..8204e2a8b 100644 --- a/src/resources/chat/completions/completions.ts +++ b/src/resources/chat/completions/completions.ts @@ -16,6 +16,13 @@ export class Completions extends APIResource { messages: MessagesAPI.Messages = new MessagesAPI.Messages(this._client); /** + * **Starting a new project?** We recommend trying + * [Responses](https://platform.openai.com/docs/api-reference/responses) to take + * advantage of the latest OpenAI platform features. Compare + * [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses). + * + * --- + * * Creates a model response for the given chat conversation. Learn more in the * [text generation](https://platform.openai.com/docs/guides/text-generation), * [vision](https://platform.openai.com/docs/guides/vision), and @@ -46,7 +53,7 @@ export class Completions extends APIResource { } /** - * Get a stored chat completion. Only chat completions that have been created with + * Get a stored chat completion. Only Chat Completions that have been created with * the `store` parameter set to `true` will be returned. */ retrieve(completionID: string, options?: RequestOptions): APIPromise { @@ -54,7 +61,7 @@ export class Completions extends APIResource { } /** - * Modify a stored chat completion. Only chat completions that have been created + * Modify a stored chat completion. Only Chat Completions that have been created * with the `store` parameter set to `true` can be modified. Currently, the only * supported modification is to update the `metadata` field. */ @@ -67,7 +74,7 @@ export class Completions extends APIResource { } /** - * List stored chat completions. Only chat completions that have been stored with + * List stored Chat Completions. Only Chat Completions that have been stored with * the `store` parameter set to `true` will be returned. */ list( @@ -78,7 +85,7 @@ export class Completions extends APIResource { } /** - * Delete a stored chat completion. Only chat completions that have been created + * Delete a stored chat completion. 
Only Chat Completions that have been created * with the `store` parameter set to `true` can be deleted. */ delete(completionID: string, options?: RequestOptions): APIPromise { @@ -304,16 +311,16 @@ export interface ChatCompletionAudioParam { format: 'wav' | 'mp3' | 'flac' | 'opus' | 'pcm16'; /** - * The voice the model uses to respond. Supported voices are `ash`, `ballad`, - * `coral`, `sage`, and `verse` (also supported but not recommended are `alloy`, - * `echo`, and `shimmer`; these voices are less expressive). + * The voice the model uses to respond. Supported voices are `alloy`, `ash`, + * `ballad`, `coral`, `echo`, `sage`, and `shimmer`. */ voice: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; } /** - * Represents a streamed chunk of a chat completion response returned by model, + * Represents a streamed chunk of a chat completion response returned by the model, * based on the provided input. + * [Learn more](https://platform.openai.com/docs/guides/streaming-responses). */ export interface ChatCompletionChunk { /** @@ -500,7 +507,43 @@ export namespace ChatCompletionChunk { export type ChatCompletionContentPart = | ChatCompletionContentPartText | ChatCompletionContentPartImage - | ChatCompletionContentPartInputAudio; + | ChatCompletionContentPartInputAudio + | ChatCompletionContentPart.File; + +export namespace ChatCompletionContentPart { + /** + * Learn about [file inputs](https://platform.openai.com/docs/guides/text) for text + * generation. + */ + export interface File { + file: File.File; + + /** + * The type of the content part. Always `file`. + */ + type: 'file'; + } + + export namespace File { + export interface File { + /** + * The base64 encoded file data, used when passing the file to the model as a + * string. + */ + file_data?: string; + + /** + * The ID of an uploaded file to use as input. + */ + file_id?: string; + + /** + * The name of the file, used when passing the file to the model as a string. + */ + file_name?: string; + } + } +} /** * Learn about [image inputs](https://platform.openai.com/docs/guides/vision). @@ -673,6 +716,12 @@ export interface ChatCompletionMessage { */ role: 'assistant'; + /** + * Annotations for the message, when applicable, as when using the + * [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + */ + annotations?: Array; + /** * If the audio output modality is requested, this object contains data about the * audio response from the model. @@ -693,6 +742,48 @@ export interface ChatCompletionMessage { } export namespace ChatCompletionMessage { + /** + * A URL citation when using web search. + */ + export interface Annotation { + /** + * The type of the URL citation. Always `url_citation`. + */ + type: 'url_citation'; + + /** + * A URL citation when using web search. + */ + url_citation: Annotation.URLCitation; + } + + export namespace Annotation { + /** + * A URL citation when using web search. + */ + export interface URLCitation { + /** + * The index of the last character of the URL citation in the message. + */ + end_index: number; + + /** + * The index of the first character of the URL citation in the message. + */ + start_index: number; + + /** + * The title of the web resource. + */ + title: string; + + /** + * The URL of the web resource. + */ + url: string; + } + } + /** * @deprecated Deprecated and replaced by `tool_calls`. The name and arguments of a * function that should be called, as generated by the model. 
@@ -806,16 +897,6 @@ export interface ChatCompletionPredictionContent { type: 'content'; } -/** - * **o1 and o3-mini models only** - * - * Constrains effort on reasoning for - * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - * result in faster responses and fewer tokens used on reasoning in a response. - */ -export type ChatCompletionReasoningEffort = 'low' | 'medium' | 'high' | null; - /** * The role of the author of a message */ @@ -981,6 +1062,8 @@ export interface ChatCompletionUserMessageParam { name?: string; } +export type ChatCompletionReasoningEffort = Shared.ReasoningEffort | null; + export type ChatCompletionCreateParams = | ChatCompletionCreateParamsNonStreaming | ChatCompletionCreateParamsStreaming; @@ -997,9 +1080,11 @@ export interface ChatCompletionCreateParamsBase { messages: Array; /** - * ID of the model to use. See the - * [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) - * table for details on which models work with the Chat API. + * Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + * wide range of models with different capabilities, performance characteristics, + * and price points. Refer to the + * [model guide](https://platform.openai.com/docs/models) to browse and compare + * available models. */ model: (string & {}) | Shared.ChatModel; @@ -1090,8 +1175,8 @@ export interface ChatCompletionCreateParamsBase { metadata?: Shared.Metadata | null; /** - * Output types that you would like the model to generate for this request. Most - * models are capable of generating text, which is the default: + * Output types that you would like the model to generate. Most models are capable + * of generating text, which is the default: * * `["text"]` * @@ -1101,7 +1186,7 @@ export interface ChatCompletionCreateParamsBase { * * `["text", "audio"]` */ - modalities?: Array | null; + modalities?: Array<'text' | 'audio'> | null; /** * How many chat completion choices to generate for each input message. Note that @@ -1131,14 +1216,14 @@ export interface ChatCompletionCreateParamsBase { presence_penalty?: number | null; /** - * **o1 and o3-mini models only** + * **o-series models only** * * Constrains effort on reasoning for * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can * result in faster responses and fewer tokens used on reasoning in a response. */ - reasoning_effort?: ChatCompletionReasoningEffort | null; + reasoning_effort?: Shared.ReasoningEffort | null; /** * An object specifying the format that the model must output. @@ -1148,21 +1233,14 @@ export interface ChatCompletionCreateParamsBase { * in the * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). * - * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - * message the model generates is valid JSON. - * - * **Important:** when using JSON mode, you **must** also instruct the model to - * produce JSON yourself via a system or user message. Without this, the model may - * generate an unending stream of whitespace until the generation reaches the token - * limit, resulting in a long-running and seemingly "stuck" request. 
Also note that - * the message content may be partially cut off if `finish_reason="length"`, which - * indicates the generation exceeded `max_tokens` or the conversation exceeded the - * max context length. + * Setting to `{ "type": "json_object" }` enables the older JSON mode, which + * ensures the message the model generates is valid JSON. Using `json_schema` is + * preferred for models that support it. */ response_format?: | Shared.ResponseFormatText - | Shared.ResponseFormatJSONObject - | Shared.ResponseFormatJSONSchema; + | Shared.ResponseFormatJSONSchema + | Shared.ResponseFormatJSONObject; /** * This feature is in Beta. If specified, our system will make a best effort to @@ -1181,15 +1259,19 @@ export interface ChatCompletionCreateParamsBase { * utilize scale tier credits until they are exhausted. * - If set to 'auto', and the Project is not Scale tier enabled, the request will * be processed using the default service tier with a lower uptime SLA and no - * latency guarantee. + * latency guarentee. * - If set to 'default', the request will be processed using the default service - * tier with a lower uptime SLA and no latency guarantee. + * tier with a lower uptime SLA and no latency guarentee. * - When not set, the default behavior is 'auto'. + * + * When this parameter is set, the response body will include the `service_tier` + * utilized. */ service_tier?: 'auto' | 'default' | null; /** - * Up to 4 sequences where the API will stop generating further tokens. + * Up to 4 sequences where the API will stop generating further tokens. The + * returned text will not contain the stop sequence. */ stop?: string | null | Array; @@ -1201,12 +1283,14 @@ export interface ChatCompletionCreateParamsBase { store?: boolean | null; /** - * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be - * sent as data-only - * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - * as they become available, with the stream terminated by a `data: [DONE]` - * message. - * [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming) + * for more information, along with the + * [streaming responses](https://platform.openai.com/docs/guides/streaming-responses) + * guide for more information on how to handle the streaming events. */ stream?: boolean | null; @@ -1265,6 +1349,13 @@ export interface ChatCompletionCreateParamsBase { * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). */ user?: string; + + /** + * This tool searches the web for relevant results to use in a response. Learn more + * about the + * [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + */ + web_search_options?: ChatCompletionCreateParams.WebSearchOptions; } export namespace ChatCompletionCreateParams { @@ -1296,6 +1387,70 @@ export namespace ChatCompletionCreateParams { parameters?: Shared.FunctionParameters; } + /** + * This tool searches the web for relevant results to use in a response. 
Learn more + * about the + * [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + */ + export interface WebSearchOptions { + /** + * High level guidance for the amount of context window space to use for the + * search. One of `low`, `medium`, or `high`. `medium` is the default. + */ + search_context_size?: 'low' | 'medium' | 'high'; + + /** + * Approximate location parameters for the search. + */ + user_location?: WebSearchOptions.UserLocation | null; + } + + export namespace WebSearchOptions { + /** + * Approximate location parameters for the search. + */ + export interface UserLocation { + /** + * Approximate location parameters for the search. + */ + approximate: UserLocation.Approximate; + + /** + * The type of location approximation. Always `approximate`. + */ + type: 'approximate'; + } + + export namespace UserLocation { + /** + * Approximate location parameters for the search. + */ + export interface Approximate { + /** + * Free text input for the city of the user, e.g. `San Francisco`. + */ + city?: string; + + /** + * The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of + * the user, e.g. `US`. + */ + country?: string; + + /** + * Free text input for the region of the user, e.g. `California`. + */ + region?: string; + + /** + * The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the + * user, e.g. `America/Los_Angeles`. + */ + timezone?: string; + } + } + } + export type ChatCompletionCreateParamsNonStreaming = CompletionsCompletionsAPI.ChatCompletionCreateParamsNonStreaming; export type ChatCompletionCreateParamsStreaming = @@ -1304,24 +1459,28 @@ export namespace ChatCompletionCreateParams { export interface ChatCompletionCreateParamsNonStreaming extends ChatCompletionCreateParamsBase { /** - * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be - * sent as data-only - * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - * as they become available, with the stream terminated by a `data: [DONE]` - * message. - * [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming) + * for more information, along with the + * [streaming responses](https://platform.openai.com/docs/guides/streaming-responses) + * guide for more information on how to handle the streaming events. */ stream?: false | null; } export interface ChatCompletionCreateParamsStreaming extends ChatCompletionCreateParamsBase { /** - * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be - * sent as data-only - * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - * as they become available, with the stream terminated by a `data: [DONE]` - * message. - * [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). 
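Putting `web_search_options` together with the `url_citation` annotations, a request might look like the sketch below. The model name is an assumption (any search-capable chat model), and the location fields are placeholders.

```ts
import OpenAI from 'openai';

const client = new OpenAI();

const completion = await client.chat.completions.create({
  // Assumed search-capable model; substitute whichever model supports web search.
  model: 'gpt-4o-search-preview',
  web_search_options: {
    search_context_size: 'medium',
    user_location: {
      type: 'approximate',
      approximate: { city: 'San Francisco', country: 'US', timezone: 'America/Los_Angeles' },
    },
  },
  messages: [{ role: 'user', content: 'What changed in the latest TypeScript release?' }],
});

// URL citations for web results arrive as annotations on the assistant message.
for (const annotation of completion.choices[0].message.annotations ?? []) {
  console.log(annotation.url_citation.url, annotation.url_citation.title);
}
```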
+ * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming) + * for more information, along with the + * [streaming responses](https://platform.openai.com/docs/guides/streaming-responses) + * guide for more information on how to handle the streaming events. */ stream: true; } @@ -1340,19 +1499,19 @@ export interface ChatCompletionUpdateParams { export interface ChatCompletionListParams extends CursorPageParams { /** - * A list of metadata keys to filter the chat completions by. Example: + * A list of metadata keys to filter the Chat Completions by. Example: * * `metadata[key1]=value1&metadata[key2]=value2` */ metadata?: Shared.Metadata | null; /** - * The model used to generate the chat completions. + * The model used to generate the Chat Completions. */ model?: string; /** - * Sort order for chat completions by timestamp. Use `asc` for ascending order or + * Sort order for Chat Completions by timestamp. Use `asc` for ascending order or * `desc` for descending order. Defaults to `asc`. */ order?: 'asc' | 'desc'; @@ -1382,7 +1541,6 @@ export declare namespace Completions { type ChatCompletionModality as ChatCompletionModality, type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent as ChatCompletionPredictionContent, - type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, type ChatCompletionRole as ChatCompletionRole, type ChatCompletionStoreMessage as ChatCompletionStoreMessage, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, @@ -1392,6 +1550,7 @@ export declare namespace Completions { type ChatCompletionToolChoiceOption as ChatCompletionToolChoiceOption, type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam, type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam, + type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, type ChatCompletionsPage as ChatCompletionsPage, type ChatCompletionCreateParams as ChatCompletionCreateParams, type ChatCompletionCreateParamsNonStreaming as ChatCompletionCreateParamsNonStreaming, diff --git a/src/resources/chat/completions/index.ts b/src/resources/chat/completions/index.ts index 6a3fdec83..32d0eb408 100644 --- a/src/resources/chat/completions/index.ts +++ b/src/resources/chat/completions/index.ts @@ -22,7 +22,6 @@ export { type ChatCompletionModality, type ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent, - type ChatCompletionReasoningEffort, type ChatCompletionRole, type ChatCompletionStoreMessage, type ChatCompletionStreamOptions, diff --git a/src/resources/chat/completions/messages.ts b/src/resources/chat/completions/messages.ts index f00acbdfc..82478a8ab 100644 --- a/src/resources/chat/completions/messages.ts +++ b/src/resources/chat/completions/messages.ts @@ -9,7 +9,7 @@ import { path } from '../../../internal/utils/path'; export class Messages extends APIResource { /** - * Get the messages in a stored chat completion. Only chat completions that have + * Get the messages in a stored chat completion. Only Chat Completions that have * been created with the `store` parameter set to `true` will be returned. 
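The list parameters above combine with the SDK's cursor-based auto-pagination. A small sketch, assuming completions were created with `store: true` and tagged with a placeholder metadata key:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

// Iterate stored Chat Completions, newest first, filtered by a placeholder metadata key.
for await (const completion of client.chat.completions.list({
  metadata: { project: 'demo' },
  order: 'desc',
})) {
  console.log(completion.id, completion.model);
}
```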
*/ list( diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts index 8eb8cbed6..3e997dd86 100644 --- a/src/resources/chat/index.ts +++ b/src/resources/chat/index.ts @@ -23,7 +23,6 @@ export { type ChatCompletionModality, type ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent, - type ChatCompletionReasoningEffort, type ChatCompletionRole, type ChatCompletionStoreMessage, type ChatCompletionStreamOptions, diff --git a/src/resources/files.ts b/src/resources/files.ts index 56bafc224..3183b87f8 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -175,16 +175,12 @@ export interface FileObject { } /** - * The intended purpose of the uploaded file. - * - * Use "assistants" for - * [Assistants](https://platform.openai.com/docs/api-reference/assistants) and - * [Message](https://platform.openai.com/docs/api-reference/messages) files, - * "vision" for Assistants image file inputs, "batch" for - * [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for - * [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning). + * The intended purpose of the uploaded file. One of: - `assistants`: Used in the + * Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + * fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + * Flexible file type for any purpose - `evals`: Used for eval data sets */ -export type FilePurpose = 'assistants' | 'batch' | 'fine-tune' | 'vision'; +export type FilePurpose = 'assistants' | 'batch' | 'fine-tune' | 'vision' | 'user_data' | 'evals'; export interface FileCreateParams { /** @@ -193,14 +189,10 @@ export interface FileCreateParams { file: Uploadable; /** - * The intended purpose of the uploaded file. - * - * Use "assistants" for - * [Assistants](https://platform.openai.com/docs/api-reference/assistants) and - * [Message](https://platform.openai.com/docs/api-reference/messages) files, - * "vision" for Assistants image file inputs, "batch" for - * [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for - * [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning). + * The intended purpose of the uploaded file. 
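With the widened `FilePurpose` union, general-purpose uploads can use the new `user_data` value. A minimal sketch; the path is a placeholder.

```ts
import fs from 'node:fs';
import OpenAI from 'openai';

const client = new OpenAI();

// Upload an arbitrary file for later reference, e.g. as a `file` content part.
const file = await client.files.create({
  file: fs.createReadStream('./notes.txt'),
  purpose: 'user_data',
});

console.log(file.id, file.purpose);
```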
One of: - `assistants`: Used in the + * Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + * fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + * Flexible file type for any purpose - `evals`: Used for eval data sets */ purpose: FilePurpose; } diff --git a/src/resources/index.ts b/src/resources/index.ts index c1d06d8ce..99a703037 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -60,4 +60,24 @@ export { type ModerationCreateResponse, type ModerationCreateParams, } from './moderations'; +export { Responses } from './responses/responses'; export { Uploads, type Upload, type UploadCreateParams, type UploadCompleteParams } from './uploads/uploads'; +export { + VectorStores, + type AutoFileChunkingStrategyParam, + type FileChunkingStrategy, + type FileChunkingStrategyParam, + type OtherFileChunkingStrategyObject, + type StaticFileChunkingStrategy, + type StaticFileChunkingStrategyObject, + type StaticFileChunkingStrategyObjectParam, + type VectorStore, + type VectorStoreDeleted, + type VectorStoreSearchResponse, + type VectorStoreCreateParams, + type VectorStoreUpdateParams, + type VectorStoreListParams, + type VectorStoreSearchParams, + type VectorStoresPage, + type VectorStoreSearchResponsesPage, +} from './vector-stores/vector-stores'; diff --git a/src/resources/responses/index.ts b/src/resources/responses/index.ts new file mode 100644 index 000000000..164665eb2 --- /dev/null +++ b/src/resources/responses/index.ts @@ -0,0 +1,9 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export { + InputItems, + type ResponseItemList, + type InputItemListParams, + type ResponseItemListDataPage, +} from './input-items'; +export { Responses } from './responses'; diff --git a/src/resources/responses/input-items.ts b/src/resources/responses/input-items.ts new file mode 100644 index 000000000..b38d1be95 --- /dev/null +++ b/src/resources/responses/input-items.ts @@ -0,0 +1,252 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import * as ResponsesAPI from './responses'; +import { CursorPage, type CursorPageParams, PagePromise } from '../../pagination'; +import { RequestOptions } from '../../internal/request-options'; +import { path } from '../../internal/utils/path'; + +export class InputItems extends APIResource { + /** + * Returns a list of input items for a given response. 
+ */ + list( + responseID: string, + query: InputItemListParams | null | undefined = {}, + options?: RequestOptions, + ): PagePromise< + ResponseItemListDataPage, + | ResponseItemList.Message + | ResponsesAPI.ResponseOutputMessage + | ResponsesAPI.ResponseFileSearchToolCall + | ResponsesAPI.ResponseComputerToolCall + | ResponseItemList.ComputerCallOutput + | ResponsesAPI.ResponseFunctionWebSearch + | ResponsesAPI.ResponseFunctionToolCall + | ResponseItemList.FunctionCallOutput + > { + return this._client.getAPIList( + path`/responses/${responseID}/input_items`, + CursorPage< + | ResponseItemList.Message + | ResponsesAPI.ResponseOutputMessage + | ResponsesAPI.ResponseFileSearchToolCall + | ResponsesAPI.ResponseComputerToolCall + | ResponseItemList.ComputerCallOutput + | ResponsesAPI.ResponseFunctionWebSearch + | ResponsesAPI.ResponseFunctionToolCall + | ResponseItemList.FunctionCallOutput + >, + { query, ...options }, + ); + } +} + +export type ResponseItemListDataPage = CursorPage< + | ResponseItemList.Message + | ResponsesAPI.ResponseOutputMessage + | ResponsesAPI.ResponseFileSearchToolCall + | ResponsesAPI.ResponseComputerToolCall + | ResponseItemList.ComputerCallOutput + | ResponsesAPI.ResponseFunctionWebSearch + | ResponsesAPI.ResponseFunctionToolCall + | ResponseItemList.FunctionCallOutput +>; + +/** + * A list of Response items. + */ +export interface ResponseItemList { + /** + * A list of items used to generate this response. + */ + data: Array< + | ResponseItemList.Message + | ResponsesAPI.ResponseOutputMessage + | ResponsesAPI.ResponseFileSearchToolCall + | ResponsesAPI.ResponseComputerToolCall + | ResponseItemList.ComputerCallOutput + | ResponsesAPI.ResponseFunctionWebSearch + | ResponsesAPI.ResponseFunctionToolCall + | ResponseItemList.FunctionCallOutput + >; + + /** + * The ID of the first item in the list. + */ + first_id: string; + + /** + * Whether there are more items available. + */ + has_more: boolean; + + /** + * The ID of the last item in the list. + */ + last_id: string; + + /** + * The type of object returned, must be `list`. + */ + object: 'list'; +} + +export namespace ResponseItemList { + export interface Message { + /** + * The unique ID of the message input. + */ + id: string; + + /** + * A list of one or many input items to the model, containing different content + * types. + */ + content: ResponsesAPI.ResponseInputMessageContentList; + + /** + * The role of the message input. One of `user`, `system`, or `developer`. + */ + role: 'user' | 'system' | 'developer'; + + /** + * The status of item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; + + /** + * The type of the message input. Always set to `message`. + */ + type?: 'message'; + } + + export interface ComputerCallOutput { + /** + * The unique ID of the computer call tool output. + */ + id: string; + + /** + * The ID of the computer tool call that produced the output. + */ + call_id: string; + + /** + * A computer screenshot image used with the computer use tool. + */ + output: ComputerCallOutput.Output; + + /** + * The type of the computer tool call output. Always `computer_call_output`. + */ + type: 'computer_call_output'; + + /** + * The safety checks reported by the API that have been acknowledged by the + * developer. + */ + acknowledged_safety_checks?: Array; + + /** + * The status of the message input. One of `in_progress`, `completed`, or + * `incomplete`. 
Populated when input items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; + } + + export namespace ComputerCallOutput { + /** + * A computer screenshot image used with the computer use tool. + */ + export interface Output { + /** + * Specifies the event type. For a computer screenshot, this property is always set + * to `computer_screenshot`. + */ + type: 'computer_screenshot'; + + /** + * The identifier of an uploaded file that contains the screenshot. + */ + file_id?: string; + + /** + * The URL of the screenshot image. + */ + image_url?: string; + } + + /** + * A pending safety check for the computer call. + */ + export interface AcknowledgedSafetyCheck { + /** + * The ID of the pending safety check. + */ + id: string; + + /** + * The type of the pending safety check. + */ + code: string; + + /** + * Details about the pending safety check. + */ + message: string; + } + } + + export interface FunctionCallOutput { + /** + * The unique ID of the function call tool output. + */ + id: string; + + /** + * The unique ID of the function tool call generated by the model. + */ + call_id: string; + + /** + * A JSON string of the output of the function tool call. + */ + output: string; + + /** + * The type of the function tool call output. Always `function_call_output`. + */ + type: 'function_call_output'; + + /** + * The status of the item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; + } +} + +export interface InputItemListParams extends CursorPageParams { + /** + * An item ID to list items before, used in pagination. + */ + before?: string; + + /** + * The order to return the input items in. Default is `asc`. + * + * - `asc`: Return the input items in ascending order. + * - `desc`: Return the input items in descending order. + */ + order?: 'asc' | 'desc'; +} + +export declare namespace InputItems { + export { + type ResponseItemList as ResponseItemList, + type ResponseItemListDataPage as ResponseItemListDataPage, + type InputItemListParams as InputItemListParams, + }; +} diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts new file mode 100644 index 000000000..1186cab6b --- /dev/null +++ b/src/resources/responses/responses.ts @@ -0,0 +1,2688 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import * as ResponsesAPI from './responses'; +import * as Shared from '../shared'; +import * as InputItemsAPI from './input-items'; +import { InputItemListParams, InputItems, ResponseItemList, ResponseItemListDataPage } from './input-items'; +import { APIPromise } from '../../api-promise'; +import { Stream } from '../../streaming'; +import { buildHeaders } from '../../internal/headers'; +import { RequestOptions } from '../../internal/request-options'; +import { path } from '../../internal/utils/path'; + +export class Responses extends APIResource { + inputItems: InputItemsAPI.InputItems = new InputItemsAPI.InputItems(this._client); + + /** + * Creates a model response. Provide + * [text](https://platform.openai.com/docs/guides/text) or + * [image](https://platform.openai.com/docs/guides/images) inputs to generate + * [text](https://platform.openai.com/docs/guides/text) or + * [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. 
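Given a response ID, its input items can be paged through with `inputItems.list()`. A sketch assuming an existing response; `resp_123` is a placeholder ID.

```ts
import OpenAI from 'openai';

const client = new OpenAI();

for await (const item of client.responses.inputItems.list('resp_123', { order: 'asc' })) {
  if (item.type === 'message') {
    console.log('message from', item.role);
  } else {
    console.log('item type:', item.type);
  }
}
```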
Have + * the model call your own + * [custom code](https://platform.openai.com/docs/guides/function-calling) or use + * built-in [tools](https://platform.openai.com/docs/guides/tools) like + * [web search](https://platform.openai.com/docs/guides/tools-web-search) or + * [file search](https://platform.openai.com/docs/guides/tools-file-search) to use + * your own data as input for the model's response. + */ + create(body: ResponseCreateParamsNonStreaming, options?: RequestOptions): APIPromise; + create( + body: ResponseCreateParamsStreaming, + options?: RequestOptions, + ): APIPromise>; + create( + body: ResponseCreateParamsBase, + options?: RequestOptions, + ): APIPromise | Response>; + create( + body: ResponseCreateParams, + options?: RequestOptions, + ): APIPromise | APIPromise> { + return this._client.post('/responses', { body, ...options, stream: body.stream ?? false }) as + | APIPromise + | APIPromise>; + } + + /** + * Retrieves a model response with the given ID. + */ + retrieve( + responseID: string, + query: ResponseRetrieveParams | null | undefined = {}, + options?: RequestOptions, + ): APIPromise { + return this._client.get(path`/responses/${responseID}`, { query, ...options }); + } + + /** + * Deletes a model response with the given ID. + */ + delete(responseID: string, options?: RequestOptions): APIPromise { + return this._client.delete(path`/responses/${responseID}`, { + ...options, + headers: buildHeaders([{ Accept: '*/*' }, options?.headers]), + }); + } +} + +/** + * A tool that controls a virtual computer. Learn more about the + * [computer tool](https://platform.openai.com/docs/guides/tools-computer-use). + */ +export interface ComputerTool { + /** + * The height of the computer display. + */ + display_height: number; + + /** + * The width of the computer display. + */ + display_width: number; + + /** + * The type of computer environment to control. + */ + environment: 'mac' | 'windows' | 'ubuntu' | 'browser'; + + /** + * The type of the computer use tool. Always `computer_use_preview`. + */ + type: 'computer-preview'; +} + +/** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ +export interface EasyInputMessage { + /** + * Text, image, or audio input to the model, used to generate a response. Can also + * contain previous assistant responses. + */ + content: string | ResponseInputMessageContentList; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; +} + +/** + * A tool that searches for relevant content from uploaded files. Learn more about + * the + * [file search tool](https://platform.openai.com/docs/guides/tools-file-search). + */ +export interface FileSearchTool { + /** + * The type of the file search tool. Always `file_search`. + */ + type: 'file_search'; + + /** + * The IDs of the vector stores to search. + */ + vector_store_ids: Array; + + /** + * A filter to apply based on file attributes. + */ + filters?: Shared.ComparisonFilter | Shared.CompoundFilter; + + /** + * The maximum number of results to return. This number should be between 1 and 50 + * inclusive. 
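The basic lifecycle of the new Responses resource, as a hedged sketch: it assumes the create params accept `model` and `input` as in the Responses API (defined later in this file), and reads text out of the first output message rather than relying on any convenience accessor.

```ts
import OpenAI from 'openai';

const client = new OpenAI();

const response = await client.responses.create({
  model: 'gpt-4o',
  input: 'Write a haiku about type safety.',
});

// Walk the output items; assistant messages carry `output_text` content parts.
for (const item of response.output) {
  if (item.type === 'message') {
    for (const part of item.content) {
      if (part.type === 'output_text') console.log(part.text);
    }
  }
}

// Responses can be fetched again or deleted by ID.
const again = await client.responses.retrieve(response.id);
await client.responses.delete(again.id);
```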
+ */ + max_num_results?: number; + + /** + * Ranking options for search. + */ + ranking_options?: FileSearchTool.RankingOptions; +} + +export namespace FileSearchTool { + /** + * Ranking options for search. + */ + export interface RankingOptions { + /** + * The ranker to use for the file search. + */ + ranker?: 'auto' | 'default-2024-11-15'; + + /** + * The score threshold for the file search, a number between 0 and 1. Numbers + * closer to 1 will attempt to return only the most relevant results, but may + * return fewer results. + */ + score_threshold?: number; + } +} + +/** + * Defines a function in your own code the model can choose to call. Learn more + * about + * [function calling](https://platform.openai.com/docs/guides/function-calling). + */ +export interface FunctionTool { + /** + * The name of the function to call. + */ + name: string; + + /** + * A JSON schema object describing the parameters of the function. + */ + parameters: Record; + + /** + * Whether to enforce strict parameter validation. Default `true`. + */ + strict: boolean; + + /** + * The type of the function tool. Always `function`. + */ + type: 'function'; + + /** + * A description of the function. Used by the model to determine whether or not to + * call the function. + */ + description?: string | null; +} + +export interface Response { + /** + * Unique identifier for this Response. + */ + id: string; + + /** + * Unix timestamp (in seconds) of when this Response was created. + */ + created_at: number; + + /** + * An error object returned when the model fails to generate a Response. + */ + error: ResponseError | null; + + /** + * Details about why the response is incomplete. + */ + incomplete_details: Response.IncompleteDetails | null; + + /** + * Inserts a system (or developer) message as the first item in the model's + * context. + * + * When using along with `previous_response_id`, the instructions from a previous + * response will be not be carried over to the next response. This makes it simple + * to swap out system (or developer) messages in new responses. + */ + instructions: string | null; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + * wide range of models with different capabilities, performance characteristics, + * and price points. Refer to the + * [model guide](https://platform.openai.com/docs/models) to browse and compare + * available models. + */ + model: (string & {}) | Shared.ChatModel; + + /** + * The object type of this resource - always set to `response`. + */ + object: 'response'; + + /** + * An array of content items generated by the model. + * + * - The length and order of items in the `output` array is dependent on the + * model's response. + * - Rather than accessing the first item in the `output` array and assuming it's + * an `assistant` message with the content generated by the model, you might + * consider using the `output_text` property where supported in SDKs. + */ + output: Array; + + /** + * Whether to allow the model to run tool calls in parallel. 
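Note that, unlike Chat Completions, a Responses `FunctionTool` carries `name`, `parameters`, and `strict` at the top level rather than nesting them under a `function` key. A sketch of a tool definition; the schema contents are placeholders, and the subpath import mirrors the new module added by this patch.

```ts
import type { FunctionTool } from 'openai/resources/responses/responses';

// A placeholder weather-lookup tool in the flat Responses shape.
const getWeather: FunctionTool = {
  type: 'function',
  name: 'get_weather',
  description: 'Look up the current weather for a city.',
  strict: true,
  parameters: {
    type: 'object',
    properties: { city: { type: 'string' } },
    required: ['city'],
    additionalProperties: false,
  },
};
```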
+ */ + parallel_tool_calls: boolean; + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + * make the output more random, while lower values like 0.2 will make it more + * focused and deterministic. We generally recommend altering this or `top_p` but + * not both. + */ + temperature: number | null; + + /** + * How the model should select which tool (or tools) to use when generating a + * response. See the `tools` parameter to see how to specify which tools the model + * can call. + */ + tool_choice: ToolChoiceOptions | ToolChoiceTypes | ToolChoiceFunction; + + /** + * An array of tools the model may call while generating a response. You can + * specify which tool to use by setting the `tool_choice` parameter. + * + * The two categories of tools you can provide the model are: + * + * - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + * capabilities, like + * [web search](https://platform.openai.com/docs/guides/tools-web-search) or + * [file search](https://platform.openai.com/docs/guides/tools-file-search). + * Learn more about + * [built-in tools](https://platform.openai.com/docs/guides/tools). + * - **Function calls (custom tools)**: Functions that are defined by you, enabling + * the model to call your own code. Learn more about + * [function calling](https://platform.openai.com/docs/guides/function-calling). + */ + tools: Array; + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the + * model considers the results of the tokens with top_p probability mass. So 0.1 + * means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + top_p: number | null; + + /** + * An upper bound for the number of tokens that can be generated for a response, + * including visible output tokens and + * [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + */ + max_output_tokens?: number | null; + + /** + * The unique ID of the previous response to the model. Use this to create + * multi-turn conversations. Learn more about + * [conversation state](https://platform.openai.com/docs/guides/conversation-state). + */ + previous_response_id?: string | null; + + /** + * **o-series models only** + * + * Configuration options for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). + */ + reasoning?: Shared.Reasoning | null; + + /** + * The status of the response generation. One of `completed`, `failed`, + * `in_progress`, or `incomplete`. + */ + status?: ResponseStatus; + + /** + * Configuration options for a text response from the model. Can be plain text or + * structured JSON data. Learn more: + * + * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + * - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + */ + text?: ResponseTextConfig; + + /** + * The truncation strategy to use for the model response. + * + * - `auto`: If the context of this response and previous ones exceeds the model's + * context window size, the model will truncate the response to fit the context + * window by dropping input items in the middle of the conversation. + * - `disabled` (default): If a model response will exceed the context window size + * for a model, the request will fail with a 400 error. 
+ */ + truncation?: 'auto' | 'disabled' | null; + + /** + * Represents token usage details including input tokens, output tokens, a + * breakdown of output tokens, and the total tokens used. + */ + usage?: ResponseUsage; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor + * and detect abuse. + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + */ + user?: string; +} + +export namespace Response { + /** + * Details about why the response is incomplete. + */ + export interface IncompleteDetails { + /** + * The reason why the response is incomplete. + */ + reason?: 'max_output_tokens' | 'content_filter'; + } +} + +/** + * Emitted when there is a partial audio response. + */ +export interface ResponseAudioDeltaEvent { + /** + * A chunk of Base64 encoded response audio bytes. + */ + delta: string; + + /** + * The type of the event. Always `response.audio.delta`. + */ + type: 'response.audio.delta'; +} + +/** + * Emitted when the audio response is complete. + */ +export interface ResponseAudioDoneEvent { + /** + * The type of the event. Always `response.audio.done`. + */ + type: 'response.audio.done'; +} + +/** + * Emitted when there is a partial transcript of audio. + */ +export interface ResponseAudioTranscriptDeltaEvent { + /** + * The partial transcript of the audio response. + */ + delta: string; + + /** + * The type of the event. Always `response.audio.transcript.delta`. + */ + type: 'response.audio.transcript.delta'; +} + +/** + * Emitted when the full audio transcript is completed. + */ +export interface ResponseAudioTranscriptDoneEvent { + /** + * The type of the event. Always `response.audio.transcript.done`. + */ + type: 'response.audio.transcript.done'; +} + +/** + * Emitted when a partial code snippet is added by the code interpreter. + */ +export interface ResponseCodeInterpreterCallCodeDeltaEvent { + /** + * The partial code snippet added by the code interpreter. + */ + delta: string; + + /** + * The index of the output item that the code interpreter call is in progress. + */ + output_index: number; + + /** + * The type of the event. Always `response.code_interpreter_call.code.delta`. + */ + type: 'response.code_interpreter_call.code.delta'; +} + +/** + * Emitted when code snippet output is finalized by the code interpreter. + */ +export interface ResponseCodeInterpreterCallCodeDoneEvent { + /** + * The final code snippet output by the code interpreter. + */ + code: string; + + /** + * The index of the output item that the code interpreter call is in progress. + */ + output_index: number; + + /** + * The type of the event. Always `response.code_interpreter_call.code.done`. + */ + type: 'response.code_interpreter_call.code.done'; +} + +/** + * Emitted when the code interpreter call is completed. + */ +export interface ResponseCodeInterpreterCallCompletedEvent { + /** + * A tool call to run code. + */ + code_interpreter_call: ResponseCodeInterpreterToolCall; + + /** + * The index of the output item that the code interpreter call is in progress. + */ + output_index: number; + + /** + * The type of the event. Always `response.code_interpreter_call.completed`. + */ + type: 'response.code_interpreter_call.completed'; +} + +/** + * Emitted when a code interpreter call is in progress. + */ +export interface ResponseCodeInterpreterCallInProgressEvent { + /** + * A tool call to run code. 
+ */ + code_interpreter_call: ResponseCodeInterpreterToolCall; + + /** + * The index of the output item that the code interpreter call is in progress. + */ + output_index: number; + + /** + * The type of the event. Always `response.code_interpreter_call.in_progress`. + */ + type: 'response.code_interpreter_call.in_progress'; +} + +/** + * Emitted when the code interpreter is actively interpreting the code snippet. + */ +export interface ResponseCodeInterpreterCallInterpretingEvent { + /** + * A tool call to run code. + */ + code_interpreter_call: ResponseCodeInterpreterToolCall; + + /** + * The index of the output item that the code interpreter call is in progress. + */ + output_index: number; + + /** + * The type of the event. Always `response.code_interpreter_call.interpreting`. + */ + type: 'response.code_interpreter_call.interpreting'; +} + +/** + * A tool call to run code. + */ +export interface ResponseCodeInterpreterToolCall { + /** + * The unique ID of the code interpreter tool call. + */ + id: string; + + /** + * The code to run. + */ + code: string; + + /** + * The results of the code interpreter tool call. + */ + results: Array; + + /** + * The status of the code interpreter tool call. + */ + status: 'in_progress' | 'interpreting' | 'completed'; + + /** + * The type of the code interpreter tool call. Always `code_interpreter_call`. + */ + type: 'code_interpreter_call'; +} + +export namespace ResponseCodeInterpreterToolCall { + /** + * The output of a code interpreter tool call that is text. + */ + export interface Logs { + /** + * The logs of the code interpreter tool call. + */ + logs: string; + + /** + * The type of the code interpreter text output. Always `logs`. + */ + type: 'logs'; + } + + /** + * The output of a code interpreter tool call that is a file. + */ + export interface Files { + files: Array; + + /** + * The type of the code interpreter file output. Always `files`. + */ + type: 'files'; + } + + export namespace Files { + export interface File { + /** + * The ID of the file. + */ + file_id: string; + + /** + * The MIME type of the file. + */ + mime_type: string; + } + } +} + +/** + * Emitted when the model response is complete. + */ +export interface ResponseCompletedEvent { + /** + * Properties of the completed response. + */ + response: Response; + + /** + * The type of the event. Always `response.completed`. + */ + type: 'response.completed'; +} + +/** + * A tool call to a computer use tool. See the + * [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) + * for more information. + */ +export interface ResponseComputerToolCall { + /** + * The unique ID of the computer call. + */ + id: string; + + /** + * A click action. + */ + action: + | ResponseComputerToolCall.Click + | ResponseComputerToolCall.DoubleClick + | ResponseComputerToolCall.Drag + | ResponseComputerToolCall.Keypress + | ResponseComputerToolCall.Move + | ResponseComputerToolCall.Screenshot + | ResponseComputerToolCall.Scroll + | ResponseComputerToolCall.Type + | ResponseComputerToolCall.Wait; + + /** + * An identifier used when responding to the tool call with output. + */ + call_id: string; + + /** + * The pending safety checks for the computer call. + */ + pending_safety_checks: Array; + + /** + * The status of the item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status: 'in_progress' | 'completed' | 'incomplete'; + + /** + * The type of the computer call. Always `computer_call`. 
+ */ + type: 'computer_call'; +} + +export namespace ResponseComputerToolCall { + /** + * A click action. + */ + export interface Click { + /** + * Indicates which mouse button was pressed during the click. One of `left`, + * `right`, `wheel`, `back`, or `forward`. + */ + button: 'left' | 'right' | 'wheel' | 'back' | 'forward'; + + /** + * Specifies the event type. For a click action, this property is always set to + * `click`. + */ + type: 'click'; + + /** + * The x-coordinate where the click occurred. + */ + x: number; + + /** + * The y-coordinate where the click occurred. + */ + y: number; + } + + /** + * A double click action. + */ + export interface DoubleClick { + /** + * Specifies the event type. For a double click action, this property is always set + * to `double_click`. + */ + type: 'double_click'; + + /** + * The x-coordinate where the double click occurred. + */ + x: number; + + /** + * The y-coordinate where the double click occurred. + */ + y: number; + } + + /** + * A drag action. + */ + export interface Drag { + /** + * An array of coordinates representing the path of the drag action. Coordinates + * will appear as an array of objects, eg + * + * ``` + * [ + * { x: 100, y: 200 }, + * { x: 200, y: 300 } + * ] + * ``` + */ + path: Array; + + /** + * Specifies the event type. For a drag action, this property is always set to + * `drag`. + */ + type: 'drag'; + } + + export namespace Drag { + /** + * A series of x/y coordinate pairs in the drag path. + */ + export interface Path { + /** + * The x-coordinate. + */ + x: number; + + /** + * The y-coordinate. + */ + y: number; + } + } + + /** + * A collection of keypresses the model would like to perform. + */ + export interface Keypress { + /** + * The combination of keys the model is requesting to be pressed. This is an array + * of strings, each representing a key. + */ + keys: Array; + + /** + * Specifies the event type. For a keypress action, this property is always set to + * `keypress`. + */ + type: 'keypress'; + } + + /** + * A mouse move action. + */ + export interface Move { + /** + * Specifies the event type. For a move action, this property is always set to + * `move`. + */ + type: 'move'; + + /** + * The x-coordinate to move to. + */ + x: number; + + /** + * The y-coordinate to move to. + */ + y: number; + } + + /** + * A screenshot action. + */ + export interface Screenshot { + /** + * Specifies the event type. For a screenshot action, this property is always set + * to `screenshot`. + */ + type: 'screenshot'; + } + + /** + * A scroll action. + */ + export interface Scroll { + /** + * The horizontal scroll distance. + */ + scroll_x: number; + + /** + * The vertical scroll distance. + */ + scroll_y: number; + + /** + * Specifies the event type. For a scroll action, this property is always set to + * `scroll`. + */ + type: 'scroll'; + + /** + * The x-coordinate where the scroll occurred. + */ + x: number; + + /** + * The y-coordinate where the scroll occurred. + */ + y: number; + } + + /** + * An action to type in text. + */ + export interface Type { + /** + * The text to type. + */ + text: string; + + /** + * Specifies the event type. For a type action, this property is always set to + * `type`. + */ + type: 'type'; + } + + /** + * A wait action. + */ + export interface Wait { + /** + * Specifies the event type. For a wait action, this property is always set to + * `wait`. + */ + type: 'wait'; + } + + /** + * A pending safety check for the computer call. 
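The `action` union above lends itself to an exhaustive `switch`. A purely illustrative helper sketch:

```ts
import type { ResponseComputerToolCall } from 'openai/resources/responses/responses';

// Render a computer tool call action as a human-readable string.
function describeAction(call: ResponseComputerToolCall): string {
  const action = call.action;
  switch (action.type) {
    case 'click':
      return `click (${action.button}) at (${action.x}, ${action.y})`;
    case 'double_click':
      return `double click at (${action.x}, ${action.y})`;
    case 'drag':
      return `drag through ${action.path.length} points`;
    case 'keypress':
      return `press ${action.keys.join('+')}`;
    case 'move':
      return `move to (${action.x}, ${action.y})`;
    case 'screenshot':
      return 'take a screenshot';
    case 'scroll':
      return `scroll by (${action.scroll_x}, ${action.scroll_y})`;
    case 'type':
      return `type ${JSON.stringify(action.text)}`;
    case 'wait':
      return 'wait';
  }
}
```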
+ */ + export interface PendingSafetyCheck { + /** + * The ID of the pending safety check. + */ + id: string; + + /** + * The type of the pending safety check. + */ + code: string; + + /** + * Details about the pending safety check. + */ + message: string; + } +} + +/** + * Multi-modal input and output contents. + */ +export type ResponseContent = + | ResponseInputText + | ResponseInputImage + | ResponseInputFile + | ResponseOutputText + | ResponseOutputRefusal; + +/** + * Emitted when a new content part is added. + */ +export interface ResponseContentPartAddedEvent { + /** + * The index of the content part that was added. + */ + content_index: number; + + /** + * The ID of the output item that the content part was added to. + */ + item_id: string; + + /** + * The index of the output item that the content part was added to. + */ + output_index: number; + + /** + * The content part that was added. + */ + part: ResponseOutputText | ResponseOutputRefusal; + + /** + * The type of the event. Always `response.content_part.added`. + */ + type: 'response.content_part.added'; +} + +/** + * Emitted when a content part is done. + */ +export interface ResponseContentPartDoneEvent { + /** + * The index of the content part that is done. + */ + content_index: number; + + /** + * The ID of the output item that the content part was added to. + */ + item_id: string; + + /** + * The index of the output item that the content part was added to. + */ + output_index: number; + + /** + * The content part that is done. + */ + part: ResponseOutputText | ResponseOutputRefusal; + + /** + * The type of the event. Always `response.content_part.done`. + */ + type: 'response.content_part.done'; +} + +/** + * An event that is emitted when a response is created. + */ +export interface ResponseCreatedEvent { + /** + * The response that was created. + */ + response: Response; + + /** + * The type of the event. Always `response.created`. + */ + type: 'response.created'; +} + +/** + * An error object returned when the model fails to generate a Response. + */ +export interface ResponseError { + /** + * The error code for the response. + */ + code: + | 'server_error' + | 'rate_limit_exceeded' + | 'invalid_prompt' + | 'vector_store_timeout' + | 'invalid_image' + | 'invalid_image_format' + | 'invalid_base64_image' + | 'invalid_image_url' + | 'image_too_large' + | 'image_too_small' + | 'image_parse_error' + | 'image_content_policy_violation' + | 'invalid_image_mode' + | 'image_file_too_large' + | 'unsupported_image_media_type' + | 'empty_image_file' + | 'failed_to_download_image' + | 'image_file_not_found'; + + /** + * A human-readable description of the error. + */ + message: string; +} + +/** + * Emitted when an error occurs. + */ +export interface ResponseErrorEvent { + /** + * The error code. + */ + code: string | null; + + /** + * The error message. + */ + message: string; + + /** + * The error parameter. + */ + param: string | null; + + /** + * The type of the event. Always `error`. + */ + type: 'error'; +} + +/** + * An event that is emitted when a response fails. + */ +export interface ResponseFailedEvent { + /** + * The response that failed. + */ + response: Response; + + /** + * The type of the event. Always `response.failed`. + */ + type: 'response.failed'; +} + +/** + * Emitted when a file search call is completed (results found). + */ +export interface ResponseFileSearchCallCompletedEvent { + /** + * The ID of the output item that the file search call is initiated. 
+ */ + item_id: string; + + /** + * The index of the output item that the file search call is initiated. + */ + output_index: number; + + /** + * The type of the event. Always `response.file_search_call.completed`. + */ + type: 'response.file_search_call.completed'; +} + +/** + * Emitted when a file search call is initiated. + */ +export interface ResponseFileSearchCallInProgressEvent { + /** + * The ID of the output item that the file search call is initiated. + */ + item_id: string; + + /** + * The index of the output item that the file search call is initiated. + */ + output_index: number; + + /** + * The type of the event. Always `response.file_search_call.in_progress`. + */ + type: 'response.file_search_call.in_progress'; +} + +/** + * Emitted when a file search is currently searching. + */ +export interface ResponseFileSearchCallSearchingEvent { + /** + * The ID of the output item that the file search call is initiated. + */ + item_id: string; + + /** + * The index of the output item that the file search call is searching. + */ + output_index: number; + + /** + * The type of the event. Always `response.file_search_call.searching`. + */ + type: 'response.file_search_call.searching'; +} + +/** + * The results of a file search tool call. See the + * [file search guide](https://platform.openai.com/docs/guides/tools-file-search) + * for more information. + */ +export interface ResponseFileSearchToolCall { + /** + * The unique ID of the file search tool call. + */ + id: string; + + /** + * The queries used to search for files. + */ + queries: Array; + + /** + * The status of the file search tool call. One of `in_progress`, `searching`, + * `incomplete` or `failed`, + */ + status: 'in_progress' | 'searching' | 'completed' | 'incomplete' | 'failed'; + + /** + * The type of the file search tool call. Always `file_search_call`. + */ + type: 'file_search_call'; + + /** + * The results of the file search tool call. + */ + results?: Array | null; +} + +export namespace ResponseFileSearchToolCall { + export interface Result { + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. Keys are strings with a maximum + * length of 64 characters. Values are strings with a maximum length of 512 + * characters, booleans, or numbers. + */ + attributes?: Record | null; + + /** + * The unique ID of the file. + */ + file_id?: string; + + /** + * The name of the file. + */ + filename?: string; + + /** + * The relevance score of the file - a value between 0 and 1. + */ + score?: number; + + /** + * The text that was retrieved from the file. + */ + text?: string; + } +} + +/** + * An object specifying the format that the model must output. + * + * Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + * ensures the model will match your supplied JSON schema. Learn more in the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + * + * The default format is `{ "type": "text" }` with no additional options. + * + * **Not recommended for gpt-4o and newer models:** + * + * Setting to `{ "type": "json_object" }` enables the older JSON mode, which + * ensures the message the model generates is valid JSON. Using `json_schema` is + * preferred for models that support it. 
+ */ +export type ResponseFormatTextConfig = + | Shared.ResponseFormatText + | ResponseFormatTextJSONSchemaConfig + | Shared.ResponseFormatJSONObject; + +/** + * JSON Schema response format. Used to generate structured JSON responses. Learn + * more about + * [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). + */ +export interface ResponseFormatTextJSONSchemaConfig { + /** + * The schema for the response format, described as a JSON Schema object. Learn how + * to build JSON schemas [here](https://json-schema.org/). + */ + schema: Record; + + /** + * The type of response format being defined. Always `json_schema`. + */ + type: 'json_schema'; + + /** + * A description of what the response format is for, used by the model to determine + * how to respond in the format. + */ + description?: string; + + /** + * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores + * and dashes, with a maximum length of 64. + */ + name?: string; + + /** + * Whether to enable strict schema adherence when generating the output. If set to + * true, the model will always follow the exact schema defined in the `schema` + * field. Only a subset of JSON Schema is supported when `strict` is `true`. To + * learn more, read the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + */ + strict?: boolean | null; +} + +/** + * Emitted when there is a partial function-call arguments delta. + */ +export interface ResponseFunctionCallArgumentsDeltaEvent { + /** + * The function-call arguments delta that is added. + */ + delta: string; + + /** + * The ID of the output item that the function-call arguments delta is added to. + */ + item_id: string; + + /** + * The index of the output item that the function-call arguments delta is added to. + */ + output_index: number; + + /** + * The type of the event. Always `response.function_call_arguments.delta`. + */ + type: 'response.function_call_arguments.delta'; +} + +/** + * Emitted when function-call arguments are finalized. + */ +export interface ResponseFunctionCallArgumentsDoneEvent { + /** + * The function-call arguments. + */ + arguments: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The index of the output item. + */ + output_index: number; + + type: 'response.function_call_arguments.done'; +} + +/** + * A tool call to run a function. See the + * [function calling guide](https://platform.openai.com/docs/guides/function-calling) + * for more information. + */ +export interface ResponseFunctionToolCall { + /** + * The unique ID of the function tool call. + */ + id: string; + + /** + * A JSON string of the arguments to pass to the function. + */ + arguments: string; + + /** + * The unique ID of the function tool call generated by the model. + */ + call_id: string; + + /** + * The name of the function to run. + */ + name: string; + + /** + * The type of the function tool call. Always `function_call`. + */ + type: 'function_call'; + + /** + * The status of the item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; +} + +/** + * The results of a web search tool call. See the + * [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for + * more information. + */ +export interface ResponseFunctionWebSearch { + /** + * The unique ID of the web search tool call. + */ + id: string; + + /** + * The status of the web search tool call. 
+ */ + status: 'in_progress' | 'searching' | 'completed' | 'failed'; + + /** + * The type of the web search tool call. Always `web_search_call`. + */ + type: 'web_search_call'; +} + +/** + * Emitted when the response is in progress. + */ +export interface ResponseInProgressEvent { + /** + * The response that is in progress. + */ + response: Response; + + /** + * The type of the event. Always `response.in_progress`. + */ + type: 'response.in_progress'; +} + +/** + * Specify additional output data to include in the model response. Currently + * supported values are: + * + * - `file_search_call.results`: Include the search results of the file search tool + * call. + * - `message.input_image.image_url`: Include image urls from the input message. + * - `computer_call_output.output.image_url`: Include image urls from the computer + * call output. + */ +export type ResponseIncludable = + | 'file_search_call.results' + | 'message.input_image.image_url' + | 'computer_call_output.output.image_url'; + +/** + * An event that is emitted when a response finishes as incomplete. + */ +export interface ResponseIncompleteEvent { + /** + * The response that was incomplete. + */ + response: Response; + + /** + * The type of the event. Always `response.incomplete`. + */ + type: 'response.incomplete'; +} + +/** + * A list of one or many input items to the model, containing different content + * types. + */ +export type ResponseInput = Array; + +/** + * An audio input to the model. + */ +export interface ResponseInputAudio { + /** + * Base64-encoded audio data. + */ + data: string; + + /** + * The format of the audio data. Currently supported formats are `mp3` and `wav`. + */ + format: 'mp3' | 'wav'; + + /** + * The type of the input item. Always `input_audio`. + */ + type: 'input_audio'; +} + +/** + * A text input to the model. + */ +export type ResponseInputContent = ResponseInputText | ResponseInputImage | ResponseInputFile; + +/** + * A file input to the model. + */ +export interface ResponseInputFile { + /** + * The type of the input item. Always `input_file`. + */ + type: 'input_file'; + + /** + * The content of the file to be sent to the model. + */ + file_data?: string; + + /** + * The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * The name of the file to be sent to the model. + */ + filename?: string; +} + +/** + * An image input to the model. Learn about + * [image inputs](https://platform.openai.com/docs/guides/vision). + */ +export interface ResponseInputImage { + /** + * The detail level of the image to be sent to the model. One of `high`, `low`, or + * `auto`. Defaults to `auto`. + */ + detail: 'high' | 'low' | 'auto'; + + /** + * The type of the input item. Always `input_image`. + */ + type: 'input_image'; + + /** + * The ID of the file to be sent to the model. + */ + file_id?: string | null; + + /** + * The URL of the image to be sent to the model. A fully qualified URL or base64 + * encoded image in a data URL. + */ + image_url?: string | null; +} + +/** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. 
+ */ +export type ResponseInputItem = + | EasyInputMessage + | ResponseInputItem.Message + | ResponseOutputMessage + | ResponseFileSearchToolCall + | ResponseComputerToolCall + | ResponseInputItem.ComputerCallOutput + | ResponseFunctionWebSearch + | ResponseFunctionToolCall + | ResponseInputItem.FunctionCallOutput + | ResponseInputItem.Reasoning + | ResponseInputItem.ItemReference; + +export namespace ResponseInputItem { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. + */ + export interface Message { + /** + * A list of one or many input items to the model, containing different content + * types. + */ + content: ResponsesAPI.ResponseInputMessageContentList; + + /** + * The role of the message input. One of `user`, `system`, or `developer`. + */ + role: 'user' | 'system' | 'developer'; + + /** + * The status of item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; + + /** + * The type of the message input. Always set to `message`. + */ + type?: 'message'; + } + + /** + * The output of a computer tool call. + */ + export interface ComputerCallOutput { + /** + * The ID of the computer tool call that produced the output. + */ + call_id: string; + + /** + * A computer screenshot image used with the computer use tool. + */ + output: ComputerCallOutput.Output; + + /** + * The type of the computer tool call output. Always `computer_call_output`. + */ + type: 'computer_call_output'; + + /** + * The ID of the computer tool call output. + */ + id?: string; + + /** + * The safety checks reported by the API that have been acknowledged by the + * developer. + */ + acknowledged_safety_checks?: Array; + + /** + * The status of the message input. One of `in_progress`, `completed`, or + * `incomplete`. Populated when input items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; + } + + export namespace ComputerCallOutput { + /** + * A computer screenshot image used with the computer use tool. + */ + export interface Output { + /** + * Specifies the event type. For a computer screenshot, this property is always set + * to `computer_screenshot`. + */ + type: 'computer_screenshot'; + + /** + * The identifier of an uploaded file that contains the screenshot. + */ + file_id?: string; + + /** + * The URL of the screenshot image. + */ + image_url?: string; + } + + /** + * A pending safety check for the computer call. + */ + export interface AcknowledgedSafetyCheck { + /** + * The ID of the pending safety check. + */ + id: string; + + /** + * The type of the pending safety check. + */ + code: string; + + /** + * Details about the pending safety check. + */ + message: string; + } + } + + /** + * The output of a function tool call. + */ + export interface FunctionCallOutput { + /** + * The unique ID of the function tool call generated by the model. + */ + call_id: string; + + /** + * A JSON string of the output of the function tool call. + */ + output: string; + + /** + * The type of the function tool call output. Always `function_call_output`. + */ + type: 'function_call_output'; + + /** + * The unique ID of the function tool call output. Populated when this item is + * returned via API. + */ + id?: string; + + /** + * The status of the item. One of `in_progress`, `completed`, or `incomplete`. 
+     * Populated when items are returned via API.
+     */
+    status?: 'in_progress' | 'completed' | 'incomplete';
+  }
+
+  /**
+   * A description of the chain of thought used by a reasoning model while generating
+   * a response.
+   */
+  export interface Reasoning {
+    /**
+     * The unique identifier of the reasoning content.
+     */
+    id: string;
+
+    /**
+     * Reasoning text contents.
+     */
+    content: Array<Reasoning.Content>;
+
+    /**
+     * The type of the object. Always `reasoning`.
+     */
+    type: 'reasoning';
+
+    /**
+     * The status of the item. One of `in_progress`, `completed`, or `incomplete`.
+     * Populated when items are returned via API.
+     */
+    status?: 'in_progress' | 'completed' | 'incomplete';
+  }
+
+  export namespace Reasoning {
+    export interface Content {
+      /**
+       * A short summary of the reasoning used by the model when generating the response.
+       */
+      text: string;
+
+      /**
+       * The type of the object. Always `reasoning_summary`.
+       */
+      type: 'reasoning_summary';
+    }
+  }
+
+  /**
+   * An internal identifier for an item to reference.
+   */
+  export interface ItemReference {
+    /**
+     * The ID of the item to reference.
+     */
+    id: string;
+
+    /**
+     * The type of item to reference. Always `item_reference`.
+     */
+    type: 'item_reference';
+  }
+}
+
+/**
+ * A list of one or many input items to the model, containing different content
+ * types.
+ */
+export type ResponseInputMessageContentList = Array<ResponseInputContent>;
+
+/**
+ * A text input to the model.
+ */
+export interface ResponseInputText {
+  /**
+   * The text input to the model.
+   */
+  text: string;
+
+  /**
+   * The type of the input item. Always `input_text`.
+   */
+  type: 'input_text';
+}
+
+/**
+ * An audio output from the model.
+ */
+export interface ResponseOutputAudio {
+  /**
+   * Base64-encoded audio data from the model.
+   */
+  data: string;
+
+  /**
+   * The transcript of the audio data from the model.
+   */
+  transcript: string;
+
+  /**
+   * The type of the output audio. Always `output_audio`.
+   */
+  type: 'output_audio';
+}
+
+/**
+ * An output message from the model.
+ */
+export type ResponseOutputItem =
+  | ResponseOutputMessage
+  | ResponseFileSearchToolCall
+  | ResponseFunctionToolCall
+  | ResponseFunctionWebSearch
+  | ResponseComputerToolCall
+  | ResponseOutputItem.Reasoning;
+
+export namespace ResponseOutputItem {
+  /**
+   * A description of the chain of thought used by a reasoning model while generating
+   * a response.
+   */
+  export interface Reasoning {
+    /**
+     * The unique identifier of the reasoning content.
+     */
+    id: string;
+
+    /**
+     * Reasoning text contents.
+     */
+    content: Array<Reasoning.Content>;
+
+    /**
+     * The type of the object. Always `reasoning`.
+     */
+    type: 'reasoning';
+
+    /**
+     * The status of the item. One of `in_progress`, `completed`, or `incomplete`.
+     * Populated when items are returned via API.
+     */
+    status?: 'in_progress' | 'completed' | 'incomplete';
+  }
+
+  export namespace Reasoning {
+    export interface Content {
+      /**
+       * A short summary of the reasoning used by the model when generating the response.
+       */
+      text: string;
+
+      /**
+       * The type of the object. Always `reasoning_summary`.
+       */
+      type: 'reasoning_summary';
+    }
+  }
+}
+
+/**
+ * Emitted when a new output item is added.
+ */
+export interface ResponseOutputItemAddedEvent {
+  /**
+   * The output item that was added.
+   */
+  item: ResponseOutputItem;
+
+  /**
+   * The index of the output item that was added.
+   */
+  output_index: number;
+
+  /**
+   * The type of the event. Always `response.output_item.added`.
+   */
+  type: 'response.output_item.added';
+}
+
+/**
+ * Emitted when an output item is marked done.
+ */
+export interface ResponseOutputItemDoneEvent {
+  /**
+   * The output item that was marked done.
+   */
+  item: ResponseOutputItem;
+
+  /**
+   * The index of the output item that was marked done.
+   */
+  output_index: number;
+
+  /**
+   * The type of the event. Always `response.output_item.done`.
+   */
+  type: 'response.output_item.done';
+}
+
+/**
+ * An output message from the model.
+ */
+export interface ResponseOutputMessage {
+  /**
+   * The unique ID of the output message.
+   */
+  id: string;
+
+  /**
+   * The content of the output message.
+   */
+  content: Array<ResponseOutputText | ResponseOutputRefusal>;
+
+  /**
+   * The role of the output message. Always `assistant`.
+   */
+  role: 'assistant';
+
+  /**
+   * The status of the message input. One of `in_progress`, `completed`, or
+   * `incomplete`. Populated when input items are returned via API.
+   */
+  status: 'in_progress' | 'completed' | 'incomplete';
+
+  /**
+   * The type of the output message. Always `message`.
+   */
+  type: 'message';
+}
+
+/**
+ * A refusal from the model.
+ */
+export interface ResponseOutputRefusal {
+  /**
+   * The refusal explanation from the model.
+   */
+  refusal: string;
+
+  /**
+   * The type of the refusal. Always `refusal`.
+   */
+  type: 'refusal';
+}
+
+/**
+ * A text output from the model.
+ */
+export interface ResponseOutputText {
+  /**
+   * The annotations of the text output.
+   */
+  annotations: Array<
+    ResponseOutputText.FileCitation | ResponseOutputText.URLCitation | ResponseOutputText.FilePath
+  >;
+
+  /**
+   * The text output from the model.
+   */
+  text: string;
+
+  /**
+   * The type of the output text. Always `output_text`.
+   */
+  type: 'output_text';
+}
+
+export namespace ResponseOutputText {
+  /**
+   * A citation to a file.
+   */
+  export interface FileCitation {
+    /**
+     * The ID of the file.
+     */
+    file_id: string;
+
+    /**
+     * The index of the file in the list of files.
+     */
+    index: number;
+
+    /**
+     * The type of the file citation. Always `file_citation`.
+     */
+    type: 'file_citation';
+  }
+
+  /**
+   * A citation for a web resource used to generate a model response.
+   */
+  export interface URLCitation {
+    /**
+     * The index of the last character of the URL citation in the message.
+     */
+    end_index: number;
+
+    /**
+     * The index of the first character of the URL citation in the message.
+     */
+    start_index: number;
+
+    /**
+     * The title of the web resource.
+     */
+    title: string;
+
+    /**
+     * The type of the URL citation. Always `url_citation`.
+     */
+    type: 'url_citation';
+
+    /**
+     * The URL of the web resource.
+     */
+    url: string;
+  }
+
+  /**
+   * A path to a file.
+   */
+  export interface FilePath {
+    /**
+     * The ID of the file.
+     */
+    file_id: string;
+
+    /**
+     * The index of the file in the list of files.
+     */
+    index: number;
+
+    /**
+     * The type of the file path. Always `file_path`.
+     */
+    type: 'file_path';
+  }
+}
+
+/**
+ * Emitted when there is a partial refusal text.
+ */
+export interface ResponseRefusalDeltaEvent {
+  /**
+   * The index of the content part that the refusal text is added to.
+   */
+  content_index: number;
+
+  /**
+   * The refusal text that is added.
+   */
+  delta: string;
+
+  /**
+   * The ID of the output item that the refusal text is added to.
+   */
+  item_id: string;
+
+  /**
+   * The index of the output item that the refusal text is added to.
+   */
+  output_index: number;
+
+  /**
+   * The type of the event. Always `response.refusal.delta`.
+   */
+  type: 'response.refusal.delta';
+}
+
+/**
+ * Emitted when refusal text is finalized.
+ */ +export interface ResponseRefusalDoneEvent { + /** + * The index of the content part that the refusal text is finalized. + */ + content_index: number; + + /** + * The ID of the output item that the refusal text is finalized. + */ + item_id: string; + + /** + * The index of the output item that the refusal text is finalized. + */ + output_index: number; + + /** + * The refusal text that is finalized. + */ + refusal: string; + + /** + * The type of the event. Always `response.refusal.done`. + */ + type: 'response.refusal.done'; +} + +/** + * The status of the response generation. One of `completed`, `failed`, + * `in_progress`, or `incomplete`. + */ +export type ResponseStatus = 'completed' | 'failed' | 'in_progress' | 'incomplete'; + +/** + * Emitted when there is a partial audio response. + */ +export type ResponseStreamEvent = + | ResponseAudioDeltaEvent + | ResponseAudioDoneEvent + | ResponseAudioTranscriptDeltaEvent + | ResponseAudioTranscriptDoneEvent + | ResponseCodeInterpreterCallCodeDeltaEvent + | ResponseCodeInterpreterCallCodeDoneEvent + | ResponseCodeInterpreterCallCompletedEvent + | ResponseCodeInterpreterCallInProgressEvent + | ResponseCodeInterpreterCallInterpretingEvent + | ResponseCompletedEvent + | ResponseContentPartAddedEvent + | ResponseContentPartDoneEvent + | ResponseCreatedEvent + | ResponseErrorEvent + | ResponseFileSearchCallCompletedEvent + | ResponseFileSearchCallInProgressEvent + | ResponseFileSearchCallSearchingEvent + | ResponseFunctionCallArgumentsDeltaEvent + | ResponseFunctionCallArgumentsDoneEvent + | ResponseInProgressEvent + | ResponseFailedEvent + | ResponseIncompleteEvent + | ResponseOutputItemAddedEvent + | ResponseOutputItemDoneEvent + | ResponseRefusalDeltaEvent + | ResponseRefusalDoneEvent + | ResponseTextAnnotationDeltaEvent + | ResponseTextDeltaEvent + | ResponseTextDoneEvent + | ResponseWebSearchCallCompletedEvent + | ResponseWebSearchCallInProgressEvent + | ResponseWebSearchCallSearchingEvent; + +/** + * Emitted when a text annotation is added. + */ +export interface ResponseTextAnnotationDeltaEvent { + /** + * A citation to a file. + */ + annotation: + | ResponseTextAnnotationDeltaEvent.FileCitation + | ResponseTextAnnotationDeltaEvent.URLCitation + | ResponseTextAnnotationDeltaEvent.FilePath; + + /** + * The index of the annotation that was added. + */ + annotation_index: number; + + /** + * The index of the content part that the text annotation was added to. + */ + content_index: number; + + /** + * The ID of the output item that the text annotation was added to. + */ + item_id: string; + + /** + * The index of the output item that the text annotation was added to. + */ + output_index: number; + + /** + * The type of the event. Always `response.output_text.annotation.added`. + */ + type: 'response.output_text.annotation.added'; +} + +export namespace ResponseTextAnnotationDeltaEvent { + /** + * A citation to a file. + */ + export interface FileCitation { + /** + * The ID of the file. + */ + file_id: string; + + /** + * The index of the file in the list of files. + */ + index: number; + + /** + * The type of the file citation. Always `file_citation`. + */ + type: 'file_citation'; + } + + /** + * A citation for a web resource used to generate a model response. + */ + export interface URLCitation { + /** + * The index of the last character of the URL citation in the message. + */ + end_index: number; + + /** + * The index of the first character of the URL citation in the message. 
+ */ + start_index: number; + + /** + * The title of the web resource. + */ + title: string; + + /** + * The type of the URL citation. Always `url_citation`. + */ + type: 'url_citation'; + + /** + * The URL of the web resource. + */ + url: string; + } + + /** + * A path to a file. + */ + export interface FilePath { + /** + * The ID of the file. + */ + file_id: string; + + /** + * The index of the file in the list of files. + */ + index: number; + + /** + * The type of the file path. Always `file_path`. + */ + type: 'file_path'; + } +} + +/** + * Configuration options for a text response from the model. Can be plain text or + * structured JSON data. Learn more: + * + * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + * - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + */ +export interface ResponseTextConfig { + /** + * An object specifying the format that the model must output. + * + * Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + * ensures the model will match your supplied JSON schema. Learn more in the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + * + * The default format is `{ "type": "text" }` with no additional options. + * + * **Not recommended for gpt-4o and newer models:** + * + * Setting to `{ "type": "json_object" }` enables the older JSON mode, which + * ensures the message the model generates is valid JSON. Using `json_schema` is + * preferred for models that support it. + */ + format?: ResponseFormatTextConfig; +} + +/** + * Emitted when there is an additional text delta. + */ +export interface ResponseTextDeltaEvent { + /** + * The index of the content part that the text delta was added to. + */ + content_index: number; + + /** + * The text delta that was added. + */ + delta: string; + + /** + * The ID of the output item that the text delta was added to. + */ + item_id: string; + + /** + * The index of the output item that the text delta was added to. + */ + output_index: number; + + /** + * The type of the event. Always `response.output_text.delta`. + */ + type: 'response.output_text.delta'; +} + +/** + * Emitted when text content is finalized. + */ +export interface ResponseTextDoneEvent { + /** + * The index of the content part that the text content is finalized. + */ + content_index: number; + + /** + * The ID of the output item that the text content is finalized. + */ + item_id: string; + + /** + * The index of the output item that the text content is finalized. + */ + output_index: number; + + /** + * The text content that is finalized. + */ + text: string; + + /** + * The type of the event. Always `response.output_text.done`. + */ + type: 'response.output_text.done'; +} + +/** + * Represents token usage details including input tokens, output tokens, a + * breakdown of output tokens, and the total tokens used. + */ +export interface ResponseUsage { + /** + * The number of input tokens. + */ + input_tokens: number; + + /** + * The number of output tokens. + */ + output_tokens: number; + + /** + * A detailed breakdown of the output tokens. + */ + output_tokens_details: ResponseUsage.OutputTokensDetails; + + /** + * The total number of tokens used. + */ + total_tokens: number; +} + +export namespace ResponseUsage { + /** + * A detailed breakdown of the output tokens. + */ + export interface OutputTokensDetails { + /** + * The number of reasoning tokens. 
+     */
+    reasoning_tokens: number;
+  }
+}
+
+/**
+ * Emitted when a web search call is completed.
+ */
+export interface ResponseWebSearchCallCompletedEvent {
+  /**
+   * Unique ID for the output item associated with the web search call.
+   */
+  item_id: string;
+
+  /**
+   * The index of the output item that the web search call is associated with.
+   */
+  output_index: number;
+
+  /**
+   * The type of the event. Always `response.web_search_call.completed`.
+   */
+  type: 'response.web_search_call.completed';
+}
+
+/**
+ * Emitted when a web search call is initiated.
+ */
+export interface ResponseWebSearchCallInProgressEvent {
+  /**
+   * Unique ID for the output item associated with the web search call.
+   */
+  item_id: string;
+
+  /**
+   * The index of the output item that the web search call is associated with.
+   */
+  output_index: number;
+
+  /**
+   * The type of the event. Always `response.web_search_call.in_progress`.
+   */
+  type: 'response.web_search_call.in_progress';
+}
+
+/**
+ * Emitted when a web search call is executing.
+ */
+export interface ResponseWebSearchCallSearchingEvent {
+  /**
+   * Unique ID for the output item associated with the web search call.
+   */
+  item_id: string;
+
+  /**
+   * The index of the output item that the web search call is associated with.
+   */
+  output_index: number;
+
+  /**
+   * The type of the event. Always `response.web_search_call.searching`.
+   */
+  type: 'response.web_search_call.searching';
+}
+
+/**
+ * A tool that searches for relevant content from uploaded files. Learn more about
+ * the
+ * [file search tool](https://platform.openai.com/docs/guides/tools-file-search).
+ */
+export type Tool = FileSearchTool | FunctionTool | ComputerTool | WebSearchTool;
+
+/**
+ * Use this option to force the model to call a specific function.
+ */
+export interface ToolChoiceFunction {
+  /**
+   * The name of the function to call.
+   */
+  name: string;
+
+  /**
+   * For function calling, the type is always `function`.
+   */
+  type: 'function';
+}
+
+/**
+ * Controls which (if any) tool is called by the model.
+ *
+ * `none` means the model will not call any tool and instead generates a message.
+ *
+ * `auto` means the model can pick between generating a message or calling one or
+ * more tools.
+ *
+ * `required` means the model must call one or more tools.
+ */
+export type ToolChoiceOptions = 'none' | 'auto' | 'required';
+
+/**
+ * Indicates that the model should use a built-in tool to generate a response.
+ * [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools).
+ */
+export interface ToolChoiceTypes {
+  /**
+   * The type of hosted tool the model should use. Learn more about
+   * [built-in tools](https://platform.openai.com/docs/guides/tools).
+   *
+   * Allowed values are:
+   *
+   * - `file_search`
+   * - `web_search_preview`
+   * - `computer_use_preview`
+   * - `web_search_preview_2025_03_11`
+   */
+  type: 'file_search' | 'web_search_preview' | 'computer_use_preview' | 'web_search_preview_2025_03_11';
+}
+
+/**
+ * This tool searches the web for relevant results to use in a response. Learn more
+ * about the
+ * [web search tool](https://platform.openai.com/docs/guides/tools-web-search).
+ */
+export interface WebSearchTool {
+  /**
+   * The type of the web search tool. One of:
+   *
+   * - `web_search_preview`
+   * - `web_search_preview_2025_03_11`
+   */
+  type: 'web_search_preview' | 'web_search_preview_2025_03_11';
+
+  /**
+   * High level guidance for the amount of context window space to use for the
+   * search. One of `low`, `medium`, or `high`. `medium` is the default.
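+   *
+   * For example (illustrative values), a web search tool entry in `tools` might be
+   * `{ type: 'web_search_preview', search_context_size: 'low' }`.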
+   */
+  search_context_size?: 'low' | 'medium' | 'high';
+
+  user_location?: WebSearchTool.UserLocation | null;
+}
+
+export namespace WebSearchTool {
+  export interface UserLocation {
+    /**
+     * The type of location approximation. Always `approximate`.
+     */
+    type: 'approximate';
+
+    /**
+     * Free text input for the city of the user, e.g. `San Francisco`.
+     */
+    city?: string;
+
+    /**
+     * The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of
+     * the user, e.g. `US`.
+     */
+    country?: string;
+
+    /**
+     * Free text input for the region of the user, e.g. `California`.
+     */
+    region?: string;
+
+    /**
+     * The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the
+     * user, e.g. `America/Los_Angeles`.
+     */
+    timezone?: string;
+  }
+}
+
+export type ResponseCreateParams = ResponseCreateParamsNonStreaming | ResponseCreateParamsStreaming;
+
+export interface ResponseCreateParamsBase {
+  /**
+   * Text, image, or file inputs to the model, used to generate a response.
+   *
+   * Learn more:
+   *
+   * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+   * - [Image inputs](https://platform.openai.com/docs/guides/images)
+   * - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+   * - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+   * - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+   */
+  input: string | ResponseInput;
+
+  /**
+   * Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+   * wide range of models with different capabilities, performance characteristics,
+   * and price points. Refer to the
+   * [model guide](https://platform.openai.com/docs/models) to browse and compare
+   * available models.
+   */
+  model: (string & {}) | Shared.ChatModel;
+
+  /**
+   * Specify additional output data to include in the model response. Currently
+   * supported values are:
+   *
+   * - `file_search_call.results`: Include the search results of the file search tool
+   *   call.
+   * - `message.input_image.image_url`: Include image urls from the input message.
+   * - `computer_call_output.output.image_url`: Include image urls from the computer
+   *   call output.
+   */
+  include?: Array<ResponseIncludable> | null;
+
+  /**
+   * Inserts a system (or developer) message as the first item in the model's
+   * context.
+   *
+   * When used along with `previous_response_id`, the instructions from a previous
+   * response will not be carried over to the next response. This makes it simple
+   * to swap out system (or developer) messages in new responses.
+   */
+  instructions?: string | null;
+
+  /**
+   * An upper bound for the number of tokens that can be generated for a response,
+   * including visible output tokens and
+   * [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+   */
+  max_output_tokens?: number | null;
+
+  /**
+   * Set of 16 key-value pairs that can be attached to an object. This can be useful
+   * for storing additional information about the object in a structured format, and
+   * querying for objects via API or the dashboard.
+   *
+   * Keys are strings with a maximum length of 64 characters. Values are strings with
+   * a maximum length of 512 characters.
+   */
+  metadata?: Shared.Metadata | null;
+
+  /**
+   * Whether to allow the model to run tool calls in parallel.
+   */
+  parallel_tool_calls?: boolean | null;
+
+  /**
+   * The unique ID of the previous response to the model. Use this to create
+   * multi-turn conversations.
Learn more about + * [conversation state](https://platform.openai.com/docs/guides/conversation-state). + */ + previous_response_id?: string | null; + + /** + * **o-series models only** + * + * Configuration options for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). + */ + reasoning?: Shared.Reasoning | null; + + /** + * Whether to store the generated model response for later retrieval via API. + */ + store?: boolean | null; + + /** + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) + * for more information. + */ + stream?: boolean | null; + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + * make the output more random, while lower values like 0.2 will make it more + * focused and deterministic. We generally recommend altering this or `top_p` but + * not both. + */ + temperature?: number | null; + + /** + * Configuration options for a text response from the model. Can be plain text or + * structured JSON data. Learn more: + * + * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + * - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + */ + text?: ResponseTextConfig; + + /** + * How the model should select which tool (or tools) to use when generating a + * response. See the `tools` parameter to see how to specify which tools the model + * can call. + */ + tool_choice?: ToolChoiceOptions | ToolChoiceTypes | ToolChoiceFunction; + + /** + * An array of tools the model may call while generating a response. You can + * specify which tool to use by setting the `tool_choice` parameter. + * + * The two categories of tools you can provide the model are: + * + * - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + * capabilities, like + * [web search](https://platform.openai.com/docs/guides/tools-web-search) or + * [file search](https://platform.openai.com/docs/guides/tools-file-search). + * Learn more about + * [built-in tools](https://platform.openai.com/docs/guides/tools). + * - **Function calls (custom tools)**: Functions that are defined by you, enabling + * the model to call your own code. Learn more about + * [function calling](https://platform.openai.com/docs/guides/function-calling). + */ + tools?: Array; + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the + * model considers the results of the tokens with top_p probability mass. So 0.1 + * means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + top_p?: number | null; + + /** + * The truncation strategy to use for the model response. + * + * - `auto`: If the context of this response and previous ones exceeds the model's + * context window size, the model will truncate the response to fit the context + * window by dropping input items in the middle of the conversation. + * - `disabled` (default): If a model response will exceed the context window size + * for a model, the request will fail with a 400 error. 
+ */ + truncation?: 'auto' | 'disabled' | null; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor + * and detect abuse. + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + */ + user?: string; +} + +export namespace ResponseCreateParams { + export type ResponseCreateParamsNonStreaming = ResponsesAPI.ResponseCreateParamsNonStreaming; + export type ResponseCreateParamsStreaming = ResponsesAPI.ResponseCreateParamsStreaming; +} + +export interface ResponseCreateParamsNonStreaming extends ResponseCreateParamsBase { + /** + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) + * for more information. + */ + stream?: false | null; +} + +export interface ResponseCreateParamsStreaming extends ResponseCreateParamsBase { + /** + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) + * for more information. + */ + stream: true; +} + +export interface ResponseRetrieveParams { + /** + * Additional fields to include in the response. See the `include` parameter for + * Response creation above for more information. + */ + include?: Array; +} + +Responses.InputItems = InputItems; + +export declare namespace Responses { + export { + InputItems as InputItems, + type ResponseItemList as ResponseItemList, + type ResponseItemListDataPage as ResponseItemListDataPage, + type InputItemListParams as InputItemListParams, + }; +} diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 18e2ecddc..86b2d2dee 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -9,6 +9,9 @@ export type ChatModel = | 'o1-preview-2024-09-12' | 'o1-mini' | 'o1-mini-2024-09-12' + | 'computer-use-preview' + | 'computer-use-preview-2025-02-04' + | 'computer-use-preview-2025-03-11' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-4o' @@ -43,6 +46,51 @@ export type ChatModel = | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-16k-0613'; +/** + * A filter used to compare a specified attribute key to a given value using a + * defined comparison operation. + */ +export interface ComparisonFilter { + /** + * The key to compare against the value. + */ + key: string; + + /** + * Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. + * + * - `eq`: equals + * - `ne`: not equal + * - `gt`: greater than + * - `gte`: greater than or equal + * - `lt`: less than + * - `lte`: less than or equal + */ + type: 'eq' | 'ne' | 'gt' | 'gte' | 'lt' | 'lte'; + + /** + * The value to compare against the attribute key; supports string, number, or + * boolean types. + */ + value: string | number | boolean; +} + +/** + * Combine multiple filters using `and` or `or`. + */ +export interface CompoundFilter { + /** + * Array of filters to combine. Items can be `ComparisonFilter` or + * `CompoundFilter`. + */ + filters: Array; + + /** + * Type of operation: `and` or `or`. 
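+   *
+   * For example (illustrative values), `{ type: 'and', filters: [{ key: 'author',
+   * type: 'eq', value: 'Jane' }, { key: 'year', type: 'gte', value: 2020 }] }`
+   * matches only objects that satisfy both comparisons.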
+ */ + type: 'and' | 'or'; +} + export interface ErrorObject { code: string | null; @@ -108,23 +156,76 @@ export type FunctionParameters = Record; */ export type Metadata = Record; +/** + * **o-series models only** + * + * Configuration options for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). + */ +export interface Reasoning { + /** + * **o-series models only** + * + * Constrains effort on reasoning for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + * result in faster responses and fewer tokens used on reasoning in a response. + */ + effort: ReasoningEffort | null; + + /** + * **o-series models only** + * + * A summary of the reasoning performed by the model. This can be useful for + * debugging and understanding the model's reasoning process. One of `concise` or + * `detailed`. + */ + generate_summary?: 'concise' | 'detailed' | null; +} + +/** + * **o-series models only** + * + * Constrains effort on reasoning for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + * result in faster responses and fewer tokens used on reasoning in a response. + */ +export type ReasoningEffort = 'low' | 'medium' | 'high' | null; + +/** + * JSON object response format. An older method of generating JSON responses. Using + * `json_schema` is recommended for models that support it. Note that the model + * will not generate JSON without a system or user message instructing it to do so. + */ export interface ResponseFormatJSONObject { /** - * The type of response format being defined: `json_object` + * The type of response format being defined. Always `json_object`. */ type: 'json_object'; } +/** + * JSON Schema response format. Used to generate structured JSON responses. Learn + * more about + * [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). + */ export interface ResponseFormatJSONSchema { + /** + * Structured Outputs configuration options, including a JSON Schema. + */ json_schema: ResponseFormatJSONSchema.JSONSchema; /** - * The type of response format being defined: `json_schema` + * The type of response format being defined. Always `json_schema`. */ type: 'json_schema'; } export namespace ResponseFormatJSONSchema { + /** + * Structured Outputs configuration options, including a JSON Schema. + */ export interface JSONSchema { /** * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores @@ -139,7 +240,8 @@ export namespace ResponseFormatJSONSchema { description?: string; /** - * The schema for the response format, described as a JSON Schema object. + * The schema for the response format, described as a JSON Schema object. Learn how + * to build JSON schemas [here](https://json-schema.org/). */ schema?: Record; @@ -154,9 +256,12 @@ export namespace ResponseFormatJSONSchema { } } +/** + * Default response format. Used to generate text responses. + */ export interface ResponseFormatText { /** - * The type of response format being defined: `text` + * The type of response format being defined. Always `text`. 
*/ type: 'text'; } diff --git a/src/resources/uploads/uploads.ts b/src/resources/uploads/uploads.ts index a2f1b5250..96ed91f6a 100644 --- a/src/resources/uploads/uploads.ts +++ b/src/resources/uploads/uploads.ts @@ -24,10 +24,9 @@ export class Uploads extends APIResource { * contains all the parts you uploaded. This File is usable in the rest of our * platform as a regular File object. * - * For certain `purpose`s, the correct `mime_type` must be specified. Please refer - * to documentation for the supported MIME types for your use case: - * - * - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search#supported-files) + * For certain `purpose` values, the correct `mime_type` must be specified. Please + * refer to documentation for the + * [supported MIME types for your use case](https://platform.openai.com/docs/assistants/tools/file-search#supported-files). * * For guidance on the proper filename extensions for each purpose, please follow * the documentation on diff --git a/src/resources/beta/vector-stores/file-batches.ts b/src/resources/vector-stores/file-batches.ts similarity index 92% rename from src/resources/beta/vector-stores/file-batches.ts rename to src/resources/vector-stores/file-batches.ts index 4cb62feb8..81b285b63 100644 --- a/src/resources/beta/vector-stores/file-batches.ts +++ b/src/resources/vector-stores/file-batches.ts @@ -1,17 +1,14 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../../resource'; +import { APIResource } from '../../resource'; import * as FilesAPI from './files'; import { VectorStoreFilesPage } from './files'; import * as VectorStoresAPI from './vector-stores'; -import { APIPromise } from '../../../api-promise'; -import { CursorPage, type CursorPageParams, PagePromise } from '../../../pagination'; -import { buildHeaders } from '../../../internal/headers'; -import { RequestOptions } from '../../../internal/request-options'; -import { sleep } from '../../../internal/utils/sleep'; -import { type Uploadable } from '../../../uploads'; -import { allSettledWithThrow } from '../../../lib/Util'; -import { path } from '../../../internal/utils/path'; +import { APIPromise } from '../../api-promise'; +import { CursorPage, type CursorPageParams, PagePromise } from '../../pagination'; +import { buildHeaders } from '../../internal/headers'; +import { RequestOptions } from '../../internal/request-options'; +import { path } from '../../internal/utils/path'; export class FileBatches extends APIResource { /** @@ -262,6 +259,15 @@ export interface FileBatchCreateParams { */ file_ids: Array; + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. Keys are strings with a maximum + * length of 64 characters. Values are strings with a maximum length of 512 + * characters, booleans, or numbers. + */ + attributes?: Record | null; + /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` * strategy. Only applicable if `file_ids` is non-empty. 
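A minimal sketch of how the new `attributes` field on file batches can be used, assuming an already-instantiated `client`; the IDs and attribute values below are illustrative:

```ts
// Attach files to a vector store and tag them with filterable attributes
// (illustrative IDs/values; assumes `client` is an instantiated OpenAI client).
const batch = await client.vectorStores.fileBatches.create('vs_abc123', {
  file_ids: ['file-abc123'],
  attributes: { author: 'Jane Doe', year: 2024 },
  chunking_strategy: { type: 'auto' },
});
```

The same attribute keys can later be targeted by the `filters` parameter of the vector store search endpoint introduced further down in this patch.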
diff --git a/src/resources/beta/vector-stores/files.ts b/src/resources/vector-stores/files.ts similarity index 64% rename from src/resources/beta/vector-stores/files.ts rename to src/resources/vector-stores/files.ts index 2c499930e..5aeef9653 100644 --- a/src/resources/beta/vector-stores/files.ts +++ b/src/resources/vector-stores/files.ts @@ -1,14 +1,12 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../../resource'; +import { APIResource } from '../../resource'; import * as VectorStoresAPI from './vector-stores'; -import { APIPromise } from '../../../api-promise'; -import { CursorPage, type CursorPageParams, PagePromise } from '../../../pagination'; -import { buildHeaders } from '../../../internal/headers'; -import { RequestOptions } from '../../../internal/request-options'; -import { sleep } from '../../../internal/utils'; -import { Uploadable } from '../../../uploads'; -import { path } from '../../../internal/utils/path'; +import { APIPromise } from '../../api-promise'; +import { CursorPage, type CursorPageParams, Page, PagePromise } from '../../pagination'; +import { buildHeaders } from '../../internal/headers'; +import { RequestOptions } from '../../internal/request-options'; +import { path } from '../../internal/utils/path'; export class Files extends APIResource { /** @@ -43,6 +41,18 @@ export class Files extends APIResource { }); } + /** + * Update attributes on a vector store file. + */ + update(fileID: string, params: FileUpdateParams, options?: RequestOptions): APIPromise { + const { vector_store_id, ...body } = params; + return this._client.post(path`/vector_stores/${vector_store_id}/files/${fileID}`, { + body, + ...options, + headers: buildHeaders([{ 'OpenAI-Beta': 'assistants=v2' }, options?.headers]), + }); + } + /** * Returns a list of vector store files. */ @@ -77,97 +87,27 @@ export class Files extends APIResource { } /** - * Attach a file to the given vector store and wait for it to be processed. - */ - async createAndPoll( - vectorStoreId: string, - body: FileCreateParams, - options?: RequestOptions & { pollIntervalMs?: number }, - ): Promise { - const file = await this.create(vectorStoreId, body, options); - return await this.poll(vectorStoreId, file.id, options); - } - - /** - * Wait for the vector store file to finish processing. - * - * Note: this will return even if the file failed to process, you need to check - * file.last_error and file.status to handle these cases + * Retrieve the parsed contents of a vector store file. */ - async poll( - vectorStoreID: string, + content( fileID: string, - options?: RequestOptions & { pollIntervalMs?: number }, - ): Promise { - const headers = buildHeaders([ - options?.headers, - { - 'X-Stainless-Poll-Helper': 'true', - 'X-Stainless-Custom-Poll-Interval': options?.pollIntervalMs?.toString() ?? 
undefined, - }, - ]); - - while (true) { - const fileResponse = await this.retrieve( - fileID, - { - vector_store_id: vectorStoreID, - }, - { ...options, headers }, - ).withResponse(); - - const file = fileResponse.data; - - switch (file.status) { - case 'in_progress': - let sleepInterval = 5000; - - if (options?.pollIntervalMs) { - sleepInterval = options.pollIntervalMs; - } else { - const headerInterval = fileResponse.response.headers.get('openai-poll-after-ms'); - if (headerInterval) { - const headerIntervalMs = parseInt(headerInterval); - if (!isNaN(headerIntervalMs)) { - sleepInterval = headerIntervalMs; - } - } - } - await sleep(sleepInterval); - break; - case 'failed': - case 'completed': - return file; - } - } - } - - /** - * Upload a file to the `files` API and then attach it to the given vector store. - * - * Note the file will be asynchronously processed (you can use the alternative - * polling helper method to wait for processing to complete). - */ - async upload(vectorStoreId: string, file: Uploadable, options?: RequestOptions): Promise { - const fileInfo = await this._client.files.create({ file: file, purpose: 'assistants' }, options); - return this.create(vectorStoreId, { file_id: fileInfo.id }, options); - } - - /** - * Add a file to a vector store and poll until processing is complete. - */ - async uploadAndPoll( - vectorStoreId: string, - file: Uploadable, - options?: RequestOptions & { pollIntervalMs?: number }, - ): Promise { - const fileInfo = await this.upload(vectorStoreId, file, options); - return await this.poll(vectorStoreId, fileInfo.id, options); + params: FileContentParams, + options?: RequestOptions, + ): PagePromise { + const { vector_store_id } = params; + return this._client.getAPIList( + path`/vector_stores/${vector_store_id}/files/${fileID}/content`, + Page, + { ...options, headers: buildHeaders([{ 'OpenAI-Beta': 'assistants=v2' }, options?.headers]) }, + ); } } export type VectorStoreFilesPage = CursorPage; +// Note: no pagination actually occurs yet, this is for forwards-compatibility. +export type FileContentResponsesPage = Page; + /** * A list of files attached to a vector store. */ @@ -214,6 +154,15 @@ export interface VectorStoreFile { */ vector_store_id: string; + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. Keys are strings with a maximum + * length of 64 characters. Values are strings with a maximum length of 512 + * characters, booleans, or numbers. + */ + attributes?: Record | null; + /** * The strategy used to chunk the file. */ @@ -246,6 +195,18 @@ export interface VectorStoreFileDeleted { object: 'vector_store.file.deleted'; } +export interface FileContentResponse { + /** + * The text content + */ + text?: string; + + /** + * The content type (currently only `"text"`) + */ + type?: string; +} + export interface FileCreateParams { /** * A [File](https://platform.openai.com/docs/api-reference/files) ID that the @@ -254,6 +215,15 @@ export interface FileCreateParams { */ file_id: string; + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. Keys are strings with a maximum + * length of 64 characters. Values are strings with a maximum length of 512 + * characters, booleans, or numbers. 
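+   *
+   * For example (illustrative values): `{ author: 'Jane Doe', draft: false, pages: 12 }`.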
+ */ + attributes?: Record | null; + /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` * strategy. Only applicable if `file_ids` is non-empty. @@ -268,6 +238,22 @@ export interface FileRetrieveParams { vector_store_id: string; } +export interface FileUpdateParams { + /** + * Path param: The ID of the vector store the file belongs to. + */ + vector_store_id: string; + + /** + * Body param: Set of 16 key-value pairs that can be attached to an object. This + * can be useful for storing additional information about the object in a + * structured format, and querying for objects via API or the dashboard. Keys are + * strings with a maximum length of 64 characters. Values are strings with a + * maximum length of 512 characters, booleans, or numbers. + */ + attributes: Record | null; +} + export interface FileListParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place @@ -296,14 +282,25 @@ export interface FileDeleteParams { vector_store_id: string; } +export interface FileContentParams { + /** + * The ID of the vector store. + */ + vector_store_id: string; +} + export declare namespace Files { export { type VectorStoreFile as VectorStoreFile, type VectorStoreFileDeleted as VectorStoreFileDeleted, + type FileContentResponse as FileContentResponse, type VectorStoreFilesPage as VectorStoreFilesPage, + type FileContentResponsesPage as FileContentResponsesPage, type FileCreateParams as FileCreateParams, type FileRetrieveParams as FileRetrieveParams, + type FileUpdateParams as FileUpdateParams, type FileListParams as FileListParams, type FileDeleteParams as FileDeleteParams, + type FileContentParams as FileContentParams, }; } diff --git a/src/resources/beta/vector-stores/index.ts b/src/resources/vector-stores/index.ts similarity index 82% rename from src/resources/beta/vector-stores/index.ts rename to src/resources/vector-stores/index.ts index d3353db63..cbcb36221 100644 --- a/src/resources/beta/vector-stores/index.ts +++ b/src/resources/vector-stores/index.ts @@ -12,11 +12,15 @@ export { Files, type VectorStoreFile, type VectorStoreFileDeleted, + type FileContentResponse, type FileCreateParams, type FileRetrieveParams, + type FileUpdateParams, type FileListParams, type FileDeleteParams, + type FileContentParams, type VectorStoreFilesPage, + type FileContentResponsesPage, } from './files'; export { VectorStores, @@ -29,8 +33,11 @@ export { type StaticFileChunkingStrategyObjectParam, type VectorStore, type VectorStoreDeleted, + type VectorStoreSearchResponse, type VectorStoreCreateParams, type VectorStoreUpdateParams, type VectorStoreListParams, + type VectorStoreSearchParams, type VectorStoresPage, + type VectorStoreSearchResponsesPage, } from './vector-stores'; diff --git a/src/resources/beta/vector-stores/vector-stores.ts b/src/resources/vector-stores/vector-stores.ts similarity index 76% rename from src/resources/beta/vector-stores/vector-stores.ts rename to src/resources/vector-stores/vector-stores.ts index 94f32905e..ef942cded 100644 --- a/src/resources/beta/vector-stores/vector-stores.ts +++ b/src/resources/vector-stores/vector-stores.ts @@ -1,7 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import { APIResource } from '../../../resource'; -import * as Shared from '../../shared'; +import { APIResource } from '../../resource'; +import * as Shared from '../shared'; import * as FileBatchesAPI from './file-batches'; import { FileBatchCancelParams, @@ -13,20 +13,24 @@ import { } from './file-batches'; import * as FilesAPI from './files'; import { + FileContentParams, + FileContentResponse, + FileContentResponsesPage, FileCreateParams, FileDeleteParams, FileListParams, FileRetrieveParams, + FileUpdateParams, Files, VectorStoreFile, VectorStoreFileDeleted, VectorStoreFilesPage, } from './files'; -import { APIPromise } from '../../../api-promise'; -import { CursorPage, type CursorPageParams, PagePromise } from '../../../pagination'; -import { buildHeaders } from '../../../internal/headers'; -import { RequestOptions } from '../../../internal/request-options'; -import { path } from '../../../internal/utils/path'; +import { APIPromise } from '../../api-promise'; +import { CursorPage, type CursorPageParams, Page, PagePromise } from '../../pagination'; +import { buildHeaders } from '../../internal/headers'; +import { RequestOptions } from '../../internal/request-options'; +import { path } from '../../internal/utils/path'; export class VectorStores extends APIResource { files: FilesAPI.Files = new FilesAPI.Files(this._client); @@ -91,10 +95,34 @@ export class VectorStores extends APIResource { headers: buildHeaders([{ 'OpenAI-Beta': 'assistants=v2' }, options?.headers]), }); } + + /** + * Search a vector store for relevant chunks based on a query and file attributes + * filter. + */ + search( + vectorStoreID: string, + body: VectorStoreSearchParams, + options?: RequestOptions, + ): PagePromise { + return this._client.getAPIList( + path`/vector_stores/${vectorStoreID}/search`, + Page, + { + body, + method: 'post', + ...options, + headers: buildHeaders([{ 'OpenAI-Beta': 'assistants=v2' }, options?.headers]), + }, + ); + } } export type VectorStoresPage = CursorPage; +// Note: no pagination actually occurs yet, this is for forwards-compatibility. +export type VectorStoreSearchResponsesPage = Page; + /** * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of * `800` and `chunk_overlap_tokens` of `400`. @@ -153,6 +181,9 @@ export interface StaticFileChunkingStrategyObject { type: 'static'; } +/** + * Customize your own chunking strategy by setting chunk size and chunk overlap. + */ export interface StaticFileChunkingStrategyObjectParam { static: StaticFileChunkingStrategy; @@ -280,6 +311,51 @@ export interface VectorStoreDeleted { object: 'vector_store.deleted'; } +export interface VectorStoreSearchResponse { + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. Keys are strings with a maximum + * length of 64 characters. Values are strings with a maximum length of 512 + * characters, booleans, or numbers. + */ + attributes: Record | null; + + /** + * Content chunks from the file. + */ + content: Array; + + /** + * The ID of the vector store file. + */ + file_id: string; + + /** + * The name of the vector store file. + */ + filename: string; + + /** + * The similarity score for the result. + */ + score: number; +} + +export namespace VectorStoreSearchResponse { + export interface Content { + /** + * The text content returned from search. + */ + text: string; + + /** + * The type of content. 
+ */ + type: 'text'; + } +} + export interface VectorStoreCreateParams { /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` @@ -389,6 +465,45 @@ export interface VectorStoreListParams extends CursorPageParams { order?: 'asc' | 'desc'; } +export interface VectorStoreSearchParams { + /** + * A query string for a search + */ + query: string | Array; + + /** + * A filter to apply based on file attributes. + */ + filters?: Shared.ComparisonFilter | Shared.CompoundFilter; + + /** + * The maximum number of results to return. This number should be between 1 and 50 + * inclusive. + */ + max_num_results?: number; + + /** + * Ranking options for search. + */ + ranking_options?: VectorStoreSearchParams.RankingOptions; + + /** + * Whether to rewrite the natural language query for vector search. + */ + rewrite_query?: boolean; +} + +export namespace VectorStoreSearchParams { + /** + * Ranking options for search. + */ + export interface RankingOptions { + ranker?: 'auto' | 'default-2024-11-15'; + + score_threshold?: number; + } +} + VectorStores.Files = Files; VectorStores.FileBatches = FileBatches; @@ -403,21 +518,28 @@ export declare namespace VectorStores { type StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, type VectorStore as VectorStore, type VectorStoreDeleted as VectorStoreDeleted, + type VectorStoreSearchResponse as VectorStoreSearchResponse, type VectorStoresPage as VectorStoresPage, + type VectorStoreSearchResponsesPage as VectorStoreSearchResponsesPage, type VectorStoreCreateParams as VectorStoreCreateParams, type VectorStoreUpdateParams as VectorStoreUpdateParams, type VectorStoreListParams as VectorStoreListParams, + type VectorStoreSearchParams as VectorStoreSearchParams, }; export { Files as Files, type VectorStoreFile as VectorStoreFile, type VectorStoreFileDeleted as VectorStoreFileDeleted, + type FileContentResponse as FileContentResponse, type VectorStoreFilesPage as VectorStoreFilesPage, + type FileContentResponsesPage as FileContentResponsesPage, type FileCreateParams as FileCreateParams, type FileRetrieveParams as FileRetrieveParams, + type FileUpdateParams as FileUpdateParams, type FileListParams as FileListParams, type FileDeleteParams as FileDeleteParams, + type FileContentParams as FileContentParams, }; export { diff --git a/tests/api-resources/chat/completions/completions.test.ts b/tests/api-resources/chat/completions/completions.test.ts index 5de69484a..f95953719 100644 --- a/tests/api-resources/chat/completions/completions.test.ts +++ b/tests/api-resources/chat/completions/completions.test.ts @@ -42,9 +42,9 @@ describe('resource completions', () => { presence_penalty: -2, reasoning_effort: 'low', response_format: { type: 'text' }, - seed: 0, + seed: -9007199254740991, service_tier: 'auto', - stop: 'string', + stop: '\n', store: true, stream: false, stream_options: { include_usage: true }, @@ -59,6 +59,13 @@ describe('resource completions', () => { top_logprobs: 0, top_p: 1, user: 'user-1234', + web_search_options: { + search_context_size: 'low', + user_location: { + approximate: { city: 'city', country: 'country', region: 'region', timezone: 'timezone' }, + type: 'approximate', + }, + }, }); }); diff --git a/tests/api-resources/responses/input-items.test.ts b/tests/api-resources/responses/input-items.test.ts new file mode 100644 index 000000000..abc8185f6 --- /dev/null +++ b/tests/api-resources/responses/input-items.test.ts @@ -0,0 +1,32 @@ +// File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', +}); + +describe('resource inputItems', () => { + test('list', async () => { + const responsePromise = client.responses.inputItems.list('response_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.responses.inputItems.list( + 'response_id', + { after: 'after', before: 'before', limit: 0, order: 'asc' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); +}); diff --git a/tests/api-resources/responses/responses.test.ts b/tests/api-resources/responses/responses.test.ts new file mode 100644 index 000000000..e025facc4 --- /dev/null +++ b/tests/api-resources/responses/responses.test.ts @@ -0,0 +1,85 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', +}); + +describe('resource responses', () => { + test('create: only required params', async () => { + const responsePromise = client.responses.create({ input: 'string', model: 'gpt-4o' }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.responses.create({ + input: 'string', + model: 'gpt-4o', + include: ['file_search_call.results'], + instructions: 'instructions', + max_output_tokens: 0, + metadata: { foo: 'string' }, + parallel_tool_calls: true, + previous_response_id: 'previous_response_id', + reasoning: { effort: 'low', generate_summary: 'concise' }, + store: true, + stream: false, + temperature: 1, + text: { format: { type: 'text' } }, + tool_choice: 'none', + tools: [ + { + type: 'file_search', + vector_store_ids: ['string'], + filters: { key: 'key', type: 'eq', value: 'string' }, + max_num_results: 0, + ranking_options: { ranker: 'auto', score_threshold: 0 }, + }, + ], + top_p: 1, + truncation: 'auto', + user: 'user-1234', + }); + }); + + test('retrieve', async () => { + const responsePromise = client.responses.retrieve('resp_677efb5139a88190b512bc3fef8e535d'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options and params are 
passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.responses.retrieve( + 'resp_677efb5139a88190b512bc3fef8e535d', + { include: ['file_search_call.results'] }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('delete', async () => { + const responsePromise = client.responses.delete('resp_677efb5139a88190b512bc3fef8e535d'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); +}); diff --git a/tests/api-resources/beta/vector-stores/file-batches.test.ts b/tests/api-resources/vector-stores/file-batches.test.ts similarity index 79% rename from tests/api-resources/beta/vector-stores/file-batches.test.ts rename to tests/api-resources/vector-stores/file-batches.test.ts index 783801214..e765a2ace 100644 --- a/tests/api-resources/beta/vector-stores/file-batches.test.ts +++ b/tests/api-resources/vector-stores/file-batches.test.ts @@ -9,9 +9,7 @@ const client = new OpenAI({ describe('resource fileBatches', () => { test('create: only required params', async () => { - const responsePromise = client.beta.vectorStores.fileBatches.create('vs_abc123', { - file_ids: ['string'], - }); + const responsePromise = client.vectorStores.fileBatches.create('vs_abc123', { file_ids: ['string'] }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -22,14 +20,15 @@ describe('resource fileBatches', () => { }); test('create: required and optional params', async () => { - const response = await client.beta.vectorStores.fileBatches.create('vs_abc123', { + const response = await client.vectorStores.fileBatches.create('vs_abc123', { file_ids: ['string'], + attributes: { foo: 'string' }, chunking_strategy: { type: 'auto' }, }); }); test('retrieve: only required params', async () => { - const responsePromise = client.beta.vectorStores.fileBatches.retrieve('vsfb_abc123', { + const responsePromise = client.vectorStores.fileBatches.retrieve('vsfb_abc123', { vector_store_id: 'vs_abc123', }); const rawResponse = await responsePromise.asResponse(); @@ -42,13 +41,13 @@ describe('resource fileBatches', () => { }); test('retrieve: required and optional params', async () => { - const response = await client.beta.vectorStores.fileBatches.retrieve('vsfb_abc123', { + const response = await client.vectorStores.fileBatches.retrieve('vsfb_abc123', { vector_store_id: 'vs_abc123', }); }); test('cancel: only required params', async () => { - const responsePromise = client.beta.vectorStores.fileBatches.cancel('batch_id', { + const responsePromise = client.vectorStores.fileBatches.cancel('batch_id', { vector_store_id: 'vector_store_id', }); const rawResponse = await responsePromise.asResponse(); @@ -61,13 +60,13 @@ describe('resource fileBatches', () => { }); test('cancel: required and optional params', async () => { - const response = await client.beta.vectorStores.fileBatches.cancel('batch_id', { + const response = await client.vectorStores.fileBatches.cancel('batch_id', { vector_store_id: 'vector_store_id', }); }); test('listFiles: only required params', async () => 
{ - const responsePromise = client.beta.vectorStores.fileBatches.listFiles('batch_id', { + const responsePromise = client.vectorStores.fileBatches.listFiles('batch_id', { vector_store_id: 'vector_store_id', }); const rawResponse = await responsePromise.asResponse(); @@ -80,7 +79,7 @@ describe('resource fileBatches', () => { }); test('listFiles: required and optional params', async () => { - const response = await client.beta.vectorStores.fileBatches.listFiles('batch_id', { + const response = await client.vectorStores.fileBatches.listFiles('batch_id', { vector_store_id: 'vector_store_id', after: 'after', before: 'before', diff --git a/tests/api-resources/beta/vector-stores/files.test.ts b/tests/api-resources/vector-stores/files.test.ts similarity index 57% rename from tests/api-resources/beta/vector-stores/files.test.ts rename to tests/api-resources/vector-stores/files.test.ts index c32cb6408..9e9afc95d 100644 --- a/tests/api-resources/beta/vector-stores/files.test.ts +++ b/tests/api-resources/vector-stores/files.test.ts @@ -9,7 +9,7 @@ const client = new OpenAI({ describe('resource files', () => { test('create: only required params', async () => { - const responsePromise = client.beta.vectorStores.files.create('vs_abc123', { file_id: 'file_id' }); + const responsePromise = client.vectorStores.files.create('vs_abc123', { file_id: 'file_id' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -20,14 +20,15 @@ describe('resource files', () => { }); test('create: required and optional params', async () => { - const response = await client.beta.vectorStores.files.create('vs_abc123', { + const response = await client.vectorStores.files.create('vs_abc123', { file_id: 'file_id', + attributes: { foo: 'string' }, chunking_strategy: { type: 'auto' }, }); }); test('retrieve: only required params', async () => { - const responsePromise = client.beta.vectorStores.files.retrieve('file-abc123', { + const responsePromise = client.vectorStores.files.retrieve('file-abc123', { vector_store_id: 'vs_abc123', }); const rawResponse = await responsePromise.asResponse(); @@ -40,13 +41,34 @@ describe('resource files', () => { }); test('retrieve: required and optional params', async () => { - const response = await client.beta.vectorStores.files.retrieve('file-abc123', { + const response = await client.vectorStores.files.retrieve('file-abc123', { vector_store_id: 'vs_abc123', }); }); + test('update: only required params', async () => { + const responsePromise = client.vectorStores.files.update('file-abc123', { + vector_store_id: 'vs_abc123', + attributes: { foo: 'string' }, + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('update: required and optional params', async () => { + const response = await client.vectorStores.files.update('file-abc123', { + vector_store_id: 'vs_abc123', + attributes: { foo: 'string' }, + }); + }); + test('list', async () => { - const responsePromise = client.beta.vectorStores.files.list('vector_store_id'); + const responsePromise = client.vectorStores.files.list('vector_store_id'); const rawResponse = await responsePromise.asResponse(); 
expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -59,7 +81,7 @@ describe('resource files', () => { test('list: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.files.list( + client.vectorStores.files.list( 'vector_store_id', { after: 'after', before: 'before', filter: 'in_progress', limit: 0, order: 'asc' }, { path: '/_stainless_unknown_path' }, @@ -68,7 +90,7 @@ describe('resource files', () => { }); test('delete: only required params', async () => { - const responsePromise = client.beta.vectorStores.files.delete('file_id', { + const responsePromise = client.vectorStores.files.delete('file_id', { vector_store_id: 'vector_store_id', }); const rawResponse = await responsePromise.asResponse(); @@ -81,8 +103,25 @@ describe('resource files', () => { }); test('delete: required and optional params', async () => { - const response = await client.beta.vectorStores.files.delete('file_id', { + const response = await client.vectorStores.files.delete('file_id', { vector_store_id: 'vector_store_id', }); }); + + test('content: only required params', async () => { + const responsePromise = client.vectorStores.files.content('file-abc123', { + vector_store_id: 'vs_abc123', + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('content: required and optional params', async () => { + const response = await client.vectorStores.files.content('file-abc123', { vector_store_id: 'vs_abc123' }); + }); }); diff --git a/tests/api-resources/beta/vector-stores/vector-stores.test.ts b/tests/api-resources/vector-stores/vector-stores.test.ts similarity index 68% rename from tests/api-resources/beta/vector-stores/vector-stores.test.ts rename to tests/api-resources/vector-stores/vector-stores.test.ts index ecb8d33a1..830397279 100644 --- a/tests/api-resources/beta/vector-stores/vector-stores.test.ts +++ b/tests/api-resources/vector-stores/vector-stores.test.ts @@ -9,7 +9,7 @@ const client = new OpenAI({ describe('resource vectorStores', () => { test('create', async () => { - const responsePromise = client.beta.vectorStores.create({}); + const responsePromise = client.vectorStores.create({}); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -20,7 +20,7 @@ describe('resource vectorStores', () => { }); test('retrieve', async () => { - const responsePromise = client.beta.vectorStores.retrieve('vector_store_id'); + const responsePromise = client.vectorStores.retrieve('vector_store_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -31,7 +31,7 @@ describe('resource vectorStores', () => { }); test('update', async () => { - const responsePromise = client.beta.vectorStores.update('vector_store_id', {}); + const responsePromise = client.vectorStores.update('vector_store_id', {}); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await 
responsePromise; @@ -42,7 +42,7 @@ describe('resource vectorStores', () => { }); test('list', async () => { - const responsePromise = client.beta.vectorStores.list(); + const responsePromise = client.vectorStores.list(); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -55,7 +55,7 @@ describe('resource vectorStores', () => { test('list: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.list( + client.vectorStores.list( { after: 'after', before: 'before', limit: 0, order: 'asc' }, { path: '/_stainless_unknown_path' }, ), @@ -63,7 +63,7 @@ describe('resource vectorStores', () => { }); test('delete', async () => { - const responsePromise = client.beta.vectorStores.delete('vector_store_id'); + const responsePromise = client.vectorStores.delete('vector_store_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -72,4 +72,25 @@ describe('resource vectorStores', () => { expect(dataAndResponse.data).toBe(response); expect(dataAndResponse.response).toBe(rawResponse); }); + + test('search: only required params', async () => { + const responsePromise = client.vectorStores.search('vs_abc123', { query: 'string' }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('search: required and optional params', async () => { + const response = await client.vectorStores.search('vs_abc123', { + query: 'string', + filters: { key: 'key', type: 'eq', value: 'string' }, + max_num_results: 1, + ranking_options: { ranker: 'auto', score_threshold: 0 }, + rewrite_query: true, + }); + }); }); From 77cb401b5bb396bfe17b55fe6a19d97a0d6f10fc Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Tue, 11 Mar 2025 19:52:39 +0000 Subject: [PATCH 08/73] fix(responses): correct computer use enum value --- .stats.yml | 2 +- src/resources/responses/responses.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.stats.yml b/.stats.yml index 455874212..9c4a2e536 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 81 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-be834d63e326a82494e819085137f5eb15866f3fc787db1f3afe7168d419e18a.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-9ce5257763fb30c6e0e1ee2bef7e13baf661511e09572207e528d643da8e16b3.yml diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 1186cab6b..ac0af3afb 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -90,7 +90,7 @@ export interface ComputerTool { /** * The type of the computer use tool. Always `computer_use_preview`. 
*/ - type: 'computer-preview'; + type: 'computer_use_preview'; } /** From bf7d7df694d54131404fb87d85bdb54449d4a645 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Tue, 11 Mar 2025 21:44:06 +0000 Subject: [PATCH 09/73] fix(responses): correct reasoning output type --- .stats.yml | 2 +- api.md | 1 + src/resources/responses/responses.ts | 129 +++++++++------------------ 3 files changed, 45 insertions(+), 87 deletions(-) diff --git a/.stats.yml b/.stats.yml index 9c4a2e536..edc2aaf89 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 81 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-9ce5257763fb30c6e0e1ee2bef7e13baf661511e09572207e528d643da8e16b3.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-c8579861bc21d4d2155a5b9e8e7d54faee8083730673c4d32cbbe573d7fb4116.yml diff --git a/api.md b/api.md index 786bd3841..49ac7f5b5 100644 --- a/api.md +++ b/api.md @@ -583,6 +583,7 @@ Types: - ResponseOutputMessage - ResponseOutputRefusal - ResponseOutputText +- ResponseReasoningItem - ResponseRefusalDeltaEvent - ResponseRefusalDoneEvent - ResponseStatus diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index ac0af3afb..d89f7580d 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -1490,7 +1490,7 @@ export type ResponseInputItem = | ResponseFunctionWebSearch | ResponseFunctionToolCall | ResponseInputItem.FunctionCallOutput - | ResponseInputItem.Reasoning + | ResponseReasoningItem | ResponseInputItem.ItemReference; export namespace ResponseInputItem { @@ -1635,47 +1635,6 @@ export namespace ResponseInputItem { status?: 'in_progress' | 'completed' | 'incomplete'; } - /** - * A description of the chain of thought used by a reasoning model while generating - * a response. - */ - export interface Reasoning { - /** - * The unique identifier of the reasoning content. - */ - id: string; - - /** - * Reasoning text contents. - */ - content: Array; - - /** - * The type of the object. Always `reasoning`. - */ - type: 'reasoning'; - - /** - * The status of the item. One of `in_progress`, `completed`, or `incomplete`. - * Populated when items are returned via API. - */ - status?: 'in_progress' | 'completed' | 'incomplete'; - } - - export namespace Reasoning { - export interface Content { - /** - * A short summary of the reasoning used by the model when generating the response. - */ - text: string; - - /** - * The type of the object. Always `text`. - */ - type: 'reasoning_summary'; - } - } - /** * An internal identifier for an item to reference. */ @@ -1742,50 +1701,7 @@ export type ResponseOutputItem = | ResponseFunctionToolCall | ResponseFunctionWebSearch | ResponseComputerToolCall - | ResponseOutputItem.Reasoning; - -export namespace ResponseOutputItem { - /** - * A description of the chain of thought used by a reasoning model while generating - * a response. - */ - export interface Reasoning { - /** - * The unique identifier of the reasoning content. - */ - id: string; - - /** - * Reasoning text contents. - */ - content: Array; - - /** - * The type of the object. Always `reasoning`. - */ - type: 'reasoning'; - - /** - * The status of the item. One of `in_progress`, `completed`, or `incomplete`. - * Populated when items are returned via API. 
- */ - status?: 'in_progress' | 'completed' | 'incomplete'; - } - - export namespace Reasoning { - export interface Content { - /** - * A short summary of the reasoning used by the model when generating the response. - */ - text: string; - - /** - * The type of the object. Always `text`. - */ - type: 'reasoning_summary'; - } - } -} + | ResponseReasoningItem; /** * Emitted when a new output item is added. @@ -1967,6 +1883,47 @@ export namespace ResponseOutputText { } } +/** + * A description of the chain of thought used by a reasoning model while generating + * a response. + */ +export interface ResponseReasoningItem { + /** + * The unique identifier of the reasoning content. + */ + id: string; + + /** + * Reasoning text contents. + */ + summary: Array; + + /** + * The type of the object. Always `reasoning`. + */ + type: 'reasoning'; + + /** + * The status of the item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; +} + +export namespace ResponseReasoningItem { + export interface Summary { + /** + * A short summary of the reasoning used by the model when generating the response. + */ + text: string; + + /** + * The type of the object. Always `summary_text`. + */ + type: 'summary_text'; + } +} + /** * Emitted when there is a partial refusal text. */ From 15d2fefe77cd34fb1033c34b0393d2134f78f172 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Thu, 13 Mar 2025 16:48:14 +0000 Subject: [PATCH 10/73] chore(internal): remove extra empty newlines From fc45cdb4ed34c1cd8abd7a2a25fe7c8d5dfde0c4 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Thu, 13 Mar 2025 23:06:15 +0000 Subject: [PATCH 11/73] fix(exports): ensure resource imports don't require /index --- src/resources/audio.ts | 3 +++ src/resources/beta.ts | 3 +++ src/resources/beta/realtime.ts | 3 +++ src/resources/beta/threads.ts | 3 +++ src/resources/beta/threads/runs.ts | 3 +++ src/resources/chat.ts | 3 +++ src/resources/chat/completions.ts | 4 +++- src/resources/fine-tuning.ts | 3 +++ src/resources/fine-tuning/jobs.ts | 3 +++ src/resources/responses.ts | 3 +++ src/resources/uploads.ts | 3 +++ src/resources/vector-stores.ts | 3 +++ 12 files changed, 36 insertions(+), 1 deletion(-) create mode 100644 src/resources/audio.ts create mode 100644 src/resources/beta.ts create mode 100644 src/resources/beta/realtime.ts create mode 100644 src/resources/beta/threads.ts create mode 100644 src/resources/beta/threads/runs.ts create mode 100644 src/resources/chat.ts create mode 100644 src/resources/fine-tuning.ts create mode 100644 src/resources/fine-tuning/jobs.ts create mode 100644 src/resources/responses.ts create mode 100644 src/resources/uploads.ts create mode 100644 src/resources/vector-stores.ts diff --git a/src/resources/audio.ts b/src/resources/audio.ts new file mode 100644 index 000000000..bc19b759c --- /dev/null +++ b/src/resources/audio.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './audio/index'; diff --git a/src/resources/beta.ts b/src/resources/beta.ts new file mode 100644 index 000000000..1542e942b --- /dev/null +++ b/src/resources/beta.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +export * from './beta/index'; diff --git a/src/resources/beta/realtime.ts b/src/resources/beta/realtime.ts new file mode 100644 index 000000000..1c5df27d9 --- /dev/null +++ b/src/resources/beta/realtime.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './realtime/index'; diff --git a/src/resources/beta/threads.ts b/src/resources/beta/threads.ts new file mode 100644 index 000000000..705f67016 --- /dev/null +++ b/src/resources/beta/threads.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './threads/index'; diff --git a/src/resources/beta/threads/runs.ts b/src/resources/beta/threads/runs.ts new file mode 100644 index 000000000..a3cc2bc7f --- /dev/null +++ b/src/resources/beta/threads/runs.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './runs/index'; diff --git a/src/resources/chat.ts b/src/resources/chat.ts new file mode 100644 index 000000000..b3dd87a90 --- /dev/null +++ b/src/resources/chat.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './chat/index'; diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 55b151e8b..fe7033a94 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -1 +1,3 @@ -export * from './completions/completions'; +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './completions/index'; diff --git a/src/resources/fine-tuning.ts b/src/resources/fine-tuning.ts new file mode 100644 index 000000000..01fd61342 --- /dev/null +++ b/src/resources/fine-tuning.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './fine-tuning/index'; diff --git a/src/resources/fine-tuning/jobs.ts b/src/resources/fine-tuning/jobs.ts new file mode 100644 index 000000000..6640de1f2 --- /dev/null +++ b/src/resources/fine-tuning/jobs.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './jobs/index'; diff --git a/src/resources/responses.ts b/src/resources/responses.ts new file mode 100644 index 000000000..9d26aac0c --- /dev/null +++ b/src/resources/responses.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './responses/index'; diff --git a/src/resources/uploads.ts b/src/resources/uploads.ts new file mode 100644 index 000000000..a6ab87fbe --- /dev/null +++ b/src/resources/uploads.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './uploads/index'; diff --git a/src/resources/vector-stores.ts b/src/resources/vector-stores.ts new file mode 100644 index 000000000..e7a343120 --- /dev/null +++ b/src/resources/vector-stores.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +export * from './vector-stores/index'; From 07b8b23a8ca40183f4fc6789fbf866b510cd9203 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 14 Mar 2025 15:34:52 -0400 Subject: [PATCH 12/73] chore(internal): remove CI condition --- .github/workflows/ci.yml | 7 ++----- .stats.yml | 2 +- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 449c4778f..6d253070c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,8 +13,7 @@ jobs: lint: name: lint runs-on: ubuntu-latest - if: github.repository == 'openai/openai-node' - + steps: - uses: actions/checkout@v4 @@ -32,8 +31,7 @@ jobs: build: name: build runs-on: ubuntu-latest - if: github.repository == 'openai/openai-node' - + steps: - uses: actions/checkout@v4 @@ -50,7 +48,6 @@ jobs: test: name: test runs-on: ubuntu-latest - if: github.repository == 'openai/openai-node' steps: - uses: actions/checkout@v4 diff --git a/.stats.yml b/.stats.yml index edc2aaf89..53c73037d 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 81 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-c8579861bc21d4d2155a5b9e8e7d54faee8083730673c4d32cbbe573d7fb4116.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-c8579861bc21d4d2155a5b9e8e7d54faee8083730673c4d32cbbe573d7fb4116.yml From 20e871cac2d9f9b781abce2a465797e363418694 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 14 Mar 2025 19:39:35 +0000 Subject: [PATCH 13/73] fix(internal): add mts file + crypto shim types --- scripts/build | 11 +++-------- src/internal/shims/crypto.node.d.mts | 1 + src/internal/shims/file.node.d.mts | 1 + 3 files changed, 5 insertions(+), 8 deletions(-) create mode 100644 src/internal/shims/crypto.node.d.mts create mode 100644 src/internal/shims/file.node.d.mts diff --git a/scripts/build b/scripts/build index dd2c9dd57..e9ebac741 100755 --- a/scripts/build +++ b/scripts/build @@ -28,20 +28,15 @@ node scripts/utils/make-dist-package-json.cjs > dist/package.json # build to .js/.mjs/.d.ts files npm exec tsc-multi -# we need to add exports = module.exports = OpenAI to index.js; -# No way to get that from index.ts because it would cause compile errors +# we need to patch index.js so that `new module.exports()` works for cjs backwards +# compat. No way to get that from index.ts because it would cause compile errors # when building .mjs node scripts/utils/fix-index-exports.cjs -# with "moduleResolution": "nodenext", if ESM resolves to index.d.ts, -# it'll have TS errors on the default import. 
But if it resolves to -# index.d.mts the default import will work (even though both files have -# the same export default statement) -cp dist/index.d.ts dist/index.d.mts cp tsconfig.dist-src.json dist/src/tsconfig.json cp src/internal/shim-types.d.ts dist/internal/shim-types.d.ts cp src/internal/shim-types.d.ts dist/internal/shim-types.d.mts mkdir -p dist/internal/shims -cp src/internal/shims/*.{mjs,js,d.ts} dist/internal/shims +cp src/internal/shims/*.{mjs,js,d.ts,d.mts} dist/internal/shims node scripts/utils/postprocess-files.cjs diff --git a/src/internal/shims/crypto.node.d.mts b/src/internal/shims/crypto.node.d.mts new file mode 100644 index 000000000..5cc196301 --- /dev/null +++ b/src/internal/shims/crypto.node.d.mts @@ -0,0 +1 @@ +export { crypto } from './crypto.node.js'; diff --git a/src/internal/shims/file.node.d.mts b/src/internal/shims/file.node.d.mts new file mode 100644 index 000000000..38cc9ff7a --- /dev/null +++ b/src/internal/shims/file.node.d.mts @@ -0,0 +1 @@ +export { File } from './file.node.js'; From 16b9e0642b19c1df8c4b134ab6edb594ac78a9f1 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 18 Mar 2025 14:33:35 +0000 Subject: [PATCH 14/73] chore(internal): minor client file refactoring --- src/client.ts | 50 +++--------------------------------- src/internal/types.ts | 2 ++ src/internal/utils/log.ts | 30 +++++++++++++++++++++- src/internal/utils/values.ts | 8 ++++++ 4 files changed, 43 insertions(+), 47 deletions(-) diff --git a/src/client.ts b/src/client.ts index d7d77fc06..563638ef9 100644 --- a/src/client.ts +++ b/src/client.ts @@ -1,10 +1,12 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import type { RequestInit, RequestInfo, BodyInit } from './internal/builtin-types'; -import type { HTTPMethod, PromiseOrValue, MergedRequestInit } from './internal/types'; +import type { HTTPMethod, PromiseOrValue, MergedRequestInit, FinalizedRequestInit } from './internal/types'; import { uuid4 } from './internal/utils/uuid'; -import { validatePositiveInteger, isAbsoluteURL, hasOwn } from './internal/utils/values'; +import { validatePositiveInteger, isAbsoluteURL, safeJSON } from './internal/utils/values'; import { sleep } from './internal/utils/sleep'; +import { type Logger, type LogLevel, parseLogLevel } from './internal/utils/log'; +export type { Logger, LogLevel } from './internal/utils/log'; import { castToError, isAbortError } from './internal/errors'; import type { APIResponseProps } from './internal/parse'; import { getPlatformHeaders } from './internal/detect-platform'; @@ -148,48 +150,6 @@ import { ChatCompletionsPage, } from './resources/chat/completions/completions'; -const safeJSON = (text: string) => { - try { - return JSON.parse(text); - } catch (err) { - return undefined; - } -}; - -type LogFn = (message: string, ...rest: unknown[]) => void; -export type Logger = { - error: LogFn; - warn: LogFn; - info: LogFn; - debug: LogFn; -}; -export type LogLevel = 'off' | 'error' | 'warn' | 'info' | 'debug'; -const parseLogLevel = ( - maybeLevel: string | undefined, - sourceName: string, - client: OpenAI, -): LogLevel | undefined => { - if (!maybeLevel) { - return undefined; - } - const levels: Record = { - off: true, - error: true, - warn: true, - info: true, - debug: true, - }; - if (hasOwn(levels, maybeLevel)) { - return maybeLevel; - } - loggerFor(client).warn( - `${sourceName} was set to ${JSON.stringify(maybeLevel)}, expected one of ${JSON.stringify( - 
Object.keys(levels), - )}`, - ); - return undefined; -}; - export interface ClientOptions { /** * Defaults to process.env['OPENAI_API_KEY']. @@ -279,8 +239,6 @@ export interface ClientOptions { logger?: Logger | undefined; } -type FinalizedRequestInit = RequestInit & { headers: Headers }; - /** * API Client for interfacing with the OpenAI API. */ diff --git a/src/internal/types.ts b/src/internal/types.ts index c3bce5a21..d7928cd35 100644 --- a/src/internal/types.ts +++ b/src/internal/types.ts @@ -5,6 +5,8 @@ export type HTTPMethod = 'get' | 'post' | 'put' | 'patch' | 'delete'; export type KeysEnum = { [P in keyof Required]: true }; +export type FinalizedRequestInit = RequestInit & { headers: Headers }; + type NotAny = [unknown] extends [T] ? never : T; /** diff --git a/src/internal/utils/log.ts b/src/internal/utils/log.ts index f75d46bba..1aec41276 100644 --- a/src/internal/utils/log.ts +++ b/src/internal/utils/log.ts @@ -1,9 +1,18 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import type { LogLevel, Logger } from '../../client'; +import { hasOwn } from './values'; import { type OpenAI } from '../../client'; import { RequestOptions } from '../request-options'; +type LogFn = (message: string, ...rest: unknown[]) => void; +export type Logger = { + error: LogFn; + warn: LogFn; + info: LogFn; + debug: LogFn; +}; +export type LogLevel = 'off' | 'error' | 'warn' | 'info' | 'debug'; + const levelNumbers = { off: 0, error: 200, @@ -12,6 +21,25 @@ const levelNumbers = { debug: 500, }; +export const parseLogLevel = ( + maybeLevel: string | undefined, + sourceName: string, + client: OpenAI, +): LogLevel | undefined => { + if (!maybeLevel) { + return undefined; + } + if (hasOwn(levelNumbers, maybeLevel)) { + return maybeLevel; + } + loggerFor(client).warn( + `${sourceName} was set to ${JSON.stringify(maybeLevel)}, expected one of ${JSON.stringify( + Object.keys(levelNumbers), + )}`, + ); + return undefined; +}; + function noop() {} function makeLogFn(fnLevel: keyof Logger, logger: Logger | undefined, logLevel: LogLevel) { diff --git a/src/internal/utils/values.ts b/src/internal/utils/values.ts index bb66cfdc1..08255c4b1 100644 --- a/src/internal/utils/values.ts +++ b/src/internal/utils/values.ts @@ -92,3 +92,11 @@ export const maybeCoerceBoolean = (value: unknown): boolean | undefined => { } return coerceBoolean(value); }; + +export const safeJSON = (text: string) => { + try { + return JSON.parse(text); + } catch (err) { + return undefined; + } +}; From 1c7eaaf3c75e799b22779fb377f208cfcc2d8324 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 18 Mar 2025 14:56:01 +0000 Subject: [PATCH 15/73] chore: add missing type alias exports --- src/client.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/client.ts b/src/client.ts index 563638ef9..485fa6821 100644 --- a/src/client.ts +++ b/src/client.ts @@ -137,6 +137,7 @@ import { ChatCompletionModality, ChatCompletionNamedToolChoice, ChatCompletionPredictionContent, + ChatCompletionReasoningEffort, ChatCompletionRole, ChatCompletionStoreMessage, ChatCompletionStreamOptions, @@ -920,6 +921,7 @@ export declare namespace OpenAI { type ChatCompletionToolChoiceOption as ChatCompletionToolChoiceOption, type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam, type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam, + type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, type ChatCompletionsPage as 
ChatCompletionsPage, type ChatCompletionCreateParams as ChatCompletionCreateParams, type ChatCompletionCreateParamsNonStreaming as ChatCompletionCreateParamsNonStreaming, From 04721e5739176a131242838afe8deb9fb6e1394d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 18 Mar 2025 17:53:48 +0000 Subject: [PATCH 16/73] fix(api): correct some Responses types --- .stats.yml | 2 +- src/resources/batches.ts | 8 +++--- src/resources/chat/completions/completions.ts | 18 ++++++++----- src/resources/responses/responses.ts | 26 +++++++++++++++---- src/resources/shared.ts | 4 +-- tests/api-resources/batches.test.ts | 4 +-- 6 files changed, 42 insertions(+), 20 deletions(-) diff --git a/.stats.yml b/.stats.yml index 53c73037d..1e04d7c26 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 81 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-c8579861bc21d4d2155a5b9e8e7d54faee8083730673c4d32cbbe573d7fb4116.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f763c1a35c8b9b02f1e31b9b2e09e21f98bfe8413e5079c86cbb07da2dd7779b.yml diff --git a/src/resources/batches.ts b/src/resources/batches.ts index db32d782b..36477851a 100644 --- a/src/resources/batches.ts +++ b/src/resources/batches.ts @@ -216,11 +216,11 @@ export interface BatchCreateParams { /** * The endpoint to be used for all requests in the batch. Currently - * `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. - * Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 - * embedding inputs across all requests in the batch. + * `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` + * are supported. Note that `/v1/embeddings` batches are also restricted to a + * maximum of 50,000 embedding inputs across all requests in the batch. */ - endpoint: '/v1/chat/completions' | '/v1/embeddings' | '/v1/completions'; + endpoint: '/v1/responses' | '/v1/chat/completions' | '/v1/embeddings' | '/v1/completions'; /** * The ID of an uploaded file that contains requests for the new batch. diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts index 8204e2a8b..d6ece1236 100644 --- a/src/resources/chat/completions/completions.ts +++ b/src/resources/chat/completions/completions.ts @@ -366,10 +366,13 @@ export interface ChatCompletionChunk { /** * An optional field that will only be present when you set * `stream_options: {"include_usage": true}` in your request. When present, it - * contains a null value except for the last chunk which contains the token usage - * statistics for the entire request. + * contains a null value **except for the last chunk** which contains the token + * usage statistics for the entire request. + * + * **NOTE:** If the stream is interrupted or cancelled, you may not receive the + * final usage chunk which contains the total token usage for the request. */ - usage?: CompletionsAPI.CompletionUsage | null; + usage?: CompletionsAPI.CompletionUsage; } export namespace ChatCompletionChunk { @@ -540,7 +543,7 @@ export namespace ChatCompletionContentPart { /** * The name of the file, used when passing the file to the model as a string. */ - file_name?: string; + filename?: string; } } } @@ -919,8 +922,11 @@ export interface ChatCompletionStreamOptions { /** * If set, an additional chunk will be streamed before the `data: [DONE]` message. 
* The `usage` field on this chunk shows the token usage statistics for the entire - * request, and the `choices` field will always be an empty array. All other chunks - * will also include a `usage` field, but with a null value. + * request, and the `choices` field will always be an empty array. + * + * All other chunks will also include a `usage` field, but with a null value. + * **NOTE:** If the stream is interrupted, you may not receive the final usage + * chunk which contains the total token usage for the request. */ include_usage?: boolean; } diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index d89f7580d..6b3200e6a 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -1290,11 +1290,6 @@ export interface ResponseFunctionCallArgumentsDoneEvent { * for more information. */ export interface ResponseFunctionToolCall { - /** - * The unique ID of the function tool call. - */ - id: string; - /** * A JSON string of the arguments to pass to the function. */ @@ -1315,6 +1310,11 @@ export interface ResponseFunctionToolCall { */ type: 'function_call'; + /** + * The unique ID of the function tool call. + */ + id?: string; + /** * The status of the item. One of `in_progress`, `completed`, or `incomplete`. * Populated when items are returned via API. @@ -2233,6 +2233,11 @@ export interface ResponseUsage { */ input_tokens: number; + /** + * A detailed breakdown of the input tokens. + */ + input_tokens_details: ResponseUsage.InputTokensDetails; + /** * The number of output tokens. */ @@ -2250,6 +2255,17 @@ export interface ResponseUsage { } export namespace ResponseUsage { + /** + * A detailed breakdown of the input tokens. + */ + export interface InputTokensDetails { + /** + * The number of tokens that were retrieved from the cache. + * [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching). + */ + cached_tokens: number; + } + /** * A detailed breakdown of the output tokens. */ diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 86b2d2dee..5fbdbba6a 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -171,10 +171,10 @@ export interface Reasoning { * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can * result in faster responses and fewer tokens used on reasoning in a response. */ - effort: ReasoningEffort | null; + effort?: ReasoningEffort | null; /** - * **o-series models only** + * **computer_use_preview only** * * A summary of the reasoning performed by the model. This can be useful for * debugging and understanding the model's reasoning process. 
One of `concise` or diff --git a/tests/api-resources/batches.test.ts b/tests/api-resources/batches.test.ts index 870fbec9d..c895f0809 100644 --- a/tests/api-resources/batches.test.ts +++ b/tests/api-resources/batches.test.ts @@ -11,7 +11,7 @@ describe('resource batches', () => { test('create: only required params', async () => { const responsePromise = client.batches.create({ completion_window: '24h', - endpoint: '/v1/chat/completions', + endpoint: '/v1/responses', input_file_id: 'input_file_id', }); const rawResponse = await responsePromise.asResponse(); @@ -26,7 +26,7 @@ describe('resource batches', () => { test('create: required and optional params', async () => { const response = await client.batches.create({ completion_window: '24h', - endpoint: '/v1/chat/completions', + endpoint: '/v1/responses', input_file_id: 'input_file_id', metadata: { foo: 'string' }, }); From 4167993110e5ecef1151948f667a0b9241621c2a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 18 Mar 2025 20:45:42 +0000 Subject: [PATCH 17/73] fix(types): improve responses type names --- .stats.yml | 2 +- api.md | 8 +- src/resources/responses/index.ts | 7 +- src/resources/responses/input-items.ts | 190 +------------------------ src/resources/responses/responses.ts | 188 ++++++++++++++++++++---- 5 files changed, 180 insertions(+), 215 deletions(-) diff --git a/.stats.yml b/.stats.yml index 1e04d7c26..b03256223 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 81 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f763c1a35c8b9b02f1e31b9b2e09e21f98bfe8413e5079c86cbb07da2dd7779b.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f3bce04386c4fcfd5037e0477fbaa39010003fd1558eb5185fe4a71dd6a05fdd.yml diff --git a/api.md b/api.md index 49ac7f5b5..702deeb98 100644 --- a/api.md +++ b/api.md @@ -548,6 +548,8 @@ Types: - ResponseCodeInterpreterToolCall - ResponseCompletedEvent - ResponseComputerToolCall +- ResponseComputerToolCallOutputItem +- ResponseComputerToolCallOutputScreenshot - ResponseContent - ResponseContentPartAddedEvent - ResponseContentPartDoneEvent @@ -564,6 +566,8 @@ Types: - ResponseFunctionCallArgumentsDeltaEvent - ResponseFunctionCallArgumentsDoneEvent - ResponseFunctionToolCall +- ResponseFunctionToolCallItem +- ResponseFunctionToolCallOutputItem - ResponseFunctionWebSearch - ResponseInProgressEvent - ResponseIncludable @@ -575,7 +579,9 @@ Types: - ResponseInputImage - ResponseInputItem - ResponseInputMessageContentList +- ResponseInputMessageItem - ResponseInputText +- ResponseItem - ResponseOutputAudio - ResponseOutputItem - ResponseOutputItemAddedEvent @@ -616,4 +622,4 @@ Types: Methods: -- client.responses.inputItems.list(responseID, { ...params }) -> ResponseItemListDataPage +- client.responses.inputItems.list(responseID, { ...params }) -> ResponseItemsPage diff --git a/src/resources/responses/index.ts b/src/resources/responses/index.ts index 164665eb2..ad3f9a386 100644 --- a/src/resources/responses/index.ts +++ b/src/resources/responses/index.ts @@ -1,9 +1,4 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-export { - InputItems, - type ResponseItemList, - type InputItemListParams, - type ResponseItemListDataPage, -} from './input-items'; +export { InputItems, type ResponseItemList, type InputItemListParams } from './input-items'; export { Responses } from './responses'; diff --git a/src/resources/responses/input-items.ts b/src/resources/responses/input-items.ts index b38d1be95..6bbef6366 100644 --- a/src/resources/responses/input-items.ts +++ b/src/resources/responses/input-items.ts @@ -2,6 +2,7 @@ import { APIResource } from '../../resource'; import * as ResponsesAPI from './responses'; +import { ResponseItemsPage } from './responses'; import { CursorPage, type CursorPageParams, PagePromise } from '../../pagination'; import { RequestOptions } from '../../internal/request-options'; import { path } from '../../internal/utils/path'; @@ -14,45 +15,15 @@ export class InputItems extends APIResource { responseID: string, query: InputItemListParams | null | undefined = {}, options?: RequestOptions, - ): PagePromise< - ResponseItemListDataPage, - | ResponseItemList.Message - | ResponsesAPI.ResponseOutputMessage - | ResponsesAPI.ResponseFileSearchToolCall - | ResponsesAPI.ResponseComputerToolCall - | ResponseItemList.ComputerCallOutput - | ResponsesAPI.ResponseFunctionWebSearch - | ResponsesAPI.ResponseFunctionToolCall - | ResponseItemList.FunctionCallOutput - > { + ): PagePromise { return this._client.getAPIList( path`/responses/${responseID}/input_items`, - CursorPage< - | ResponseItemList.Message - | ResponsesAPI.ResponseOutputMessage - | ResponsesAPI.ResponseFileSearchToolCall - | ResponsesAPI.ResponseComputerToolCall - | ResponseItemList.ComputerCallOutput - | ResponsesAPI.ResponseFunctionWebSearch - | ResponsesAPI.ResponseFunctionToolCall - | ResponseItemList.FunctionCallOutput - >, + CursorPage, { query, ...options }, ); } } -export type ResponseItemListDataPage = CursorPage< - | ResponseItemList.Message - | ResponsesAPI.ResponseOutputMessage - | ResponsesAPI.ResponseFileSearchToolCall - | ResponsesAPI.ResponseComputerToolCall - | ResponseItemList.ComputerCallOutput - | ResponsesAPI.ResponseFunctionWebSearch - | ResponsesAPI.ResponseFunctionToolCall - | ResponseItemList.FunctionCallOutput ->; - /** * A list of Response items. */ @@ -60,16 +31,7 @@ export interface ResponseItemList { /** * A list of items used to generate this response. */ - data: Array< - | ResponseItemList.Message - | ResponsesAPI.ResponseOutputMessage - | ResponsesAPI.ResponseFileSearchToolCall - | ResponsesAPI.ResponseComputerToolCall - | ResponseItemList.ComputerCallOutput - | ResponsesAPI.ResponseFunctionWebSearch - | ResponsesAPI.ResponseFunctionToolCall - | ResponseItemList.FunctionCallOutput - >; + data: Array; /** * The ID of the first item in the list. @@ -92,142 +54,6 @@ export interface ResponseItemList { object: 'list'; } -export namespace ResponseItemList { - export interface Message { - /** - * The unique ID of the message input. - */ - id: string; - - /** - * A list of one or many input items to the model, containing different content - * types. - */ - content: ResponsesAPI.ResponseInputMessageContentList; - - /** - * The role of the message input. One of `user`, `system`, or `developer`. - */ - role: 'user' | 'system' | 'developer'; - - /** - * The status of item. One of `in_progress`, `completed`, or `incomplete`. - * Populated when items are returned via API. - */ - status?: 'in_progress' | 'completed' | 'incomplete'; - - /** - * The type of the message input. Always set to `message`. 
- */ - type?: 'message'; - } - - export interface ComputerCallOutput { - /** - * The unique ID of the computer call tool output. - */ - id: string; - - /** - * The ID of the computer tool call that produced the output. - */ - call_id: string; - - /** - * A computer screenshot image used with the computer use tool. - */ - output: ComputerCallOutput.Output; - - /** - * The type of the computer tool call output. Always `computer_call_output`. - */ - type: 'computer_call_output'; - - /** - * The safety checks reported by the API that have been acknowledged by the - * developer. - */ - acknowledged_safety_checks?: Array; - - /** - * The status of the message input. One of `in_progress`, `completed`, or - * `incomplete`. Populated when input items are returned via API. - */ - status?: 'in_progress' | 'completed' | 'incomplete'; - } - - export namespace ComputerCallOutput { - /** - * A computer screenshot image used with the computer use tool. - */ - export interface Output { - /** - * Specifies the event type. For a computer screenshot, this property is always set - * to `computer_screenshot`. - */ - type: 'computer_screenshot'; - - /** - * The identifier of an uploaded file that contains the screenshot. - */ - file_id?: string; - - /** - * The URL of the screenshot image. - */ - image_url?: string; - } - - /** - * A pending safety check for the computer call. - */ - export interface AcknowledgedSafetyCheck { - /** - * The ID of the pending safety check. - */ - id: string; - - /** - * The type of the pending safety check. - */ - code: string; - - /** - * Details about the pending safety check. - */ - message: string; - } - } - - export interface FunctionCallOutput { - /** - * The unique ID of the function call tool output. - */ - id: string; - - /** - * The unique ID of the function tool call generated by the model. - */ - call_id: string; - - /** - * A JSON string of the output of the function tool call. - */ - output: string; - - /** - * The type of the function tool call output. Always `function_call_output`. - */ - type: 'function_call_output'; - - /** - * The status of the item. One of `in_progress`, `completed`, or `incomplete`. - * Populated when items are returned via API. - */ - status?: 'in_progress' | 'completed' | 'incomplete'; - } -} - export interface InputItemListParams extends CursorPageParams { /** * An item ID to list items before, used in pagination. 
@@ -244,9 +70,7 @@ export interface InputItemListParams extends CursorPageParams { } export declare namespace InputItems { - export { - type ResponseItemList as ResponseItemList, - type ResponseItemListDataPage as ResponseItemListDataPage, - type InputItemListParams as InputItemListParams, - }; + export { type ResponseItemList as ResponseItemList, type InputItemListParams as InputItemListParams }; } + +export { type ResponseItemsPage }; diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 6b3200e6a..968d208d3 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -4,8 +4,9 @@ import { APIResource } from '../../resource'; import * as ResponsesAPI from './responses'; import * as Shared from '../shared'; import * as InputItemsAPI from './input-items'; -import { InputItemListParams, InputItems, ResponseItemList, ResponseItemListDataPage } from './input-items'; +import { InputItemListParams, InputItems, ResponseItemList } from './input-items'; import { APIPromise } from '../../api-promise'; +import { CursorPage } from '../../pagination'; import { Stream } from '../../streaming'; import { buildHeaders } from '../../internal/headers'; import { RequestOptions } from '../../internal/request-options'; @@ -67,6 +68,8 @@ export class Responses extends APIResource { } } +export type ResponseItemsPage = CursorPage; + /** * A tool that controls a virtual computer. Learn more about the * [computer tool](https://platform.openai.com/docs/guides/tools-computer-use). @@ -894,6 +897,83 @@ export namespace ResponseComputerToolCall { } } +export interface ResponseComputerToolCallOutputItem { + /** + * The unique ID of the computer call tool output. + */ + id: string; + + /** + * The ID of the computer tool call that produced the output. + */ + call_id: string; + + /** + * A computer screenshot image used with the computer use tool. + */ + output: ResponseComputerToolCallOutputScreenshot; + + /** + * The type of the computer tool call output. Always `computer_call_output`. + */ + type: 'computer_call_output'; + + /** + * The safety checks reported by the API that have been acknowledged by the + * developer. + */ + acknowledged_safety_checks?: Array; + + /** + * The status of the message input. One of `in_progress`, `completed`, or + * `incomplete`. Populated when input items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; +} + +export namespace ResponseComputerToolCallOutputItem { + /** + * A pending safety check for the computer call. + */ + export interface AcknowledgedSafetyCheck { + /** + * The ID of the pending safety check. + */ + id: string; + + /** + * The type of the pending safety check. + */ + code: string; + + /** + * Details about the pending safety check. + */ + message: string; + } +} + +/** + * A computer screenshot image used with the computer use tool. + */ +export interface ResponseComputerToolCallOutputScreenshot { + /** + * Specifies the event type. For a computer screenshot, this property is always set + * to `computer_screenshot`. + */ + type: 'computer_screenshot'; + + /** + * The identifier of an uploaded file that contains the screenshot. + */ + file_id?: string; + + /** + * The URL of the screenshot image. + */ + image_url?: string; +} + /** * Multi-modal input and output contents. */ @@ -1322,6 +1402,46 @@ export interface ResponseFunctionToolCall { status?: 'in_progress' | 'completed' | 'incomplete'; } +/** + * A tool call to run a function. 
See the + * [function calling guide](https://platform.openai.com/docs/guides/function-calling) + * for more information. + */ +export interface ResponseFunctionToolCallItem extends ResponseFunctionToolCall { + /** + * The unique ID of the function call tool output. + */ + id: string; +} + +export interface ResponseFunctionToolCallOutputItem { + /** + * The unique ID of the function call tool output. + */ + id: string; + + /** + * The unique ID of the function tool call generated by the model. + */ + call_id: string; + + /** + * A JSON string of the output of the function tool call. + */ + output: string; + + /** + * The type of the function tool call output. Always `function_call_output`. + */ + type: 'function_call_output'; + + /** + * The status of the item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; +} + /** * The results of a web search tool call. See the * [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for @@ -1535,7 +1655,7 @@ export namespace ResponseInputItem { /** * A computer screenshot image used with the computer use tool. */ - output: ComputerCallOutput.Output; + output: ResponsesAPI.ResponseComputerToolCallOutputScreenshot; /** * The type of the computer tool call output. Always `computer_call_output`. @@ -1561,27 +1681,6 @@ export namespace ResponseInputItem { } export namespace ComputerCallOutput { - /** - * A computer screenshot image used with the computer use tool. - */ - export interface Output { - /** - * Specifies the event type. For a computer screenshot, this property is always set - * to `computer_screenshot`. - */ - type: 'computer_screenshot'; - - /** - * The identifier of an uploaded file that contains the screenshot. - */ - file_id?: string; - - /** - * The URL of the screenshot image. - */ - image_url?: string; - } - /** * A pending safety check for the computer call. */ @@ -1657,6 +1756,35 @@ export namespace ResponseInputItem { */ export type ResponseInputMessageContentList = Array; +export interface ResponseInputMessageItem { + /** + * The unique ID of the message input. + */ + id: string; + + /** + * A list of one or many input items to the model, containing different content + * types. + */ + content: ResponseInputMessageContentList; + + /** + * The role of the message input. One of `user`, `system`, or `developer`. + */ + role: 'user' | 'system' | 'developer'; + + /** + * The status of item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; + + /** + * The type of the message input. Always set to `message`. + */ + type?: 'message'; +} + /** * A text input to the model. */ @@ -1672,6 +1800,19 @@ export interface ResponseInputText { type: 'input_text'; } +/** + * Content item used to generate a response. + */ +export type ResponseItem = + | ResponseInputMessageItem + | ResponseOutputMessage + | ResponseFileSearchToolCall + | ResponseComputerToolCall + | ResponseComputerToolCallOutputItem + | ResponseFunctionWebSearch + | ResponseFunctionToolCallItem + | ResponseFunctionToolCallOutputItem; + /** * An audio output from the model. 
*/ @@ -2655,7 +2796,6 @@ export declare namespace Responses { export { InputItems as InputItems, type ResponseItemList as ResponseItemList, - type ResponseItemListDataPage as ResponseItemListDataPage, type InputItemListParams as InputItemListParams, }; } From ea04e9e4588b1de3b04b7c82dde8b2ddc999dc5e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 19 Mar 2025 18:14:44 +0000 Subject: [PATCH 18/73] chore(exports): cleaner resource index imports --- src/resources.ts | 1 + 1 file changed, 1 insertion(+) create mode 100644 src/resources.ts diff --git a/src/resources.ts b/src/resources.ts new file mode 100644 index 000000000..b283d5781 --- /dev/null +++ b/src/resources.ts @@ -0,0 +1 @@ +export * from './resources/index'; From 845ea178b5676973621f147fc83530563b90aefa Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 19 Mar 2025 18:28:49 +0000 Subject: [PATCH 19/73] chore(exports): stop using path fallbacks --- scripts/utils/postprocess-files.cjs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/utils/postprocess-files.cjs b/scripts/utils/postprocess-files.cjs index d16c8641c..deae575e3 100644 --- a/scripts/utils/postprocess-files.cjs +++ b/scripts/utils/postprocess-files.cjs @@ -50,14 +50,14 @@ async function postprocess() { if (entry.isDirectory() && entry.name !== 'src' && entry.name !== 'internal' && entry.name !== 'bin') { const subpath = './' + entry.name; newExports[subpath + '/*.mjs'] = { - default: [subpath + '/*.mjs', subpath + '/*/index.mjs'], + default: subpath + '/*.mjs', }; newExports[subpath + '/*.js'] = { - default: [subpath + '/*.js', subpath + '/*/index.js'], + default: subpath + '/*.js', }; newExports[subpath + '/*'] = { - import: [subpath + '/*.mjs', subpath + '/*/index.mjs'], - require: [subpath + '/*.js', subpath + '/*/index.js'], + import: subpath + '/*.mjs', + require: subpath + '/*.js', }; } else if (entry.isFile() && /\.[cm]?js$/.test(entry.name)) { const { name, ext } = path.parse(entry.name); From e7f291fe01ccb5969bda63a5bff130a5a838cb49 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 19 Mar 2025 20:34:55 +0000 Subject: [PATCH 20/73] feat(api): o1-pro now available through the API --- .stats.yml | 2 +- api.md | 2 ++ src/client.ts | 2 ++ src/resources/responses/responses.ts | 6 +++--- src/resources/shared.ts | 27 ++++++++++++++++++++++----- 5 files changed, 30 insertions(+), 9 deletions(-) diff --git a/.stats.yml b/.stats.yml index b03256223..e0b06dc22 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 81 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f3bce04386c4fcfd5037e0477fbaa39010003fd1558eb5185fe4a71dd6a05fdd.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-b26121d5df6eb5d3032a45a267473798b15fcfec76dd44a3256cf1238be05fa4.yml diff --git a/api.md b/api.md index 702deeb98..7fb67f877 100644 --- a/api.md +++ b/api.md @@ -2,6 +2,7 @@ Types: +- AllModels - ChatModel - ComparisonFilter - CompoundFilter @@ -14,6 +15,7 @@ Types: - ResponseFormatJSONObject - ResponseFormatJSONSchema - ResponseFormatText +- ResponsesModel # Completions diff --git a/src/client.ts b/src/client.ts index 485fa6821..71c6c1f18 100644 --- a/src/client.ts +++ b/src/client.ts @@ -1022,6 +1022,7 @@ export declare namespace OpenAI { export { 
Responses as Responses }; + export type AllModels = API.AllModels; export type ChatModel = API.ChatModel; export type ComparisonFilter = API.ComparisonFilter; export type CompoundFilter = API.CompoundFilter; @@ -1034,4 +1035,5 @@ export declare namespace OpenAI { export type ResponseFormatJSONObject = API.ResponseFormatJSONObject; export type ResponseFormatJSONSchema = API.ResponseFormatJSONSchema; export type ResponseFormatText = API.ResponseFormatText; + export type ResponsesModel = API.ResponsesModel; } diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 968d208d3..f4764b7f8 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -255,7 +255,7 @@ export interface Response { * [model guide](https://platform.openai.com/docs/models) to browse and compare * available models. */ - model: (string & {}) | Shared.ChatModel; + model: Shared.ResponsesModel; /** * The object type of this resource - always set to `response`. @@ -1409,7 +1409,7 @@ export interface ResponseFunctionToolCall { */ export interface ResponseFunctionToolCallItem extends ResponseFunctionToolCall { /** - * The unique ID of the function call tool output. + * The unique ID of the function tool call. */ id: string; } @@ -2607,7 +2607,7 @@ export interface ResponseCreateParamsBase { * [model guide](https://platform.openai.com/docs/models) to browse and compare * available models. */ - model: (string & {}) | Shared.ChatModel; + model: Shared.ResponsesModel; /** * Specify additional output data to include in the model response. Currently diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 5fbdbba6a..2c0fb1c32 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -1,5 +1,15 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+export type AllModels = + | string + | ChatModel + | string + | ChatModel + | 'o1-pro' + | 'o1-pro-2025-03-19' + | 'computer-use-preview' + | 'computer-use-preview-2025-03-11'; + export type ChatModel = | 'o3-mini' | 'o3-mini-2025-01-31' @@ -9,11 +19,6 @@ export type ChatModel = | 'o1-preview-2024-09-12' | 'o1-mini' | 'o1-mini-2024-09-12' - | 'computer-use-preview' - | 'computer-use-preview-2025-02-04' - | 'computer-use-preview-2025-03-11' - | 'gpt-4.5-preview' - | 'gpt-4.5-preview-2025-02-27' | 'gpt-4o' | 'gpt-4o-2024-11-20' | 'gpt-4o-2024-08-06' @@ -23,6 +28,10 @@ export type ChatModel = | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-mini-audio-preview' | 'gpt-4o-mini-audio-preview-2024-12-17' + | 'gpt-4o-search-preview' + | 'gpt-4o-mini-search-preview' + | 'gpt-4o-search-preview-2025-03-11' + | 'gpt-4o-mini-search-preview-2025-03-11' | 'chatgpt-4o-latest' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' @@ -265,3 +274,11 @@ export interface ResponseFormatText { */ type: 'text'; } + +export type ResponsesModel = + | (string & {}) + | ChatModel + | 'o1-pro' + | 'o1-pro-2025-03-19' + | 'computer-use-preview' + | 'computer-use-preview-2025-03-11'; From 25dc24c9a0e1d8c92f898b7e493114f048d21a34 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 20 Mar 2025 16:08:12 +0000 Subject: [PATCH 21/73] feat(api): new models for TTS, STT, + new audio features for Realtime --- .stats.yml | 4 +- api.md | 18 + src/resources/audio/audio.ts | 17 +- src/resources/audio/index.ts | 6 + src/resources/audio/speech.ts | 10 +- src/resources/audio/transcriptions.ts | 222 +++++++- src/resources/audio/translations.ts | 2 +- src/resources/beta/realtime/index.ts | 5 + src/resources/beta/realtime/realtime.ts | 522 ++++++++++++++++-- src/resources/beta/realtime/sessions.ts | 236 ++++++-- .../beta/realtime/transcription-sessions.ts | 307 ++++++++++ src/resources/chat/completions/completions.ts | 2 +- tests/api-resources/audio/speech.test.ts | 1 + .../audio/transcriptions.test.ts | 6 +- .../realtime/transcription-sessions.test.ts | 21 + 15 files changed, 1236 insertions(+), 143 deletions(-) create mode 100644 src/resources/beta/realtime/transcription-sessions.ts create mode 100644 tests/api-resources/beta/realtime/transcription-sessions.test.ts diff --git a/.stats.yml b/.stats.yml index e0b06dc22..abb937131 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ -configured_endpoints: 81 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-b26121d5df6eb5d3032a45a267473798b15fcfec76dd44a3256cf1238be05fa4.yml +configured_endpoints: 82 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-c22f59c66aec7914b6ee653d3098d1c1c8c16c180d2a158e819c8ddbf476f74b.yml diff --git a/api.md b/api.md index 7fb67f877..8ac9d3374 100644 --- a/api.md +++ b/api.md @@ -140,7 +140,11 @@ Types: Types: - Transcription +- TranscriptionInclude - TranscriptionSegment +- TranscriptionStreamEvent +- TranscriptionTextDeltaEvent +- TranscriptionTextDoneEvent - TranscriptionVerbose - TranscriptionWord - TranscriptionCreateResponse @@ -297,7 +301,9 @@ Types: - ConversationItemDeleteEvent - ConversationItemDeletedEvent - ConversationItemInputAudioTranscriptionCompletedEvent +- ConversationItemInputAudioTranscriptionDeltaEvent - ConversationItemInputAudioTranscriptionFailedEvent +- ConversationItemRetrieveEvent - ConversationItemTruncateEvent - ConversationItemTruncatedEvent - ConversationItemWithReference @@ -334,6 
+340,8 @@ Types: - SessionCreatedEvent - SessionUpdateEvent - SessionUpdatedEvent +- TranscriptionSessionUpdate +- TranscriptionSessionUpdatedEvent ### Sessions @@ -346,6 +354,16 @@ Methods: - client.beta.realtime.sessions.create({ ...params }) -> SessionCreateResponse +### TranscriptionSessions + +Types: + +- TranscriptionSession + +Methods: + +- client.beta.realtime.transcriptionSessions.create({ ...params }) -> TranscriptionSession + ## Chat ### Completions diff --git a/src/resources/audio/audio.ts b/src/resources/audio/audio.ts index b9a7ad4f8..071fe5929 100644 --- a/src/resources/audio/audio.ts +++ b/src/resources/audio/audio.ts @@ -7,8 +7,14 @@ import * as TranscriptionsAPI from './transcriptions'; import { Transcription, TranscriptionCreateParams, + TranscriptionCreateParamsNonStreaming, + TranscriptionCreateParamsStreaming, TranscriptionCreateResponse, + TranscriptionInclude, TranscriptionSegment, + TranscriptionStreamEvent, + TranscriptionTextDeltaEvent, + TranscriptionTextDoneEvent, TranscriptionVerbose, TranscriptionWord, Transcriptions, @@ -28,11 +34,12 @@ export class Audio extends APIResource { speech: SpeechAPI.Speech = new SpeechAPI.Speech(this._client); } -export type AudioModel = 'whisper-1'; +export type AudioModel = 'whisper-1' | 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe'; /** * The format of the output, in one of these options: `json`, `text`, `srt`, - * `verbose_json`, or `vtt`. + * `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, + * the only supported format is `json`. */ export type AudioResponseFormat = 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt'; @@ -46,11 +53,17 @@ export declare namespace Audio { export { Transcriptions as Transcriptions, type Transcription as Transcription, + type TranscriptionInclude as TranscriptionInclude, type TranscriptionSegment as TranscriptionSegment, + type TranscriptionStreamEvent as TranscriptionStreamEvent, + type TranscriptionTextDeltaEvent as TranscriptionTextDeltaEvent, + type TranscriptionTextDoneEvent as TranscriptionTextDoneEvent, type TranscriptionVerbose as TranscriptionVerbose, type TranscriptionWord as TranscriptionWord, type TranscriptionCreateResponse as TranscriptionCreateResponse, type TranscriptionCreateParams as TranscriptionCreateParams, + type TranscriptionCreateParamsNonStreaming as TranscriptionCreateParamsNonStreaming, + type TranscriptionCreateParamsStreaming as TranscriptionCreateParamsStreaming, }; export { diff --git a/src/resources/audio/index.ts b/src/resources/audio/index.ts index 2bbe9e3ab..deed39ede 100644 --- a/src/resources/audio/index.ts +++ b/src/resources/audio/index.ts @@ -5,11 +5,17 @@ export { Speech, type SpeechModel, type SpeechCreateParams } from './speech'; export { Transcriptions, type Transcription, + type TranscriptionInclude, type TranscriptionSegment, + type TranscriptionStreamEvent, + type TranscriptionTextDeltaEvent, + type TranscriptionTextDoneEvent, type TranscriptionVerbose, type TranscriptionWord, type TranscriptionCreateResponse, type TranscriptionCreateParams, + type TranscriptionCreateParamsNonStreaming, + type TranscriptionCreateParamsStreaming, } from './transcriptions'; export { Translations, diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts index 81dc3e47d..efd722887 100644 --- a/src/resources/audio/speech.ts +++ b/src/resources/audio/speech.ts @@ -19,7 +19,7 @@ export class Speech extends APIResource { } } -export type SpeechModel = 'tts-1' | 'tts-1-hd'; +export type SpeechModel = 'tts-1' | 'tts-1-hd' | 
'gpt-4o-mini-tts'; export interface SpeechCreateParams { /** @@ -29,7 +29,7 @@ export interface SpeechCreateParams { /** * One of the available [TTS models](https://platform.openai.com/docs/models#tts): - * `tts-1` or `tts-1-hd` + * `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`. */ model: (string & {}) | SpeechModel; @@ -41,6 +41,12 @@ export interface SpeechCreateParams { */ voice: 'alloy' | 'ash' | 'coral' | 'echo' | 'fable' | 'onyx' | 'nova' | 'sage' | 'shimmer'; + /** + * Control the voice of your generated audio with additional instructions. Does not + * work with `tts-1` or `tts-1-hd`. + */ + instructions?: string; + /** * The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, * `wav`, and `pcm`. diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index 253bd1e40..035469b72 100644 --- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -1,8 +1,10 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import { APIResource } from '../../resource'; +import * as TranscriptionsAPI from './transcriptions'; import * as AudioAPI from './audio'; import { APIPromise } from '../../api-promise'; +import { Stream } from '../../streaming'; import { type Uploadable } from '../../uploads'; import { RequestOptions } from '../../internal/request-options'; import { multipartFormRequestOptions } from '../../internal/uploads'; @@ -12,26 +14,25 @@ export class Transcriptions extends APIResource { * Transcribes audio into the input language. */ create( - body: TranscriptionCreateParams<'json' | undefined>, + body: TranscriptionCreateParamsNonStreaming, options?: RequestOptions, - ): APIPromise; + ): APIPromise; create( - body: TranscriptionCreateParams<'verbose_json'>, + body: TranscriptionCreateParamsStreaming, options?: RequestOptions, - ): APIPromise; + ): APIPromise>; create( - body: TranscriptionCreateParams<'srt' | 'vtt' | 'text'>, + body: TranscriptionCreateParamsBase, options?: RequestOptions, - ): APIPromise; - create(body: TranscriptionCreateParams, options?: RequestOptions): APIPromise; + ): APIPromise | TranscriptionCreateResponse>; create( body: TranscriptionCreateParams, options?: RequestOptions, - ): APIPromise { + ): APIPromise | APIPromise> { return this._client.post( '/audio/transcriptions', - multipartFormRequestOptions({ body, ...options, __metadata: { model: body.model } }, this._client), - ); + multipartFormRequestOptions({ body, ...options, stream: body.stream ?? false }, this._client), + ) as APIPromise | APIPromise>; } } @@ -44,8 +45,36 @@ export interface Transcription { * The transcribed text. */ text: string; + + /** + * The log probabilities of the tokens in the transcription. Only returned with the + * models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe` if `logprobs` is added + * to the `include` array. + */ + logprobs?: Array; +} + +export namespace Transcription { + export interface Logprob { + /** + * The token in the transcription. + */ + token?: string; + + /** + * The bytes of the token. + */ + bytes?: Array; + + /** + * The log probability of the token. + */ + logprob?: number; + } } +export type TranscriptionInclude = 'logprobs'; + export interface TranscriptionSegment { /** * Unique identifier of the segment. @@ -101,6 +130,103 @@ export interface TranscriptionSegment { tokens: Array; } +/** + * Emitted when there is an additional text delta. This is also the first event + * emitted when the transcription starts. 
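A short sketch of the new `instructions` option on speech synthesis, assuming the `gpt-4o-mini-tts` model shown above; the voice, text, and output path are placeholders.

```ts
import fs from 'node:fs/promises';
import OpenAI from 'openai';

const client = new OpenAI();

const speech = await client.audio.speech.create({
  model: 'gpt-4o-mini-tts',
  voice: 'coral',
  input: 'Thanks for calling, how can I help you today?',
  // New parameter; per the docs above it has no effect on `tts-1` / `tts-1-hd`.
  instructions: 'Speak in a warm, upbeat customer-support tone.',
});

// The endpoint returns raw audio bytes; write them out as an MP3 (placeholder path).
await fs.writeFile('speech.mp3', Buffer.from(await speech.arrayBuffer()));
```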
Only emitted when you + * [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + * with the `Stream` parameter set to `true`. + */ +export type TranscriptionStreamEvent = TranscriptionTextDeltaEvent | TranscriptionTextDoneEvent; + +/** + * Emitted when there is an additional text delta. This is also the first event + * emitted when the transcription starts. Only emitted when you + * [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + * with the `Stream` parameter set to `true`. + */ +export interface TranscriptionTextDeltaEvent { + /** + * The text delta that was additionally transcribed. + */ + delta: string; + + /** + * The type of the event. Always `transcript.text.delta`. + */ + type: 'transcript.text.delta'; + + /** + * The log probabilities of the delta. Only included if you + * [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + * with the `include[]` parameter set to `logprobs`. + */ + logprobs?: Array; +} + +export namespace TranscriptionTextDeltaEvent { + export interface Logprob { + /** + * The token that was used to generate the log probability. + */ + token?: string; + + /** + * The bytes that were used to generate the log probability. + */ + bytes?: Array; + + /** + * The log probability of the token. + */ + logprob?: number; + } +} + +/** + * Emitted when the transcription is complete. Contains the complete transcription + * text. Only emitted when you + * [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + * with the `Stream` parameter set to `true`. + */ +export interface TranscriptionTextDoneEvent { + /** + * The text that was transcribed. + */ + text: string; + + /** + * The type of the event. Always `transcript.text.done`. + */ + type: 'transcript.text.done'; + + /** + * The log probabilities of the individual tokens in the transcription. Only + * included if you + * [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + * with the `include[]` parameter set to `logprobs`. + */ + logprobs?: Array; +} + +export namespace TranscriptionTextDoneEvent { + export interface Logprob { + /** + * The token that was used to generate the log probability. + */ + token?: string; + + /** + * The bytes that were used to generate the log probability. + */ + bytes?: Array; + + /** + * The log probability of the token. + */ + logprob?: number; + } +} + /** * Represents a verbose json transcription response returned by model, based on the * provided input. @@ -155,9 +281,11 @@ export interface TranscriptionWord { */ export type TranscriptionCreateResponse = Transcription | TranscriptionVerbose; -export interface TranscriptionCreateParams< - ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = AudioAPI.AudioResponseFormat | undefined, -> { +export type TranscriptionCreateParams = + | TranscriptionCreateParamsNonStreaming + | TranscriptionCreateParamsStreaming; + +export interface TranscriptionCreateParamsBase { /** * The audio file object (not file name) to transcribe, in one of these formats: * flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. @@ -165,11 +293,21 @@ export interface TranscriptionCreateParams< file: Uploadable; /** - * ID of the model to use. Only `whisper-1` (which is powered by our open source - * Whisper V2 model) is currently available. + * ID of the model to use. 
The options are `gpt-4o-transcribe`, + * `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source + * Whisper V2 model). */ model: (string & {}) | AudioAPI.AudioModel; + /** + * Additional information to include in the transcription response. `logprobs` will + * return the log probabilities of the tokens in the response to understand the + * model's confidence in the transcription. `logprobs` only works with + * response_format set to `json` and only with the models `gpt-4o-transcribe` and + * `gpt-4o-mini-transcribe`. + */ + include?: Array; + /** * The language of the input audio. Supplying the input language in * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) @@ -187,10 +325,23 @@ export interface TranscriptionCreateParams< /** * The format of the output, in one of these options: `json`, `text`, `srt`, - * `verbose_json`, or `vtt`. + * `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, + * the only supported format is `json`. */ response_format?: ResponseFormat; + /** + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions) + * for more information. + * + * Note: Streaming is not supported for the `whisper-1` model and will be ignored. + */ + stream?: boolean | null; + /** * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the * output more random, while lower values like 0.2 will make it more focused and @@ -210,13 +361,52 @@ export interface TranscriptionCreateParams< timestamp_granularities?: Array<'word' | 'segment'>; } +export namespace TranscriptionCreateParams { + export type TranscriptionCreateParamsNonStreaming = TranscriptionsAPI.TranscriptionCreateParamsNonStreaming; + export type TranscriptionCreateParamsStreaming = TranscriptionsAPI.TranscriptionCreateParamsStreaming; +} + +export interface TranscriptionCreateParamsNonStreaming extends TranscriptionCreateParamsBase { + /** + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions) + * for more information. + * + * Note: Streaming is not supported for the `whisper-1` model and will be ignored. + */ + stream?: false | null; +} + +export interface TranscriptionCreateParamsStreaming extends TranscriptionCreateParamsBase { + /** + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions) + * for more information. + * + * Note: Streaming is not supported for the `whisper-1` model and will be ignored. 
+ */ + stream: true; +} + export declare namespace Transcriptions { export { type Transcription as Transcription, + type TranscriptionInclude as TranscriptionInclude, type TranscriptionSegment as TranscriptionSegment, + type TranscriptionStreamEvent as TranscriptionStreamEvent, + type TranscriptionTextDeltaEvent as TranscriptionTextDeltaEvent, + type TranscriptionTextDoneEvent as TranscriptionTextDoneEvent, type TranscriptionVerbose as TranscriptionVerbose, type TranscriptionWord as TranscriptionWord, type TranscriptionCreateResponse as TranscriptionCreateResponse, type TranscriptionCreateParams as TranscriptionCreateParams, + type TranscriptionCreateParamsNonStreaming as TranscriptionCreateParamsNonStreaming, + type TranscriptionCreateParamsStreaming as TranscriptionCreateParamsStreaming, }; } diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts index 4c309e441..55155d592 100644 --- a/src/resources/audio/translations.ts +++ b/src/resources/audio/translations.ts @@ -88,7 +88,7 @@ export interface TranslationCreateParams< * The format of the output, in one of these options: `json`, `text`, `srt`, * `verbose_json`, or `vtt`. */ - response_format?: ResponseFormat; + response_format?: 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt'; /** * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the diff --git a/src/resources/beta/realtime/index.ts b/src/resources/beta/realtime/index.ts index 66c3ecaae..ba51d8a66 100644 --- a/src/resources/beta/realtime/index.ts +++ b/src/resources/beta/realtime/index.ts @@ -2,3 +2,8 @@ export { Realtime } from './realtime'; export { Sessions, type Session, type SessionCreateResponse, type SessionCreateParams } from './sessions'; +export { + TranscriptionSessions, + type TranscriptionSession, + type TranscriptionSessionCreateParams, +} from './transcription-sessions'; diff --git a/src/resources/beta/realtime/realtime.ts b/src/resources/beta/realtime/realtime.ts index 5e2b1c833..d0a74840b 100644 --- a/src/resources/beta/realtime/realtime.ts +++ b/src/resources/beta/realtime/realtime.ts @@ -10,9 +10,17 @@ import { SessionCreateResponse, Sessions, } from './sessions'; +import * as TranscriptionSessionsAPI from './transcription-sessions'; +import { + TranscriptionSession, + TranscriptionSessionCreateParams, + TranscriptionSessions, +} from './transcription-sessions'; export class Realtime extends APIResource { sessions: SessionsAPI.Sessions = new SessionsAPI.Sessions(this._client); + transcriptionSessions: TranscriptionSessionsAPI.TranscriptionSessions = + new TranscriptionSessionsAPI.TranscriptionSessions(this._client); } /** @@ -300,6 +308,91 @@ export interface ConversationItemInputAudioTranscriptionCompletedEvent { * The event type, must be `conversation.item.input_audio_transcription.completed`. */ type: 'conversation.item.input_audio_transcription.completed'; + + /** + * The log probabilities of the transcription. + */ + logprobs?: Array | null; +} + +export namespace ConversationItemInputAudioTranscriptionCompletedEvent { + /** + * A log probability object. + */ + export interface Logprob { + /** + * The token that was used to generate the log probability. + */ + token: string; + + /** + * The bytes that were used to generate the log probability. + */ + bytes: Array; + + /** + * The log probability of the token. + */ + logprob: number; + } +} + +/** + * Returned when the text value of an input audio transcription content part is + * updated. 
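A sketch of the new streaming transcription overload, assuming a local audio file path; with `stream: true` the call resolves to a stream of `TranscriptionStreamEvent`s instead of a single transcription object.

```ts
import fs from 'node:fs';
import OpenAI from 'openai';

const client = new OpenAI();

const events = await client.audio.transcriptions.create({
  file: fs.createReadStream('meeting.m4a'), // placeholder path
  model: 'gpt-4o-mini-transcribe',
  stream: true, // per the docs above, ignored for `whisper-1`
});

for await (const event of events) {
  if (event.type === 'transcript.text.delta') {
    process.stdout.write(event.delta);
  } else if (event.type === 'transcript.text.done') {
    console.log('\nFinal transcript:', event.text);
  }
}
```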
+ */ +export interface ConversationItemInputAudioTranscriptionDeltaEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The event type, must be `conversation.item.input_audio_transcription.delta`. + */ + type: 'conversation.item.input_audio_transcription.delta'; + + /** + * The index of the content part in the item's content array. + */ + content_index?: number; + + /** + * The text delta. + */ + delta?: string; + + /** + * The log probabilities of the transcription. + */ + logprobs?: Array | null; +} + +export namespace ConversationItemInputAudioTranscriptionDeltaEvent { + /** + * A log probability object. + */ + export interface Logprob { + /** + * The token that was used to generate the log probability. + */ + token: string; + + /** + * The bytes that were used to generate the log probability. + */ + bytes: Array; + + /** + * The log probability of the token. + */ + logprob: number; + } } /** @@ -361,6 +454,30 @@ export namespace ConversationItemInputAudioTranscriptionFailedEvent { } } +/** + * Send this event when you want to retrieve the server's representation of a + * specific item in the conversation history. This is useful, for example, to + * inspect user audio after noise cancellation and VAD. The server will respond + * with a `conversation.item.retrieved` event, unless the item does not exist in + * the conversation history, in which case the server will respond with an error. + */ +export interface ConversationItemRetrieveEvent { + /** + * The ID of the item to retrieve. + */ + item_id: string; + + /** + * The event type, must be `conversation.item.retrieve`. + */ + type: 'conversation.item.retrieve'; + + /** + * Optional client-generated ID used to identify this event. + */ + event_id?: string; +} + /** * Send this event to truncate a previous assistant message’s audio. The server * will produce audio faster than realtime, so this event is useful when the user @@ -789,18 +906,20 @@ export namespace RateLimitsUpdatedEvent { } /** - * All events that the client can send to the Realtime API + * A realtime client event. */ export type RealtimeClientEvent = - | SessionUpdateEvent - | InputAudioBufferAppendEvent - | InputAudioBufferCommitEvent - | InputAudioBufferClearEvent | ConversationItemCreateEvent - | ConversationItemTruncateEvent | ConversationItemDeleteEvent + | ConversationItemRetrieveEvent + | ConversationItemTruncateEvent + | InputAudioBufferAppendEvent + | InputAudioBufferClearEvent + | InputAudioBufferCommitEvent + | ResponseCancelEvent | ResponseCreateEvent - | ResponseCancelEvent; + | SessionUpdateEvent + | TranscriptionSessionUpdate; /** * The response resource. @@ -1009,37 +1128,63 @@ export namespace RealtimeResponseUsage { } /** - * All events that the Realtime API can send back + * A realtime server event. 
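A minimal sketch of the new `conversation.item.retrieve` client event; the item ID is a placeholder, and the event is shown as a plain typed object to be sent over whatever realtime WebSocket connection the application already holds.

```ts
import type { ConversationItemRetrieveEvent } from 'openai/resources/beta/realtime/realtime';

// Ask the server for its stored representation of an item, e.g. to inspect
// user audio after noise cancellation and VAD; the server replies with
// `conversation.item.retrieved` (or an error if the item does not exist).
const retrieve: ConversationItemRetrieveEvent = {
  type: 'conversation.item.retrieve',
  item_id: 'item_abc123', // placeholder item ID
};

// ws.send(JSON.stringify(retrieve));
```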
*/ export type RealtimeServerEvent = - | ErrorEvent - | SessionCreatedEvent - | SessionUpdatedEvent | ConversationCreatedEvent - | InputAudioBufferCommittedEvent - | InputAudioBufferClearedEvent - | InputAudioBufferSpeechStartedEvent - | InputAudioBufferSpeechStoppedEvent | ConversationItemCreatedEvent + | ConversationItemDeletedEvent | ConversationItemInputAudioTranscriptionCompletedEvent + | ConversationItemInputAudioTranscriptionDeltaEvent | ConversationItemInputAudioTranscriptionFailedEvent + | RealtimeServerEvent.ConversationItemRetrieved | ConversationItemTruncatedEvent - | ConversationItemDeletedEvent + | ErrorEvent + | InputAudioBufferClearedEvent + | InputAudioBufferCommittedEvent + | InputAudioBufferSpeechStartedEvent + | InputAudioBufferSpeechStoppedEvent + | RateLimitsUpdatedEvent + | ResponseAudioDeltaEvent + | ResponseAudioDoneEvent + | ResponseAudioTranscriptDeltaEvent + | ResponseAudioTranscriptDoneEvent + | ResponseContentPartAddedEvent + | ResponseContentPartDoneEvent | ResponseCreatedEvent | ResponseDoneEvent + | ResponseFunctionCallArgumentsDeltaEvent + | ResponseFunctionCallArgumentsDoneEvent | ResponseOutputItemAddedEvent | ResponseOutputItemDoneEvent - | ResponseContentPartAddedEvent - | ResponseContentPartDoneEvent | ResponseTextDeltaEvent | ResponseTextDoneEvent - | ResponseAudioTranscriptDeltaEvent - | ResponseAudioTranscriptDoneEvent - | ResponseAudioDeltaEvent - | ResponseAudioDoneEvent - | ResponseFunctionCallArgumentsDeltaEvent - | ResponseFunctionCallArgumentsDoneEvent - | RateLimitsUpdatedEvent; + | SessionCreatedEvent + | SessionUpdatedEvent + | TranscriptionSessionUpdatedEvent; + +export namespace RealtimeServerEvent { + /** + * Returned when a conversation item is retrieved with + * `conversation.item.retrieve`. + */ + export interface ConversationItemRetrieved { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The item to add to the conversation. + */ + item: RealtimeAPI.ConversationItem; + + /** + * The event type, must be `conversation.item.retrieved`. + */ + type: 'conversation.item.retrieved'; + } +} /** * Returned when the model-generated audio is updated. @@ -1834,15 +1979,24 @@ export namespace SessionUpdateEvent { */ input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + input_audio_noise_reduction?: Session.InputAudioNoiseReduction; + /** * Configuration for input audio transcription, defaults to off and can be set to * `null` to turn off once on. Input audio transcription is not native to the * model, since the model consumes audio directly. Transcription runs * asynchronously through - * [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) - * and should be treated as rough guidance rather than the representation - * understood by the model. The client can optionally set the language and prompt - * for transcription, these fields will be passed to the Whisper API. + * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as guidance of input audio content rather than precisely + * what the model heard. 
The client can optionally set the language and prompt for + * transcription, these offer additional guidance to the transcription service. */ input_audio_transcription?: Session.InputAudioTranscription; @@ -1891,7 +2045,8 @@ export namespace SessionUpdateEvent { output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; /** - * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + * Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a + * temperature of 0.8 is highly recommended for best performance. */ temperature?: number; @@ -1907,9 +2062,16 @@ export namespace SessionUpdateEvent { tools?: Array; /** - * Configuration for turn detection. Can be set to `null` to turn off. Server VAD - * means that the model will detect the start and end of speech based on audio - * volume and respond at the end of user speech. + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. */ turn_detection?: Session.TurnDetection; @@ -1922,15 +2084,31 @@ export namespace SessionUpdateEvent { } export namespace Session { + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + export interface InputAudioNoiseReduction { + /** + * Type of noise reduction. `near_field` is for close-talking microphones such as + * headphones, `far_field` is for far-field microphones such as laptop or + * conference room microphones. + */ + type?: 'near_field' | 'far_field'; + } + /** * Configuration for input audio transcription, defaults to off and can be set to * `null` to turn off once on. Input audio transcription is not native to the * model, since the model consumes audio directly. Transcription runs * asynchronously through - * [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) - * and should be treated as rough guidance rather than the representation - * understood by the model. The client can optionally set the language and prompt - * for transcription, these fields will be passed to the Whisper API. + * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as guidance of input audio content rather than precisely + * what the model heard. The client can optionally set the language and prompt for + * transcription, these offer additional guidance to the transcription service. 
*/ export interface InputAudioTranscription { /** @@ -1941,16 +2119,17 @@ export namespace SessionUpdateEvent { language?: string; /** - * The model to use for transcription, `whisper-1` is the only currently supported - * model. + * The model to use for transcription, current options are `gpt-4o-transcribe`, + * `gpt-4o-mini-transcribe`, and `whisper-1`. */ model?: string; /** * An optional text to guide the model's style or continue a previous audio - * segment. The - * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) - * should match the audio language. + * segment. For `whisper-1`, the + * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting). + * For `gpt-4o-transcribe` models, the prompt is a free text string, for example + * "expect words related to technology". */ prompt?: string; } @@ -1979,48 +2158,62 @@ export namespace SessionUpdateEvent { } /** - * Configuration for turn detection. Can be set to `null` to turn off. Server VAD - * means that the model will detect the start and end of speech based on audio - * volume and respond at the end of user speech. + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. */ export interface TurnDetection { /** * Whether or not to automatically generate a response when a VAD stop event - * occurs. `true` by default. + * occurs. */ create_response?: boolean; + /** + * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low` + * will wait longer for the user to continue speaking, `high` will respond more + * quickly. `auto` is the default and is equivalent to `medium`. + */ + eagerness?: 'low' | 'medium' | 'high' | 'auto'; + /** * Whether or not to automatically interrupt any ongoing response with output to * the default conversation (i.e. `conversation` of `auto`) when a VAD start event - * occurs. `true` by default. + * occurs. */ interrupt_response?: boolean; /** - * Amount of audio to include before the VAD detected speech (in milliseconds). - * Defaults to 300ms. + * Used only for `server_vad` mode. Amount of audio to include before the VAD + * detected speech (in milliseconds). Defaults to 300ms. */ prefix_padding_ms?: number; /** - * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. - * With shorter values the model will respond more quickly, but may jump in on - * short pauses from the user. + * Used only for `server_vad` mode. Duration of silence to detect speech stop (in + * milliseconds). Defaults to 500ms. With shorter values the model will respond + * more quickly, but may jump in on short pauses from the user. */ silence_duration_ms?: number; /** - * Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. 
A higher - * threshold will require louder audio to activate the model, and thus might - * perform better in noisy environments. + * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this + * defaults to 0.5. A higher threshold will require louder audio to activate the + * model, and thus might perform better in noisy environments. */ threshold?: number; /** - * Type of turn detection, only `server_vad` is currently supported. + * Type of turn detection. */ - type?: string; + type?: 'server_vad' | 'semantic_vad'; } } } @@ -2046,7 +2239,216 @@ export interface SessionUpdatedEvent { type: 'session.updated'; } +/** + * Send this event to update a transcription session. + */ +export interface TranscriptionSessionUpdate { + /** + * Realtime transcription session object configuration. + */ + session: TranscriptionSessionUpdate.Session; + + /** + * The event type, must be `transcription_session.update`. + */ + type: 'transcription_session.update'; + + /** + * Optional client-generated ID used to identify this event. + */ + event_id?: string; +} + +export namespace TranscriptionSessionUpdate { + /** + * Realtime transcription session object configuration. + */ + export interface Session { + /** + * The set of items to include in the transcription. Current available items are: + * + * - `item.input_audio_transcription.logprobs` + */ + include?: Array; + + /** + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For + * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel + * (mono), and little-endian byte order. + */ + input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + input_audio_noise_reduction?: Session.InputAudioNoiseReduction; + + /** + * Configuration for input audio transcription. The client can optionally set the + * language and prompt for transcription, these offer additional guidance to the + * transcription service. + */ + input_audio_transcription?: Session.InputAudioTranscription; + + /** + * The set of modalities the model can respond with. To disable audio, set this to + * ["text"]. + */ + modalities?: Array<'text' | 'audio'>; + + /** + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. + */ + turn_detection?: Session.TurnDetection; + } + + export namespace Session { + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. 
Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + export interface InputAudioNoiseReduction { + /** + * Type of noise reduction. `near_field` is for close-talking microphones such as + * headphones, `far_field` is for far-field microphones such as laptop or + * conference room microphones. + */ + type?: 'near_field' | 'far_field'; + } + + /** + * Configuration for input audio transcription. The client can optionally set the + * language and prompt for transcription, these offer additional guidance to the + * transcription service. + */ + export interface InputAudioTranscription { + /** + * The language of the input audio. Supplying the input language in + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + * format will improve accuracy and latency. + */ + language?: string; + + /** + * The model to use for transcription, current options are `gpt-4o-transcribe`, + * `gpt-4o-mini-transcribe`, and `whisper-1`. + */ + model?: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe' | 'whisper-1'; + + /** + * An optional text to guide the model's style or continue a previous audio + * segment. For `whisper-1`, the + * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting). + * For `gpt-4o-transcribe` models, the prompt is a free text string, for example + * "expect words related to technology". + */ + prompt?: string; + } + + /** + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. + */ + export interface TurnDetection { + /** + * Whether or not to automatically generate a response when a VAD stop event + * occurs. + */ + create_response?: boolean; + + /** + * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low` + * will wait longer for the user to continue speaking, `high` will respond more + * quickly. `auto` is the default and is equivalent to `medium`. + */ + eagerness?: 'low' | 'medium' | 'high' | 'auto'; + + /** + * Whether or not to automatically interrupt any ongoing response with output to + * the default conversation (i.e. `conversation` of `auto`) when a VAD start event + * occurs. + */ + interrupt_response?: boolean; + + /** + * Used only for `server_vad` mode. Amount of audio to include before the VAD + * detected speech (in milliseconds). Defaults to 300ms. + */ + prefix_padding_ms?: number; + + /** + * Used only for `server_vad` mode. Duration of silence to detect speech stop (in + * milliseconds). Defaults to 500ms. With shorter values the model will respond + * more quickly, but may jump in on short pauses from the user. 
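A sketch of a `session.update` client event exercising the options described above (input noise reduction, a 4o transcription model, and semantic VAD); all values are illustrative.

```ts
import type { SessionUpdateEvent } from 'openai/resources/beta/realtime/realtime';

const update: SessionUpdateEvent = {
  type: 'session.update',
  session: {
    // Filter microphone input before VAD and the model see it.
    input_audio_noise_reduction: { type: 'near_field' },
    // Transcription now supports the 4o models in addition to `whisper-1`.
    input_audio_transcription: {
      model: 'gpt-4o-transcribe',
      language: 'en',
      prompt: 'expect words related to technology',
    },
    // Semantic VAD waits longer when the user sounds unfinished.
    turn_detection: { type: 'semantic_vad', eagerness: 'low' },
  },
};

// ws.send(JSON.stringify(update));
```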
+ */ + silence_duration_ms?: number; + + /** + * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this + * defaults to 0.5. A higher threshold will require louder audio to activate the + * model, and thus might perform better in noisy environments. + */ + threshold?: number; + + /** + * Type of turn detection. + */ + type?: 'server_vad' | 'semantic_vad'; + } + } +} + +/** + * Returned when a transcription session is updated with a + * `transcription_session.update` event, unless there is an error. + */ +export interface TranscriptionSessionUpdatedEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * A new Realtime transcription session configuration. + * + * When a session is created on the server via REST API, the session object also + * contains an ephemeral key. Default TTL for keys is one minute. This property is + * not present when a session is updated via the WebSocket API. + */ + session: TranscriptionSessionsAPI.TranscriptionSession; + + /** + * The event type, must be `transcription_session.updated`. + */ + type: 'transcription_session.updated'; +} + Realtime.Sessions = Sessions; +Realtime.TranscriptionSessions = TranscriptionSessions; export declare namespace Realtime { export { @@ -2055,4 +2457,10 @@ export declare namespace Realtime { type SessionCreateResponse as SessionCreateResponse, type SessionCreateParams as SessionCreateParams, }; + + export { + TranscriptionSessions as TranscriptionSessions, + type TranscriptionSession as TranscriptionSession, + type TranscriptionSessionCreateParams as TranscriptionSessionCreateParams, + }; } diff --git a/src/resources/beta/realtime/sessions.ts b/src/resources/beta/realtime/sessions.ts index 0ece95bcd..e1c439c1c 100644 --- a/src/resources/beta/realtime/sessions.ts +++ b/src/resources/beta/realtime/sessions.ts @@ -29,7 +29,7 @@ export class Sessions extends APIResource { */ export interface Session { /** - * Unique identifier for the session object. + * Unique identifier for the session that looks like `sess_1234567890abcdef`. */ id?: string; @@ -40,12 +40,24 @@ export interface Session { */ input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + input_audio_noise_reduction?: Session.InputAudioNoiseReduction; + /** * Configuration for input audio transcription, defaults to off and can be set to * `null` to turn off once on. Input audio transcription is not native to the * model, since the model consumes audio directly. Transcription runs - * asynchronously through Whisper and should be treated as rough guidance rather - * than the representation understood by the model. + * asynchronously through + * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as guidance of input audio content rather than precisely + * what the model heard. The client can optionally set the language and prompt for + * transcription, these offer additional guidance to the transcription service. */ input_audio_transcription?: Session.InputAudioTranscription; @@ -81,7 +93,6 @@ export interface Session { * The Realtime model used for this session. 
*/ model?: - | (string & {}) | 'gpt-4o-realtime-preview' | 'gpt-4o-realtime-preview-2024-10-01' | 'gpt-4o-realtime-preview-2024-12-17' @@ -95,7 +106,8 @@ export interface Session { output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; /** - * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + * Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a + * temperature of 0.8 is highly recommended for best performance. */ temperature?: number; @@ -111,11 +123,18 @@ export interface Session { tools?: Array; /** - * Configuration for turn detection. Can be set to `null` to turn off. Server VAD - * means that the model will detect the start and end of speech based on audio - * volume and respond at the end of user speech. + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. */ - turn_detection?: Session.TurnDetection | null; + turn_detection?: Session.TurnDetection; /** * The voice the model uses to respond. Voice cannot be changed during the session @@ -126,19 +145,54 @@ export interface Session { } export namespace Session { + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + export interface InputAudioNoiseReduction { + /** + * Type of noise reduction. `near_field` is for close-talking microphones such as + * headphones, `far_field` is for far-field microphones such as laptop or + * conference room microphones. + */ + type?: 'near_field' | 'far_field'; + } + /** * Configuration for input audio transcription, defaults to off and can be set to * `null` to turn off once on. Input audio transcription is not native to the * model, since the model consumes audio directly. Transcription runs - * asynchronously through Whisper and should be treated as rough guidance rather - * than the representation understood by the model. + * asynchronously through + * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as guidance of input audio content rather than precisely + * what the model heard. The client can optionally set the language and prompt for + * transcription, these offer additional guidance to the transcription service. */ export interface InputAudioTranscription { /** - * The model to use for transcription, `whisper-1` is the only currently supported - * model. + * The language of the input audio. Supplying the input language in + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. 
`en`) + * format will improve accuracy and latency. + */ + language?: string; + + /** + * The model to use for transcription, current options are `gpt-4o-transcribe`, + * `gpt-4o-mini-transcribe`, and `whisper-1`. */ model?: string; + + /** + * An optional text to guide the model's style or continue a previous audio + * segment. For `whisper-1`, the + * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting). + * For `gpt-4o-transcribe` models, the prompt is a free text string, for example + * "expect words related to technology". + */ + prompt?: string; } export interface Tool { @@ -165,48 +219,62 @@ export namespace Session { } /** - * Configuration for turn detection. Can be set to `null` to turn off. Server VAD - * means that the model will detect the start and end of speech based on audio - * volume and respond at the end of user speech. + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. */ export interface TurnDetection { /** * Whether or not to automatically generate a response when a VAD stop event - * occurs. `true` by default. + * occurs. */ create_response?: boolean; + /** + * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low` + * will wait longer for the user to continue speaking, `high` will respond more + * quickly. `auto` is the default and is equivalent to `medium`. + */ + eagerness?: 'low' | 'medium' | 'high' | 'auto'; + /** * Whether or not to automatically interrupt any ongoing response with output to * the default conversation (i.e. `conversation` of `auto`) when a VAD start event - * occurs. `true` by default. + * occurs. */ interrupt_response?: boolean; /** - * Amount of audio to include before the VAD detected speech (in milliseconds). - * Defaults to 300ms. + * Used only for `server_vad` mode. Amount of audio to include before the VAD + * detected speech (in milliseconds). Defaults to 300ms. */ prefix_padding_ms?: number; /** - * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. - * With shorter values the model will respond more quickly, but may jump in on - * short pauses from the user. + * Used only for `server_vad` mode. Duration of silence to detect speech stop (in + * milliseconds). Defaults to 500ms. With shorter values the model will respond + * more quickly, but may jump in on short pauses from the user. */ silence_duration_ms?: number; /** - * Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher - * threshold will require louder audio to activate the model, and thus might - * perform better in noisy environments. + * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this + * defaults to 0.5. 
A higher threshold will require louder audio to activate the + * model, and thus might perform better in noisy environments. */ threshold?: number; /** - * Type of turn detection, only `server_vad` is currently supported. + * Type of turn detection. */ - type?: 'server_vad'; + type?: 'server_vad' | 'semantic_vad'; } } @@ -396,15 +464,24 @@ export interface SessionCreateParams { */ input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + input_audio_noise_reduction?: SessionCreateParams.InputAudioNoiseReduction; + /** * Configuration for input audio transcription, defaults to off and can be set to * `null` to turn off once on. Input audio transcription is not native to the * model, since the model consumes audio directly. Transcription runs * asynchronously through - * [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) - * and should be treated as rough guidance rather than the representation - * understood by the model. The client can optionally set the language and prompt - * for transcription, these fields will be passed to the Whisper API. + * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as guidance of input audio content rather than precisely + * what the model heard. The client can optionally set the language and prompt for + * transcription, these offer additional guidance to the transcription service. */ input_audio_transcription?: SessionCreateParams.InputAudioTranscription; @@ -453,7 +530,8 @@ export interface SessionCreateParams { output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; /** - * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + * Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a + * temperature of 0.8 is highly recommended for best performance. */ temperature?: number; @@ -469,9 +547,16 @@ export interface SessionCreateParams { tools?: Array; /** - * Configuration for turn detection. Can be set to `null` to turn off. Server VAD - * means that the model will detect the start and end of speech based on audio - * volume and respond at the end of user speech. + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. 
*/ turn_detection?: SessionCreateParams.TurnDetection; @@ -484,15 +569,31 @@ export interface SessionCreateParams { } export namespace SessionCreateParams { + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + export interface InputAudioNoiseReduction { + /** + * Type of noise reduction. `near_field` is for close-talking microphones such as + * headphones, `far_field` is for far-field microphones such as laptop or + * conference room microphones. + */ + type?: 'near_field' | 'far_field'; + } + /** * Configuration for input audio transcription, defaults to off and can be set to * `null` to turn off once on. Input audio transcription is not native to the * model, since the model consumes audio directly. Transcription runs * asynchronously through - * [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) - * and should be treated as rough guidance rather than the representation - * understood by the model. The client can optionally set the language and prompt - * for transcription, these fields will be passed to the Whisper API. + * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as guidance of input audio content rather than precisely + * what the model heard. The client can optionally set the language and prompt for + * transcription, these offer additional guidance to the transcription service. */ export interface InputAudioTranscription { /** @@ -503,16 +604,17 @@ export namespace SessionCreateParams { language?: string; /** - * The model to use for transcription, `whisper-1` is the only currently supported - * model. + * The model to use for transcription, current options are `gpt-4o-transcribe`, + * `gpt-4o-mini-transcribe`, and `whisper-1`. */ model?: string; /** * An optional text to guide the model's style or continue a previous audio - * segment. The - * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) - * should match the audio language. + * segment. For `whisper-1`, the + * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting). + * For `gpt-4o-transcribe` models, the prompt is a free text string, for example + * "expect words related to technology". */ prompt?: string; } @@ -541,48 +643,62 @@ export namespace SessionCreateParams { } /** - * Configuration for turn detection. Can be set to `null` to turn off. Server VAD - * means that the model will detect the start and end of speech based on audio - * volume and respond at the end of user speech. + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. 
For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. */ export interface TurnDetection { /** * Whether or not to automatically generate a response when a VAD stop event - * occurs. `true` by default. + * occurs. */ create_response?: boolean; + /** + * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low` + * will wait longer for the user to continue speaking, `high` will respond more + * quickly. `auto` is the default and is equivalent to `medium`. + */ + eagerness?: 'low' | 'medium' | 'high' | 'auto'; + /** * Whether or not to automatically interrupt any ongoing response with output to * the default conversation (i.e. `conversation` of `auto`) when a VAD start event - * occurs. `true` by default. + * occurs. */ interrupt_response?: boolean; /** - * Amount of audio to include before the VAD detected speech (in milliseconds). - * Defaults to 300ms. + * Used only for `server_vad` mode. Amount of audio to include before the VAD + * detected speech (in milliseconds). Defaults to 300ms. */ prefix_padding_ms?: number; /** - * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. - * With shorter values the model will respond more quickly, but may jump in on - * short pauses from the user. + * Used only for `server_vad` mode. Duration of silence to detect speech stop (in + * milliseconds). Defaults to 500ms. With shorter values the model will respond + * more quickly, but may jump in on short pauses from the user. */ silence_duration_ms?: number; /** - * Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher - * threshold will require louder audio to activate the model, and thus might - * perform better in noisy environments. + * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this + * defaults to 0.5. A higher threshold will require louder audio to activate the + * model, and thus might perform better in noisy environments. */ threshold?: number; /** - * Type of turn detection, only `server_vad` is currently supported. + * Type of turn detection. */ - type?: string; + type?: 'server_vad' | 'semantic_vad'; } } diff --git a/src/resources/beta/realtime/transcription-sessions.ts b/src/resources/beta/realtime/transcription-sessions.ts new file mode 100644 index 000000000..f31ee9210 --- /dev/null +++ b/src/resources/beta/realtime/transcription-sessions.ts @@ -0,0 +1,307 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../resource'; +import { APIPromise } from '../../../api-promise'; +import { buildHeaders } from '../../../internal/headers'; +import { RequestOptions } from '../../../internal/request-options'; + +export class TranscriptionSessions extends APIResource { + /** + * Create an ephemeral API token for use in client-side applications with the + * Realtime API specifically for realtime transcriptions. Can be configured with + * the same session parameters as the `transcription_session.update` client event. + * + * It responds with a session object, plus a `client_secret` key which contains a + * usable ephemeral API token that can be used to authenticate browser clients for + * the Realtime API. 
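A short sketch of the ephemeral-token flow described above, assuming `client` is an already-constructed server-side `OpenAI` instance; only the short-lived secret is forwarded to the browser:

```ts
// Server side: mint a transcription session with default settings.
const session = await client.beta.realtime.transcriptionSessions.create({});

// Hand only the ephemeral key to the browser client; it expires after about
// one minute (see client_secret.expires_at below).
const ephemeralKey = session.client_secret.value;
```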
+ */ + create(body: TranscriptionSessionCreateParams, options?: RequestOptions): APIPromise { + return this._client.post('/realtime/transcription_sessions', { + body, + ...options, + headers: buildHeaders([{ 'OpenAI-Beta': 'assistants=v2' }, options?.headers]), + }); + } +} + +/** + * A new Realtime transcription session configuration. + * + * When a session is created on the server via REST API, the session object also + * contains an ephemeral key. Default TTL for keys is one minute. This property is + * not present when a session is updated via the WebSocket API. + */ +export interface TranscriptionSession { + /** + * Ephemeral key returned by the API. Only present when the session is created on + * the server via REST API. + */ + client_secret: TranscriptionSession.ClientSecret; + + /** + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + */ + input_audio_format?: string; + + /** + * Configuration of the transcription model. + */ + input_audio_transcription?: TranscriptionSession.InputAudioTranscription; + + /** + * The set of modalities the model can respond with. To disable audio, set this to + * ["text"]. + */ + modalities?: Array<'text' | 'audio'>; + + /** + * Configuration for turn detection. Can be set to `null` to turn off. Server VAD + * means that the model will detect the start and end of speech based on audio + * volume and respond at the end of user speech. + */ + turn_detection?: TranscriptionSession.TurnDetection; +} + +export namespace TranscriptionSession { + /** + * Ephemeral key returned by the API. Only present when the session is created on + * the server via REST API. + */ + export interface ClientSecret { + /** + * Timestamp for when the token expires. Currently, all tokens expire after one + * minute. + */ + expires_at: number; + + /** + * Ephemeral key usable in client environments to authenticate connections to the + * Realtime API. Use this in client-side environments rather than a standard API + * token, which should only be used server-side. + */ + value: string; + } + + /** + * Configuration of the transcription model. + */ + export interface InputAudioTranscription { + /** + * The language of the input audio. Supplying the input language in + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + * format will improve accuracy and latency. + */ + language?: string; + + /** + * The model to use for transcription. Can be `gpt-4o-transcribe`, + * `gpt-4o-mini-transcribe`, or `whisper-1`. + */ + model?: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe' | 'whisper-1'; + + /** + * An optional text to guide the model's style or continue a previous audio + * segment. The + * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + * should match the audio language. + */ + prompt?: string; + } + + /** + * Configuration for turn detection. Can be set to `null` to turn off. Server VAD + * means that the model will detect the start and end of speech based on audio + * volume and respond at the end of user speech. + */ + export interface TurnDetection { + /** + * Amount of audio to include before the VAD detected speech (in milliseconds). + * Defaults to 300ms. + */ + prefix_padding_ms?: number; + + /** + * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. + * With shorter values the model will respond more quickly, but may jump in on + * short pauses from the user. 
+ */ + silence_duration_ms?: number; + + /** + * Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher + * threshold will require louder audio to activate the model, and thus might + * perform better in noisy environments. + */ + threshold?: number; + + /** + * Type of turn detection, only `server_vad` is currently supported. + */ + type?: string; + } +} + +export interface TranscriptionSessionCreateParams { + /** + * The set of items to include in the transcription. Current available items are: + * + * - `item.input_audio_transcription.logprobs` + */ + include?: Array; + + /** + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For + * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel + * (mono), and little-endian byte order. + */ + input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + input_audio_noise_reduction?: TranscriptionSessionCreateParams.InputAudioNoiseReduction; + + /** + * Configuration for input audio transcription. The client can optionally set the + * language and prompt for transcription, these offer additional guidance to the + * transcription service. + */ + input_audio_transcription?: TranscriptionSessionCreateParams.InputAudioTranscription; + + /** + * The set of modalities the model can respond with. To disable audio, set this to + * ["text"]. + */ + modalities?: Array<'text' | 'audio'>; + + /** + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. + */ + turn_detection?: TranscriptionSessionCreateParams.TurnDetection; +} + +export namespace TranscriptionSessionCreateParams { + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + export interface InputAudioNoiseReduction { + /** + * Type of noise reduction. `near_field` is for close-talking microphones such as + * headphones, `far_field` is for far-field microphones such as laptop or + * conference room microphones. + */ + type?: 'near_field' | 'far_field'; + } + + /** + * Configuration for input audio transcription. 
The client can optionally set the + * language and prompt for transcription, these offer additional guidance to the + * transcription service. + */ + export interface InputAudioTranscription { + /** + * The language of the input audio. Supplying the input language in + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + * format will improve accuracy and latency. + */ + language?: string; + + /** + * The model to use for transcription, current options are `gpt-4o-transcribe`, + * `gpt-4o-mini-transcribe`, and `whisper-1`. + */ + model?: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe' | 'whisper-1'; + + /** + * An optional text to guide the model's style or continue a previous audio + * segment. For `whisper-1`, the + * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting). + * For `gpt-4o-transcribe` models, the prompt is a free text string, for example + * "expect words related to technology". + */ + prompt?: string; + } + + /** + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. + */ + export interface TurnDetection { + /** + * Whether or not to automatically generate a response when a VAD stop event + * occurs. + */ + create_response?: boolean; + + /** + * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low` + * will wait longer for the user to continue speaking, `high` will respond more + * quickly. `auto` is the default and is equivalent to `medium`. + */ + eagerness?: 'low' | 'medium' | 'high' | 'auto'; + + /** + * Whether or not to automatically interrupt any ongoing response with output to + * the default conversation (i.e. `conversation` of `auto`) when a VAD start event + * occurs. + */ + interrupt_response?: boolean; + + /** + * Used only for `server_vad` mode. Amount of audio to include before the VAD + * detected speech (in milliseconds). Defaults to 300ms. + */ + prefix_padding_ms?: number; + + /** + * Used only for `server_vad` mode. Duration of silence to detect speech stop (in + * milliseconds). Defaults to 500ms. With shorter values the model will respond + * more quickly, but may jump in on short pauses from the user. + */ + silence_duration_ms?: number; + + /** + * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this + * defaults to 0.5. A higher threshold will require louder audio to activate the + * model, and thus might perform better in noisy environments. + */ + threshold?: number; + + /** + * Type of turn detection. 
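A sketch combining the `TranscriptionSessionCreateParams` documented above into a single request; the parameter values are illustrative and `client` is assumed to be an existing `OpenAI` instance:

```ts
const transcriptionSession = await client.beta.realtime.transcriptionSessions.create({
  include: ['item.input_audio_transcription.logprobs'],
  input_audio_format: 'pcm16',
  input_audio_noise_reduction: { type: 'far_field' }, // e.g. a conference-room mic
  input_audio_transcription: { model: 'gpt-4o-mini-transcribe', language: 'en' },
  turn_detection: { type: 'semantic_vad', eagerness: 'auto' },
});

console.log(transcriptionSession);
```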
+ */ + type?: 'server_vad' | 'semantic_vad'; + } +} + +export declare namespace TranscriptionSessions { + export { + type TranscriptionSession as TranscriptionSession, + type TranscriptionSessionCreateParams as TranscriptionSessionCreateParams, + }; +} diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts index d6ece1236..0aa8d923d 100644 --- a/src/resources/chat/completions/completions.ts +++ b/src/resources/chat/completions/completions.ts @@ -372,7 +372,7 @@ export interface ChatCompletionChunk { * **NOTE:** If the stream is interrupted or cancelled, you may not receive the * final usage chunk which contains the total token usage for the request. */ - usage?: CompletionsAPI.CompletionUsage; + usage?: CompletionsAPI.CompletionUsage | null; } export namespace ChatCompletionChunk { diff --git a/tests/api-resources/audio/speech.test.ts b/tests/api-resources/audio/speech.test.ts index 904d75e5d..cbec6cfac 100644 --- a/tests/api-resources/audio/speech.test.ts +++ b/tests/api-resources/audio/speech.test.ts @@ -14,6 +14,7 @@ describe('resource speech', () => { input: 'input', model: 'string', voice: 'alloy', + instructions: 'instructions', response_format: 'mp3', speed: 0.25, }); diff --git a/tests/api-resources/audio/transcriptions.test.ts b/tests/api-resources/audio/transcriptions.test.ts index 6e5d560d0..62d478701 100644 --- a/tests/api-resources/audio/transcriptions.test.ts +++ b/tests/api-resources/audio/transcriptions.test.ts @@ -11,7 +11,7 @@ describe('resource transcriptions', () => { test('create: only required params', async () => { const responsePromise = client.audio.transcriptions.create({ file: await toFile(Buffer.from('# my file contents'), 'README.md'), - model: 'whisper-1', + model: 'gpt-4o-transcribe', }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); @@ -25,10 +25,12 @@ describe('resource transcriptions', () => { test('create: required and optional params', async () => { const response = await client.audio.transcriptions.create({ file: await toFile(Buffer.from('# my file contents'), 'README.md'), - model: 'whisper-1', + model: 'gpt-4o-transcribe', + include: ['logprobs'], language: 'language', prompt: 'prompt', response_format: 'json', + stream: false, temperature: 0, timestamp_granularities: ['word'], }); diff --git a/tests/api-resources/beta/realtime/transcription-sessions.test.ts b/tests/api-resources/beta/realtime/transcription-sessions.test.ts new file mode 100644 index 000000000..2c7cbbb15 --- /dev/null +++ b/tests/api-resources/beta/realtime/transcription-sessions.test.ts @@ -0,0 +1,21 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010', +}); + +describe('resource transcriptionSessions', () => { + test('create', async () => { + const responsePromise = client.beta.realtime.transcriptionSessions.create({}); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); +}); From 1727b8eb387063c000d0613ddd9b263fa146800c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 20 Mar 2025 17:58:45 +0000 Subject: [PATCH 22/73] fix(client): remove duplicate types --- src/azure.ts | 5 ++++- src/resources/shared.ts | 4 +--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/azure.ts b/src/azure.ts index 661f288af..5d8e9a98a 100644 --- a/src/azure.ts +++ b/src/azure.ts @@ -133,7 +133,10 @@ export class AzureOpenAI extends OpenAI { if (!isObj(options.body)) { throw new Error('Expected request body to be an object'); } - const model = this.deploymentName || options.body['model'] || options.__metadata?.['model']; + const model = + this.deploymentName || + (options.body instanceof FormData ? options.body.get('model') : options.body['model']) || + options.__metadata?.['model']; if (model !== undefined && !this.baseURL.includes('/deployments')) { options.path = `/deployments/${model}${options.path}`; } diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 2c0fb1c32..3e8ded763 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -1,9 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. export type AllModels = - | string - | ChatModel - | string + | (string & {}) | ChatModel | 'o1-pro' | 'o1-pro-2025-03-19' From 21f5c3ae676dcf6fd31b62dad3d3a0741427133e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 20 Mar 2025 17:52:01 -0400 Subject: [PATCH 23/73] codegen metadata --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index abb937131..2df281d34 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 82 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-c22f59c66aec7914b6ee653d3098d1c1c8c16c180d2a158e819c8ddbf476f74b.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-5ad6884898c07591750dde560118baf7074a59aecd1f367f930c5e42b04e848a.yml From 1567852bea19bb52946410073ccf6da1a0d14ecd Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 20 Mar 2025 22:42:17 +0000 Subject: [PATCH 24/73] chore(internal): codegen related update --- MIGRATION.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MIGRATION.md b/MIGRATION.md index b84a1d6f9..c375b825c 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -286,7 +286,7 @@ import fs from 'fs'; fs.createReadStream('path/to/file'); ``` -Note that this function previously only worked on Node.j. If you're using Bun, you can use [`Bun.file`](https://bun.sh/docs/api/file-io) instead. +Note that this function previously only worked on Node.js. If you're using Bun, you can use [`Bun.file`](https://bun.sh/docs/api/file-io) instead. 
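A hedged sketch of both upload styles mentioned above; the file path and `purpose` value are placeholders:

```ts
import fs from 'fs';
import OpenAI from 'openai';

const client = new OpenAI();

// Node.js: a ReadStream is accepted directly as an uploadable file.
await client.files.create({
  file: fs.createReadStream('path/to/data.jsonl'),
  purpose: 'fine-tune',
});

// Bun: Bun.file() returns a Blob-like object that works the same way.
await client.files.create({
  file: Bun.file('path/to/data.jsonl'),
  purpose: 'fine-tune',
});
```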
### Shims removal From 60cddbd68e5d7dcf428b7bcf869ea71976d6d9a0 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 25 Mar 2025 19:07:49 +0000 Subject: [PATCH 25/73] fix(exports): add missing type exports --- src/resources/beta/beta.ts | 106 +++++++++++++++++++++++- src/resources/beta/realtime/realtime.ts | 52 ++++++++++++ src/resources/responses/responses.ts | 83 +++++++++++++++++++ 3 files changed, 239 insertions(+), 2 deletions(-) diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts index 4218200d8..73d8ea5d2 100644 --- a/src/resources/beta/beta.ts +++ b/src/resources/beta/beta.ts @@ -22,7 +22,58 @@ import { ThreadStreamEvent, } from './assistants'; import * as RealtimeAPI from './realtime/realtime'; -import { Realtime } from './realtime/realtime'; +import { + ConversationCreatedEvent, + ConversationItem, + ConversationItemContent, + ConversationItemCreateEvent, + ConversationItemCreatedEvent, + ConversationItemDeleteEvent, + ConversationItemDeletedEvent, + ConversationItemInputAudioTranscriptionCompletedEvent, + ConversationItemInputAudioTranscriptionDeltaEvent, + ConversationItemInputAudioTranscriptionFailedEvent, + ConversationItemRetrieveEvent, + ConversationItemTruncateEvent, + ConversationItemTruncatedEvent, + ConversationItemWithReference, + ErrorEvent, + InputAudioBufferAppendEvent, + InputAudioBufferClearEvent, + InputAudioBufferClearedEvent, + InputAudioBufferCommitEvent, + InputAudioBufferCommittedEvent, + InputAudioBufferSpeechStartedEvent, + InputAudioBufferSpeechStoppedEvent, + RateLimitsUpdatedEvent, + Realtime, + RealtimeClientEvent, + RealtimeResponse, + RealtimeResponseStatus, + RealtimeResponseUsage, + RealtimeServerEvent, + ResponseAudioDeltaEvent, + ResponseAudioDoneEvent, + ResponseAudioTranscriptDeltaEvent, + ResponseAudioTranscriptDoneEvent, + ResponseCancelEvent, + ResponseContentPartAddedEvent, + ResponseContentPartDoneEvent, + ResponseCreateEvent, + ResponseCreatedEvent, + ResponseDoneEvent, + ResponseFunctionCallArgumentsDeltaEvent, + ResponseFunctionCallArgumentsDoneEvent, + ResponseOutputItemAddedEvent, + ResponseOutputItemDoneEvent, + ResponseTextDeltaEvent, + ResponseTextDoneEvent, + SessionCreatedEvent, + SessionUpdateEvent, + SessionUpdatedEvent, + TranscriptionSessionUpdate, + TranscriptionSessionUpdatedEvent, +} from './realtime/realtime'; import * as ThreadsAPI from './threads/threads'; import { AssistantResponseFormatOption, @@ -54,7 +105,58 @@ Beta.Assistants = Assistants; Beta.Threads = Threads; export declare namespace Beta { - export { Realtime as Realtime }; + export { + Realtime as Realtime, + type ConversationCreatedEvent as ConversationCreatedEvent, + type ConversationItem as ConversationItem, + type ConversationItemContent as ConversationItemContent, + type ConversationItemCreateEvent as ConversationItemCreateEvent, + type ConversationItemCreatedEvent as ConversationItemCreatedEvent, + type ConversationItemDeleteEvent as ConversationItemDeleteEvent, + type ConversationItemDeletedEvent as ConversationItemDeletedEvent, + type ConversationItemInputAudioTranscriptionCompletedEvent as ConversationItemInputAudioTranscriptionCompletedEvent, + type ConversationItemInputAudioTranscriptionDeltaEvent as ConversationItemInputAudioTranscriptionDeltaEvent, + type ConversationItemInputAudioTranscriptionFailedEvent as ConversationItemInputAudioTranscriptionFailedEvent, + type ConversationItemRetrieveEvent as ConversationItemRetrieveEvent, + type ConversationItemTruncateEvent 
as ConversationItemTruncateEvent, + type ConversationItemTruncatedEvent as ConversationItemTruncatedEvent, + type ConversationItemWithReference as ConversationItemWithReference, + type ErrorEvent as ErrorEvent, + type InputAudioBufferAppendEvent as InputAudioBufferAppendEvent, + type InputAudioBufferClearEvent as InputAudioBufferClearEvent, + type InputAudioBufferClearedEvent as InputAudioBufferClearedEvent, + type InputAudioBufferCommitEvent as InputAudioBufferCommitEvent, + type InputAudioBufferCommittedEvent as InputAudioBufferCommittedEvent, + type InputAudioBufferSpeechStartedEvent as InputAudioBufferSpeechStartedEvent, + type InputAudioBufferSpeechStoppedEvent as InputAudioBufferSpeechStoppedEvent, + type RateLimitsUpdatedEvent as RateLimitsUpdatedEvent, + type RealtimeClientEvent as RealtimeClientEvent, + type RealtimeResponse as RealtimeResponse, + type RealtimeResponseStatus as RealtimeResponseStatus, + type RealtimeResponseUsage as RealtimeResponseUsage, + type RealtimeServerEvent as RealtimeServerEvent, + type ResponseAudioDeltaEvent as ResponseAudioDeltaEvent, + type ResponseAudioDoneEvent as ResponseAudioDoneEvent, + type ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent, + type ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent, + type ResponseCancelEvent as ResponseCancelEvent, + type ResponseContentPartAddedEvent as ResponseContentPartAddedEvent, + type ResponseContentPartDoneEvent as ResponseContentPartDoneEvent, + type ResponseCreateEvent as ResponseCreateEvent, + type ResponseCreatedEvent as ResponseCreatedEvent, + type ResponseDoneEvent as ResponseDoneEvent, + type ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent, + type ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent, + type ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent, + type ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent, + type ResponseTextDeltaEvent as ResponseTextDeltaEvent, + type ResponseTextDoneEvent as ResponseTextDoneEvent, + type SessionCreatedEvent as SessionCreatedEvent, + type SessionUpdateEvent as SessionUpdateEvent, + type SessionUpdatedEvent as SessionUpdatedEvent, + type TranscriptionSessionUpdate as TranscriptionSessionUpdate, + type TranscriptionSessionUpdatedEvent as TranscriptionSessionUpdatedEvent, + }; export { Chat }; diff --git a/src/resources/beta/realtime/realtime.ts b/src/resources/beta/realtime/realtime.ts index d0a74840b..224d94f37 100644 --- a/src/resources/beta/realtime/realtime.ts +++ b/src/resources/beta/realtime/realtime.ts @@ -2451,6 +2451,58 @@ Realtime.Sessions = Sessions; Realtime.TranscriptionSessions = TranscriptionSessions; export declare namespace Realtime { + export { + type ConversationCreatedEvent as ConversationCreatedEvent, + type ConversationItem as ConversationItem, + type ConversationItemContent as ConversationItemContent, + type ConversationItemCreateEvent as ConversationItemCreateEvent, + type ConversationItemCreatedEvent as ConversationItemCreatedEvent, + type ConversationItemDeleteEvent as ConversationItemDeleteEvent, + type ConversationItemDeletedEvent as ConversationItemDeletedEvent, + type ConversationItemInputAudioTranscriptionCompletedEvent as ConversationItemInputAudioTranscriptionCompletedEvent, + type ConversationItemInputAudioTranscriptionDeltaEvent as ConversationItemInputAudioTranscriptionDeltaEvent, + type ConversationItemInputAudioTranscriptionFailedEvent as ConversationItemInputAudioTranscriptionFailedEvent, + type 
ConversationItemRetrieveEvent as ConversationItemRetrieveEvent, + type ConversationItemTruncateEvent as ConversationItemTruncateEvent, + type ConversationItemTruncatedEvent as ConversationItemTruncatedEvent, + type ConversationItemWithReference as ConversationItemWithReference, + type ErrorEvent as ErrorEvent, + type InputAudioBufferAppendEvent as InputAudioBufferAppendEvent, + type InputAudioBufferClearEvent as InputAudioBufferClearEvent, + type InputAudioBufferClearedEvent as InputAudioBufferClearedEvent, + type InputAudioBufferCommitEvent as InputAudioBufferCommitEvent, + type InputAudioBufferCommittedEvent as InputAudioBufferCommittedEvent, + type InputAudioBufferSpeechStartedEvent as InputAudioBufferSpeechStartedEvent, + type InputAudioBufferSpeechStoppedEvent as InputAudioBufferSpeechStoppedEvent, + type RateLimitsUpdatedEvent as RateLimitsUpdatedEvent, + type RealtimeClientEvent as RealtimeClientEvent, + type RealtimeResponse as RealtimeResponse, + type RealtimeResponseStatus as RealtimeResponseStatus, + type RealtimeResponseUsage as RealtimeResponseUsage, + type RealtimeServerEvent as RealtimeServerEvent, + type ResponseAudioDeltaEvent as ResponseAudioDeltaEvent, + type ResponseAudioDoneEvent as ResponseAudioDoneEvent, + type ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent, + type ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent, + type ResponseCancelEvent as ResponseCancelEvent, + type ResponseContentPartAddedEvent as ResponseContentPartAddedEvent, + type ResponseContentPartDoneEvent as ResponseContentPartDoneEvent, + type ResponseCreateEvent as ResponseCreateEvent, + type ResponseCreatedEvent as ResponseCreatedEvent, + type ResponseDoneEvent as ResponseDoneEvent, + type ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent, + type ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent, + type ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent, + type ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent, + type ResponseTextDeltaEvent as ResponseTextDeltaEvent, + type ResponseTextDoneEvent as ResponseTextDoneEvent, + type SessionCreatedEvent as SessionCreatedEvent, + type SessionUpdateEvent as SessionUpdateEvent, + type SessionUpdatedEvent as SessionUpdatedEvent, + type TranscriptionSessionUpdate as TranscriptionSessionUpdate, + type TranscriptionSessionUpdatedEvent as TranscriptionSessionUpdatedEvent, + }; + export { Sessions as Sessions, type SessionsAPISession as Session, diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index f4764b7f8..8fb697c93 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -2793,6 +2793,89 @@ export interface ResponseRetrieveParams { Responses.InputItems = InputItems; export declare namespace Responses { + export { + type ComputerTool as ComputerTool, + type EasyInputMessage as EasyInputMessage, + type FileSearchTool as FileSearchTool, + type FunctionTool as FunctionTool, + type Response as Response, + type ResponseAudioDeltaEvent as ResponseAudioDeltaEvent, + type ResponseAudioDoneEvent as ResponseAudioDoneEvent, + type ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent, + type ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent, + type ResponseCodeInterpreterCallCodeDeltaEvent as ResponseCodeInterpreterCallCodeDeltaEvent, + type ResponseCodeInterpreterCallCodeDoneEvent as ResponseCodeInterpreterCallCodeDoneEvent, + type 
ResponseCodeInterpreterCallCompletedEvent as ResponseCodeInterpreterCallCompletedEvent, + type ResponseCodeInterpreterCallInProgressEvent as ResponseCodeInterpreterCallInProgressEvent, + type ResponseCodeInterpreterCallInterpretingEvent as ResponseCodeInterpreterCallInterpretingEvent, + type ResponseCodeInterpreterToolCall as ResponseCodeInterpreterToolCall, + type ResponseCompletedEvent as ResponseCompletedEvent, + type ResponseComputerToolCall as ResponseComputerToolCall, + type ResponseComputerToolCallOutputItem as ResponseComputerToolCallOutputItem, + type ResponseComputerToolCallOutputScreenshot as ResponseComputerToolCallOutputScreenshot, + type ResponseContent as ResponseContent, + type ResponseContentPartAddedEvent as ResponseContentPartAddedEvent, + type ResponseContentPartDoneEvent as ResponseContentPartDoneEvent, + type ResponseCreatedEvent as ResponseCreatedEvent, + type ResponseError as ResponseError, + type ResponseErrorEvent as ResponseErrorEvent, + type ResponseFailedEvent as ResponseFailedEvent, + type ResponseFileSearchCallCompletedEvent as ResponseFileSearchCallCompletedEvent, + type ResponseFileSearchCallInProgressEvent as ResponseFileSearchCallInProgressEvent, + type ResponseFileSearchCallSearchingEvent as ResponseFileSearchCallSearchingEvent, + type ResponseFileSearchToolCall as ResponseFileSearchToolCall, + type ResponseFormatTextConfig as ResponseFormatTextConfig, + type ResponseFormatTextJSONSchemaConfig as ResponseFormatTextJSONSchemaConfig, + type ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent, + type ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent, + type ResponseFunctionToolCall as ResponseFunctionToolCall, + type ResponseFunctionToolCallItem as ResponseFunctionToolCallItem, + type ResponseFunctionToolCallOutputItem as ResponseFunctionToolCallOutputItem, + type ResponseFunctionWebSearch as ResponseFunctionWebSearch, + type ResponseInProgressEvent as ResponseInProgressEvent, + type ResponseIncludable as ResponseIncludable, + type ResponseIncompleteEvent as ResponseIncompleteEvent, + type ResponseInput as ResponseInput, + type ResponseInputAudio as ResponseInputAudio, + type ResponseInputContent as ResponseInputContent, + type ResponseInputFile as ResponseInputFile, + type ResponseInputImage as ResponseInputImage, + type ResponseInputItem as ResponseInputItem, + type ResponseInputMessageContentList as ResponseInputMessageContentList, + type ResponseInputMessageItem as ResponseInputMessageItem, + type ResponseInputText as ResponseInputText, + type ResponseItem as ResponseItem, + type ResponseOutputAudio as ResponseOutputAudio, + type ResponseOutputItem as ResponseOutputItem, + type ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent, + type ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent, + type ResponseOutputMessage as ResponseOutputMessage, + type ResponseOutputRefusal as ResponseOutputRefusal, + type ResponseOutputText as ResponseOutputText, + type ResponseReasoningItem as ResponseReasoningItem, + type ResponseRefusalDeltaEvent as ResponseRefusalDeltaEvent, + type ResponseRefusalDoneEvent as ResponseRefusalDoneEvent, + type ResponseStatus as ResponseStatus, + type ResponseStreamEvent as ResponseStreamEvent, + type ResponseTextAnnotationDeltaEvent as ResponseTextAnnotationDeltaEvent, + type ResponseTextConfig as ResponseTextConfig, + type ResponseTextDeltaEvent as ResponseTextDeltaEvent, + type ResponseTextDoneEvent as ResponseTextDoneEvent, + type ResponseUsage as 
ResponseUsage, + type ResponseWebSearchCallCompletedEvent as ResponseWebSearchCallCompletedEvent, + type ResponseWebSearchCallInProgressEvent as ResponseWebSearchCallInProgressEvent, + type ResponseWebSearchCallSearchingEvent as ResponseWebSearchCallSearchingEvent, + type Tool as Tool, + type ToolChoiceFunction as ToolChoiceFunction, + type ToolChoiceOptions as ToolChoiceOptions, + type ToolChoiceTypes as ToolChoiceTypes, + type WebSearchTool as WebSearchTool, + type ResponseCreateParams as ResponseCreateParams, + type ResponseCreateParamsNonStreaming as ResponseCreateParamsNonStreaming, + type ResponseCreateParamsStreaming as ResponseCreateParamsStreaming, + type ResponseRetrieveParams as ResponseRetrieveParams, + }; + export { InputItems as InputItems, type ResponseItemList as ResponseItemList, From dae47ed16ede85397ff89c10e9b94a3dc78ce776 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 26 Mar 2025 17:33:07 +0000 Subject: [PATCH 26/73] chore: add hash of OpenAPI spec/config inputs to .stats.yml --- .stats.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.stats.yml b/.stats.yml index 2df281d34..fe9320429 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,4 @@ configured_endpoints: 82 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-5ad6884898c07591750dde560118baf7074a59aecd1f367f930c5e42b04e848a.yml +openapi_spec_hash: 0c255269b89767eae26f4d4dc22d3cbd +config_hash: d36e491b0afc4f79e3afad4b3c9bec70 From 61b5b2d8fcc7426e5cde44ea37dee1e8f7fca5f1 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 26 Mar 2025 19:33:42 +0000 Subject: [PATCH 27/73] chore(client): more accurate streaming errors --- src/streaming.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/streaming.ts b/src/streaming.ts index 47c366071..5b9c19cfa 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -30,7 +30,7 @@ export class Stream implements AsyncIterable { async function* iterator(): AsyncIterator { if (consumed) { - throw new Error('Cannot iterate over a consumed stream, use `.tee()` to split the stream.'); + throw new OpenAIError('Cannot iterate over a consumed stream, use `.tee()` to split the stream.'); } consumed = true; let done = false; @@ -55,7 +55,7 @@ export class Stream implements AsyncIterable { } if (data && data.error) { - throw new APIError(undefined, data.error, undefined, undefined); + throw new APIError(undefined, data.error, undefined, response.headers); } yield data; @@ -113,7 +113,7 @@ export class Stream implements AsyncIterable { async function* iterator(): AsyncIterator { if (consumed) { - throw new Error('Cannot iterate over a consumed stream, use `.tee()` to split the stream.'); + throw new OpenAIError('Cannot iterate over a consumed stream, use `.tee()` to split the stream.'); } consumed = true; let done = false; From af0b603f709af62adf030e4e759cd541f93b6b7b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 26 Mar 2025 19:56:16 +0000 Subject: [PATCH 28/73] chore(client): move misc public files to new `core/` directory, deprecate old paths --- MIGRATION.md | 61 ++-- src/api-promise.ts | 103 +----- src/client.ts | 10 +- src/core/README.md | 3 + src/core/api-promise.ts | 101 ++++++ src/core/error.ts | 154 ++++++++ src/core/pagination.ts | 204 +++++++++++ src/core/resource.ts | 11 + src/core/streaming.ts | 336 
+++++++++++++++++ src/core/uploads.ts | 2 + src/error.ts | 156 +------- src/index.ts | 8 +- src/internal/README.md | 3 + src/internal/decoders/line.ts | 2 +- src/internal/parse.ts | 2 +- src/internal/request-options.ts | 2 +- src/internal/utils/base64.ts | 2 +- src/internal/utils/path.ts | 2 +- src/internal/utils/values.ts | 2 +- src/pagination.ts | 206 +---------- src/resource.ts | 13 +- src/resources/audio/audio.ts | 2 +- src/resources/audio/speech.ts | 4 +- src/resources/audio/transcriptions.ts | 8 +- src/resources/audio/translations.ts | 6 +- src/resources/batches.ts | 6 +- src/resources/beta/assistants.ts | 6 +- src/resources/beta/beta.ts | 2 +- src/resources/beta/realtime/realtime.ts | 2 +- src/resources/beta/realtime/sessions.ts | 4 +- .../beta/realtime/transcription-sessions.ts | 4 +- src/resources/beta/threads/messages.ts | 6 +- src/resources/beta/threads/runs/runs.ts | 8 +- src/resources/beta/threads/runs/steps.ts | 6 +- src/resources/beta/threads/threads.ts | 6 +- src/resources/chat/chat.ts | 2 +- src/resources/chat/completions/completions.ts | 8 +- src/resources/chat/completions/messages.ts | 4 +- src/resources/completions.ts | 6 +- src/resources/embeddings.ts | 4 +- src/resources/files.ts | 8 +- src/resources/fine-tuning/fine-tuning.ts | 2 +- src/resources/fine-tuning/jobs/checkpoints.ts | 4 +- src/resources/fine-tuning/jobs/jobs.ts | 6 +- src/resources/images.ts | 6 +- src/resources/models.ts | 6 +- src/resources/moderations.ts | 4 +- src/resources/responses/input-items.ts | 4 +- src/resources/responses/responses.ts | 8 +- src/resources/uploads/parts.ts | 6 +- src/resources/uploads/uploads.ts | 4 +- src/resources/vector-stores/file-batches.ts | 6 +- src/resources/vector-stores/files.ts | 6 +- src/resources/vector-stores/vector-stores.ts | 6 +- src/streaming.ts | 338 +----------------- src/uploads.ts | 4 +- tests/form.test.ts | 2 +- tests/index.test.ts | 2 +- tests/streaming.test.ts | 2 +- tests/uploads.test.ts | 4 +- 60 files changed, 967 insertions(+), 938 deletions(-) create mode 100644 src/core/README.md create mode 100644 src/core/api-promise.ts create mode 100644 src/core/error.ts create mode 100644 src/core/pagination.ts create mode 100644 src/core/resource.ts create mode 100644 src/core/streaming.ts create mode 100644 src/core/uploads.ts create mode 100644 src/internal/README.md diff --git a/MIGRATION.md b/MIGRATION.md index c375b825c..c5e26d3c2 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -321,49 +321,48 @@ The `headers` property on `APIError` objects is now an instance of the Web [Head ### Removed exports -#### `Response` - -```typescript -// Before -import { Response } from 'openai'; - -// After -// `Response` must now come from the builtin types -``` - #### Resource classes -If you were importing resource classes from the root package then you must now import them from the file they are defined in: +If you were importing resource classes from the root package then you must now import them from the file they are defined in. +This was never valid at the type level and only worked in CommonJS files. 
```typescript // Before -import { Completions } from 'openai'; +const { Completions } = require('openai'); // After -import { Completions } from 'openai/resources/completions'; +const { OpenAI } = require('openai'); +OpenAI.Completions; // or import directly from openai/resources/completions ``` -#### `openai/core` +#### Refactor of `openai/core`, `error`, `pagination`, `resource`, `streaming` and `uploads` -The `openai/core` file was intended to be internal-only but it was publicly accessible, as such it has been refactored and split up into internal files. +Much of the `openai/core` file was intended to be internal-only but it was publicly accessible, as such it has been refactored and split up into internal and public files, with public-facing code moved to a new `core` folder and internal code moving to the private `internal` folder. -If you were relying on anything that was only exported from `openai/core` and is also not accessible anywhere else, please open an issue and we'll consider adding it to the public API. - -#### `APIClient` - -The `APIClient` base client class has been removed as it is no longer needed. If you were importing this class then you must now import the main client class: +At the same time, we moved some public-facing files which were previously at the top level into `core` to make the file structure cleaner and more clear: ```typescript // Before -import { APIClient } from 'openai/core'; +import 'openai/error'; +import 'openai/pagination'; +import 'openai/resource'; +import 'openai/streaming'; +import 'openai/uploads'; // After -import { OpenAI } from 'openai'; +import 'openai/core/error'; +import 'openai/core/pagination'; +import 'openai/core/resource'; +import 'openai/core/streaming'; +import 'openai/core/uploads'; ``` -#### Cleaned up `openai/uploads` exports +If you were relying on anything that was only exported from `openai/core` and is also not accessible anywhere else, please open an issue and we'll consider adding it to the public API. + +#### Cleaned up `uploads` exports -The following exports have been removed from `openai/uploads` as they were not intended to be a part of the public API: +As part of the `core` refactor, `openai/uploads` was moved to `openai/core/uploads` +and the following exports were removed, as they were not intended to be a part of the public API: - `fileFromPath` - `BlobPart` @@ -382,5 +381,17 @@ The following exports have been removed from `openai/uploads` as they were not i Note that `Uploadable` & `toFile` **are** still exported: ```typescript -import { type Uploadable, toFile } from 'openai/uploads'; +import { type Uploadable, toFile } from 'openai/core/uploads'; +``` + +#### `APIClient` + +The `APIClient` base client class has been removed as it is no longer needed. If you were importing this class then you must now import the main client class: + +```typescript +// Before +import { APIClient } from 'openai/core'; + +// After +import { OpenAI } from 'openai'; ``` diff --git a/src/api-promise.ts b/src/api-promise.ts index c9d9a9a9f..8c775ee69 100644 --- a/src/api-promise.ts +++ b/src/api-promise.ts @@ -1,101 +1,2 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -import { type OpenAI } from './client'; - -import { type PromiseOrValue } from './internal/types'; -import { - type APIResponseProps, - defaultParseResponse, - type WithRequestID, - addRequestID, -} from './internal/parse'; - -/** - * A subclass of `Promise` providing additional helper methods - * for interacting with the SDK. - */ -export class APIPromise extends Promise> { - private parsedPromise: Promise> | undefined; - #client: OpenAI; - - constructor( - client: OpenAI, - private responsePromise: Promise, - private parseResponse: ( - client: OpenAI, - props: APIResponseProps, - ) => PromiseOrValue> = defaultParseResponse, - ) { - super((resolve) => { - // this is maybe a bit weird but this has to be a no-op to not implicitly - // parse the response body; instead .then, .catch, .finally are overridden - // to parse the response - resolve(null as any); - }); - this.#client = client; - } - - _thenUnwrap(transform: (data: T, props: APIResponseProps) => U): APIPromise { - return new APIPromise(this.#client, this.responsePromise, async (client, props) => - addRequestID(transform(await this.parseResponse(client, props), props), props.response), - ); - } - - /** - * Gets the raw `Response` instance instead of parsing the response - * data. - * - * If you want to parse the response body but still get the `Response` - * instance, you can use {@link withResponse()}. - * - * 👋 Getting the wrong TypeScript type for `Response`? - * Try setting `"moduleResolution": "NodeNext"` or add `"lib": ["DOM"]` - * to your `tsconfig.json`. - */ - asResponse(): Promise { - return this.responsePromise.then((p) => p.response); - } - - /** - * Gets the parsed response data, the raw `Response` instance and the ID of the request, - * returned via the X-Request-ID header which is useful for debugging requests and reporting - * issues to OpenAI. - * - * If you just want to get the raw `Response` instance without parsing it, - * you can use {@link asResponse()}. - * - * 👋 Getting the wrong TypeScript type for `Response`? - * Try setting `"moduleResolution": "NodeNext"` or add `"lib": ["DOM"]` - * to your `tsconfig.json`. 
- */ - async withResponse(): Promise<{ data: T; response: Response; request_id: string | null }> { - const [data, response] = await Promise.all([this.parse(), this.asResponse()]); - return { data, response, request_id: response.headers.get('x-request-id') }; - } - - private parse(): Promise> { - if (!this.parsedPromise) { - this.parsedPromise = this.responsePromise.then((data) => - this.parseResponse(this.#client, data), - ) as any as Promise>; - } - return this.parsedPromise; - } - - override then, TResult2 = never>( - onfulfilled?: ((value: WithRequestID) => TResult1 | PromiseLike) | undefined | null, - onrejected?: ((reason: any) => TResult2 | PromiseLike) | undefined | null, - ): Promise { - return this.parse().then(onfulfilled, onrejected); - } - - override catch( - onrejected?: ((reason: any) => TResult | PromiseLike) | undefined | null, - ): Promise | TResult> { - return this.parse().catch(onrejected); - } - - override finally(onfinally?: (() => void) | undefined | null): Promise> { - return this.parse().finally(onfinally); - } -} +/** @deprecated Import from ./core/api-promise instead */ +export * from './core/api-promise'; diff --git a/src/client.ts b/src/client.ts index 71c6c1f18..41f128ebe 100644 --- a/src/client.ts +++ b/src/client.ts @@ -14,12 +14,12 @@ import * as Shims from './internal/shims'; import * as Opts from './internal/request-options'; import * as qs from './internal/qs'; import { VERSION } from './version'; -import * as Errors from './error'; -import * as Pagination from './pagination'; -import { AbstractPage, type CursorPageParams, CursorPageResponse, PageResponse } from './pagination'; -import * as Uploads from './uploads'; +import * as Errors from './core/error'; +import * as Pagination from './core/pagination'; +import { AbstractPage, type CursorPageParams, CursorPageResponse, PageResponse } from './core/pagination'; +import * as Uploads from './core/uploads'; import * as API from './resources/index'; -import { APIPromise } from './api-promise'; +import { APIPromise } from './core/api-promise'; import { type Fetch } from './internal/builtin-types'; import { isRunningInBrowser } from './internal/detect-platform'; import { HeadersLike, NullableHeaders, buildHeaders } from './internal/headers'; diff --git a/src/core/README.md b/src/core/README.md new file mode 100644 index 000000000..485fce861 --- /dev/null +++ b/src/core/README.md @@ -0,0 +1,3 @@ +# `core` + +This directory holds public modules implementing non-resource-specific SDK functionality. diff --git a/src/core/api-promise.ts b/src/core/api-promise.ts new file mode 100644 index 000000000..9e6c756c8 --- /dev/null +++ b/src/core/api-promise.ts @@ -0,0 +1,101 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { type OpenAI } from '../client'; + +import { type PromiseOrValue } from '../internal/types'; +import { + type APIResponseProps, + defaultParseResponse, + type WithRequestID, + addRequestID, +} from '../internal/parse'; + +/** + * A subclass of `Promise` providing additional helper methods + * for interacting with the SDK. 
+ */ +export class APIPromise extends Promise> { + private parsedPromise: Promise> | undefined; + #client: OpenAI; + + constructor( + client: OpenAI, + private responsePromise: Promise, + private parseResponse: ( + client: OpenAI, + props: APIResponseProps, + ) => PromiseOrValue> = defaultParseResponse, + ) { + super((resolve) => { + // this is maybe a bit weird but this has to be a no-op to not implicitly + // parse the response body; instead .then, .catch, .finally are overridden + // to parse the response + resolve(null as any); + }); + this.#client = client; + } + + _thenUnwrap(transform: (data: T, props: APIResponseProps) => U): APIPromise { + return new APIPromise(this.#client, this.responsePromise, async (client, props) => + addRequestID(transform(await this.parseResponse(client, props), props), props.response), + ); + } + + /** + * Gets the raw `Response` instance instead of parsing the response + * data. + * + * If you want to parse the response body but still get the `Response` + * instance, you can use {@link withResponse()}. + * + * 👋 Getting the wrong TypeScript type for `Response`? + * Try setting `"moduleResolution": "NodeNext"` or add `"lib": ["DOM"]` + * to your `tsconfig.json`. + */ + asResponse(): Promise { + return this.responsePromise.then((p) => p.response); + } + + /** + * Gets the parsed response data, the raw `Response` instance and the ID of the request, + * returned via the X-Request-ID header which is useful for debugging requests and reporting + * issues to OpenAI. + * + * If you just want to get the raw `Response` instance without parsing it, + * you can use {@link asResponse()}. + * + * 👋 Getting the wrong TypeScript type for `Response`? + * Try setting `"moduleResolution": "NodeNext"` or add `"lib": ["DOM"]` + * to your `tsconfig.json`. + */ + async withResponse(): Promise<{ data: T; response: Response; request_id: string | null }> { + const [data, response] = await Promise.all([this.parse(), this.asResponse()]); + return { data, response, request_id: response.headers.get('x-request-id') }; + } + + private parse(): Promise> { + if (!this.parsedPromise) { + this.parsedPromise = this.responsePromise.then((data) => + this.parseResponse(this.#client, data), + ) as any as Promise>; + } + return this.parsedPromise; + } + + override then, TResult2 = never>( + onfulfilled?: ((value: WithRequestID) => TResult1 | PromiseLike) | undefined | null, + onrejected?: ((reason: any) => TResult2 | PromiseLike) | undefined | null, + ): Promise { + return this.parse().then(onfulfilled, onrejected); + } + + override catch( + onrejected?: ((reason: any) => TResult | PromiseLike) | undefined | null, + ): Promise | TResult> { + return this.parse().catch(onrejected); + } + + override finally(onfinally?: (() => void) | undefined | null): Promise> { + return this.parse().finally(onfinally); + } +} diff --git a/src/core/error.ts b/src/core/error.ts new file mode 100644 index 000000000..c18cc0e33 --- /dev/null +++ b/src/core/error.ts @@ -0,0 +1,154 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
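A brief sketch of how the `asResponse()` and `withResponse()` helpers above are typically used from application code, assuming an existing `client`; the model and prompt are placeholders:

```ts
const promise = client.chat.completions.create({
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'Say hello' }],
});

// Raw fetch Response only; the body is not parsed or consumed.
const raw = await promise.asResponse();
console.log(raw.status, raw.headers.get('x-request-id'));

// Parsed data plus the raw Response and request ID in one call.
const { data, response, request_id } = await promise.withResponse();
console.log(request_id, data.choices[0]?.message.content, response.status);
```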
+ +import { castToError } from '../internal/errors'; + +export class OpenAIError extends Error {} + +export class APIError< + TStatus extends number | undefined = number | undefined, + THeaders extends Headers | undefined = Headers | undefined, + TError extends Object | undefined = Object | undefined, +> extends OpenAIError { + /** HTTP status for the response that caused the error */ + readonly status: TStatus; + /** HTTP headers for the response that caused the error */ + readonly headers: THeaders; + /** JSON body of the response that caused the error */ + readonly error: TError; + + readonly code: string | null | undefined; + readonly param: string | null | undefined; + readonly type: string | undefined; + + readonly requestID: string | null | undefined; + + constructor(status: TStatus, error: TError, message: string | undefined, headers: THeaders) { + super(`${APIError.makeMessage(status, error, message)}`); + this.status = status; + this.headers = headers; + this.requestID = headers?.get('x-request-id'); + this.error = error; + + const data = error as Record; + this.code = data?.['code']; + this.param = data?.['param']; + this.type = data?.['type']; + } + + private static makeMessage(status: number | undefined, error: any, message: string | undefined) { + const msg = + error?.message ? + typeof error.message === 'string' ? + error.message + : JSON.stringify(error.message) + : error ? JSON.stringify(error) + : message; + + if (status && msg) { + return `${status} ${msg}`; + } + if (status) { + return `${status} status code (no body)`; + } + if (msg) { + return msg; + } + return '(no status code or body)'; + } + + static generate( + status: number | undefined, + errorResponse: Object | undefined, + message: string | undefined, + headers: Headers | undefined, + ): APIError { + if (!status || !headers) { + return new APIConnectionError({ message, cause: castToError(errorResponse) }); + } + + const error = (errorResponse as Record)?.['error']; + + if (status === 400) { + return new BadRequestError(status, error, message, headers); + } + + if (status === 401) { + return new AuthenticationError(status, error, message, headers); + } + + if (status === 403) { + return new PermissionDeniedError(status, error, message, headers); + } + + if (status === 404) { + return new NotFoundError(status, error, message, headers); + } + + if (status === 409) { + return new ConflictError(status, error, message, headers); + } + + if (status === 422) { + return new UnprocessableEntityError(status, error, message, headers); + } + + if (status === 429) { + return new RateLimitError(status, error, message, headers); + } + + if (status >= 500) { + return new InternalServerError(status, error, message, headers); + } + + return new APIError(status, error, message, headers); + } +} + +export class APIUserAbortError extends APIError { + constructor({ message }: { message?: string } = {}) { + super(undefined, undefined, message || 'Request was aborted.', undefined); + } +} + +export class APIConnectionError extends APIError { + constructor({ message, cause }: { message?: string | undefined; cause?: Error | undefined }) { + super(undefined, undefined, message || 'Connection error.', undefined); + // in some environments the 'cause' property is already declared + // @ts-ignore + if (cause) this.cause = cause; + } +} + +export class APIConnectionTimeoutError extends APIConnectionError { + constructor({ message }: { message?: string } = {}) { + super({ message: message ?? 'Request timed out.' 
}); + } +} + +export class BadRequestError extends APIError<400, Headers> {} + +export class AuthenticationError extends APIError<401, Headers> {} + +export class PermissionDeniedError extends APIError<403, Headers> {} + +export class NotFoundError extends APIError<404, Headers> {} + +export class ConflictError extends APIError<409, Headers> {} + +export class UnprocessableEntityError extends APIError<422, Headers> {} + +export class RateLimitError extends APIError<429, Headers> {} + +export class InternalServerError extends APIError {} + +export class LengthFinishReasonError extends OpenAIError { + constructor() { + super(`Could not parse response content as the length limit was reached`); + } +} + +export class ContentFilterFinishReasonError extends OpenAIError { + constructor() { + super(`Could not parse response content as the request was rejected by the content filter`); + } +} diff --git a/src/core/pagination.ts b/src/core/pagination.ts new file mode 100644 index 000000000..f7f49faaa --- /dev/null +++ b/src/core/pagination.ts @@ -0,0 +1,204 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { OpenAIError } from './error'; +import { FinalRequestOptions } from '../internal/request-options'; +import { defaultParseResponse, WithRequestID } from '../internal/parse'; +import { type OpenAI } from '../client'; +import { APIPromise } from './api-promise'; +import { type APIResponseProps } from '../internal/parse'; +import { maybeObj } from '../internal/utils/values'; + +export type PageRequestOptions = Pick; + +export abstract class AbstractPage implements AsyncIterable { + #client: OpenAI; + protected options: FinalRequestOptions; + + protected response: Response; + protected body: unknown; + + constructor(client: OpenAI, response: Response, body: unknown, options: FinalRequestOptions) { + this.#client = client; + this.options = options; + this.response = response; + this.body = body; + } + + abstract nextPageRequestOptions(): PageRequestOptions | null; + + abstract getPaginatedItems(): Item[]; + + hasNextPage(): boolean { + const items = this.getPaginatedItems(); + if (!items.length) return false; + return this.nextPageRequestOptions() != null; + } + + async getNextPage(): Promise { + const nextOptions = this.nextPageRequestOptions(); + if (!nextOptions) { + throw new OpenAIError( + 'No next page expected; please check `.hasNextPage()` before calling `.getNextPage()`.', + ); + } + + return await this.#client.requestAPIList(this.constructor as any, nextOptions); + } + + async *iterPages(): AsyncGenerator { + let page: this = this; + yield page; + while (page.hasNextPage()) { + page = await page.getNextPage(); + yield page; + } + } + + async *[Symbol.asyncIterator](): AsyncGenerator { + for await (const page of this.iterPages()) { + for (const item of page.getPaginatedItems()) { + yield item; + } + } + } +} + +/** + * This subclass of Promise will resolve to an instantiated Page once the request completes. 
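A hedged sketch of consuming the error hierarchy defined above; it assumes the error classes remain re-exported from the package root and that `text-embedding-3-small` is an acceptable illustrative model:

```ts
import OpenAI, { APIError, RateLimitError } from 'openai';

const client = new OpenAI();

try {
  await client.embeddings.create({ model: 'text-embedding-3-small', input: 'hello' });
} catch (err) {
  if (err instanceof RateLimitError) {
    console.warn('Rate limited; request id:', err.requestID);
  } else if (err instanceof APIError) {
    console.error(err.status, err.code, err.type);
  } else {
    throw err;
  }
}
```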
+ * + * It also implements AsyncIterable to allow auto-paginating iteration on an unawaited list call, eg: + * + * for await (const item of client.items.list()) { + * console.log(item) + * } + */ +export class PagePromise< + PageClass extends AbstractPage, + Item = ReturnType[number], + > + extends APIPromise + implements AsyncIterable +{ + constructor( + client: OpenAI, + request: Promise, + Page: new (...args: ConstructorParameters) => PageClass, + ) { + super( + client, + request, + async (client, props) => + new Page( + client, + props.response, + await defaultParseResponse(client, props), + props.options, + ) as WithRequestID, + ); + } + + /** + * Allow auto-paginating iteration on an unawaited list call, eg: + * + * for await (const item of client.items.list()) { + * console.log(item) + * } + */ + async *[Symbol.asyncIterator]() { + const page = await this; + for await (const item of page) { + yield item; + } + } +} + +export interface PageResponse { + data: Array; + + object: string; +} + +/** + * Note: no pagination actually occurs yet, this is for forwards-compatibility. + */ +export class Page extends AbstractPage implements PageResponse { + data: Array; + + object: string; + + constructor(client: OpenAI, response: Response, body: PageResponse, options: FinalRequestOptions) { + super(client, response, body, options); + + this.data = body.data || []; + this.object = body.object; + } + + getPaginatedItems(): Item[] { + return this.data ?? []; + } + + nextPageRequestOptions(): PageRequestOptions | null { + return null; + } +} + +export interface CursorPageResponse { + data: Array; + + has_more: boolean; +} + +export interface CursorPageParams { + after?: string; + + limit?: number; +} + +export class CursorPage + extends AbstractPage + implements CursorPageResponse +{ + data: Array; + + has_more: boolean; + + constructor( + client: OpenAI, + response: Response, + body: CursorPageResponse, + options: FinalRequestOptions, + ) { + super(client, response, body, options); + + this.data = body.data || []; + this.has_more = body.has_more || false; + } + + getPaginatedItems(): Item[] { + return this.data ?? []; + } + + override hasNextPage(): boolean { + if (this.has_more === false) { + return false; + } + + return super.hasNextPage(); + } + + nextPageRequestOptions(): PageRequestOptions | null { + const data = this.getPaginatedItems(); + const id = data[data.length - 1]?.id; + if (!id) { + return null; + } + + return { + ...this.options, + query: { + ...maybeObj(this.options.query), + after: id, + }, + }; + } +} diff --git a/src/core/resource.ts b/src/core/resource.ts new file mode 100644 index 000000000..8ba97f70a --- /dev/null +++ b/src/core/resource.ts @@ -0,0 +1,11 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import type { OpenAI } from '../client'; + +export class APIResource { + protected _client: OpenAI; + + constructor(client: OpenAI) { + this._client = client; + } +} diff --git a/src/core/streaming.ts b/src/core/streaming.ts new file mode 100644 index 000000000..e47c088d2 --- /dev/null +++ b/src/core/streaming.ts @@ -0,0 +1,336 @@ +import { OpenAIError } from './error'; +import { type ReadableStream } from '../internal/shim-types'; +import { makeReadableStream } from '../internal/shims'; +import { findDoubleNewlineIndex, LineDecoder } from '../internal/decoders/line'; +import { ReadableStreamToAsyncIterable } from '../internal/shims'; +import { isAbortError } from '../internal/errors'; + +import { APIError } from './error'; + +type Bytes = string | ArrayBuffer | Uint8Array | null | undefined; + +export type ServerSentEvent = { + event: string | null; + data: string; + raw: string[]; +}; + +export class Stream implements AsyncIterable { + controller: AbortController; + + constructor( + private iterator: () => AsyncIterator, + controller: AbortController, + ) { + this.controller = controller; + } + + static fromSSEResponse(response: Response, controller: AbortController): Stream { + let consumed = false; + + async function* iterator(): AsyncIterator { + if (consumed) { + throw new OpenAIError('Cannot iterate over a consumed stream, use `.tee()` to split the stream.'); + } + consumed = true; + let done = false; + try { + for await (const sse of _iterSSEMessages(response, controller)) { + if (done) continue; + + if (sse.data.startsWith('[DONE]')) { + done = true; + continue; + } + + if (sse.event === null) { + let data; + + try { + data = JSON.parse(sse.data); + } catch (e) { + console.error(`Could not parse message into JSON:`, sse.data); + console.error(`From chunk:`, sse.raw); + throw e; + } + + if (data && data.error) { + throw new APIError(undefined, data.error, undefined, response.headers); + } + + yield data; + } else { + let data; + try { + data = JSON.parse(sse.data); + } catch (e) { + console.error(`Could not parse message into JSON:`, sse.data); + console.error(`From chunk:`, sse.raw); + throw e; + } + // TODO: Is this where the error should be thrown? + if (sse.event == 'error') { + throw new APIError(undefined, data.error, data.message, undefined); + } + yield { event: sse.event, data: data } as any; + } + } + done = true; + } catch (e) { + // If the user calls `stream.controller.abort()`, we should exit without throwing. + if (isAbortError(e)) return; + throw e; + } finally { + // If the user `break`s, abort the ongoing request. + if (!done) controller.abort(); + } + } + + return new Stream(iterator, controller); + } + + /** + * Generates a Stream from a newline-separated ReadableStream + * where each item is a JSON value. 
+ */ + static fromReadableStream(readableStream: ReadableStream, controller: AbortController): Stream { + let consumed = false; + + async function* iterLines(): AsyncGenerator { + const lineDecoder = new LineDecoder(); + + const iter = ReadableStreamToAsyncIterable(readableStream); + for await (const chunk of iter) { + for (const line of lineDecoder.decode(chunk)) { + yield line; + } + } + + for (const line of lineDecoder.flush()) { + yield line; + } + } + + async function* iterator(): AsyncIterator { + if (consumed) { + throw new OpenAIError('Cannot iterate over a consumed stream, use `.tee()` to split the stream.'); + } + consumed = true; + let done = false; + try { + for await (const line of iterLines()) { + if (done) continue; + if (line) yield JSON.parse(line); + } + done = true; + } catch (e) { + // If the user calls `stream.controller.abort()`, we should exit without throwing. + if (isAbortError(e)) return; + throw e; + } finally { + // If the user `break`s, abort the ongoing request. + if (!done) controller.abort(); + } + } + + return new Stream(iterator, controller); + } + + [Symbol.asyncIterator](): AsyncIterator { + return this.iterator(); + } + + /** + * Splits the stream into two streams which can be + * independently read from at different speeds. + */ + tee(): [Stream, Stream] { + const left: Array>> = []; + const right: Array>> = []; + const iterator = this.iterator(); + + const teeIterator = (queue: Array>>): AsyncIterator => { + return { + next: () => { + if (queue.length === 0) { + const result = iterator.next(); + left.push(result); + right.push(result); + } + return queue.shift()!; + }, + }; + }; + + return [ + new Stream(() => teeIterator(left), this.controller), + new Stream(() => teeIterator(right), this.controller), + ]; + } + + /** + * Converts this stream to a newline-separated ReadableStream of + * JSON stringified values in the stream + * which can be turned back into a Stream with `Stream.fromReadableStream()`. + */ + toReadableStream(): ReadableStream { + const self = this; + let iter: AsyncIterator; + const encoder: { + encode(str: string): Uint8Array; + } = new (globalThis as any).TextEncoder(); + + return makeReadableStream({ + async start() { + iter = self[Symbol.asyncIterator](); + }, + async pull(ctrl: any) { + try { + const { value, done } = await iter.next(); + if (done) return ctrl.close(); + + const bytes = encoder.encode(JSON.stringify(value) + '\n'); + + ctrl.enqueue(bytes); + } catch (err) { + ctrl.error(err); + } + }, + async cancel() { + await iter.return?.(); + }, + }); + } +} + +export async function* _iterSSEMessages( + response: Response, + controller: AbortController, +): AsyncGenerator { + if (!response.body) { + controller.abort(); + if ( + typeof (globalThis as any).navigator !== 'undefined' && + (globalThis as any).navigator.product === 'ReactNative' + ) { + throw new OpenAIError( + `The default react-native fetch implementation does not support streaming. 
Please use expo/fetch: https://docs.expo.dev/versions/latest/sdk/expo/#expofetch-api`, + ); + } + throw new OpenAIError(`Attempted to iterate over a response with no body`); + } + + const sseDecoder = new SSEDecoder(); + const lineDecoder = new LineDecoder(); + + const iter = ReadableStreamToAsyncIterable(response.body); + for await (const sseChunk of iterSSEChunks(iter)) { + for (const line of lineDecoder.decode(sseChunk)) { + const sse = sseDecoder.decode(line); + if (sse) yield sse; + } + } + + for (const line of lineDecoder.flush()) { + const sse = sseDecoder.decode(line); + if (sse) yield sse; + } +} + +/** + * Given an async iterable iterator, iterates over it and yields full + * SSE chunks, i.e. yields when a double new-line is encountered. + */ +async function* iterSSEChunks(iterator: AsyncIterableIterator): AsyncGenerator { + let data = new Uint8Array(); + + for await (const chunk of iterator) { + if (chunk == null) { + continue; + } + + const binaryChunk = + chunk instanceof ArrayBuffer ? new Uint8Array(chunk) + : typeof chunk === 'string' ? new (globalThis as any).TextEncoder().encode(chunk) + : chunk; + + let newData = new Uint8Array(data.length + binaryChunk.length); + newData.set(data); + newData.set(binaryChunk, data.length); + data = newData; + + let patternIndex; + while ((patternIndex = findDoubleNewlineIndex(data)) !== -1) { + yield data.slice(0, patternIndex); + data = data.slice(patternIndex); + } + } + + if (data.length > 0) { + yield data; + } +} + +class SSEDecoder { + private data: string[]; + private event: string | null; + private chunks: string[]; + + constructor() { + this.event = null; + this.data = []; + this.chunks = []; + } + + decode(line: string) { + if (line.endsWith('\r')) { + line = line.substring(0, line.length - 1); + } + + if (!line) { + // empty line and we didn't previously encounter any messages + if (!this.event && !this.data.length) return null; + + const sse: ServerSentEvent = { + event: this.event, + data: this.data.join('\n'), + raw: this.chunks, + }; + + this.event = null; + this.data = []; + this.chunks = []; + + return sse; + } + + this.chunks.push(line); + + if (line.startsWith(':')) { + return null; + } + + let [fieldname, _, value] = partition(line, ':'); + + if (value.startsWith(' ')) { + value = value.substring(1); + } + + if (fieldname === 'event') { + this.event = value; + } else if (fieldname === 'data') { + this.data.push(value); + } + + return null; + } +} + +function partition(str: string, delimiter: string): [string, string, string] { + const index = str.indexOf(delimiter); + if (index !== -1) { + return [str.substring(0, index), delimiter, str.substring(index + delimiter.length)]; + } + + return [str, '', '']; +} diff --git a/src/core/uploads.ts b/src/core/uploads.ts new file mode 100644 index 000000000..2882ca6d1 --- /dev/null +++ b/src/core/uploads.ts @@ -0,0 +1,2 @@ +export { type Uploadable } from '../internal/uploads'; +export { toFile, type ToFileInput } from '../internal/to-file'; diff --git a/src/error.ts b/src/error.ts index 74251d51f..fc55f46c0 100644 --- a/src/error.ts +++ b/src/error.ts @@ -1,154 +1,2 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -import { castToError } from './internal/errors'; - -export class OpenAIError extends Error {} - -export class APIError< - TStatus extends number | undefined = number | undefined, - THeaders extends Headers | undefined = Headers | undefined, - TError extends Object | undefined = Object | undefined, -> extends OpenAIError { - /** HTTP status for the response that caused the error */ - readonly status: TStatus; - /** HTTP headers for the response that caused the error */ - readonly headers: THeaders; - /** JSON body of the response that caused the error */ - readonly error: TError; - - readonly code: string | null | undefined; - readonly param: string | null | undefined; - readonly type: string | undefined; - - readonly requestID: string | null | undefined; - - constructor(status: TStatus, error: TError, message: string | undefined, headers: THeaders) { - super(`${APIError.makeMessage(status, error, message)}`); - this.status = status; - this.headers = headers; - this.requestID = headers?.get('x-request-id'); - this.error = error; - - const data = error as Record; - this.code = data?.['code']; - this.param = data?.['param']; - this.type = data?.['type']; - } - - private static makeMessage(status: number | undefined, error: any, message: string | undefined) { - const msg = - error?.message ? - typeof error.message === 'string' ? - error.message - : JSON.stringify(error.message) - : error ? JSON.stringify(error) - : message; - - if (status && msg) { - return `${status} ${msg}`; - } - if (status) { - return `${status} status code (no body)`; - } - if (msg) { - return msg; - } - return '(no status code or body)'; - } - - static generate( - status: number | undefined, - errorResponse: Object | undefined, - message: string | undefined, - headers: Headers | undefined, - ): APIError { - if (!status || !headers) { - return new APIConnectionError({ message, cause: castToError(errorResponse) }); - } - - const error = (errorResponse as Record)?.['error']; - - if (status === 400) { - return new BadRequestError(status, error, message, headers); - } - - if (status === 401) { - return new AuthenticationError(status, error, message, headers); - } - - if (status === 403) { - return new PermissionDeniedError(status, error, message, headers); - } - - if (status === 404) { - return new NotFoundError(status, error, message, headers); - } - - if (status === 409) { - return new ConflictError(status, error, message, headers); - } - - if (status === 422) { - return new UnprocessableEntityError(status, error, message, headers); - } - - if (status === 429) { - return new RateLimitError(status, error, message, headers); - } - - if (status >= 500) { - return new InternalServerError(status, error, message, headers); - } - - return new APIError(status, error, message, headers); - } -} - -export class APIUserAbortError extends APIError { - constructor({ message }: { message?: string } = {}) { - super(undefined, undefined, message || 'Request was aborted.', undefined); - } -} - -export class APIConnectionError extends APIError { - constructor({ message, cause }: { message?: string | undefined; cause?: Error | undefined }) { - super(undefined, undefined, message || 'Connection error.', undefined); - // in some environments the 'cause' property is already declared - // @ts-ignore - if (cause) this.cause = cause; - } -} - -export class APIConnectionTimeoutError extends APIConnectionError { - constructor({ message }: { message?: string } = {}) { - super({ message: message ?? 'Request timed out.' 
}); - } -} - -export class BadRequestError extends APIError<400, Headers> {} - -export class AuthenticationError extends APIError<401, Headers> {} - -export class PermissionDeniedError extends APIError<403, Headers> {} - -export class NotFoundError extends APIError<404, Headers> {} - -export class ConflictError extends APIError<409, Headers> {} - -export class UnprocessableEntityError extends APIError<422, Headers> {} - -export class RateLimitError extends APIError<429, Headers> {} - -export class InternalServerError extends APIError {} - -export class LengthFinishReasonError extends OpenAIError { - constructor() { - super(`Could not parse response content as the length limit was reached`); - } -} - -export class ContentFilterFinishReasonError extends OpenAIError { - constructor() { - super(`Could not parse response content as the request was rejected by the content filter`); - } -} +/** @deprecated Import from ./core/error instead */ +export * from './core/error'; diff --git a/src/index.ts b/src/index.ts index 9fb1eeb03..69deeec37 100644 --- a/src/index.ts +++ b/src/index.ts @@ -2,10 +2,10 @@ export { OpenAI as default } from './client'; -export { type Uploadable, toFile } from './uploads'; -export { APIPromise } from './api-promise'; +export { type Uploadable, toFile } from './core/uploads'; +export { APIPromise } from './core/api-promise'; export { OpenAI, type ClientOptions } from './client'; -export { PagePromise } from './pagination'; +export { PagePromise } from './core/pagination'; export { OpenAIError, APIError, @@ -20,6 +20,6 @@ export { InternalServerError, PermissionDeniedError, UnprocessableEntityError, -} from './error'; +} from './core/error'; export { AzureOpenAI } from './azure'; diff --git a/src/internal/README.md b/src/internal/README.md new file mode 100644 index 000000000..3ef5a25ba --- /dev/null +++ b/src/internal/README.md @@ -0,0 +1,3 @@ +# `internal` + +The modules in this directory are not importable outside this package and will change between releases. diff --git a/src/internal/decoders/line.ts b/src/internal/decoders/line.ts index 335ad7e30..1af727b34 100644 --- a/src/internal/decoders/line.ts +++ b/src/internal/decoders/line.ts @@ -1,4 +1,4 @@ -import { OpenAIError } from '../../error'; +import { OpenAIError } from '../../core/error'; export type Bytes = string | ArrayBuffer | Uint8Array | null | undefined; diff --git a/src/internal/parse.ts b/src/internal/parse.ts index 14147338d..5355c7728 100644 --- a/src/internal/parse.ts +++ b/src/internal/parse.ts @@ -1,7 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
import type { FinalRequestOptions } from './request-options'; -import { Stream } from '../streaming'; +import { Stream } from '../core/streaming'; import { type OpenAI } from '../client'; import { formatRequestDetails, loggerFor } from './utils/log'; import type { AbstractPage } from '../pagination'; diff --git a/src/internal/request-options.ts b/src/internal/request-options.ts index 1c7a81e0b..78daa4199 100644 --- a/src/internal/request-options.ts +++ b/src/internal/request-options.ts @@ -3,7 +3,7 @@ import { NullableHeaders } from './headers'; import type { BodyInit } from './builtin-types'; -import { Stream } from '../streaming'; +import { Stream } from '../core/streaming'; import type { HTTPMethod, MergedRequestInit } from './types'; import { type HeadersLike } from './headers'; diff --git a/src/internal/utils/base64.ts b/src/internal/utils/base64.ts index 978bacde2..84854e241 100644 --- a/src/internal/utils/base64.ts +++ b/src/internal/utils/base64.ts @@ -1,6 +1,6 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { OpenAIError } from '../../error'; +import { OpenAIError } from '../../core/error'; export const toBase64 = (data: string | Uint8Array | null | undefined): string => { if (!data) return ''; diff --git a/src/internal/utils/path.ts b/src/internal/utils/path.ts index b40291903..0dceb10f1 100644 --- a/src/internal/utils/path.ts +++ b/src/internal/utils/path.ts @@ -1,4 +1,4 @@ -import { OpenAIError } from '../../error'; +import { OpenAIError } from '../../core/error'; /** * Percent-encode everything that isn't safe to have in a path without encoding safe chars. diff --git a/src/internal/utils/values.ts b/src/internal/utils/values.ts index 08255c4b1..aee03b055 100644 --- a/src/internal/utils/values.ts +++ b/src/internal/utils/values.ts @@ -1,6 +1,6 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { OpenAIError } from '../../error'; +import { OpenAIError } from '../../core/error'; // https://url.spec.whatwg.org/#url-scheme-string const startsWithSchemeRegexp = /^[a-z][a-z0-9+.-]*:/i; diff --git a/src/pagination.ts b/src/pagination.ts index fdcc7e131..90bf015e1 100644 --- a/src/pagination.ts +++ b/src/pagination.ts @@ -1,204 +1,2 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -import { OpenAIError } from './error'; -import { FinalRequestOptions } from './internal/request-options'; -import { defaultParseResponse, WithRequestID } from './internal/parse'; -import { APIPromise } from './api-promise'; -import { type OpenAI } from './client'; -import { type APIResponseProps } from './internal/parse'; -import { maybeObj } from './internal/utils/values'; - -export type PageRequestOptions = Pick; - -export abstract class AbstractPage implements AsyncIterable { - #client: OpenAI; - protected options: FinalRequestOptions; - - protected response: Response; - protected body: unknown; - - constructor(client: OpenAI, response: Response, body: unknown, options: FinalRequestOptions) { - this.#client = client; - this.options = options; - this.response = response; - this.body = body; - } - - abstract nextPageRequestOptions(): PageRequestOptions | null; - - abstract getPaginatedItems(): Item[]; - - hasNextPage(): boolean { - const items = this.getPaginatedItems(); - if (!items.length) return false; - return this.nextPageRequestOptions() != null; - } - - async getNextPage(): Promise { - const nextOptions = this.nextPageRequestOptions(); - if (!nextOptions) { - throw new OpenAIError( - 'No next page expected; please check `.hasNextPage()` before calling `.getNextPage()`.', - ); - } - - return await this.#client.requestAPIList(this.constructor as any, nextOptions); - } - - async *iterPages(): AsyncGenerator { - let page: this = this; - yield page; - while (page.hasNextPage()) { - page = await page.getNextPage(); - yield page; - } - } - - async *[Symbol.asyncIterator](): AsyncGenerator { - for await (const page of this.iterPages()) { - for (const item of page.getPaginatedItems()) { - yield item; - } - } - } -} - -/** - * This subclass of Promise will resolve to an instantiated Page once the request completes. - * - * It also implements AsyncIterable to allow auto-paginating iteration on an unawaited list call, eg: - * - * for await (const item of client.items.list()) { - * console.log(item) - * } - */ -export class PagePromise< - PageClass extends AbstractPage, - Item = ReturnType[number], - > - extends APIPromise - implements AsyncIterable -{ - constructor( - client: OpenAI, - request: Promise, - Page: new (...args: ConstructorParameters) => PageClass, - ) { - super( - client, - request, - async (client, props) => - new Page( - client, - props.response, - await defaultParseResponse(client, props), - props.options, - ) as WithRequestID, - ); - } - - /** - * Allow auto-paginating iteration on an unawaited list call, eg: - * - * for await (const item of client.items.list()) { - * console.log(item) - * } - */ - async *[Symbol.asyncIterator]() { - const page = await this; - for await (const item of page) { - yield item; - } - } -} - -export interface PageResponse { - data: Array; - - object: string; -} - -/** - * Note: no pagination actually occurs yet, this is for forwards-compatibility. - */ -export class Page extends AbstractPage implements PageResponse { - data: Array; - - object: string; - - constructor(client: OpenAI, response: Response, body: PageResponse, options: FinalRequestOptions) { - super(client, response, body, options); - - this.data = body.data || []; - this.object = body.object; - } - - getPaginatedItems(): Item[] { - return this.data ?? 
[]; - } - - nextPageRequestOptions(): PageRequestOptions | null { - return null; - } -} - -export interface CursorPageResponse { - data: Array; - - has_more: boolean; -} - -export interface CursorPageParams { - after?: string; - - limit?: number; -} - -export class CursorPage - extends AbstractPage - implements CursorPageResponse -{ - data: Array; - - has_more: boolean; - - constructor( - client: OpenAI, - response: Response, - body: CursorPageResponse, - options: FinalRequestOptions, - ) { - super(client, response, body, options); - - this.data = body.data || []; - this.has_more = body.has_more || false; - } - - getPaginatedItems(): Item[] { - return this.data ?? []; - } - - override hasNextPage(): boolean { - if (this.has_more === false) { - return false; - } - - return super.hasNextPage(); - } - - nextPageRequestOptions(): PageRequestOptions | null { - const data = this.getPaginatedItems(); - const id = data[data.length - 1]?.id; - if (!id) { - return null; - } - - return { - ...this.options, - query: { - ...maybeObj(this.options.query), - after: id, - }, - }; - } -} +/** @deprecated Import from ./core/pagination instead */ +export * from './core/pagination'; diff --git a/src/resource.ts b/src/resource.ts index ea299e176..363e3516b 100644 --- a/src/resource.ts +++ b/src/resource.ts @@ -1,11 +1,2 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import type { OpenAI } from './client'; - -export class APIResource { - protected _client: OpenAI; - - constructor(client: OpenAI) { - this._client = client; - } -} +/** @deprecated Import from ./core/resource instead */ +export * from './core/resource'; diff --git a/src/resources/audio/audio.ts b/src/resources/audio/audio.ts index 071fe5929..081db7d99 100644 --- a/src/resources/audio/audio.ts +++ b/src/resources/audio/audio.ts @@ -1,6 +1,6 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../resource'; +import { APIResource } from '../../core/resource'; import * as SpeechAPI from './speech'; import { Speech, SpeechCreateParams, SpeechModel } from './speech'; import * as TranscriptionsAPI from './transcriptions'; diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts index efd722887..7a795f0e6 100644 --- a/src/resources/audio/speech.ts +++ b/src/resources/audio/speech.ts @@ -1,7 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../resource'; -import { APIPromise } from '../../api-promise'; +import { APIResource } from '../../core/resource'; +import { APIPromise } from '../../core/api-promise'; import { buildHeaders } from '../../internal/headers'; import { RequestOptions } from '../../internal/request-options'; diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index 035469b72..f8e3c24dd 100644 --- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -1,11 +1,11 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import { APIResource } from '../../resource'; +import { APIResource } from '../../core/resource'; import * as TranscriptionsAPI from './transcriptions'; import * as AudioAPI from './audio'; -import { APIPromise } from '../../api-promise'; -import { Stream } from '../../streaming'; -import { type Uploadable } from '../../uploads'; +import { APIPromise } from '../../core/api-promise'; +import { Stream } from '../../core/streaming'; +import { type Uploadable } from '../../core/uploads'; import { RequestOptions } from '../../internal/request-options'; import { multipartFormRequestOptions } from '../../internal/uploads'; diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts index 55155d592..5f07a1ddb 100644 --- a/src/resources/audio/translations.ts +++ b/src/resources/audio/translations.ts @@ -1,10 +1,10 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../resource'; +import { APIResource } from '../../core/resource'; import * as AudioAPI from './audio'; import * as TranscriptionsAPI from './transcriptions'; -import { APIPromise } from '../../api-promise'; -import { type Uploadable } from '../../uploads'; +import { APIPromise } from '../../core/api-promise'; +import { type Uploadable } from '../../core/uploads'; import { RequestOptions } from '../../internal/request-options'; import { multipartFormRequestOptions } from '../../internal/uploads'; diff --git a/src/resources/batches.ts b/src/resources/batches.ts index 36477851a..b52a27226 100644 --- a/src/resources/batches.ts +++ b/src/resources/batches.ts @@ -1,10 +1,10 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../resource'; +import { APIResource } from '../core/resource'; import * as BatchesAPI from './batches'; import * as Shared from './shared'; -import { APIPromise } from '../api-promise'; -import { CursorPage, type CursorPageParams, PagePromise } from '../pagination'; +import { APIPromise } from '../core/api-promise'; +import { CursorPage, type CursorPageParams, PagePromise } from '../core/pagination'; import { RequestOptions } from '../internal/request-options'; import { path } from '../internal/utils/path'; diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 3753ee9c8..217782aa7 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -1,13 +1,13 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import { APIResource } from '../../resource'; +import { APIResource } from '../../core/resource'; import * as Shared from '../shared'; import * as MessagesAPI from './threads/messages'; import * as ThreadsAPI from './threads/threads'; import * as RunsAPI from './threads/runs/runs'; import * as StepsAPI from './threads/runs/steps'; -import { APIPromise } from '../../api-promise'; -import { CursorPage, type CursorPageParams, PagePromise } from '../../pagination'; +import { APIPromise } from '../../core/api-promise'; +import { CursorPage, type CursorPageParams, PagePromise } from '../../core/pagination'; import { buildHeaders } from '../../internal/headers'; import { RequestOptions } from '../../internal/request-options'; import { path } from '../../internal/utils/path'; diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts index 73d8ea5d2..9e0c160dd 100644 --- a/src/resources/beta/beta.ts +++ b/src/resources/beta/beta.ts @@ -1,6 +1,6 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../resource'; +import { APIResource } from '../../core/resource'; import * as AssistantsAPI from './assistants'; import * as ChatAPI from './chat/chat'; import { diff --git a/src/resources/beta/realtime/realtime.ts b/src/resources/beta/realtime/realtime.ts index 224d94f37..b0fa02919 100644 --- a/src/resources/beta/realtime/realtime.ts +++ b/src/resources/beta/realtime/realtime.ts @@ -1,6 +1,6 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../../resource'; +import { APIResource } from '../../../core/resource'; import * as RealtimeAPI from './realtime'; import * as Shared from '../../shared'; import * as SessionsAPI from './sessions'; diff --git a/src/resources/beta/realtime/sessions.ts b/src/resources/beta/realtime/sessions.ts index e1c439c1c..3e0f939ef 100644 --- a/src/resources/beta/realtime/sessions.ts +++ b/src/resources/beta/realtime/sessions.ts @@ -1,7 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../../resource'; -import { APIPromise } from '../../../api-promise'; +import { APIResource } from '../../../core/resource'; +import { APIPromise } from '../../../core/api-promise'; import { buildHeaders } from '../../../internal/headers'; import { RequestOptions } from '../../../internal/request-options'; diff --git a/src/resources/beta/realtime/transcription-sessions.ts b/src/resources/beta/realtime/transcription-sessions.ts index f31ee9210..c530c4091 100644 --- a/src/resources/beta/realtime/transcription-sessions.ts +++ b/src/resources/beta/realtime/transcription-sessions.ts @@ -1,7 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../../resource'; -import { APIPromise } from '../../../api-promise'; +import { APIResource } from '../../../core/resource'; +import { APIPromise } from '../../../core/api-promise'; import { buildHeaders } from '../../../internal/headers'; import { RequestOptions } from '../../../internal/request-options'; diff --git a/src/resources/beta/threads/messages.ts b/src/resources/beta/threads/messages.ts index 3bba148b9..a4a7377c2 100644 --- a/src/resources/beta/threads/messages.ts +++ b/src/resources/beta/threads/messages.ts @@ -1,10 +1,10 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import { APIResource } from '../../../resource'; +import { APIResource } from '../../../core/resource'; import * as Shared from '../../shared'; import * as AssistantsAPI from '../assistants'; -import { APIPromise } from '../../../api-promise'; -import { CursorPage, type CursorPageParams, PagePromise } from '../../../pagination'; +import { APIPromise } from '../../../core/api-promise'; +import { CursorPage, type CursorPageParams, PagePromise } from '../../../core/pagination'; import { buildHeaders } from '../../../internal/headers'; import { RequestOptions } from '../../../internal/request-options'; import { path } from '../../../internal/utils/path'; diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 401b3ec59..0d786d759 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -1,6 +1,6 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../../../resource'; +import { APIResource } from '../../../../core/resource'; import * as RunsAPI from './runs'; import * as Shared from '../../../shared'; import * as AssistantsAPI from '../../assistants'; @@ -31,9 +31,9 @@ import { ToolCallDeltaObject, ToolCallsStepDetails, } from './steps'; -import { APIPromise } from '../../../../api-promise'; -import { CursorPage, type CursorPageParams, PagePromise } from '../../../../pagination'; -import { Stream } from '../../../../streaming'; +import { APIPromise } from '../../../../core/api-promise'; +import { CursorPage, type CursorPageParams, PagePromise } from '../../../../core/pagination'; +import { Stream } from '../../../../core/streaming'; import { buildHeaders } from '../../../../internal/headers'; import { RequestOptions } from '../../../../internal/request-options'; import { AssistantStream, RunCreateParamsBaseStream } from '../../../../lib/AssistantStream'; diff --git a/src/resources/beta/threads/runs/steps.ts b/src/resources/beta/threads/runs/steps.ts index 918cdde37..68be569e5 100644 --- a/src/resources/beta/threads/runs/steps.ts +++ b/src/resources/beta/threads/runs/steps.ts @@ -1,10 +1,10 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../../../resource'; +import { APIResource } from '../../../../core/resource'; import * as StepsAPI from './steps'; import * as Shared from '../../../shared'; -import { APIPromise } from '../../../../api-promise'; -import { CursorPage, type CursorPageParams, PagePromise } from '../../../../pagination'; +import { APIPromise } from '../../../../core/api-promise'; +import { CursorPage, type CursorPageParams, PagePromise } from '../../../../core/pagination'; import { buildHeaders } from '../../../../internal/headers'; import { RequestOptions } from '../../../../internal/request-options'; import { path } from '../../../../internal/utils/path'; diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 22ccd7f02..b06ac458f 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -1,6 +1,6 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import { APIResource } from '../../../resource'; +import { APIResource } from '../../../core/resource'; import * as ThreadsAPI from './threads'; import * as Shared from '../../shared'; import * as AssistantsAPI from '../assistants'; @@ -65,8 +65,8 @@ import { Runs, RunsPage, } from './runs/runs'; -import { APIPromise } from '../../../api-promise'; -import { Stream } from '../../../streaming'; +import { APIPromise } from '../../../core/api-promise'; +import { Stream } from '../../../core/streaming'; import { buildHeaders } from '../../../internal/headers'; import { RequestOptions } from '../../../internal/request-options'; import { AssistantStream, ThreadCreateAndRunParamsBaseStream } from '../../../lib/AssistantStream'; diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index 19ddd9c2e..5bf388470 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -1,6 +1,6 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../resource'; +import { APIResource } from '../../core/resource'; import * as Shared from '../shared'; import * as CompletionsAPI from './completions/completions'; import { diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts index 0aa8d923d..191c6250c 100644 --- a/src/resources/chat/completions/completions.ts +++ b/src/resources/chat/completions/completions.ts @@ -1,14 +1,14 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../../resource'; +import { APIResource } from '../../../core/resource'; import * as CompletionsCompletionsAPI from './completions'; import * as CompletionsAPI from '../../completions'; import * as Shared from '../../shared'; import * as MessagesAPI from './messages'; import { MessageListParams, Messages } from './messages'; -import { APIPromise } from '../../../api-promise'; -import { CursorPage, type CursorPageParams, PagePromise } from '../../../pagination'; -import { Stream } from '../../../streaming'; +import { APIPromise } from '../../../core/api-promise'; +import { CursorPage, type CursorPageParams, PagePromise } from '../../../core/pagination'; +import { Stream } from '../../../core/streaming'; import { RequestOptions } from '../../../internal/request-options'; import { path } from '../../../internal/utils/path'; diff --git a/src/resources/chat/completions/messages.ts b/src/resources/chat/completions/messages.ts index 82478a8ab..0ea05d2f7 100644 --- a/src/resources/chat/completions/messages.ts +++ b/src/resources/chat/completions/messages.ts @@ -1,9 +1,9 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../../resource'; +import { APIResource } from '../../../core/resource'; import * as CompletionsAPI from './completions'; import { ChatCompletionStoreMessagesPage } from './completions'; -import { CursorPage, type CursorPageParams, PagePromise } from '../../../pagination'; +import { CursorPage, type CursorPageParams, PagePromise } from '../../../core/pagination'; import { RequestOptions } from '../../../internal/request-options'; import { path } from '../../../internal/utils/path'; diff --git a/src/resources/completions.ts b/src/resources/completions.ts index 742f682fe..4777a359d 100644 --- a/src/resources/completions.ts +++ b/src/resources/completions.ts @@ -1,10 +1,10 @@ // File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. -import { APIResource } from '../resource'; +import { APIResource } from '../core/resource'; import * as CompletionsAPI from './completions'; import * as CompletionsCompletionsAPI from './chat/completions/completions'; -import { APIPromise } from '../api-promise'; -import { Stream } from '../streaming'; +import { APIPromise } from '../core/api-promise'; +import { Stream } from '../core/streaming'; import { RequestOptions } from '../internal/request-options'; export class Completions extends APIResource { diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts index dcd542a1a..b9c48efad 100644 --- a/src/resources/embeddings.ts +++ b/src/resources/embeddings.ts @@ -1,7 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../resource'; -import { APIPromise } from '../api-promise'; +import { APIResource } from '../core/resource'; +import { APIPromise } from '../core/api-promise'; import { RequestOptions } from '../internal/request-options'; export class Embeddings extends APIResource { diff --git a/src/resources/files.ts b/src/resources/files.ts index 3183b87f8..e0b66b77a 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -1,9 +1,9 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../resource'; -import { APIPromise } from '../api-promise'; -import { CursorPage, type CursorPageParams, PagePromise } from '../pagination'; -import { type Uploadable } from '../uploads'; +import { APIResource } from '../core/resource'; +import { APIPromise } from '../core/api-promise'; +import { CursorPage, type CursorPageParams, PagePromise } from '../core/pagination'; +import { type Uploadable } from '../core/uploads'; import { buildHeaders } from '../internal/headers'; import { RequestOptions } from '../internal/request-options'; import { sleep } from '../internal/utils/sleep'; diff --git a/src/resources/fine-tuning/fine-tuning.ts b/src/resources/fine-tuning/fine-tuning.ts index 593a4a89e..be9eb0f89 100644 --- a/src/resources/fine-tuning/fine-tuning.ts +++ b/src/resources/fine-tuning/fine-tuning.ts @@ -1,6 +1,6 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../resource'; +import { APIResource } from '../../core/resource'; import * as JobsAPI from './jobs/jobs'; import { FineTuningJob, diff --git a/src/resources/fine-tuning/jobs/checkpoints.ts b/src/resources/fine-tuning/jobs/checkpoints.ts index 134715cec..1b014aa75 100644 --- a/src/resources/fine-tuning/jobs/checkpoints.ts +++ b/src/resources/fine-tuning/jobs/checkpoints.ts @@ -1,7 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../../resource'; -import { CursorPage, type CursorPageParams, PagePromise } from '../../../pagination'; +import { APIResource } from '../../../core/resource'; +import { CursorPage, type CursorPageParams, PagePromise } from '../../../core/pagination'; import { RequestOptions } from '../../../internal/request-options'; import { path } from '../../../internal/utils/path'; diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index 4531ec138..79cfe6156 100644 --- a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -1,6 +1,6 @@ // File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. -import { APIResource } from '../../../resource'; +import { APIResource } from '../../../core/resource'; import * as Shared from '../../shared'; import * as CheckpointsAPI from './checkpoints'; import { @@ -9,8 +9,8 @@ import { FineTuningJobCheckpoint, FineTuningJobCheckpointsPage, } from './checkpoints'; -import { APIPromise } from '../../../api-promise'; -import { CursorPage, type CursorPageParams, PagePromise } from '../../../pagination'; +import { APIPromise } from '../../../core/api-promise'; +import { CursorPage, type CursorPageParams, PagePromise } from '../../../core/pagination'; import { RequestOptions } from '../../../internal/request-options'; import { path } from '../../../internal/utils/path'; diff --git a/src/resources/images.ts b/src/resources/images.ts index d8eb5be4c..491255563 100644 --- a/src/resources/images.ts +++ b/src/resources/images.ts @@ -1,8 +1,8 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../resource'; -import { APIPromise } from '../api-promise'; -import { type Uploadable } from '../uploads'; +import { APIResource } from '../core/resource'; +import { APIPromise } from '../core/api-promise'; +import { type Uploadable } from '../core/uploads'; import { RequestOptions } from '../internal/request-options'; import { multipartFormRequestOptions } from '../internal/uploads'; diff --git a/src/resources/models.ts b/src/resources/models.ts index 69ba58279..25a730ebf 100644 --- a/src/resources/models.ts +++ b/src/resources/models.ts @@ -1,8 +1,8 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../resource'; -import { APIPromise } from '../api-promise'; -import { Page, PagePromise } from '../pagination'; +import { APIResource } from '../core/resource'; +import { APIPromise } from '../core/api-promise'; +import { Page, PagePromise } from '../core/pagination'; import { RequestOptions } from '../internal/request-options'; import { path } from '../internal/utils/path'; diff --git a/src/resources/moderations.ts b/src/resources/moderations.ts index 478850e5e..2792e0f30 100644 --- a/src/resources/moderations.ts +++ b/src/resources/moderations.ts @@ -1,7 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../resource'; -import { APIPromise } from '../api-promise'; +import { APIResource } from '../core/resource'; +import { APIPromise } from '../core/api-promise'; import { RequestOptions } from '../internal/request-options'; export class Moderations extends APIResource { diff --git a/src/resources/responses/input-items.ts b/src/resources/responses/input-items.ts index 6bbef6366..b49f30b81 100644 --- a/src/resources/responses/input-items.ts +++ b/src/resources/responses/input-items.ts @@ -1,9 +1,9 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import { APIResource } from '../../resource'; +import { APIResource } from '../../core/resource'; import * as ResponsesAPI from './responses'; import { ResponseItemsPage } from './responses'; -import { CursorPage, type CursorPageParams, PagePromise } from '../../pagination'; +import { CursorPage, type CursorPageParams, PagePromise } from '../../core/pagination'; import { RequestOptions } from '../../internal/request-options'; import { path } from '../../internal/utils/path'; diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 8fb697c93..7077aeb9a 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -1,13 +1,13 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../resource'; +import { APIResource } from '../../core/resource'; import * as ResponsesAPI from './responses'; import * as Shared from '../shared'; import * as InputItemsAPI from './input-items'; import { InputItemListParams, InputItems, ResponseItemList } from './input-items'; -import { APIPromise } from '../../api-promise'; -import { CursorPage } from '../../pagination'; -import { Stream } from '../../streaming'; +import { APIPromise } from '../../core/api-promise'; +import { CursorPage } from '../../core/pagination'; +import { Stream } from '../../core/streaming'; import { buildHeaders } from '../../internal/headers'; import { RequestOptions } from '../../internal/request-options'; import { path } from '../../internal/utils/path'; diff --git a/src/resources/uploads/parts.ts b/src/resources/uploads/parts.ts index 18568c3e4..4e3509f9f 100644 --- a/src/resources/uploads/parts.ts +++ b/src/resources/uploads/parts.ts @@ -1,8 +1,8 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../resource'; -import { APIPromise } from '../../api-promise'; -import { type Uploadable } from '../../uploads'; +import { APIResource } from '../../core/resource'; +import { APIPromise } from '../../core/api-promise'; +import { type Uploadable } from '../../core/uploads'; import { RequestOptions } from '../../internal/request-options'; import { multipartFormRequestOptions } from '../../internal/uploads'; import { path } from '../../internal/utils/path'; diff --git a/src/resources/uploads/uploads.ts b/src/resources/uploads/uploads.ts index 96ed91f6a..e4ac1752a 100644 --- a/src/resources/uploads/uploads.ts +++ b/src/resources/uploads/uploads.ts @@ -1,10 +1,10 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../resource'; +import { APIResource } from '../../core/resource'; import * as FilesAPI from '../files'; import * as PartsAPI from './parts'; import { PartCreateParams, Parts, UploadPart } from './parts'; -import { APIPromise } from '../../api-promise'; +import { APIPromise } from '../../core/api-promise'; import { RequestOptions } from '../../internal/request-options'; import { path } from '../../internal/utils/path'; diff --git a/src/resources/vector-stores/file-batches.ts b/src/resources/vector-stores/file-batches.ts index 81b285b63..7471b5d43 100644 --- a/src/resources/vector-stores/file-batches.ts +++ b/src/resources/vector-stores/file-batches.ts @@ -1,11 +1,11 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import { APIResource } from '../../resource'; +import { APIResource } from '../../core/resource'; import * as FilesAPI from './files'; import { VectorStoreFilesPage } from './files'; import * as VectorStoresAPI from './vector-stores'; -import { APIPromise } from '../../api-promise'; -import { CursorPage, type CursorPageParams, PagePromise } from '../../pagination'; +import { APIPromise } from '../../core/api-promise'; +import { CursorPage, type CursorPageParams, PagePromise } from '../../core/pagination'; import { buildHeaders } from '../../internal/headers'; import { RequestOptions } from '../../internal/request-options'; import { path } from '../../internal/utils/path'; diff --git a/src/resources/vector-stores/files.ts b/src/resources/vector-stores/files.ts index 5aeef9653..3b20a6a95 100644 --- a/src/resources/vector-stores/files.ts +++ b/src/resources/vector-stores/files.ts @@ -1,9 +1,9 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../resource'; +import { APIResource } from '../../core/resource'; import * as VectorStoresAPI from './vector-stores'; -import { APIPromise } from '../../api-promise'; -import { CursorPage, type CursorPageParams, Page, PagePromise } from '../../pagination'; +import { APIPromise } from '../../core/api-promise'; +import { CursorPage, type CursorPageParams, Page, PagePromise } from '../../core/pagination'; import { buildHeaders } from '../../internal/headers'; import { RequestOptions } from '../../internal/request-options'; import { path } from '../../internal/utils/path'; diff --git a/src/resources/vector-stores/vector-stores.ts b/src/resources/vector-stores/vector-stores.ts index ef942cded..de08d0494 100644 --- a/src/resources/vector-stores/vector-stores.ts +++ b/src/resources/vector-stores/vector-stores.ts @@ -1,6 +1,6 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import { APIResource } from '../../resource'; +import { APIResource } from '../../core/resource'; import * as Shared from '../shared'; import * as FileBatchesAPI from './file-batches'; import { @@ -26,8 +26,8 @@ import { VectorStoreFileDeleted, VectorStoreFilesPage, } from './files'; -import { APIPromise } from '../../api-promise'; -import { CursorPage, type CursorPageParams, Page, PagePromise } from '../../pagination'; +import { APIPromise } from '../../core/api-promise'; +import { CursorPage, type CursorPageParams, Page, PagePromise } from '../../core/pagination'; import { buildHeaders } from '../../internal/headers'; import { RequestOptions } from '../../internal/request-options'; import { path } from '../../internal/utils/path'; diff --git a/src/streaming.ts b/src/streaming.ts index 5b9c19cfa..9e6da1063 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -1,336 +1,2 @@ -import { OpenAIError } from './error'; -import { type ReadableStream } from './internal/shim-types'; -import { makeReadableStream } from './internal/shims'; -import { findDoubleNewlineIndex, LineDecoder } from './internal/decoders/line'; -import { ReadableStreamToAsyncIterable } from './internal/shims'; -import { isAbortError } from './internal/errors'; - -import { APIError } from './error'; - -type Bytes = string | ArrayBuffer | Uint8Array | null | undefined; - -export type ServerSentEvent = { - event: string | null; - data: string; - raw: string[]; -}; - -export class Stream implements AsyncIterable { - controller: AbortController; - - constructor( - private iterator: () => AsyncIterator, - controller: AbortController, - ) { - this.controller = controller; - } - - static fromSSEResponse(response: Response, controller: AbortController): Stream { - let consumed = false; - - async function* iterator(): AsyncIterator { - if (consumed) { - throw new OpenAIError('Cannot iterate over a consumed stream, use `.tee()` to split the stream.'); - } - consumed = true; - let done = false; - try { - for await (const sse of _iterSSEMessages(response, controller)) { - if (done) continue; - - if (sse.data.startsWith('[DONE]')) { - done = true; - continue; - } - - if (sse.event === null) { - let data; - - try { - data = JSON.parse(sse.data); - } catch (e) { - console.error(`Could not parse message into JSON:`, sse.data); - console.error(`From chunk:`, sse.raw); - throw e; - } - - if (data && data.error) { - throw new APIError(undefined, data.error, undefined, response.headers); - } - - yield data; - } else { - let data; - try { - data = JSON.parse(sse.data); - } catch (e) { - console.error(`Could not parse message into JSON:`, sse.data); - console.error(`From chunk:`, sse.raw); - throw e; - } - // TODO: Is this where the error should be thrown? - if (sse.event == 'error') { - throw new APIError(undefined, data.error, data.message, undefined); - } - yield { event: sse.event, data: data } as any; - } - } - done = true; - } catch (e) { - // If the user calls `stream.controller.abort()`, we should exit without throwing. - if (isAbortError(e)) return; - throw e; - } finally { - // If the user `break`s, abort the ongoing request. - if (!done) controller.abort(); - } - } - - return new Stream(iterator, controller); - } - - /** - * Generates a Stream from a newline-separated ReadableStream - * where each item is a JSON value. 
- */ - static fromReadableStream(readableStream: ReadableStream, controller: AbortController): Stream { - let consumed = false; - - async function* iterLines(): AsyncGenerator { - const lineDecoder = new LineDecoder(); - - const iter = ReadableStreamToAsyncIterable(readableStream); - for await (const chunk of iter) { - for (const line of lineDecoder.decode(chunk)) { - yield line; - } - } - - for (const line of lineDecoder.flush()) { - yield line; - } - } - - async function* iterator(): AsyncIterator { - if (consumed) { - throw new OpenAIError('Cannot iterate over a consumed stream, use `.tee()` to split the stream.'); - } - consumed = true; - let done = false; - try { - for await (const line of iterLines()) { - if (done) continue; - if (line) yield JSON.parse(line); - } - done = true; - } catch (e) { - // If the user calls `stream.controller.abort()`, we should exit without throwing. - if (isAbortError(e)) return; - throw e; - } finally { - // If the user `break`s, abort the ongoing request. - if (!done) controller.abort(); - } - } - - return new Stream(iterator, controller); - } - - [Symbol.asyncIterator](): AsyncIterator { - return this.iterator(); - } - - /** - * Splits the stream into two streams which can be - * independently read from at different speeds. - */ - tee(): [Stream, Stream] { - const left: Array>> = []; - const right: Array>> = []; - const iterator = this.iterator(); - - const teeIterator = (queue: Array>>): AsyncIterator => { - return { - next: () => { - if (queue.length === 0) { - const result = iterator.next(); - left.push(result); - right.push(result); - } - return queue.shift()!; - }, - }; - }; - - return [ - new Stream(() => teeIterator(left), this.controller), - new Stream(() => teeIterator(right), this.controller), - ]; - } - - /** - * Converts this stream to a newline-separated ReadableStream of - * JSON stringified values in the stream - * which can be turned back into a Stream with `Stream.fromReadableStream()`. - */ - toReadableStream(): ReadableStream { - const self = this; - let iter: AsyncIterator; - const encoder: { - encode(str: string): Uint8Array; - } = new (globalThis as any).TextEncoder(); - - return makeReadableStream({ - async start() { - iter = self[Symbol.asyncIterator](); - }, - async pull(ctrl: any) { - try { - const { value, done } = await iter.next(); - if (done) return ctrl.close(); - - const bytes = encoder.encode(JSON.stringify(value) + '\n'); - - ctrl.enqueue(bytes); - } catch (err) { - ctrl.error(err); - } - }, - async cancel() { - await iter.return?.(); - }, - }); - } -} - -export async function* _iterSSEMessages( - response: Response, - controller: AbortController, -): AsyncGenerator { - if (!response.body) { - controller.abort(); - if ( - typeof (globalThis as any).navigator !== 'undefined' && - (globalThis as any).navigator.product === 'ReactNative' - ) { - throw new OpenAIError( - `The default react-native fetch implementation does not support streaming. 
Please use expo/fetch: https://docs.expo.dev/versions/latest/sdk/expo/#expofetch-api`, - ); - } - throw new OpenAIError(`Attempted to iterate over a response with no body`); - } - - const sseDecoder = new SSEDecoder(); - const lineDecoder = new LineDecoder(); - - const iter = ReadableStreamToAsyncIterable(response.body); - for await (const sseChunk of iterSSEChunks(iter)) { - for (const line of lineDecoder.decode(sseChunk)) { - const sse = sseDecoder.decode(line); - if (sse) yield sse; - } - } - - for (const line of lineDecoder.flush()) { - const sse = sseDecoder.decode(line); - if (sse) yield sse; - } -} - -/** - * Given an async iterable iterator, iterates over it and yields full - * SSE chunks, i.e. yields when a double new-line is encountered. - */ -async function* iterSSEChunks(iterator: AsyncIterableIterator): AsyncGenerator { - let data = new Uint8Array(); - - for await (const chunk of iterator) { - if (chunk == null) { - continue; - } - - const binaryChunk = - chunk instanceof ArrayBuffer ? new Uint8Array(chunk) - : typeof chunk === 'string' ? new (globalThis as any).TextEncoder().encode(chunk) - : chunk; - - let newData = new Uint8Array(data.length + binaryChunk.length); - newData.set(data); - newData.set(binaryChunk, data.length); - data = newData; - - let patternIndex; - while ((patternIndex = findDoubleNewlineIndex(data)) !== -1) { - yield data.slice(0, patternIndex); - data = data.slice(patternIndex); - } - } - - if (data.length > 0) { - yield data; - } -} - -class SSEDecoder { - private data: string[]; - private event: string | null; - private chunks: string[]; - - constructor() { - this.event = null; - this.data = []; - this.chunks = []; - } - - decode(line: string) { - if (line.endsWith('\r')) { - line = line.substring(0, line.length - 1); - } - - if (!line) { - // empty line and we didn't previously encounter any messages - if (!this.event && !this.data.length) return null; - - const sse: ServerSentEvent = { - event: this.event, - data: this.data.join('\n'), - raw: this.chunks, - }; - - this.event = null; - this.data = []; - this.chunks = []; - - return sse; - } - - this.chunks.push(line); - - if (line.startsWith(':')) { - return null; - } - - let [fieldname, _, value] = partition(line, ':'); - - if (value.startsWith(' ')) { - value = value.substring(1); - } - - if (fieldname === 'event') { - this.event = value; - } else if (fieldname === 'data') { - this.data.push(value); - } - - return null; - } -} - -function partition(str: string, delimiter: string): [string, string, string] { - const index = str.indexOf(delimiter); - if (index !== -1) { - return [str.substring(0, index), delimiter, str.substring(index + delimiter.length)]; - } - - return [str, '', '']; -} +/** @deprecated Import from ./core/streaming instead */ +export * from './core/streaming'; diff --git a/src/uploads.ts b/src/uploads.ts index 79d3073ea..b2ef64710 100644 --- a/src/uploads.ts +++ b/src/uploads.ts @@ -1,2 +1,2 @@ -export { type Uploadable } from './internal/uploads'; -export { toFile, type ToFileInput } from './internal/to-file'; +/** @deprecated Import from ./core/uploads instead */ +export * from './core/uploads'; diff --git a/tests/form.test.ts b/tests/form.test.ts index 5ca5b75f2..08cae4da0 100644 --- a/tests/form.test.ts +++ b/tests/form.test.ts @@ -1,5 +1,5 @@ import { multipartFormRequestOptions, createForm } from 'openai/internal/uploads'; -import { toFile } from 'openai/uploads'; +import { toFile } from 'openai/core/uploads'; describe('form data validation', () => { test('valid values do not 
error', async () => { diff --git a/tests/index.test.ts b/tests/index.test.ts index 07c99ba87..e1989722a 100644 --- a/tests/index.test.ts +++ b/tests/index.test.ts @@ -1,6 +1,6 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIPromise } from 'openai/api-promise'; +import { APIPromise } from 'openai/core/api-promise'; import util from 'node:util'; import OpenAI from 'openai'; diff --git a/tests/streaming.test.ts b/tests/streaming.test.ts index 857cf4620..a1abbaba4 100644 --- a/tests/streaming.test.ts +++ b/tests/streaming.test.ts @@ -1,5 +1,5 @@ import assert from 'assert'; -import { _iterSSEMessages } from 'openai/streaming'; +import { _iterSSEMessages } from 'openai/core/streaming'; import { ReadableStreamFrom } from 'openai/internal/shims'; describe('streaming decoding', () => { diff --git a/tests/uploads.test.ts b/tests/uploads.test.ts index 508fce58f..902a788a4 100644 --- a/tests/uploads.test.ts +++ b/tests/uploads.test.ts @@ -1,6 +1,6 @@ import fs from 'fs'; import type { ResponseLike } from 'openai/internal/to-file'; -import { toFile } from 'openai/uploads'; +import { toFile } from 'openai/core/uploads'; import { File } from 'node:buffer'; class MyClass { @@ -97,7 +97,7 @@ describe('missing File error message', () => { }); test('is thrown', async () => { - const uploads = await import('openai/uploads'); + const uploads = await import('openai/core/uploads'); await expect( uploads.toFile(mockResponse({ url: 'https://example.com/my/audio.mp3' })), ).rejects.toMatchInlineSnapshot( From c3b306abc656143ff66e23ce8a8da9eba3bfae3b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 26 Mar 2025 20:32:36 +0000 Subject: [PATCH 29/73] chore(api): updates to supported Voice IDs --- .stats.yml | 4 +- src/resources/audio/speech.ts | 18 +++++- src/resources/beta/realtime/realtime.ts | 61 +++++++++++++++---- src/resources/beta/realtime/sessions.ts | 47 ++++++++++++-- .../beta/realtime/transcription-sessions.ts | 4 +- src/resources/chat/completions/completions.ts | 14 ++++- src/resources/responses/input-items.ts | 6 ++ src/resources/responses/responses.ts | 20 +++--- tests/api-resources/audio/speech.test.ts | 2 +- .../chat/completions/completions.test.ts | 2 +- .../responses/input-items.test.ts | 2 +- 11 files changed, 143 insertions(+), 37 deletions(-) diff --git a/.stats.yml b/.stats.yml index fe9320429..4d1276a5e 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 82 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-5ad6884898c07591750dde560118baf7074a59aecd1f367f930c5e42b04e848a.yml -openapi_spec_hash: 0c255269b89767eae26f4d4dc22d3cbd +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6663c59193eb95b201e492de17dcbd5e126ba03d18ce66287a3e2c632ca56fe7.yml +openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e config_hash: d36e491b0afc4f79e3afad4b3c9bec70 diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts index 7a795f0e6..451234e6b 100644 --- a/src/resources/audio/speech.ts +++ b/src/resources/audio/speech.ts @@ -35,11 +35,23 @@ export interface SpeechCreateParams { /** * The voice to use when generating the audio. Supported voices are `alloy`, `ash`, - * `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. 
Previews of the - * voices are available in the + * `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and + * `verse`. Previews of the voices are available in the * [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). */ - voice: 'alloy' | 'ash' | 'coral' | 'echo' | 'fable' | 'onyx' | 'nova' | 'sage' | 'shimmer'; + voice: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; /** * Control the voice of your generated audio with additional instructions. Does not diff --git a/src/resources/beta/realtime/realtime.ts b/src/resources/beta/realtime/realtime.ts index b0fa02919..f4e1becfc 100644 --- a/src/resources/beta/realtime/realtime.ts +++ b/src/resources/beta/realtime/realtime.ts @@ -1005,9 +1005,22 @@ export interface RealtimeResponse { /** * The voice the model used to respond. Current voice options are `alloy`, `ash`, - * `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. - */ - voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; + * `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and + * `verse`. + */ + voice?: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; } /** @@ -1620,9 +1633,22 @@ export namespace ResponseCreateEvent { /** * The voice the model uses to respond. Voice cannot be changed during the session * once the model has responded with audio at least once. Current voice options are - * `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. - */ - voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; + * `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, + * `shimmer`, and `verse`. + */ + voice?: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; } export namespace Response { @@ -2078,9 +2104,22 @@ export namespace SessionUpdateEvent { /** * The voice the model uses to respond. Voice cannot be changed during the session * once the model has responded with audio at least once. Current voice options are - * `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. - */ - voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; + * `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, + * `shimmer`, and `verse`. + */ + voice?: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; } export namespace Session { @@ -2376,7 +2415,7 @@ export namespace TranscriptionSessionUpdate { export interface TurnDetection { /** * Whether or not to automatically generate a response when a VAD stop event - * occurs. + * occurs. Not available for transcription sessions. */ create_response?: boolean; @@ -2390,7 +2429,7 @@ export namespace TranscriptionSessionUpdate { /** * Whether or not to automatically interrupt any ongoing response with output to * the default conversation (i.e. `conversation` of `auto`) when a VAD start event - * occurs. + * occurs. Not available for transcription sessions. 
*/ interrupt_response?: boolean; diff --git a/src/resources/beta/realtime/sessions.ts b/src/resources/beta/realtime/sessions.ts index 3e0f939ef..324421197 100644 --- a/src/resources/beta/realtime/sessions.ts +++ b/src/resources/beta/realtime/sessions.ts @@ -141,7 +141,19 @@ export interface Session { * once the model has responded with audio at least once. Current voice options are * `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. */ - voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; + voice?: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; } export namespace Session { @@ -363,7 +375,19 @@ export interface SessionCreateResponse { * once the model has responded with audio at least once. Current voice options are * `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. */ - voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; + voice?: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; } export namespace SessionCreateResponse { @@ -563,9 +587,22 @@ export interface SessionCreateParams { /** * The voice the model uses to respond. Voice cannot be changed during the session * once the model has responded with audio at least once. Current voice options are - * `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. - */ - voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; + * `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, + * `shimmer`, and `verse`. + */ + voice?: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; } export namespace SessionCreateParams { diff --git a/src/resources/beta/realtime/transcription-sessions.ts b/src/resources/beta/realtime/transcription-sessions.ts index c530c4091..8040c5056 100644 --- a/src/resources/beta/realtime/transcription-sessions.ts +++ b/src/resources/beta/realtime/transcription-sessions.ts @@ -254,7 +254,7 @@ export namespace TranscriptionSessionCreateParams { export interface TurnDetection { /** * Whether or not to automatically generate a response when a VAD stop event - * occurs. + * occurs. Not available for transcription sessions. */ create_response?: boolean; @@ -268,7 +268,7 @@ export namespace TranscriptionSessionCreateParams { /** * Whether or not to automatically interrupt any ongoing response with output to * the default conversation (i.e. `conversation` of `auto`) when a VAD start event - * occurs. + * occurs. Not available for transcription sessions. */ interrupt_response?: boolean; diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts index 191c6250c..720e8a583 100644 --- a/src/resources/chat/completions/completions.ts +++ b/src/resources/chat/completions/completions.ts @@ -314,7 +314,19 @@ export interface ChatCompletionAudioParam { * The voice the model uses to respond. Supported voices are `alloy`, `ash`, * `ballad`, `coral`, `echo`, `sage`, and `shimmer`. 
*/ - voice: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; + voice: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; } /** diff --git a/src/resources/responses/input-items.ts b/src/resources/responses/input-items.ts index b49f30b81..c04fbe283 100644 --- a/src/resources/responses/input-items.ts +++ b/src/resources/responses/input-items.ts @@ -60,6 +60,12 @@ export interface InputItemListParams extends CursorPageParams { */ before?: string; + /** + * Additional fields to include in the response. See the `include` parameter for + * Response creation above for more information. + */ + include?: Array; + /** * The order to return the input items in. Default is `asc`. * diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 7077aeb9a..83835a0f7 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -233,8 +233,8 @@ export interface Response { * context. * * When using along with `previous_response_id`, the instructions from a previous - * response will be not be carried over to the next response. This makes it simple - * to swap out system (or developer) messages in new responses. + * response will not be carried over to the next response. This makes it simple to + * swap out system (or developer) messages in new responses. */ instructions: string | null; @@ -1284,6 +1284,12 @@ export type ResponseFormatTextConfig = * [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). */ export interface ResponseFormatTextJSONSchemaConfig { + /** + * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores + * and dashes, with a maximum length of 64. + */ + name: string; + /** * The schema for the response format, described as a JSON Schema object. Learn how * to build JSON schemas [here](https://json-schema.org/). @@ -1301,12 +1307,6 @@ export interface ResponseFormatTextJSONSchemaConfig { */ description?: string; - /** - * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores - * and dashes, with a maximum length of 64. - */ - name?: string; - /** * Whether to enable strict schema adherence when generating the output. If set to * true, the model will always follow the exact schema defined in the `schema` @@ -2626,8 +2626,8 @@ export interface ResponseCreateParamsBase { * context. * * When using along with `previous_response_id`, the instructions from a previous - * response will be not be carried over to the next response. This makes it simple - * to swap out system (or developer) messages in new responses. + * response will not be carried over to the next response. This makes it simple to + * swap out system (or developer) messages in new responses. 
*/ instructions?: string | null; diff --git a/tests/api-resources/audio/speech.test.ts b/tests/api-resources/audio/speech.test.ts index cbec6cfac..191c6a313 100644 --- a/tests/api-resources/audio/speech.test.ts +++ b/tests/api-resources/audio/speech.test.ts @@ -13,7 +13,7 @@ describe('resource speech', () => { const response = await client.audio.speech.create({ input: 'input', model: 'string', - voice: 'alloy', + voice: 'ash', instructions: 'instructions', response_format: 'mp3', speed: 0.25, diff --git a/tests/api-resources/chat/completions/completions.test.ts b/tests/api-resources/chat/completions/completions.test.ts index f95953719..b593ab4eb 100644 --- a/tests/api-resources/chat/completions/completions.test.ts +++ b/tests/api-resources/chat/completions/completions.test.ts @@ -26,7 +26,7 @@ describe('resource completions', () => { const response = await client.chat.completions.create({ messages: [{ content: 'string', role: 'developer', name: 'name' }], model: 'gpt-4o', - audio: { format: 'wav', voice: 'alloy' }, + audio: { format: 'wav', voice: 'ash' }, frequency_penalty: -2, function_call: 'none', functions: [{ name: 'name', description: 'description', parameters: { foo: 'bar' } }], diff --git a/tests/api-resources/responses/input-items.test.ts b/tests/api-resources/responses/input-items.test.ts index abc8185f6..a96128939 100644 --- a/tests/api-resources/responses/input-items.test.ts +++ b/tests/api-resources/responses/input-items.test.ts @@ -24,7 +24,7 @@ describe('resource inputItems', () => { await expect( client.responses.inputItems.list( 'response_id', - { after: 'after', before: 'before', limit: 0, order: 'asc' }, + { after: 'after', before: 'before', include: ['file_search_call.results'], limit: 0, order: 'asc' }, { path: '/_stainless_unknown_path' }, ), ).rejects.toThrow(OpenAI.NotFoundError); From 63b99b3fad48157ff28f57e4c2498034b8dad418 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 27 Mar 2025 16:07:22 +0000 Subject: [PATCH 30/73] feat(api): add `get /chat/completions` endpoint --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 4d1276a5e..1e1104a06 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 82 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6663c59193eb95b201e492de17dcbd5e126ba03d18ce66287a3e2c632ca56fe7.yml openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e -config_hash: d36e491b0afc4f79e3afad4b3c9bec70 +config_hash: 9351ea829c2b41da3b48a38c934c92ee From d25e995433e48d3f78684e1cc70303ddc23510bc Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 27 Mar 2025 19:41:58 +0000 Subject: [PATCH 31/73] feat(api): add `get /responses/{response_id}/input_items` endpoint --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 1e1104a06..f6a90d243 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 82 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6663c59193eb95b201e492de17dcbd5e126ba03d18ce66287a3e2c632ca56fe7.yml openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e -config_hash: 9351ea829c2b41da3b48a38c934c92ee +config_hash: e25e31d8446b6bc0e3ef7103b6993cce From 35556bb5b2ee51dc1588e66d25bb4ed61160b5a9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" 
<142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 1 Apr 2025 00:26:26 +0000 Subject: [PATCH 32/73] chore: Remove deprecated/unused remote spec feature --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index f6a90d243..2ccfd3411 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 82 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6663c59193eb95b201e492de17dcbd5e126ba03d18ce66287a3e2c632ca56fe7.yml openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e -config_hash: e25e31d8446b6bc0e3ef7103b6993cce +config_hash: 2daae06cc598821ccf87201de0861e40 From 15a6a099334455d62baa8a4f510ceeac5cf2d83c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 1 Apr 2025 23:12:29 +0000 Subject: [PATCH 33/73] feat(api): manual updates --- .stats.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.stats.yml b/.stats.yml index 2ccfd3411..71ac95541 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 82 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6663c59193eb95b201e492de17dcbd5e126ba03d18ce66287a3e2c632ca56fe7.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-4bce8217a697c729ac98046d4caf2c9e826b54c427fb0ab4f98e549a2e0ce31c.yml openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e -config_hash: 2daae06cc598821ccf87201de0861e40 +config_hash: 31a12443afeef2933b34e2de23c40954 From 9bfff6efa2725003207608ab9572febcbc3dd4e5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 1 Apr 2025 23:18:34 +0000 Subject: [PATCH 34/73] feat(api): manual updates --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 71ac95541..baad2afc1 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 82 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-4bce8217a697c729ac98046d4caf2c9e826b54c427fb0ab4f98e549a2e0ce31c.yml openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e -config_hash: 31a12443afeef2933b34e2de23c40954 +config_hash: 178ba1bfb1237bf6b94abb3408072aa7 From 62b1765a619f60701fbbf35209ea10187ae9e39d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 2 Apr 2025 13:33:34 +0000 Subject: [PATCH 35/73] fix(client): send `X-Stainless-Timeout` in seconds --- src/client.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client.ts b/src/client.ts index 41f128ebe..002151aca 100644 --- a/src/client.ts +++ b/src/client.ts @@ -770,7 +770,7 @@ export class OpenAI { Accept: 'application/json', 'User-Agent': this.getUserAgent(), 'X-Stainless-Retry-Count': String(retryCount), - ...(options.timeout ? { 'X-Stainless-Timeout': String(options.timeout) } : {}), + ...(options.timeout ? 
{ 'X-Stainless-Timeout': String(Math.trunc(options.timeout / 1000)) } : {}), ...getPlatformHeaders(), 'OpenAI-Organization': this.organization, 'OpenAI-Project': this.project, From ae2d5b1c2c4b574bcb004f56a37c146b3ecc6aba Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 2 Apr 2025 14:49:57 +0000 Subject: [PATCH 36/73] feat(api): manual updates --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index baad2afc1..675edb075 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 82 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-4bce8217a697c729ac98046d4caf2c9e826b54c427fb0ab4f98e549a2e0ce31c.yml openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e -config_hash: 178ba1bfb1237bf6b94abb3408072aa7 +config_hash: 578c5bff4208d560c0c280f13324409f From 4af1450aba07e6769f770d5b9b029e61219119f4 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 2 Apr 2025 20:55:22 +0000 Subject: [PATCH 37/73] chore(internal): add aliases for Record and Array --- src/internal/builtin-types.ts | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/internal/builtin-types.ts b/src/internal/builtin-types.ts index b2e598a81..c23d3bded 100644 --- a/src/internal/builtin-types.ts +++ b/src/internal/builtin-types.ts @@ -39,9 +39,23 @@ type _HeadersInit = RequestInit['headers']; */ type _BodyInit = RequestInit['body']; +/** + * An alias to the builtin `Array` type so we can + * easily alias it in import statements if there are name clashes. + */ +type _Array = Array; + +/** + * An alias to the builtin `Record` type so we can + * easily alias it in import statements if there are name clashes. 
+ */ +type _Record = Record; + export type { + _Array as Array, _BodyInit as BodyInit, _HeadersInit as HeadersInit, + _Record as Record, _RequestInfo as RequestInfo, _RequestInit as RequestInit, _Response as Response, From 42a53e4a96edddc67ee59bd3fc0b586acd975fd5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 3 Apr 2025 18:23:00 +0000 Subject: [PATCH 38/73] fix(api): improve type resolution when importing as a package --- packages/mcp-server/src/tools.ts | 1 + 1 file changed, 1 insertion(+) create mode 100644 packages/mcp-server/src/tools.ts diff --git a/packages/mcp-server/src/tools.ts b/packages/mcp-server/src/tools.ts new file mode 100644 index 000000000..7e516de7c --- /dev/null +++ b/packages/mcp-server/src/tools.ts @@ -0,0 +1 @@ +export * from './tools/index'; From 152206c66d0a64543fc37c2577af8f780b044f4e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 3 Apr 2025 18:37:49 +0000 Subject: [PATCH 39/73] feat(api): manual updates --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 675edb075..aebb90c8c 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 82 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-4bce8217a697c729ac98046d4caf2c9e826b54c427fb0ab4f98e549a2e0ce31c.yml openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e -config_hash: 578c5bff4208d560c0c280f13324409f +config_hash: bcd2cacdcb9fae9938f273cd167f613c From 1aa23080cb35b2e353fbb671c0044472aca31833 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 3 Apr 2025 19:55:08 +0000 Subject: [PATCH 40/73] chore(docs): improve migration doc --- MIGRATION.md | 284 +++++++++++++++++++++++++-------------------------- 1 file changed, 141 insertions(+), 143 deletions(-) diff --git a/MIGRATION.md b/MIGRATION.md index c5e26d3c2..0ee18b224 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -102,10 +102,10 @@ For example, for a method that would call an endpoint at `/v1/parents/{parent_id ```ts // Before -client.parents.children.create('p_123', 'c_456'); +client.parents.children.retrieve('p_123', 'c_456'); // After -client.example.create('c_456', { parent_id: 'p_123' }); +client.parents.children.retrieve('c_456', { parent_id: 'p_123' }); ``` This affects the following methods: @@ -136,73 +136,11 @@ For example: ```diff - client.example.retrieve(encodeURIComponent('string/with/slash')) -+ client.example.retrieve('string/with/slash') // renders example/string%2Fwith%2Fslash ++ client.example.retrieve('string/with/slash') // retrieves /example/string%2Fwith%2Fslash ``` Previously without the `encodeURIComponent()` call we would have used the path `/example/string/with/slash`; now we'll use `/example/string%2Fwith%2Fslash`. -### Removed `httpAgent` in favor of `fetchOptions` - -The `httpAgent` client option has been removed in favor of a [platform-specific `fetchOptions` property](https://github.com/stainless-sdks/openai-typescript#fetch-options). -This change was made as `httpAgent` relied on `node:http` agents which are not supported by any runtime's builtin fetch implementation. - -If you were using `httpAgent` for proxy support, check out the [new proxy documentation](https://github.com/stainless-sdks/openai-typescript#configuring-proxies). 
- -Before: - -```ts -import OpenAI from 'openai'; -import http from 'http'; -import { HttpsProxyAgent } from 'https-proxy-agent'; - -// Configure the default for all requests: -const client = new OpenAI({ - httpAgent: new HttpsProxyAgent(process.env.PROXY_URL), -}); -``` - -After: - -```ts -import OpenAI from 'openai'; -import * as undici from 'undici'; - -const proxyAgent = new undici.ProxyAgent(process.env.PROXY_URL); -const client = new OpenAI({ - fetchOptions: { - dispatcher: proxyAgent, - }, -}); -``` - -### HTTP method naming - -Some methods could not be named intuitively due to an internal naming conflict. This has been resolved and the methods are now correctly named. - -```ts -// Before -client.chat.completions.del(); -client.files.del(); -client.models.del(); -client.vectorStores.del(); -client.vectorStores.files.del(); -client.beta.assistants.del(); -client.beta.threads.del(); -client.beta.threads.messages.del(); -client.responses.del(); - -// After -client.chat.completions.delete(); -client.files.delete(); -client.models.delete(); -client.vectorStores.delete(); -client.vectorStores.files.delete(); -client.beta.assistants.delete(); -client.beta.threads.delete(); -client.beta.threads.messages.delete(); -client.responses.delete(); -``` - ### Removed request options overloads When making requests with no required body, query or header parameters, you must now explicitly pass `null`, `undefined` or an empty object `{}` to the params argument in order to customise request options. @@ -234,106 +172,69 @@ This affects the following methods: - `client.responses.retrieve()` - `client.responses.inputItems.list()` -### Pagination changes - -Note that the `for await` syntax is _not_ affected. This still works as-is: - -```ts -// Automatically fetches more pages as needed. -for await (const fineTuningJob of client.fineTuning.jobs.list()) { - console.log(fineTuningJob); -} -``` - -#### Simplified interface +### HTTP method naming -The pagination interface has been simplified: +Previously some methods could not be named intuitively due to an internal naming conflict. This has been fixed and the affected methods are now correctly named. ```ts // Before -page.nextPageParams(); -page.nextPageInfo(); -// Required manually handling { url } | { params } type +client.chat.completions.del(); +client.files.del(); +client.models.del(); +client.vectorStores.del(); +client.vectorStores.files.del(); +client.beta.assistants.del(); +client.beta.threads.del(); +client.beta.threads.messages.del(); +client.responses.del(); // After -page.nextPageRequestOptions(); +client.chat.completions.delete(); +client.files.delete(); +client.models.delete(); +client.vectorStores.delete(); +client.vectorStores.files.delete(); +client.beta.assistants.delete(); +client.beta.threads.delete(); +client.beta.threads.messages.delete(); +client.responses.delete(); ``` -#### Removed unnecessary classes - -Page classes for individual methods are now type aliases: - -```ts -// Before -export class FineTuningJobsPage extends CursorPage {} - -// After -export type FineTuningJobsPage = CursorPage; -``` +### Removed `httpAgent` in favor of `fetchOptions` -If you were importing these classes at runtime, you'll need to switch to importing the base class or only import them at the type-level. +The `httpAgent` client option has been removed in favor of a [platform-specific `fetchOptions` property](https://github.com/stainless-sdks/openai-typescript#fetch-options). 
+This change was made as `httpAgent` relied on `node:http` agents which are not supported by any runtime's builtin fetch implementation. -### File handling +If you were using `httpAgent` for proxy support, check out the [new proxy documentation](https://github.com/stainless-sdks/openai-typescript#configuring-proxies). -The deprecated `fileFromPath` helper has been removed in favor of native Node.js streams: +Before: ```ts -// Before -OpenAI.fileFromPath('path/to/file'); +import OpenAI from 'openai'; +import http from 'http'; +import { HttpsProxyAgent } from 'https-proxy-agent'; -// After -import fs from 'fs'; -fs.createReadStream('path/to/file'); +// Configure the default for all requests: +const client = new OpenAI({ + httpAgent: new HttpsProxyAgent(process.env.PROXY_URL), +}); ``` -Note that this function previously only worked on Node.js. If you're using Bun, you can use [`Bun.file`](https://bun.sh/docs/api/file-io) instead. - -### Shims removal - -Previously you could configure the types that the SDK used like this: +After: ```ts -// Tell TypeScript and the package to use the global Web fetch instead of node-fetch. -import 'openai/shims/web'; import OpenAI from 'openai'; -``` - -The `openai/shims` imports have been removed. Your global types must now be [correctly configured](#minimum-types-requirements). - -### `openai/src` directory removed - -Previously IDEs may have auto-completed imports from the `openai/src` directory, however this -directory was only included for an improved go-to-definition experience and should not have been used at runtime. - -If you have any `openai/src` imports, you must replace it with `openai`. - -```ts -// Before -import OpenAI from 'openai/src'; +import * as undici from 'undici'; -// After -import OpenAI from 'openai'; +const proxyAgent = new undici.ProxyAgent(process.env.PROXY_URL); +const client = new OpenAI({ + fetchOptions: { + dispatcher: proxyAgent, + }, +}); ``` -### Headers - -The `headers` property on `APIError` objects is now an instance of the Web [Headers](https://developer.mozilla.org/en-US/docs/Web/API/Headers) class. It was previously just `Record`. - -### Removed exports - -#### Resource classes - -If you were importing resource classes from the root package then you must now import them from the file they are defined in. -This was never valid at the type level and only worked in CommonJS files. - -```typescript -// Before -const { Completions } = require('openai'); - -// After -const { OpenAI } = require('openai'); -OpenAI.Completions; // or import directly from openai/resources/completions -``` +### Changed exports #### Refactor of `openai/core`, `error`, `pagination`, `resource`, `streaming` and `uploads` @@ -359,6 +260,20 @@ import 'openai/core/uploads'; If you were relying on anything that was only exported from `openai/core` and is also not accessible anywhere else, please open an issue and we'll consider adding it to the public API. +#### Resource classes + +Previously under certain circumstances it was possible to import resource classes like `Completions` directly from the root of the package. This was never valid at the type level and only worked in CommonJS files. +Now you must always either reference them as static class properties or import them directly from the files in which they are defined. 
+ +```typescript +// Before +const { Completions } = require('openai'); + +// After +const { OpenAI } = require('openai'); +OpenAI.Completions; // or import directly from openai/resources/completions +``` + #### Cleaned up `uploads` exports As part of the `core` refactor, `openai/uploads` was moved to `openai/core/uploads` @@ -395,3 +310,86 @@ import { APIClient } from 'openai/core'; // After import { OpenAI } from 'openai'; ``` + +### File handling + +The deprecated `fileFromPath` helper has been removed in favor of native Node.js streams: + +```ts +// Before +OpenAI.fileFromPath('path/to/file'); + +// After +import fs from 'fs'; +fs.createReadStream('path/to/file'); +``` + +Note that this function previously only worked on Node.js. If you're using Bun, you can use [`Bun.file`](https://bun.sh/docs/api/file-io) instead. + +### Shims removal + +Previously you could configure the types that the SDK used like this: + +```ts +// Tell TypeScript and the package to use the global Web fetch instead of node-fetch. +import 'openai/shims/web'; +import OpenAI from 'openai'; +``` + +The `openai/shims` imports have been removed. Your global types must now be [correctly configured](#minimum-types-requirements). + +### Pagination changes + +The `for await` syntax **is not affected**. This still works as-is: + +```ts +// Automatically fetches more pages as needed. +for await (const fineTuningJob of client.fineTuning.jobs.list()) { + console.log(fineTuningJob); +} +``` + +The interface for manually paginating through list results has been simplified: + +```ts +// Before +page.nextPageParams(); +page.nextPageInfo(); +// Required manually handling { url } | { params } type + +// After +page.nextPageRequestOptions(); +``` + +#### Removed unnecessary classes + +Page classes for individual methods are now type aliases: + +```ts +// Before +export class FineTuningJobsPage extends CursorPage {} + +// After +export type FineTuningJobsPage = CursorPage; +``` + +If you were importing these classes at runtime, you'll need to switch to importing the base class or only import them at the type-level. + +### `openai/src` directory removed + +Previously IDEs may have auto-completed imports from the `openai/src` directory, however this +directory was only included for an improved go-to-definition experience and should not have been used at runtime. + +If you have any `openai/src/*` imports, you will need to replace them with `openai/*`. + +```ts +// Before +import OpenAI from 'openai/src'; + +// After +import OpenAI from 'openai'; +``` + +### Headers + +The `headers` property on `APIError` objects is now an instance of the Web [Headers](https://developer.mozilla.org/en-US/docs/Web/API/Headers) class. It was previously just `Record`. 
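For example, error-handling code that previously read headers by indexing can switch to `Headers.get()`. A minimal sketch, assuming a failing `models.retrieve()` call and an `x-request-id` response header (both illustrative, not part of this patch):

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  try {
    await client.models.retrieve('model-that-does-not-exist');
  } catch (err) {
    if (err instanceof OpenAI.APIError) {
      // `err.headers` used to be a plain Record<string, string>; it is now a
      // Web Headers instance, so values are read with .get() (illustrative header name).
      console.log(err.headers?.get('x-request-id'));
    } else {
      throw err;
    }
  }
}

main();
```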
From 3c2608c77087cb9ed592413e4951896b2d40838c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 4 Apr 2025 21:09:13 +0000 Subject: [PATCH 41/73] fix(mcp): remove unused tools.ts --- packages/mcp-server/src/tools.ts | 1 - 1 file changed, 1 deletion(-) delete mode 100644 packages/mcp-server/src/tools.ts diff --git a/packages/mcp-server/src/tools.ts b/packages/mcp-server/src/tools.ts deleted file mode 100644 index 7e516de7c..000000000 --- a/packages/mcp-server/src/tools.ts +++ /dev/null @@ -1 +0,0 @@ -export * from './tools/index'; From 63f10a04594df507796aff5ff8f7ff8e95183c25 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 7 Apr 2025 12:35:25 +0000 Subject: [PATCH 42/73] fix(client): send all configured auth headers --- src/client.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client.ts b/src/client.ts index 002151aca..5ff149e9a 100644 --- a/src/client.ts +++ b/src/client.ts @@ -332,8 +332,8 @@ export class OpenAI { return; } - protected authHeaders(opts: FinalRequestOptions): Headers | undefined { - return new Headers({ Authorization: `Bearer ${this.apiKey}` }); + protected authHeaders(opts: FinalRequestOptions): NullableHeaders | undefined { + return buildHeaders([{ Authorization: `Bearer ${this.apiKey}` }]); } protected stringifyQuery(query: Record): string { From e3f58b4cd59ce4671b92d91147bd07c98eb6294b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 8 Apr 2025 14:25:51 +0000 Subject: [PATCH 43/73] chore(tests): improve enum examples --- tests/api-resources/images.test.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/api-resources/images.test.ts b/tests/api-resources/images.test.ts index 9f00da3b3..fab30e32d 100644 --- a/tests/api-resources/images.test.ts +++ b/tests/api-resources/images.test.ts @@ -27,7 +27,7 @@ describe('resource images', () => { model: 'dall-e-2', n: 1, response_format: 'url', - size: '256x256', + size: '1024x1024', user: 'user-1234', }); }); @@ -54,7 +54,7 @@ describe('resource images', () => { model: 'dall-e-2', n: 1, response_format: 'url', - size: '256x256', + size: '1024x1024', user: 'user-1234', }); }); @@ -77,7 +77,7 @@ describe('resource images', () => { n: 1, quality: 'standard', response_format: 'url', - size: '256x256', + size: '1024x1024', style: 'vivid', user: 'user-1234', }); From fef5ac36171bf6abbcc91a3427dc93a69937cc5b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 8 Apr 2025 18:49:44 +0000 Subject: [PATCH 44/73] feat(api): Add evalapi to sdk Adding the evalsapi to the sdk. 
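A rough usage sketch of the new surface (resource and method names taken from the generated `api.md` below; treating list items as exposing an `id`/`name` on evals and an `id` on runs is an assumption here):

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  // Evals are cursor-paginated, so `for await` fetches further pages as needed.
  for await (const evaluation of client.evals.list()) {
    console.log(evaluation.id, evaluation.name);

    // Each eval owns a runs sub-resource.
    for await (const run of client.evals.runs.list(evaluation.id)) {
      console.log('  run:', run.id);
    }
  }
}

main();
```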
--- .stats.yml | 8 +- MIGRATION.md | 20 +- api.md | 72 ++ src/client.ts | 37 + src/resources/evals.ts | 3 + src/resources/evals/evals.ts | 776 ++++++++++++ src/resources/evals/index.ts | 36 + src/resources/evals/runs.ts | 3 + src/resources/evals/runs/index.ts | 27 + src/resources/evals/runs/output-items.ts | 413 +++++++ src/resources/evals/runs/runs.ts | 1073 +++++++++++++++++ src/resources/fine-tuning/checkpoints.ts | 3 + .../fine-tuning/checkpoints/checkpoints.ts | 31 + .../fine-tuning/checkpoints/index.ts | 12 + .../fine-tuning/checkpoints/permissions.ts | 183 +++ src/resources/fine-tuning/fine-tuning.ts | 6 + src/resources/fine-tuning/index.ts | 1 + src/resources/index.ts | 17 + tests/api-resources/evals/evals.test.ts | 395 ++++++ .../evals/runs/output-items.test.ts | 52 + tests/api-resources/evals/runs/runs.test.ts | 101 ++ .../checkpoints/permissions.test.ts | 66 + 22 files changed, 3330 insertions(+), 5 deletions(-) create mode 100644 src/resources/evals.ts create mode 100644 src/resources/evals/evals.ts create mode 100644 src/resources/evals/index.ts create mode 100644 src/resources/evals/runs.ts create mode 100644 src/resources/evals/runs/index.ts create mode 100644 src/resources/evals/runs/output-items.ts create mode 100644 src/resources/evals/runs/runs.ts create mode 100644 src/resources/fine-tuning/checkpoints.ts create mode 100644 src/resources/fine-tuning/checkpoints/checkpoints.ts create mode 100644 src/resources/fine-tuning/checkpoints/index.ts create mode 100644 src/resources/fine-tuning/checkpoints/permissions.ts create mode 100644 tests/api-resources/evals/evals.test.ts create mode 100644 tests/api-resources/evals/runs/output-items.test.ts create mode 100644 tests/api-resources/evals/runs/runs.test.ts create mode 100644 tests/api-resources/fine-tuning/checkpoints/permissions.test.ts diff --git a/.stats.yml b/.stats.yml index aebb90c8c..ebe07c137 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 82 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-4bce8217a697c729ac98046d4caf2c9e826b54c427fb0ab4f98e549a2e0ce31c.yml -openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e -config_hash: bcd2cacdcb9fae9938f273cd167f613c +configured_endpoints: 97 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-472fe3036ea745365257fe870c0330917fb3153705c2826f49873cd631319b0a.yml +openapi_spec_hash: ea86343b5e9858a74e85da8ab2c532f6 +config_hash: ef19d36c307306f14f2e1cd5c834a151 diff --git a/MIGRATION.md b/MIGRATION.md index 0ee18b224..448d99b6e 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -108,7 +108,9 @@ client.parents.children.retrieve('p_123', 'c_456'); client.parents.children.retrieve('c_456', { parent_id: 'p_123' }); ``` -This affects the following methods: +
+ +This affects the following methods - `client.vectorStores.files.retrieve()` - `client.vectorStores.files.update()` @@ -126,6 +128,13 @@ This affects the following methods: - `client.beta.threads.messages.retrieve()` - `client.beta.threads.messages.update()` - `client.beta.threads.messages.delete()` +- `client.evals.runs.retrieve()` +- `client.evals.runs.delete()` +- `client.evals.runs.cancel()` +- `client.evals.runs.outputItems.retrieve()` +- `client.evals.runs.outputItems.list()` + +
### URI encoded path parameters @@ -162,6 +171,7 @@ This affects the following methods: - `client.fineTuning.jobs.list()` - `client.fineTuning.jobs.listEvents()` - `client.fineTuning.jobs.checkpoints.list()` +- `client.fineTuning.checkpoints.permissions.retrieve()` - `client.vectorStores.list()` - `client.vectorStores.files.list()` - `client.beta.assistants.list()` @@ -171,6 +181,8 @@ This affects the following methods: - `client.batches.list()` - `client.responses.retrieve()` - `client.responses.inputItems.list()` +- `client.evals.list()` +- `client.evals.runs.list()` ### HTTP method naming @@ -181,23 +193,29 @@ Previously some methods could not be named intuitively due to an internal naming client.chat.completions.del(); client.files.del(); client.models.del(); +client.fineTuning.checkpoints.permissions.del(); client.vectorStores.del(); client.vectorStores.files.del(); client.beta.assistants.del(); client.beta.threads.del(); client.beta.threads.messages.del(); client.responses.del(); +client.evals.del(); +client.evals.runs.del(); // After client.chat.completions.delete(); client.files.delete(); client.models.delete(); +client.fineTuning.checkpoints.permissions.delete(); client.vectorStores.delete(); client.vectorStores.files.delete(); client.beta.assistants.delete(); client.beta.threads.delete(); client.beta.threads.messages.delete(); client.responses.delete(); +client.evals.delete(); +client.evals.runs.delete(); ``` ### Removed `httpAgent` in favor of `fetchOptions` diff --git a/api.md b/api.md index 8ac9d3374..b983b5a3b 100644 --- a/api.md +++ b/api.md @@ -233,6 +233,22 @@ Methods: - client.fineTuning.jobs.checkpoints.list(fineTuningJobID, { ...params }) -> FineTuningJobCheckpointsPage +## Checkpoints + +### Permissions + +Types: + +- PermissionCreateResponse +- PermissionRetrieveResponse +- PermissionDeleteResponse + +Methods: + +- client.fineTuning.checkpoints.permissions.create(fineTunedModelCheckpoint, { ...params }) -> PermissionCreateResponsesPage +- client.fineTuning.checkpoints.permissions.retrieve(fineTunedModelCheckpoint, { ...params }) -> PermissionRetrieveResponse +- client.fineTuning.checkpoints.permissions.delete(fineTunedModelCheckpoint) -> PermissionDeleteResponse + # VectorStores Types: @@ -643,3 +659,59 @@ Types: Methods: - client.responses.inputItems.list(responseID, { ...params }) -> ResponseItemsPage + +# Evals + +Types: + +- EvalCustomDataSourceConfig +- EvalLabelModelGrader +- EvalStoredCompletionsDataSourceConfig +- EvalStringCheckGrader +- EvalTextSimilarityGrader +- EvalCreateResponse +- EvalRetrieveResponse +- EvalUpdateResponse +- EvalListResponse +- EvalDeleteResponse + +Methods: + +- client.evals.create({ ...params }) -> EvalCreateResponse +- client.evals.retrieve(evalID) -> EvalRetrieveResponse +- client.evals.update(evalID, { ...params }) -> EvalUpdateResponse +- client.evals.list({ ...params }) -> EvalListResponsesPage +- client.evals.delete(evalID) -> EvalDeleteResponse + +## Runs + +Types: + +- CreateEvalCompletionsRunDataSource +- CreateEvalJSONLRunDataSource +- EvalAPIError +- RunCreateResponse +- RunRetrieveResponse +- RunListResponse +- RunDeleteResponse +- RunCancelResponse + +Methods: + +- client.evals.runs.create(evalID, { ...params }) -> RunCreateResponse +- client.evals.runs.retrieve(runID, { ...params }) -> RunRetrieveResponse +- client.evals.runs.list(evalID, { ...params }) -> RunListResponsesPage +- client.evals.runs.delete(runID, { ...params }) -> RunDeleteResponse +- client.evals.runs.cancel(runID, { ...params }) -> RunCancelResponse + 
+### OutputItems + +Types: + +- OutputItemRetrieveResponse +- OutputItemListResponse + +Methods: + +- client.evals.runs.outputItems.retrieve(outputItemID, { ...params }) -> OutputItemRetrieveResponse +- client.evals.runs.outputItems.list(runID, { ...params }) -> OutputItemListResponsesPage diff --git a/src/client.ts b/src/client.ts index 5ff149e9a..4469a5628 100644 --- a/src/client.ts +++ b/src/client.ts @@ -85,6 +85,23 @@ import { isEmptyObj } from './internal/utils/values'; import { Audio, AudioModel, AudioResponseFormat } from './resources/audio/audio'; import { Beta } from './resources/beta/beta'; import { Chat } from './resources/chat/chat'; +import { + EvalCreateParams, + EvalCreateResponse, + EvalCustomDataSourceConfig, + EvalDeleteResponse, + EvalLabelModelGrader, + EvalListParams, + EvalListResponse, + EvalListResponsesPage, + EvalRetrieveResponse, + EvalStoredCompletionsDataSourceConfig, + EvalStringCheckGrader, + EvalTextSimilarityGrader, + EvalUpdateParams, + EvalUpdateResponse, + Evals, +} from './resources/evals/evals'; import { FineTuning } from './resources/fine-tuning/fine-tuning'; import { Responses } from './resources/responses/responses'; import { @@ -856,6 +873,7 @@ export class OpenAI { batches: API.Batches = new API.Batches(this); uploads: API.Uploads = new API.Uploads(this); responses: API.Responses = new API.Responses(this); + evals: API.Evals = new API.Evals(this); } OpenAI.Completions = Completions; OpenAI.Chat = Chat; @@ -871,6 +889,7 @@ OpenAI.Beta = Beta; OpenAI.Batches = Batches; OpenAI.Uploads = UploadsAPIUploads; OpenAI.Responses = Responses; +OpenAI.Evals = Evals; export declare namespace OpenAI { export type RequestOptions = Opts.RequestOptions; @@ -1022,6 +1041,24 @@ export declare namespace OpenAI { export { Responses as Responses }; + export { + Evals as Evals, + type EvalCustomDataSourceConfig as EvalCustomDataSourceConfig, + type EvalLabelModelGrader as EvalLabelModelGrader, + type EvalStoredCompletionsDataSourceConfig as EvalStoredCompletionsDataSourceConfig, + type EvalStringCheckGrader as EvalStringCheckGrader, + type EvalTextSimilarityGrader as EvalTextSimilarityGrader, + type EvalCreateResponse as EvalCreateResponse, + type EvalRetrieveResponse as EvalRetrieveResponse, + type EvalUpdateResponse as EvalUpdateResponse, + type EvalListResponse as EvalListResponse, + type EvalDeleteResponse as EvalDeleteResponse, + type EvalListResponsesPage as EvalListResponsesPage, + type EvalCreateParams as EvalCreateParams, + type EvalUpdateParams as EvalUpdateParams, + type EvalListParams as EvalListParams, + }; + export type AllModels = API.AllModels; export type ChatModel = API.ChatModel; export type ComparisonFilter = API.ComparisonFilter; diff --git a/src/resources/evals.ts b/src/resources/evals.ts new file mode 100644 index 000000000..b611710e1 --- /dev/null +++ b/src/resources/evals.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './evals/index'; diff --git a/src/resources/evals/evals.ts b/src/resources/evals/evals.ts new file mode 100644 index 000000000..ca5d82b42 --- /dev/null +++ b/src/resources/evals/evals.ts @@ -0,0 +1,776 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import { APIResource } from '../../core/resource'; +import * as Shared from '../shared'; +import * as RunsAPI from './runs/runs'; +import { + CreateEvalCompletionsRunDataSource, + CreateEvalJSONLRunDataSource, + EvalAPIError, + RunCancelParams, + RunCancelResponse, + RunCreateParams, + RunCreateResponse, + RunDeleteParams, + RunDeleteResponse, + RunListParams, + RunListResponse, + RunListResponsesPage, + RunRetrieveParams, + RunRetrieveResponse, + Runs, +} from './runs/runs'; +import { APIPromise } from '../../core/api-promise'; +import { CursorPage, type CursorPageParams, PagePromise } from '../../core/pagination'; +import { RequestOptions } from '../../internal/request-options'; +import { path } from '../../internal/utils/path'; + +export class Evals extends APIResource { + runs: RunsAPI.Runs = new RunsAPI.Runs(this._client); + + /** + * Create the structure of an evaluation that can be used to test a model's + * performance. An evaluation is a set of testing criteria and a datasource. After + * creating an evaluation, you can run it on different models and model parameters. + * We support several types of graders and datasources. For more information, see + * the [Evals guide](https://platform.openai.com/docs/guides/evals). + */ + create(body: EvalCreateParams, options?: RequestOptions): APIPromise { + return this._client.post('/evals', { body, ...options }); + } + + /** + * Get an evaluation by ID. + */ + retrieve(evalID: string, options?: RequestOptions): APIPromise { + return this._client.get(path`/evals/${evalID}`, options); + } + + /** + * Update certain properties of an evaluation. + */ + update(evalID: string, body: EvalUpdateParams, options?: RequestOptions): APIPromise { + return this._client.post(path`/evals/${evalID}`, { body, ...options }); + } + + /** + * List evaluations for a project. + */ + list( + query: EvalListParams | null | undefined = {}, + options?: RequestOptions, + ): PagePromise { + return this._client.getAPIList('/evals', CursorPage, { query, ...options }); + } + + /** + * Delete an evaluation. + */ + delete(evalID: string, options?: RequestOptions): APIPromise { + return this._client.delete(path`/evals/${evalID}`, options); + } +} + +export type EvalListResponsesPage = CursorPage; + +/** + * A CustomDataSourceConfig which specifies the schema of your `item` and + * optionally `sample` namespaces. The response schema defines the shape of the + * data that will be: + * + * - Used to define your testing criteria and + * - What data is required when creating a run + */ +export interface EvalCustomDataSourceConfig { + /** + * The json schema for the run data source items. Learn how to build JSON schemas + * [here](https://json-schema.org/). + */ + schema: Record; + + /** + * The type of data source. Always `custom`. + */ + type: 'custom'; +} + +/** + * A LabelModelGrader object which uses a model to assign labels to each item in + * the evaluation. + */ +export interface EvalLabelModelGrader { + input: Array; + + /** + * The labels to assign to each item in the evaluation. + */ + labels: Array; + + /** + * The model to use for the evaluation. Must support structured outputs. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The labels that indicate a passing result. Must be a subset of labels. + */ + passing_labels: Array; + + /** + * The object type, which is always `label_model`. 
+ */ + type: 'label_model'; +} + +export namespace EvalLabelModelGrader { + export interface InputMessage { + content: InputMessage.Content; + + /** + * The role of the message. One of `user`, `system`, or `developer`. + */ + role: 'user' | 'system' | 'developer'; + + /** + * The type of item, which is always `message`. + */ + type: 'message'; + } + + export namespace InputMessage { + export interface Content { + /** + * The text content. + */ + text: string; + + /** + * The type of content, which is always `input_text`. + */ + type: 'input_text'; + } + } + + export interface Assistant { + content: Assistant.Content; + + /** + * The role of the message. Must be `assistant` for output. + */ + role: 'assistant'; + + /** + * The type of item, which is always `message`. + */ + type: 'message'; + } + + export namespace Assistant { + export interface Content { + /** + * The text content. + */ + text: string; + + /** + * The type of content, which is always `output_text`. + */ + type: 'output_text'; + } + } +} + +/** + * A StoredCompletionsDataSourceConfig which specifies the metadata property of + * your stored completions query. This is usually metadata like `usecase=chatbot` + * or `prompt-version=v2`, etc. The schema returned by this data source config is + * used to defined what variables are available in your evals. `item` and `sample` + * are both defined when using this data source config. + */ +export interface EvalStoredCompletionsDataSourceConfig { + /** + * The json schema for the run data source items. Learn how to build JSON schemas + * [here](https://json-schema.org/). + */ + schema: Record; + + /** + * The type of data source. Always `stored_completions`. + */ + type: 'stored_completions'; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; +} + +/** + * A StringCheckGrader object that performs a string comparison between input and + * reference using a specified operation. + */ +export interface EvalStringCheckGrader { + /** + * The input text. This may include template strings. + */ + input: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The string check operation to perform. One of `eq`, `ne`, `like`, or `ilike`. + */ + operation: 'eq' | 'ne' | 'like' | 'ilike'; + + /** + * The reference text. This may include template strings. + */ + reference: string; + + /** + * The object type, which is always `string_check`. + */ + type: 'string_check'; +} + +/** + * A TextSimilarityGrader object which grades text based on similarity metrics. + */ +export interface EvalTextSimilarityGrader { + /** + * The evaluation metric to use. One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, + * `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. + */ + evaluation_metric: + | 'fuzzy_match' + | 'bleu' + | 'gleu' + | 'meteor' + | 'rouge_1' + | 'rouge_2' + | 'rouge_3' + | 'rouge_4' + | 'rouge_5' + | 'rouge_l' + | 'cosine'; + + /** + * The text being graded. + */ + input: string; + + /** + * A float score where a value greater than or equal indicates a passing grade. + */ + pass_threshold: number; + + /** + * The text being graded against. + */ + reference: string; + + /** + * The type of grader. 
+ */ + type: 'text_similarity'; + + /** + * The name of the grader. + */ + name?: string; +} + +/** + * An Eval object with a data source config and testing criteria. An Eval + * represents a task to be done for your LLM integration. Like: + * + * - Improve the quality of my chatbot + * - See how well my chatbot handles customer support + * - Check if o3-mini is better at my usecase than gpt-4o + */ +export interface EvalCreateResponse { + /** + * Unique identifier for the evaluation. + */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the eval was created. + */ + created_at: number; + + /** + * Configuration of data sources used in runs of the evaluation. + */ + data_source_config: EvalCustomDataSourceConfig | EvalStoredCompletionsDataSourceConfig; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The name of the evaluation. + */ + name: string; + + /** + * The object type. + */ + object: 'eval'; + + /** + * Indicates whether the evaluation is shared with OpenAI. + */ + share_with_openai: boolean; + + /** + * A list of testing criteria. + */ + testing_criteria: Array; +} + +/** + * An Eval object with a data source config and testing criteria. An Eval + * represents a task to be done for your LLM integration. Like: + * + * - Improve the quality of my chatbot + * - See how well my chatbot handles customer support + * - Check if o3-mini is better at my usecase than gpt-4o + */ +export interface EvalRetrieveResponse { + /** + * Unique identifier for the evaluation. + */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the eval was created. + */ + created_at: number; + + /** + * Configuration of data sources used in runs of the evaluation. + */ + data_source_config: EvalCustomDataSourceConfig | EvalStoredCompletionsDataSourceConfig; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The name of the evaluation. + */ + name: string; + + /** + * The object type. + */ + object: 'eval'; + + /** + * Indicates whether the evaluation is shared with OpenAI. + */ + share_with_openai: boolean; + + /** + * A list of testing criteria. + */ + testing_criteria: Array; +} + +/** + * An Eval object with a data source config and testing criteria. An Eval + * represents a task to be done for your LLM integration. Like: + * + * - Improve the quality of my chatbot + * - See how well my chatbot handles customer support + * - Check if o3-mini is better at my usecase than gpt-4o + */ +export interface EvalUpdateResponse { + /** + * Unique identifier for the evaluation. + */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the eval was created. + */ + created_at: number; + + /** + * Configuration of data sources used in runs of the evaluation. 
+ */ + data_source_config: EvalCustomDataSourceConfig | EvalStoredCompletionsDataSourceConfig; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The name of the evaluation. + */ + name: string; + + /** + * The object type. + */ + object: 'eval'; + + /** + * Indicates whether the evaluation is shared with OpenAI. + */ + share_with_openai: boolean; + + /** + * A list of testing criteria. + */ + testing_criteria: Array; +} + +/** + * An Eval object with a data source config and testing criteria. An Eval + * represents a task to be done for your LLM integration. Like: + * + * - Improve the quality of my chatbot + * - See how well my chatbot handles customer support + * - Check if o3-mini is better at my usecase than gpt-4o + */ +export interface EvalListResponse { + /** + * Unique identifier for the evaluation. + */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the eval was created. + */ + created_at: number; + + /** + * Configuration of data sources used in runs of the evaluation. + */ + data_source_config: EvalCustomDataSourceConfig | EvalStoredCompletionsDataSourceConfig; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The name of the evaluation. + */ + name: string; + + /** + * The object type. + */ + object: 'eval'; + + /** + * Indicates whether the evaluation is shared with OpenAI. + */ + share_with_openai: boolean; + + /** + * A list of testing criteria. + */ + testing_criteria: Array; +} + +export interface EvalDeleteResponse { + deleted: boolean; + + eval_id: string; + + object: string; +} + +export interface EvalCreateParams { + /** + * The configuration for the data source used for the evaluation runs. + */ + data_source_config: EvalCreateParams.Custom | EvalCreateParams.StoredCompletions; + + /** + * A list of graders for all eval runs in this group. + */ + testing_criteria: Array; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + + /** + * The name of the evaluation. + */ + name?: string; + + /** + * Indicates whether the evaluation is shared with OpenAI. + */ + share_with_openai?: boolean; +} + +export namespace EvalCreateParams { + /** + * A CustomDataSourceConfig object that defines the schema for the data source used + * for the evaluation runs. This schema is used to define the shape of the data + * that will be: + * + * - Used to define your testing criteria and + * - What data is required when creating a run + */ + export interface Custom { + /** + * The json schema for the run data source items. 
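+     *
+     * For illustration only (the property names are assumptions), an item schema
+     * for a question/answer dataset might be:
+     *
+     *     {
+     *       type: 'object',
+     *       properties: { question: { type: 'string' }, expected: { type: 'string' } },
+     *       required: ['question', 'expected'],
+     *     }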
+ */ + item_schema: Record; + + /** + * The type of data source. Always `custom`. + */ + type: 'custom'; + + /** + * Whether to include the sample schema in the data source. + */ + include_sample_schema?: boolean; + } + + /** + * A data source config which specifies the metadata property of your stored + * completions query. This is usually metadata like `usecase=chatbot` or + * `prompt-version=v2`, etc. + */ + export interface StoredCompletions { + /** + * The type of data source. Always `stored_completions`. + */ + type: 'stored_completions'; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + } + + /** + * A LabelModelGrader object which uses a model to assign labels to each item in + * the evaluation. + */ + export interface LabelModel { + input: Array; + + /** + * The labels to classify to each item in the evaluation. + */ + labels: Array; + + /** + * The model to use for the evaluation. Must support structured outputs. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The labels that indicate a passing result. Must be a subset of labels. + */ + passing_labels: Array; + + /** + * The object type, which is always `label_model`. + */ + type: 'label_model'; + } + + export namespace LabelModel { + export interface SimpleInputMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + export interface InputMessage { + content: InputMessage.Content; + + /** + * The role of the message. One of `user`, `system`, or `developer`. + */ + role: 'user' | 'system' | 'developer'; + + /** + * The type of item, which is always `message`. + */ + type: 'message'; + } + + export namespace InputMessage { + export interface Content { + /** + * The text content. + */ + text: string; + + /** + * The type of content, which is always `input_text`. + */ + type: 'input_text'; + } + } + + export interface OutputMessage { + content: OutputMessage.Content; + + /** + * The role of the message. Must be `assistant` for output. + */ + role: 'assistant'; + + /** + * The type of item, which is always `message`. + */ + type: 'message'; + } + + export namespace OutputMessage { + export interface Content { + /** + * The text content. + */ + text: string; + + /** + * The type of content, which is always `output_text`. + */ + type: 'output_text'; + } + } + } +} + +export interface EvalUpdateParams { + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + + /** + * Rename the evaluation. + */ + name?: string; +} + +export interface EvalListParams extends CursorPageParams { + /** + * Sort order for evals by timestamp. Use `asc` for ascending order or `desc` for + * descending order. + */ + order?: 'asc' | 'desc'; + + /** + * Evals can be ordered by creation time or last updated time. 
Use `created_at` for + * creation time or `updated_at` for last updated time. + */ + order_by?: 'created_at' | 'updated_at'; +} + +Evals.Runs = Runs; + +export declare namespace Evals { + export { + type EvalCustomDataSourceConfig as EvalCustomDataSourceConfig, + type EvalLabelModelGrader as EvalLabelModelGrader, + type EvalStoredCompletionsDataSourceConfig as EvalStoredCompletionsDataSourceConfig, + type EvalStringCheckGrader as EvalStringCheckGrader, + type EvalTextSimilarityGrader as EvalTextSimilarityGrader, + type EvalCreateResponse as EvalCreateResponse, + type EvalRetrieveResponse as EvalRetrieveResponse, + type EvalUpdateResponse as EvalUpdateResponse, + type EvalListResponse as EvalListResponse, + type EvalDeleteResponse as EvalDeleteResponse, + type EvalListResponsesPage as EvalListResponsesPage, + type EvalCreateParams as EvalCreateParams, + type EvalUpdateParams as EvalUpdateParams, + type EvalListParams as EvalListParams, + }; + + export { + Runs as Runs, + type CreateEvalCompletionsRunDataSource as CreateEvalCompletionsRunDataSource, + type CreateEvalJSONLRunDataSource as CreateEvalJSONLRunDataSource, + type EvalAPIError as EvalAPIError, + type RunCreateResponse as RunCreateResponse, + type RunRetrieveResponse as RunRetrieveResponse, + type RunListResponse as RunListResponse, + type RunDeleteResponse as RunDeleteResponse, + type RunCancelResponse as RunCancelResponse, + type RunListResponsesPage as RunListResponsesPage, + type RunCreateParams as RunCreateParams, + type RunRetrieveParams as RunRetrieveParams, + type RunListParams as RunListParams, + type RunDeleteParams as RunDeleteParams, + type RunCancelParams as RunCancelParams, + }; +} diff --git a/src/resources/evals/index.ts b/src/resources/evals/index.ts new file mode 100644 index 000000000..4d1e30a09 --- /dev/null +++ b/src/resources/evals/index.ts @@ -0,0 +1,36 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export { + Evals, + type EvalCustomDataSourceConfig, + type EvalLabelModelGrader, + type EvalStoredCompletionsDataSourceConfig, + type EvalStringCheckGrader, + type EvalTextSimilarityGrader, + type EvalCreateResponse, + type EvalRetrieveResponse, + type EvalUpdateResponse, + type EvalListResponse, + type EvalDeleteResponse, + type EvalCreateParams, + type EvalUpdateParams, + type EvalListParams, + type EvalListResponsesPage, +} from './evals'; +export { + Runs, + type CreateEvalCompletionsRunDataSource, + type CreateEvalJSONLRunDataSource, + type EvalAPIError, + type RunCreateResponse, + type RunRetrieveResponse, + type RunListResponse, + type RunDeleteResponse, + type RunCancelResponse, + type RunCreateParams, + type RunRetrieveParams, + type RunListParams, + type RunDeleteParams, + type RunCancelParams, + type RunListResponsesPage, +} from './runs/index'; diff --git a/src/resources/evals/runs.ts b/src/resources/evals/runs.ts new file mode 100644 index 000000000..a3cc2bc7f --- /dev/null +++ b/src/resources/evals/runs.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './runs/index'; diff --git a/src/resources/evals/runs/index.ts b/src/resources/evals/runs/index.ts new file mode 100644 index 000000000..e51e04c11 --- /dev/null +++ b/src/resources/evals/runs/index.ts @@ -0,0 +1,27 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +export { + OutputItems, + type OutputItemRetrieveResponse, + type OutputItemListResponse, + type OutputItemRetrieveParams, + type OutputItemListParams, + type OutputItemListResponsesPage, +} from './output-items'; +export { + Runs, + type CreateEvalCompletionsRunDataSource, + type CreateEvalJSONLRunDataSource, + type EvalAPIError, + type RunCreateResponse, + type RunRetrieveResponse, + type RunListResponse, + type RunDeleteResponse, + type RunCancelResponse, + type RunCreateParams, + type RunRetrieveParams, + type RunListParams, + type RunDeleteParams, + type RunCancelParams, + type RunListResponsesPage, +} from './runs'; diff --git a/src/resources/evals/runs/output-items.ts b/src/resources/evals/runs/output-items.ts new file mode 100644 index 000000000..e7c33f27e --- /dev/null +++ b/src/resources/evals/runs/output-items.ts @@ -0,0 +1,413 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../core/resource'; +import * as RunsAPI from './runs'; +import { APIPromise } from '../../../core/api-promise'; +import { CursorPage, type CursorPageParams, PagePromise } from '../../../core/pagination'; +import { RequestOptions } from '../../../internal/request-options'; +import { path } from '../../../internal/utils/path'; + +export class OutputItems extends APIResource { + /** + * Get an evaluation run output item by ID. + */ + retrieve( + outputItemID: string, + params: OutputItemRetrieveParams, + options?: RequestOptions, + ): APIPromise { + const { eval_id, run_id } = params; + return this._client.get(path`/evals/${eval_id}/runs/${run_id}/output_items/${outputItemID}`, options); + } + + /** + * Get a list of output items for an evaluation run. + */ + list( + runID: string, + params: OutputItemListParams, + options?: RequestOptions, + ): PagePromise { + const { eval_id, ...query } = params; + return this._client.getAPIList( + path`/evals/${eval_id}/runs/${runID}/output_items`, + CursorPage, + { query, ...options }, + ); + } +} + +export type OutputItemListResponsesPage = CursorPage; + +/** + * A schema representing an evaluation run output item. + */ +export interface OutputItemRetrieveResponse { + /** + * Unique identifier for the evaluation run output item. + */ + id: string; + + /** + * Unix timestamp (in seconds) when the evaluation run was created. + */ + created_at: number; + + /** + * Details of the input data source item. + */ + datasource_item: Record; + + /** + * The identifier for the data source item. + */ + datasource_item_id: number; + + /** + * The identifier of the evaluation group. + */ + eval_id: string; + + /** + * The type of the object. Always "eval.run.output_item". + */ + object: 'eval.run.output_item'; + + /** + * A list of results from the evaluation run. + */ + results: Array>; + + /** + * The identifier of the evaluation run associated with this output item. + */ + run_id: string; + + /** + * A sample containing the input and output of the evaluation run. + */ + sample: OutputItemRetrieveResponse.Sample; + + /** + * The status of the evaluation run. + */ + status: string; +} + +export namespace OutputItemRetrieveResponse { + /** + * A sample containing the input and output of the evaluation run. + */ + export interface Sample { + /** + * An object representing an error response from the Eval API. + */ + error: RunsAPI.EvalAPIError; + + /** + * The reason why the sample generation was finished. + */ + finish_reason: string; + + /** + * An array of input messages. 
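+     * Each entry pairs a role with its text content, for example (values are
+     * illustrative) `{ role: 'user', content: 'What is the capital of France?' }`.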
+ */ + input: Array; + + /** + * The maximum number of tokens allowed for completion. + */ + max_completion_tokens: number; + + /** + * The model used for generating the sample. + */ + model: string; + + /** + * An array of output messages. + */ + output: Array; + + /** + * The seed used for generating the sample. + */ + seed: number; + + /** + * The sampling temperature used. + */ + temperature: number; + + /** + * The top_p value used for sampling. + */ + top_p: number; + + /** + * Token usage details for the sample. + */ + usage: Sample.Usage; + } + + export namespace Sample { + /** + * An input message. + */ + export interface Input { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message sender (e.g., system, user, developer). + */ + role: string; + } + + export interface Output { + /** + * The content of the message. + */ + content?: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role?: string; + } + + /** + * Token usage details for the sample. + */ + export interface Usage { + /** + * The number of tokens retrieved from cache. + */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of prompt tokens used. + */ + prompt_tokens: number; + + /** + * The total number of tokens used. + */ + total_tokens: number; + } + } +} + +/** + * A schema representing an evaluation run output item. + */ +export interface OutputItemListResponse { + /** + * Unique identifier for the evaluation run output item. + */ + id: string; + + /** + * Unix timestamp (in seconds) when the evaluation run was created. + */ + created_at: number; + + /** + * Details of the input data source item. + */ + datasource_item: Record; + + /** + * The identifier for the data source item. + */ + datasource_item_id: number; + + /** + * The identifier of the evaluation group. + */ + eval_id: string; + + /** + * The type of the object. Always "eval.run.output_item". + */ + object: 'eval.run.output_item'; + + /** + * A list of results from the evaluation run. + */ + results: Array>; + + /** + * The identifier of the evaluation run associated with this output item. + */ + run_id: string; + + /** + * A sample containing the input and output of the evaluation run. + */ + sample: OutputItemListResponse.Sample; + + /** + * The status of the evaluation run. + */ + status: string; +} + +export namespace OutputItemListResponse { + /** + * A sample containing the input and output of the evaluation run. + */ + export interface Sample { + /** + * An object representing an error response from the Eval API. + */ + error: RunsAPI.EvalAPIError; + + /** + * The reason why the sample generation was finished. + */ + finish_reason: string; + + /** + * An array of input messages. + */ + input: Array; + + /** + * The maximum number of tokens allowed for completion. + */ + max_completion_tokens: number; + + /** + * The model used for generating the sample. + */ + model: string; + + /** + * An array of output messages. + */ + output: Array; + + /** + * The seed used for generating the sample. + */ + seed: number; + + /** + * The sampling temperature used. + */ + temperature: number; + + /** + * The top_p value used for sampling. + */ + top_p: number; + + /** + * Token usage details for the sample. + */ + usage: Sample.Usage; + } + + export namespace Sample { + /** + * An input message. + */ + export interface Input { + /** + * The content of the message. 
+ */ + content: string; + + /** + * The role of the message sender (e.g., system, user, developer). + */ + role: string; + } + + export interface Output { + /** + * The content of the message. + */ + content?: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role?: string; + } + + /** + * Token usage details for the sample. + */ + export interface Usage { + /** + * The number of tokens retrieved from cache. + */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of prompt tokens used. + */ + prompt_tokens: number; + + /** + * The total number of tokens used. + */ + total_tokens: number; + } + } +} + +export interface OutputItemRetrieveParams { + /** + * The ID of the evaluation to retrieve runs for. + */ + eval_id: string; + + /** + * The ID of the run to retrieve. + */ + run_id: string; +} + +export interface OutputItemListParams extends CursorPageParams { + /** + * Path param: The ID of the evaluation to retrieve runs for. + */ + eval_id: string; + + /** + * Query param: Sort order for output items by timestamp. Use `asc` for ascending + * order or `desc` for descending order. Defaults to `asc`. + */ + order?: 'asc' | 'desc'; + + /** + * Query param: Filter output items by status. Use `failed` to filter by failed + * output items or `pass` to filter by passed output items. + */ + status?: 'fail' | 'pass'; +} + +export declare namespace OutputItems { + export { + type OutputItemRetrieveResponse as OutputItemRetrieveResponse, + type OutputItemListResponse as OutputItemListResponse, + type OutputItemListResponsesPage as OutputItemListResponsesPage, + type OutputItemRetrieveParams as OutputItemRetrieveParams, + type OutputItemListParams as OutputItemListParams, + }; +} diff --git a/src/resources/evals/runs/runs.ts b/src/resources/evals/runs/runs.ts new file mode 100644 index 000000000..ca5c265d9 --- /dev/null +++ b/src/resources/evals/runs/runs.ts @@ -0,0 +1,1073 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../core/resource'; +import * as Shared from '../../shared'; +import * as OutputItemsAPI from './output-items'; +import { + OutputItemListParams, + OutputItemListResponse, + OutputItemListResponsesPage, + OutputItemRetrieveParams, + OutputItemRetrieveResponse, + OutputItems, +} from './output-items'; +import { APIPromise } from '../../../core/api-promise'; +import { CursorPage, type CursorPageParams, PagePromise } from '../../../core/pagination'; +import { RequestOptions } from '../../../internal/request-options'; +import { path } from '../../../internal/utils/path'; + +export class Runs extends APIResource { + outputItems: OutputItemsAPI.OutputItems = new OutputItemsAPI.OutputItems(this._client); + + /** + * Create a new evaluation run. This is the endpoint that will kick off grading. + */ + create(evalID: string, body: RunCreateParams, options?: RequestOptions): APIPromise { + return this._client.post(path`/evals/${evalID}/runs`, { body, ...options }); + } + + /** + * Get an evaluation run by ID. + */ + retrieve( + runID: string, + params: RunRetrieveParams, + options?: RequestOptions, + ): APIPromise { + const { eval_id } = params; + return this._client.get(path`/evals/${eval_id}/runs/${runID}`, options); + } + + /** + * Get a list of runs for an evaluation. 
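+   *
+   * Usage sketch (the eval ID below is a placeholder): list responses can be
+   * iterated page by page, e.g.
+   *
+   *     for await (const run of client.evals.runs.list('eval_abc123', { order: 'desc' })) {
+   *       console.log(run.id, run.status);
+   *     }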
+ */ + list( + evalID: string, + query: RunListParams | null | undefined = {}, + options?: RequestOptions, + ): PagePromise { + return this._client.getAPIList(path`/evals/${evalID}/runs`, CursorPage, { + query, + ...options, + }); + } + + /** + * Delete an eval run. + */ + delete(runID: string, params: RunDeleteParams, options?: RequestOptions): APIPromise { + const { eval_id } = params; + return this._client.delete(path`/evals/${eval_id}/runs/${runID}`, options); + } + + /** + * Cancel an ongoing evaluation run. + */ + cancel(runID: string, params: RunCancelParams, options?: RequestOptions): APIPromise { + const { eval_id } = params; + return this._client.post(path`/evals/${eval_id}/runs/${runID}`, options); + } +} + +export type RunListResponsesPage = CursorPage; + +/** + * A CompletionsRunDataSource object describing a model sampling configuration. + */ +export interface CreateEvalCompletionsRunDataSource { + input_messages: + | CreateEvalCompletionsRunDataSource.Template + | CreateEvalCompletionsRunDataSource.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model: string; + + /** + * A StoredCompletionsRunDataSource configuration describing a set of filters + */ + source: + | CreateEvalCompletionsRunDataSource.FileContent + | CreateEvalCompletionsRunDataSource.FileID + | CreateEvalCompletionsRunDataSource.StoredCompletions; + + /** + * The type of run data source. Always `completions`. + */ + type: 'completions'; + + sampling_params?: CreateEvalCompletionsRunDataSource.SamplingParams; +} + +export namespace CreateEvalCompletionsRunDataSource { + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + export interface InputMessage { + content: InputMessage.Content; + + /** + * The role of the message. One of `user`, `system`, or `developer`. + */ + role: 'user' | 'system' | 'developer'; + + /** + * The type of item, which is always `message`. + */ + type: 'message'; + } + + export namespace InputMessage { + export interface Content { + /** + * The text content. + */ + text: string; + + /** + * The type of content, which is always `input_text`. + */ + type: 'input_text'; + } + } + + export interface OutputMessage { + content: OutputMessage.Content; + + /** + * The role of the message. Must be `assistant` for output. + */ + role: 'assistant'; + + /** + * The type of item, which is always `message`. + */ + type: 'message'; + } + + export namespace OutputMessage { + export interface Content { + /** + * The text content. + */ + text: string; + + /** + * The type of content, which is always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. 
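+     *
+     * Illustrative only (the keys inside `item` are assumptions): an inline source
+     * of this type carries rows such as
+     * `{ type: 'file_content', content: [{ item: { question: '...', expected: '...' } }] }`.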
+ */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A StoredCompletionsRunDataSource configuration describing a set of filters + */ + export interface StoredCompletions { + /** + * An optional Unix timestamp to filter items created after this time. + */ + created_after: number | null; + + /** + * An optional Unix timestamp to filter items created before this time. + */ + created_before: number | null; + + /** + * An optional maximum number of items to return. + */ + limit: number | null; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * An optional model to filter by (e.g., 'gpt-4o'). + */ + model: string | null; + + /** + * The type of source. Always `stored_completions`. + */ + type: 'stored_completions'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } +} + +/** + * A JsonlRunDataSource object with that specifies a JSONL file that matches the + * eval + */ +export interface CreateEvalJSONLRunDataSource { + source: CreateEvalJSONLRunDataSource.FileContent | CreateEvalJSONLRunDataSource.FileID; + + /** + * The type of data source. Always `jsonl`. + */ + type: 'jsonl'; +} + +export namespace CreateEvalJSONLRunDataSource { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } +} + +/** + * An object representing an error response from the Eval API. + */ +export interface EvalAPIError { + /** + * The error code. + */ + code: string; + + /** + * The error message. + */ + message: string; +} + +/** + * A schema representing an evaluation run. + */ +export interface RunCreateResponse { + /** + * Unique identifier for the evaluation run. + */ + id: string; + + /** + * Unix timestamp (in seconds) when the evaluation run was created. + */ + created_at: number; + + /** + * Information about the run's data source. + */ + data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + + /** + * An object representing an error response from the Eval API. + */ + error: EvalAPIError; + + /** + * The identifier of the associated evaluation. + */ + eval_id: string; + + /** + * Set of 16 key-value pairs that can be attached to an object. 
This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The model that is evaluated, if applicable. + */ + model: string; + + /** + * The name of the evaluation run. + */ + name: string; + + /** + * The type of the object. Always "eval.run". + */ + object: 'eval.run'; + + /** + * Usage statistics for each model during the evaluation run. + */ + per_model_usage: Array; + + /** + * Results per testing criteria applied during the evaluation run. + */ + per_testing_criteria_results: Array; + + /** + * The URL to the rendered evaluation run report on the UI dashboard. + */ + report_url: string; + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + result_counts: RunCreateResponse.ResultCounts; + + /** + * The status of the evaluation run. + */ + status: string; +} + +export namespace RunCreateResponse { + export interface PerModelUsage { + /** + * The number of tokens retrieved from cache. + */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of invocations. + */ + invocation_count: number; + + /** + * The name of the model. + */ + model_name: string; + + /** + * The number of prompt tokens used. + */ + prompt_tokens: number; + + /** + * The total number of tokens used. + */ + total_tokens: number; + } + + export interface PerTestingCriteriaResult { + /** + * Number of tests failed for this criteria. + */ + failed: number; + + /** + * Number of tests passed for this criteria. + */ + passed: number; + + /** + * A description of the testing criteria. + */ + testing_criteria: string; + } + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + export interface ResultCounts { + /** + * Number of output items that resulted in an error. + */ + errored: number; + + /** + * Number of output items that failed to pass the evaluation. + */ + failed: number; + + /** + * Number of output items that passed the evaluation. + */ + passed: number; + + /** + * Total number of executed output items. + */ + total: number; + } +} + +/** + * A schema representing an evaluation run. + */ +export interface RunRetrieveResponse { + /** + * Unique identifier for the evaluation run. + */ + id: string; + + /** + * Unix timestamp (in seconds) when the evaluation run was created. + */ + created_at: number; + + /** + * Information about the run's data source. + */ + data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + + /** + * An object representing an error response from the Eval API. + */ + error: EvalAPIError; + + /** + * The identifier of the associated evaluation. + */ + eval_id: string; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The model that is evaluated, if applicable. + */ + model: string; + + /** + * The name of the evaluation run. + */ + name: string; + + /** + * The type of the object. Always "eval.run". 
+ */ + object: 'eval.run'; + + /** + * Usage statistics for each model during the evaluation run. + */ + per_model_usage: Array; + + /** + * Results per testing criteria applied during the evaluation run. + */ + per_testing_criteria_results: Array; + + /** + * The URL to the rendered evaluation run report on the UI dashboard. + */ + report_url: string; + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + result_counts: RunRetrieveResponse.ResultCounts; + + /** + * The status of the evaluation run. + */ + status: string; +} + +export namespace RunRetrieveResponse { + export interface PerModelUsage { + /** + * The number of tokens retrieved from cache. + */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of invocations. + */ + invocation_count: number; + + /** + * The name of the model. + */ + model_name: string; + + /** + * The number of prompt tokens used. + */ + prompt_tokens: number; + + /** + * The total number of tokens used. + */ + total_tokens: number; + } + + export interface PerTestingCriteriaResult { + /** + * Number of tests failed for this criteria. + */ + failed: number; + + /** + * Number of tests passed for this criteria. + */ + passed: number; + + /** + * A description of the testing criteria. + */ + testing_criteria: string; + } + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + export interface ResultCounts { + /** + * Number of output items that resulted in an error. + */ + errored: number; + + /** + * Number of output items that failed to pass the evaluation. + */ + failed: number; + + /** + * Number of output items that passed the evaluation. + */ + passed: number; + + /** + * Total number of executed output items. + */ + total: number; + } +} + +/** + * A schema representing an evaluation run. + */ +export interface RunListResponse { + /** + * Unique identifier for the evaluation run. + */ + id: string; + + /** + * Unix timestamp (in seconds) when the evaluation run was created. + */ + created_at: number; + + /** + * Information about the run's data source. + */ + data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + + /** + * An object representing an error response from the Eval API. + */ + error: EvalAPIError; + + /** + * The identifier of the associated evaluation. + */ + eval_id: string; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The model that is evaluated, if applicable. + */ + model: string; + + /** + * The name of the evaluation run. + */ + name: string; + + /** + * The type of the object. Always "eval.run". + */ + object: 'eval.run'; + + /** + * Usage statistics for each model during the evaluation run. + */ + per_model_usage: Array; + + /** + * Results per testing criteria applied during the evaluation run. + */ + per_testing_criteria_results: Array; + + /** + * The URL to the rendered evaluation run report on the UI dashboard. + */ + report_url: string; + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + result_counts: RunListResponse.ResultCounts; + + /** + * The status of the evaluation run. 
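+   * Mirroring the `RunListParams.status` filter defined later in this file, this is
+   * typically one of `queued`, `in_progress`, `completed`, `canceled`, or `failed`.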
+ */ + status: string; +} + +export namespace RunListResponse { + export interface PerModelUsage { + /** + * The number of tokens retrieved from cache. + */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of invocations. + */ + invocation_count: number; + + /** + * The name of the model. + */ + model_name: string; + + /** + * The number of prompt tokens used. + */ + prompt_tokens: number; + + /** + * The total number of tokens used. + */ + total_tokens: number; + } + + export interface PerTestingCriteriaResult { + /** + * Number of tests failed for this criteria. + */ + failed: number; + + /** + * Number of tests passed for this criteria. + */ + passed: number; + + /** + * A description of the testing criteria. + */ + testing_criteria: string; + } + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + export interface ResultCounts { + /** + * Number of output items that resulted in an error. + */ + errored: number; + + /** + * Number of output items that failed to pass the evaluation. + */ + failed: number; + + /** + * Number of output items that passed the evaluation. + */ + passed: number; + + /** + * Total number of executed output items. + */ + total: number; + } +} + +export interface RunDeleteResponse { + deleted?: boolean; + + object?: string; + + run_id?: string; +} + +/** + * A schema representing an evaluation run. + */ +export interface RunCancelResponse { + /** + * Unique identifier for the evaluation run. + */ + id: string; + + /** + * Unix timestamp (in seconds) when the evaluation run was created. + */ + created_at: number; + + /** + * Information about the run's data source. + */ + data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + + /** + * An object representing an error response from the Eval API. + */ + error: EvalAPIError; + + /** + * The identifier of the associated evaluation. + */ + eval_id: string; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The model that is evaluated, if applicable. + */ + model: string; + + /** + * The name of the evaluation run. + */ + name: string; + + /** + * The type of the object. Always "eval.run". + */ + object: 'eval.run'; + + /** + * Usage statistics for each model during the evaluation run. + */ + per_model_usage: Array; + + /** + * Results per testing criteria applied during the evaluation run. + */ + per_testing_criteria_results: Array; + + /** + * The URL to the rendered evaluation run report on the UI dashboard. + */ + report_url: string; + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + result_counts: RunCancelResponse.ResultCounts; + + /** + * The status of the evaluation run. + */ + status: string; +} + +export namespace RunCancelResponse { + export interface PerModelUsage { + /** + * The number of tokens retrieved from cache. + */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of invocations. + */ + invocation_count: number; + + /** + * The name of the model. 
+ */ + model_name: string; + + /** + * The number of prompt tokens used. + */ + prompt_tokens: number; + + /** + * The total number of tokens used. + */ + total_tokens: number; + } + + export interface PerTestingCriteriaResult { + /** + * Number of tests failed for this criteria. + */ + failed: number; + + /** + * Number of tests passed for this criteria. + */ + passed: number; + + /** + * A description of the testing criteria. + */ + testing_criteria: string; + } + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + export interface ResultCounts { + /** + * Number of output items that resulted in an error. + */ + errored: number; + + /** + * Number of output items that failed to pass the evaluation. + */ + failed: number; + + /** + * Number of output items that passed the evaluation. + */ + passed: number; + + /** + * Total number of executed output items. + */ + total: number; + } +} + +export interface RunCreateParams { + /** + * Details about the run's data source. + */ + data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + + /** + * The name of the run. + */ + name?: string; +} + +export interface RunRetrieveParams { + /** + * The ID of the evaluation to retrieve runs for. + */ + eval_id: string; +} + +export interface RunListParams extends CursorPageParams { + /** + * Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for + * descending order. Defaults to `asc`. + */ + order?: 'asc' | 'desc'; + + /** + * Filter runs by status. Use "queued" | "in_progress" | "failed" | "completed" | + * "canceled". + */ + status?: 'queued' | 'in_progress' | 'completed' | 'canceled' | 'failed'; +} + +export interface RunDeleteParams { + /** + * The ID of the evaluation to delete the run from. + */ + eval_id: string; +} + +export interface RunCancelParams { + /** + * The ID of the evaluation whose run you want to cancel. 
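+   *
+   * Usage sketch (both IDs are placeholders): the run ID is passed positionally and
+   * the eval ID through this params object, e.g.
+   * `await client.evals.runs.cancel('evalrun_abc123', { eval_id: 'eval_abc123' })`.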
+ */ + eval_id: string; +} + +Runs.OutputItems = OutputItems; + +export declare namespace Runs { + export { + type CreateEvalCompletionsRunDataSource as CreateEvalCompletionsRunDataSource, + type CreateEvalJSONLRunDataSource as CreateEvalJSONLRunDataSource, + type EvalAPIError as EvalAPIError, + type RunCreateResponse as RunCreateResponse, + type RunRetrieveResponse as RunRetrieveResponse, + type RunListResponse as RunListResponse, + type RunDeleteResponse as RunDeleteResponse, + type RunCancelResponse as RunCancelResponse, + type RunListResponsesPage as RunListResponsesPage, + type RunCreateParams as RunCreateParams, + type RunRetrieveParams as RunRetrieveParams, + type RunListParams as RunListParams, + type RunDeleteParams as RunDeleteParams, + type RunCancelParams as RunCancelParams, + }; + + export { + OutputItems as OutputItems, + type OutputItemRetrieveResponse as OutputItemRetrieveResponse, + type OutputItemListResponse as OutputItemListResponse, + type OutputItemListResponsesPage as OutputItemListResponsesPage, + type OutputItemRetrieveParams as OutputItemRetrieveParams, + type OutputItemListParams as OutputItemListParams, + }; +} diff --git a/src/resources/fine-tuning/checkpoints.ts b/src/resources/fine-tuning/checkpoints.ts new file mode 100644 index 000000000..eb09063f6 --- /dev/null +++ b/src/resources/fine-tuning/checkpoints.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './checkpoints/index'; diff --git a/src/resources/fine-tuning/checkpoints/checkpoints.ts b/src/resources/fine-tuning/checkpoints/checkpoints.ts new file mode 100644 index 000000000..91cab6fc9 --- /dev/null +++ b/src/resources/fine-tuning/checkpoints/checkpoints.ts @@ -0,0 +1,31 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../core/resource'; +import * as PermissionsAPI from './permissions'; +import { + PermissionCreateParams, + PermissionCreateResponse, + PermissionCreateResponsesPage, + PermissionDeleteResponse, + PermissionRetrieveParams, + PermissionRetrieveResponse, + Permissions, +} from './permissions'; + +export class Checkpoints extends APIResource { + permissions: PermissionsAPI.Permissions = new PermissionsAPI.Permissions(this._client); +} + +Checkpoints.Permissions = Permissions; + +export declare namespace Checkpoints { + export { + Permissions as Permissions, + type PermissionCreateResponse as PermissionCreateResponse, + type PermissionRetrieveResponse as PermissionRetrieveResponse, + type PermissionDeleteResponse as PermissionDeleteResponse, + type PermissionCreateResponsesPage as PermissionCreateResponsesPage, + type PermissionCreateParams as PermissionCreateParams, + type PermissionRetrieveParams as PermissionRetrieveParams, + }; +} diff --git a/src/resources/fine-tuning/checkpoints/index.ts b/src/resources/fine-tuning/checkpoints/index.ts new file mode 100644 index 000000000..c5b018cea --- /dev/null +++ b/src/resources/fine-tuning/checkpoints/index.ts @@ -0,0 +1,12 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +export { Checkpoints } from './checkpoints'; +export { + Permissions, + type PermissionCreateResponse, + type PermissionRetrieveResponse, + type PermissionDeleteResponse, + type PermissionCreateParams, + type PermissionRetrieveParams, + type PermissionCreateResponsesPage, +} from './permissions'; diff --git a/src/resources/fine-tuning/checkpoints/permissions.ts b/src/resources/fine-tuning/checkpoints/permissions.ts new file mode 100644 index 000000000..ba1e79ca8 --- /dev/null +++ b/src/resources/fine-tuning/checkpoints/permissions.ts @@ -0,0 +1,183 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../core/resource'; +import { APIPromise } from '../../../core/api-promise'; +import { Page, PagePromise } from '../../../core/pagination'; +import { RequestOptions } from '../../../internal/request-options'; +import { path } from '../../../internal/utils/path'; + +export class Permissions extends APIResource { + /** + * **NOTE:** Calling this endpoint requires an [admin API key](../admin-api-keys). + * + * This enables organization owners to share fine-tuned models with other projects + * in their organization. + */ + create( + fineTunedModelCheckpoint: string, + body: PermissionCreateParams, + options?: RequestOptions, + ): PagePromise { + return this._client.getAPIList( + path`/fine_tuning/checkpoints/${fineTunedModelCheckpoint}/permissions`, + Page, + { body, method: 'post', ...options }, + ); + } + + /** + * **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). + * + * Organization owners can use this endpoint to view all permissions for a + * fine-tuned model checkpoint. + */ + retrieve( + fineTunedModelCheckpoint: string, + query: PermissionRetrieveParams | null | undefined = {}, + options?: RequestOptions, + ): APIPromise { + return this._client.get(path`/fine_tuning/checkpoints/${fineTunedModelCheckpoint}/permissions`, { + query, + ...options, + }); + } + + /** + * **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). + * + * Organization owners can use this endpoint to delete a permission for a + * fine-tuned model checkpoint. + */ + delete(fineTunedModelCheckpoint: string, options?: RequestOptions): APIPromise { + return this._client.delete( + path`/fine_tuning/checkpoints/${fineTunedModelCheckpoint}/permissions`, + options, + ); + } +} + +// Note: no pagination actually occurs yet, this is for forwards-compatibility. +export type PermissionCreateResponsesPage = Page; + +/** + * The `checkpoint.permission` object represents a permission for a fine-tuned + * model checkpoint. + */ +export interface PermissionCreateResponse { + /** + * The permission identifier, which can be referenced in the API endpoints. + */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the permission was created. + */ + created_at: number; + + /** + * The object type, which is always "checkpoint.permission". + */ + object: 'checkpoint.permission'; + + /** + * The project identifier that the permission is for. + */ + project_id: string; +} + +export interface PermissionRetrieveResponse { + data: Array; + + has_more: boolean; + + object: 'list'; + + first_id?: string | null; + + last_id?: string | null; +} + +export namespace PermissionRetrieveResponse { + /** + * The `checkpoint.permission` object represents a permission for a fine-tuned + * model checkpoint. + */ + export interface Data { + /** + * The permission identifier, which can be referenced in the API endpoints. 
+ */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the permission was created. + */ + created_at: number; + + /** + * The object type, which is always "checkpoint.permission". + */ + object: 'checkpoint.permission'; + + /** + * The project identifier that the permission is for. + */ + project_id: string; + } +} + +export interface PermissionDeleteResponse { + /** + * The ID of the fine-tuned model checkpoint permission that was deleted. + */ + id: string; + + /** + * Whether the fine-tuned model checkpoint permission was successfully deleted. + */ + deleted: boolean; + + /** + * The object type, which is always "checkpoint.permission". + */ + object: 'checkpoint.permission'; +} + +export interface PermissionCreateParams { + /** + * The project identifiers to grant access to. + */ + project_ids: Array; +} + +export interface PermissionRetrieveParams { + /** + * Identifier for the last permission ID from the previous pagination request. + */ + after?: string; + + /** + * Number of permissions to retrieve. + */ + limit?: number; + + /** + * The order in which to retrieve permissions. + */ + order?: 'ascending' | 'descending'; + + /** + * The ID of the project to get permissions for. + */ + project_id?: string; +} + +export declare namespace Permissions { + export { + type PermissionCreateResponse as PermissionCreateResponse, + type PermissionRetrieveResponse as PermissionRetrieveResponse, + type PermissionDeleteResponse as PermissionDeleteResponse, + type PermissionCreateResponsesPage as PermissionCreateResponsesPage, + type PermissionCreateParams as PermissionCreateParams, + type PermissionRetrieveParams as PermissionRetrieveParams, + }; +} diff --git a/src/resources/fine-tuning/fine-tuning.ts b/src/resources/fine-tuning/fine-tuning.ts index be9eb0f89..6836f2127 100644 --- a/src/resources/fine-tuning/fine-tuning.ts +++ b/src/resources/fine-tuning/fine-tuning.ts @@ -1,6 +1,8 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import { APIResource } from '../../core/resource'; +import * as CheckpointsAPI from './checkpoints/checkpoints'; +import { Checkpoints } from './checkpoints/checkpoints'; import * as JobsAPI from './jobs/jobs'; import { FineTuningJob, @@ -18,9 +20,11 @@ import { export class FineTuning extends APIResource { jobs: JobsAPI.Jobs = new JobsAPI.Jobs(this._client); + checkpoints: CheckpointsAPI.Checkpoints = new CheckpointsAPI.Checkpoints(this._client); } FineTuning.Jobs = Jobs; +FineTuning.Checkpoints = Checkpoints; export declare namespace FineTuning { export { @@ -36,4 +40,6 @@ export declare namespace FineTuning { type JobListParams as JobListParams, type JobListEventsParams as JobListEventsParams, }; + + export { Checkpoints as Checkpoints }; } diff --git a/src/resources/fine-tuning/index.ts b/src/resources/fine-tuning/index.ts index 52ef721b8..29e57394a 100644 --- a/src/resources/fine-tuning/index.ts +++ b/src/resources/fine-tuning/index.ts @@ -1,5 +1,6 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+export { Checkpoints } from './checkpoints/index'; export { FineTuning } from './fine-tuning'; export { Jobs, diff --git a/src/resources/index.ts b/src/resources/index.ts index 99a703037..11b37eb14 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -29,6 +29,23 @@ export { type EmbeddingModel, type EmbeddingCreateParams, } from './embeddings'; +export { + Evals, + type EvalCustomDataSourceConfig, + type EvalLabelModelGrader, + type EvalStoredCompletionsDataSourceConfig, + type EvalStringCheckGrader, + type EvalTextSimilarityGrader, + type EvalCreateResponse, + type EvalRetrieveResponse, + type EvalUpdateResponse, + type EvalListResponse, + type EvalDeleteResponse, + type EvalCreateParams, + type EvalUpdateParams, + type EvalListParams, + type EvalListResponsesPage, +} from './evals/evals'; export { Files, type FileContent, diff --git a/tests/api-resources/evals/evals.test.ts b/tests/api-resources/evals/evals.test.ts new file mode 100644 index 000000000..f8efe2713 --- /dev/null +++ b/tests/api-resources/evals/evals.test.ts @@ -0,0 +1,395 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', +}); + +describe('resource evals', () => { + test('create: only required params', async () => { + const responsePromise = client.evals.create({ + data_source_config: { + item_schema: { + '0': 'bar', + '1': 'bar', + '2': 'bar', + '3': 'bar', + '4': 'bar', + '5': 'bar', + '6': 'bar', + '7': 'bar', + '8': 'bar', + '9': 'bar', + '10': 'bar', + '11': 'bar', + '12': 'bar', + '13': 'bar', + '14': 'bar', + '15': 'bar', + '16': 'bar', + '17': 'bar', + '18': 'bar', + '19': 'bar', + '20': 'bar', + '21': 'bar', + '22': 'bar', + '23': 'bar', + '24': 'bar', + '25': 'bar', + '26': 'bar', + '27': 'bar', + '28': 'bar', + '29': 'bar', + '30': 'bar', + '31': 'bar', + '32': 'bar', + '33': 'bar', + '34': 'bar', + '35': 'bar', + '36': 'bar', + '37': 'bar', + '38': 'bar', + '39': 'bar', + '40': 'bar', + '41': 'bar', + '42': 'bar', + '43': 'bar', + '44': 'bar', + '45': 'bar', + '46': 'bar', + '47': 'bar', + '48': 'bar', + '49': 'bar', + '50': 'bar', + '51': 'bar', + '52': 'bar', + '53': 'bar', + '54': 'bar', + '55': 'bar', + '56': 'bar', + '57': 'bar', + '58': 'bar', + '59': 'bar', + '60': 'bar', + '61': 'bar', + '62': 'bar', + '63': 'bar', + '64': 'bar', + '65': 'bar', + '66': 'bar', + '67': 'bar', + '68': 'bar', + '69': 'bar', + '70': 'bar', + '71': 'bar', + '72': 'bar', + '73': 'bar', + '74': 'bar', + '75': 'bar', + '76': 'bar', + '77': 'bar', + '78': 'bar', + '79': 'bar', + '80': 'bar', + '81': 'bar', + '82': 'bar', + '83': 'bar', + '84': 'bar', + '85': 'bar', + '86': 'bar', + '87': 'bar', + '88': 'bar', + '89': 'bar', + '90': 'bar', + '91': 'bar', + '92': 'bar', + '93': 'bar', + '94': 'bar', + '95': 'bar', + '96': 'bar', + '97': 'bar', + '98': 'bar', + '99': 'bar', + '100': 'bar', + '101': 'bar', + '102': 'bar', + '103': 'bar', + '104': 'bar', + '105': 'bar', + '106': 'bar', + '107': 'bar', + '108': 'bar', + '109': 'bar', + '110': 'bar', + '111': 'bar', + '112': 'bar', + '113': 'bar', + '114': 'bar', + '115': 'bar', + '116': 'bar', + '117': 'bar', + '118': 'bar', + '119': 'bar', + '120': 'bar', + '121': 'bar', + '122': 'bar', + '123': 'bar', + '124': 'bar', + '125': 'bar', + '126': 'bar', + '127': 'bar', + '128': 'bar', + '129': 'bar', + '130': 'bar', + '131': 'bar', + '132': 'bar', + '133': 'bar', + '134': 
'bar', + '135': 'bar', + '136': 'bar', + '137': 'bar', + '138': 'bar', + '139': 'bar', + }, + type: 'custom', + }, + testing_criteria: [ + { + input: [{ content: 'content', role: 'role' }], + labels: ['string'], + model: 'model', + name: 'name', + passing_labels: ['string'], + type: 'label_model', + }, + ], + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.evals.create({ + data_source_config: { + item_schema: { + '0': 'bar', + '1': 'bar', + '2': 'bar', + '3': 'bar', + '4': 'bar', + '5': 'bar', + '6': 'bar', + '7': 'bar', + '8': 'bar', + '9': 'bar', + '10': 'bar', + '11': 'bar', + '12': 'bar', + '13': 'bar', + '14': 'bar', + '15': 'bar', + '16': 'bar', + '17': 'bar', + '18': 'bar', + '19': 'bar', + '20': 'bar', + '21': 'bar', + '22': 'bar', + '23': 'bar', + '24': 'bar', + '25': 'bar', + '26': 'bar', + '27': 'bar', + '28': 'bar', + '29': 'bar', + '30': 'bar', + '31': 'bar', + '32': 'bar', + '33': 'bar', + '34': 'bar', + '35': 'bar', + '36': 'bar', + '37': 'bar', + '38': 'bar', + '39': 'bar', + '40': 'bar', + '41': 'bar', + '42': 'bar', + '43': 'bar', + '44': 'bar', + '45': 'bar', + '46': 'bar', + '47': 'bar', + '48': 'bar', + '49': 'bar', + '50': 'bar', + '51': 'bar', + '52': 'bar', + '53': 'bar', + '54': 'bar', + '55': 'bar', + '56': 'bar', + '57': 'bar', + '58': 'bar', + '59': 'bar', + '60': 'bar', + '61': 'bar', + '62': 'bar', + '63': 'bar', + '64': 'bar', + '65': 'bar', + '66': 'bar', + '67': 'bar', + '68': 'bar', + '69': 'bar', + '70': 'bar', + '71': 'bar', + '72': 'bar', + '73': 'bar', + '74': 'bar', + '75': 'bar', + '76': 'bar', + '77': 'bar', + '78': 'bar', + '79': 'bar', + '80': 'bar', + '81': 'bar', + '82': 'bar', + '83': 'bar', + '84': 'bar', + '85': 'bar', + '86': 'bar', + '87': 'bar', + '88': 'bar', + '89': 'bar', + '90': 'bar', + '91': 'bar', + '92': 'bar', + '93': 'bar', + '94': 'bar', + '95': 'bar', + '96': 'bar', + '97': 'bar', + '98': 'bar', + '99': 'bar', + '100': 'bar', + '101': 'bar', + '102': 'bar', + '103': 'bar', + '104': 'bar', + '105': 'bar', + '106': 'bar', + '107': 'bar', + '108': 'bar', + '109': 'bar', + '110': 'bar', + '111': 'bar', + '112': 'bar', + '113': 'bar', + '114': 'bar', + '115': 'bar', + '116': 'bar', + '117': 'bar', + '118': 'bar', + '119': 'bar', + '120': 'bar', + '121': 'bar', + '122': 'bar', + '123': 'bar', + '124': 'bar', + '125': 'bar', + '126': 'bar', + '127': 'bar', + '128': 'bar', + '129': 'bar', + '130': 'bar', + '131': 'bar', + '132': 'bar', + '133': 'bar', + '134': 'bar', + '135': 'bar', + '136': 'bar', + '137': 'bar', + '138': 'bar', + '139': 'bar', + }, + type: 'custom', + include_sample_schema: true, + }, + testing_criteria: [ + { + input: [{ content: 'content', role: 'role' }], + labels: ['string'], + model: 'model', + name: 'name', + passing_labels: ['string'], + type: 'label_model', + }, + ], + metadata: { foo: 'string' }, + name: 'name', + share_with_openai: true, + }); + }); + + test('retrieve', async () => { + const responsePromise = client.evals.retrieve('eval_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + 
expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('update', async () => { + const responsePromise = client.evals.update('eval_id', {}); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list', async () => { + const responsePromise = client.evals.list(); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.evals.list( + { after: 'after', limit: 0, order: 'asc', order_by: 'created_at' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('delete', async () => { + const responsePromise = client.evals.delete('eval_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); +}); diff --git a/tests/api-resources/evals/runs/output-items.test.ts b/tests/api-resources/evals/runs/output-items.test.ts new file mode 100644 index 000000000..e173b2a28 --- /dev/null +++ b/tests/api-resources/evals/runs/output-items.test.ts @@ -0,0 +1,52 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010', +}); + +describe('resource outputItems', () => { + test('retrieve: only required params', async () => { + const responsePromise = client.evals.runs.outputItems.retrieve('output_item_id', { + eval_id: 'eval_id', + run_id: 'run_id', + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: required and optional params', async () => { + const response = await client.evals.runs.outputItems.retrieve('output_item_id', { + eval_id: 'eval_id', + run_id: 'run_id', + }); + }); + + test('list: only required params', async () => { + const responsePromise = client.evals.runs.outputItems.list('run_id', { eval_id: 'eval_id' }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: required and optional params', async () => { + const response = await client.evals.runs.outputItems.list('run_id', { + eval_id: 'eval_id', + after: 'after', + limit: 0, + order: 'asc', + status: 'fail', + }); + }); +}); diff --git a/tests/api-resources/evals/runs/runs.test.ts b/tests/api-resources/evals/runs/runs.test.ts new file mode 100644 index 000000000..d17cd2a9a --- /dev/null +++ b/tests/api-resources/evals/runs/runs.test.ts @@ -0,0 +1,101 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010', +}); + +describe('resource runs', () => { + test('create: only required params', async () => { + const responsePromise = client.evals.runs.create('eval_id', { + data_source: { source: { content: [{ item: { foo: 'bar' } }], type: 'file_content' }, type: 'jsonl' }, + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.evals.runs.create('eval_id', { + data_source: { + source: { content: [{ item: { foo: 'bar' }, sample: { foo: 'bar' } }], type: 'file_content' }, + type: 'jsonl', + }, + metadata: { foo: 'string' }, + name: 'name', + }); + }); + + test('retrieve: only required params', async () => { + const responsePromise = client.evals.runs.retrieve('run_id', { eval_id: 'eval_id' }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: required and optional params', async () => { + const response = await client.evals.runs.retrieve('run_id', { eval_id: 'eval_id' }); + }); + + test('list', async () => { + const responsePromise = client.evals.runs.list('eval_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.evals.runs.list( + 'eval_id', + { after: 'after', limit: 0, order: 'asc', status: 'queued' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('delete: only required params', async () => { + const responsePromise = client.evals.runs.delete('run_id', { eval_id: 'eval_id' }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('delete: required and optional params', async () => { + const response = await client.evals.runs.delete('run_id', { eval_id: 'eval_id' }); + }); + + test('cancel: only required params', async () => { + const responsePromise = client.evals.runs.cancel('run_id', { eval_id: 'eval_id' }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = 
await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('cancel: required and optional params', async () => { + const response = await client.evals.runs.cancel('run_id', { eval_id: 'eval_id' }); + }); +}); diff --git a/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts b/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts new file mode 100644 index 000000000..1e0be0537 --- /dev/null +++ b/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts @@ -0,0 +1,66 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', +}); + +describe('resource permissions', () => { + test('create: only required params', async () => { + const responsePromise = client.fineTuning.checkpoints.permissions.create( + 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', + { project_ids: ['string'] }, + ); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.fineTuning.checkpoints.permissions.create( + 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', + { project_ids: ['string'] }, + ); + }); + + test('retrieve', async () => { + const responsePromise = client.fineTuning.checkpoints.permissions.retrieve('ft-AF1WoRqd3aJAHsqc9NY7iL8F'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.fineTuning.checkpoints.permissions.retrieve( + 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', + { after: 'after', limit: 0, order: 'ascending', project_id: 'project_id' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('delete', async () => { + const responsePromise = client.fineTuning.checkpoints.permissions.delete( + 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', + ); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); +}); From b1c696998f3e2e4c166744ead380481386804e9a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 8 Apr 2025 19:44:34 +0000 Subject: [PATCH 45/73] chore(internal): fix examples --- .stats.yml | 4 +- .../beta/threads/runs/runs.test.ts | 2 +- 
.../beta/threads/threads.test.ts | 2 +- tests/api-resources/evals/evals.test.ts | 293 +----------------- tests/api-resources/images.test.ts | 6 +- tests/api-resources/moderations.test.ts | 5 +- 6 files changed, 10 insertions(+), 302 deletions(-) diff --git a/.stats.yml b/.stats.yml index ebe07c137..4a82ee242 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 97 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-472fe3036ea745365257fe870c0330917fb3153705c2826f49873cd631319b0a.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-32de3bc513663c5fac922c49be41c222b6ee8c0b841d8966bcdfa489d441daa3.yml openapi_spec_hash: ea86343b5e9858a74e85da8ab2c532f6 -config_hash: ef19d36c307306f14f2e1cd5c834a151 +config_hash: d6c61213488683418adb860a9ee1501b diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts index 118a4f324..becea1425 100644 --- a/tests/api-resources/beta/threads/runs/runs.test.ts +++ b/tests/api-resources/beta/threads/runs/runs.test.ts @@ -36,7 +36,7 @@ describe('resource runs', () => { max_completion_tokens: 256, max_prompt_tokens: 256, metadata: { foo: 'string' }, - model: 'gpt-4o', + model: 'string', parallel_tool_calls: true, reasoning_effort: 'low', response_format: 'auto', diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts index 532bacb7c..342e673b3 100644 --- a/tests/api-resources/beta/threads/threads.test.ts +++ b/tests/api-resources/beta/threads/threads.test.ts @@ -99,7 +99,7 @@ describe('resource threads', () => { max_completion_tokens: 256, max_prompt_tokens: 256, metadata: { foo: 'string' }, - model: 'gpt-4o', + model: 'string', parallel_tool_calls: true, response_format: 'auto', stream: false, diff --git a/tests/api-resources/evals/evals.test.ts b/tests/api-resources/evals/evals.test.ts index f8efe2713..7d896e55f 100644 --- a/tests/api-resources/evals/evals.test.ts +++ b/tests/api-resources/evals/evals.test.ts @@ -10,151 +10,7 @@ const client = new OpenAI({ describe('resource evals', () => { test('create: only required params', async () => { const responsePromise = client.evals.create({ - data_source_config: { - item_schema: { - '0': 'bar', - '1': 'bar', - '2': 'bar', - '3': 'bar', - '4': 'bar', - '5': 'bar', - '6': 'bar', - '7': 'bar', - '8': 'bar', - '9': 'bar', - '10': 'bar', - '11': 'bar', - '12': 'bar', - '13': 'bar', - '14': 'bar', - '15': 'bar', - '16': 'bar', - '17': 'bar', - '18': 'bar', - '19': 'bar', - '20': 'bar', - '21': 'bar', - '22': 'bar', - '23': 'bar', - '24': 'bar', - '25': 'bar', - '26': 'bar', - '27': 'bar', - '28': 'bar', - '29': 'bar', - '30': 'bar', - '31': 'bar', - '32': 'bar', - '33': 'bar', - '34': 'bar', - '35': 'bar', - '36': 'bar', - '37': 'bar', - '38': 'bar', - '39': 'bar', - '40': 'bar', - '41': 'bar', - '42': 'bar', - '43': 'bar', - '44': 'bar', - '45': 'bar', - '46': 'bar', - '47': 'bar', - '48': 'bar', - '49': 'bar', - '50': 'bar', - '51': 'bar', - '52': 'bar', - '53': 'bar', - '54': 'bar', - '55': 'bar', - '56': 'bar', - '57': 'bar', - '58': 'bar', - '59': 'bar', - '60': 'bar', - '61': 'bar', - '62': 'bar', - '63': 'bar', - '64': 'bar', - '65': 'bar', - '66': 'bar', - '67': 'bar', - '68': 'bar', - '69': 'bar', - '70': 'bar', - '71': 'bar', - '72': 'bar', - '73': 'bar', - '74': 'bar', - '75': 'bar', - '76': 'bar', - '77': 'bar', - '78': 'bar', - '79': 'bar', - '80': 'bar', - '81': 'bar', - '82': 'bar', - 
'83': 'bar', - '84': 'bar', - '85': 'bar', - '86': 'bar', - '87': 'bar', - '88': 'bar', - '89': 'bar', - '90': 'bar', - '91': 'bar', - '92': 'bar', - '93': 'bar', - '94': 'bar', - '95': 'bar', - '96': 'bar', - '97': 'bar', - '98': 'bar', - '99': 'bar', - '100': 'bar', - '101': 'bar', - '102': 'bar', - '103': 'bar', - '104': 'bar', - '105': 'bar', - '106': 'bar', - '107': 'bar', - '108': 'bar', - '109': 'bar', - '110': 'bar', - '111': 'bar', - '112': 'bar', - '113': 'bar', - '114': 'bar', - '115': 'bar', - '116': 'bar', - '117': 'bar', - '118': 'bar', - '119': 'bar', - '120': 'bar', - '121': 'bar', - '122': 'bar', - '123': 'bar', - '124': 'bar', - '125': 'bar', - '126': 'bar', - '127': 'bar', - '128': 'bar', - '129': 'bar', - '130': 'bar', - '131': 'bar', - '132': 'bar', - '133': 'bar', - '134': 'bar', - '135': 'bar', - '136': 'bar', - '137': 'bar', - '138': 'bar', - '139': 'bar', - }, - type: 'custom', - }, + data_source_config: { item_schema: { foo: 'bar' }, type: 'custom' }, testing_criteria: [ { input: [{ content: 'content', role: 'role' }], @@ -177,152 +33,7 @@ describe('resource evals', () => { test('create: required and optional params', async () => { const response = await client.evals.create({ - data_source_config: { - item_schema: { - '0': 'bar', - '1': 'bar', - '2': 'bar', - '3': 'bar', - '4': 'bar', - '5': 'bar', - '6': 'bar', - '7': 'bar', - '8': 'bar', - '9': 'bar', - '10': 'bar', - '11': 'bar', - '12': 'bar', - '13': 'bar', - '14': 'bar', - '15': 'bar', - '16': 'bar', - '17': 'bar', - '18': 'bar', - '19': 'bar', - '20': 'bar', - '21': 'bar', - '22': 'bar', - '23': 'bar', - '24': 'bar', - '25': 'bar', - '26': 'bar', - '27': 'bar', - '28': 'bar', - '29': 'bar', - '30': 'bar', - '31': 'bar', - '32': 'bar', - '33': 'bar', - '34': 'bar', - '35': 'bar', - '36': 'bar', - '37': 'bar', - '38': 'bar', - '39': 'bar', - '40': 'bar', - '41': 'bar', - '42': 'bar', - '43': 'bar', - '44': 'bar', - '45': 'bar', - '46': 'bar', - '47': 'bar', - '48': 'bar', - '49': 'bar', - '50': 'bar', - '51': 'bar', - '52': 'bar', - '53': 'bar', - '54': 'bar', - '55': 'bar', - '56': 'bar', - '57': 'bar', - '58': 'bar', - '59': 'bar', - '60': 'bar', - '61': 'bar', - '62': 'bar', - '63': 'bar', - '64': 'bar', - '65': 'bar', - '66': 'bar', - '67': 'bar', - '68': 'bar', - '69': 'bar', - '70': 'bar', - '71': 'bar', - '72': 'bar', - '73': 'bar', - '74': 'bar', - '75': 'bar', - '76': 'bar', - '77': 'bar', - '78': 'bar', - '79': 'bar', - '80': 'bar', - '81': 'bar', - '82': 'bar', - '83': 'bar', - '84': 'bar', - '85': 'bar', - '86': 'bar', - '87': 'bar', - '88': 'bar', - '89': 'bar', - '90': 'bar', - '91': 'bar', - '92': 'bar', - '93': 'bar', - '94': 'bar', - '95': 'bar', - '96': 'bar', - '97': 'bar', - '98': 'bar', - '99': 'bar', - '100': 'bar', - '101': 'bar', - '102': 'bar', - '103': 'bar', - '104': 'bar', - '105': 'bar', - '106': 'bar', - '107': 'bar', - '108': 'bar', - '109': 'bar', - '110': 'bar', - '111': 'bar', - '112': 'bar', - '113': 'bar', - '114': 'bar', - '115': 'bar', - '116': 'bar', - '117': 'bar', - '118': 'bar', - '119': 'bar', - '120': 'bar', - '121': 'bar', - '122': 'bar', - '123': 'bar', - '124': 'bar', - '125': 'bar', - '126': 'bar', - '127': 'bar', - '128': 'bar', - '129': 'bar', - '130': 'bar', - '131': 'bar', - '132': 'bar', - '133': 'bar', - '134': 'bar', - '135': 'bar', - '136': 'bar', - '137': 'bar', - '138': 'bar', - '139': 'bar', - }, - type: 'custom', - include_sample_schema: true, - }, + data_source_config: { item_schema: { foo: 'bar' }, type: 'custom', include_sample_schema: true }, 
testing_criteria: [ { input: [{ content: 'content', role: 'role' }], diff --git a/tests/api-resources/images.test.ts b/tests/api-resources/images.test.ts index fab30e32d..67b608d7e 100644 --- a/tests/api-resources/images.test.ts +++ b/tests/api-resources/images.test.ts @@ -24,7 +24,7 @@ describe('resource images', () => { test('createVariation: required and optional params', async () => { const response = await client.images.createVariation({ image: await toFile(Buffer.from('# my file contents'), 'README.md'), - model: 'dall-e-2', + model: 'string', n: 1, response_format: 'url', size: '1024x1024', @@ -51,7 +51,7 @@ describe('resource images', () => { image: await toFile(Buffer.from('# my file contents'), 'README.md'), prompt: 'A cute baby sea otter wearing a beret', mask: await toFile(Buffer.from('# my file contents'), 'README.md'), - model: 'dall-e-2', + model: 'string', n: 1, response_format: 'url', size: '1024x1024', @@ -73,7 +73,7 @@ describe('resource images', () => { test('generate: required and optional params', async () => { const response = await client.images.generate({ prompt: 'A cute baby sea otter', - model: 'dall-e-3', + model: 'string', n: 1, quality: 'standard', response_format: 'url', diff --git a/tests/api-resources/moderations.test.ts b/tests/api-resources/moderations.test.ts index d50175b2d..c3dce6843 100644 --- a/tests/api-resources/moderations.test.ts +++ b/tests/api-resources/moderations.test.ts @@ -20,9 +20,6 @@ describe('resource moderations', () => { }); test('create: required and optional params', async () => { - const response = await client.moderations.create({ - input: 'I want to kill them.', - model: 'omni-moderation-2024-09-26', - }); + const response = await client.moderations.create({ input: 'I want to kill them.', model: 'string' }); }); }); From d8a20e49beb481862964b6658045bab8be19d39d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 8 Apr 2025 19:46:27 +0000 Subject: [PATCH 46/73] chore(internal): skip broken test --- .stats.yml | 2 +- .../api-resources/fine-tuning/checkpoints/permissions.test.ts | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.stats.yml b/.stats.yml index 4a82ee242..c39ce1186 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 97 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-32de3bc513663c5fac922c49be41c222b6ee8c0b841d8966bcdfa489d441daa3.yml openapi_spec_hash: ea86343b5e9858a74e85da8ab2c532f6 -config_hash: d6c61213488683418adb860a9ee1501b +config_hash: 43dc8df20ffec9d1503f91866cb2b7d9 diff --git a/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts b/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts index 1e0be0537..ce632add1 100644 --- a/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts +++ b/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts @@ -51,7 +51,8 @@ describe('resource permissions', () => { ).rejects.toThrow(OpenAI.NotFoundError); }); - test('delete', async () => { + // OpenAPI spec is slightly incorrect + test.skip('delete', async () => { const responsePromise = client.fineTuning.checkpoints.permissions.delete( 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', ); From 3fd6eb5966bb7a9c495be4561c1e02ad7f0238af Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 8 Apr 2025 20:05:16 +0000 Subject: [PATCH 47/73] feat(api): manual updates --- 
.stats.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.stats.yml b/.stats.yml index c39ce1186..d4a4370a7 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 97 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-32de3bc513663c5fac922c49be41c222b6ee8c0b841d8966bcdfa489d441daa3.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-44b20fa9d24544217fe6bb48852037537030a1ad29b202936425110744fe66fb.yml openapi_spec_hash: ea86343b5e9858a74e85da8ab2c532f6 -config_hash: 43dc8df20ffec9d1503f91866cb2b7d9 +config_hash: 69e3afd56ccb0f0f822a7a9dc130fc99 From c794f0d56343d5dd2d1cd29687a40f6c0a3ad89d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 9 Apr 2025 09:26:33 +0000 Subject: [PATCH 48/73] chore: workaround build errors --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index d4a4370a7..9d8d07c6a 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 97 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-44b20fa9d24544217fe6bb48852037537030a1ad29b202936425110744fe66fb.yml openapi_spec_hash: ea86343b5e9858a74e85da8ab2c532f6 -config_hash: 69e3afd56ccb0f0f822a7a9dc130fc99 +config_hash: 5ea32de61ff42fcf5e66cff8d9e247ea From ddff8f37e40dd7169dd882e1f2c4b0293d7b3caf Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 9 Apr 2025 16:31:41 +0000 Subject: [PATCH 49/73] chore(internal): upload builds and expand CI branch coverage --- .github/workflows/ci.yml | 35 +++++++++++++++++++++++--------- scripts/utils/upload-artifact.sh | 25 +++++++++++++++++++++++ 2 files changed, 50 insertions(+), 10 deletions(-) create mode 100755 scripts/utils/upload-artifact.sh diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6d253070c..7f3d44157 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,19 +1,18 @@ name: CI on: push: - branches: - - master - pull_request: - branches: - - master - - next - - alpha + branches-ignore: + - 'generated' + - 'codegen/**' + - 'integrated/**' + - 'preview-head/**' + - 'preview-base/**' + - 'preview/**' jobs: lint: name: lint runs-on: ubuntu-latest - steps: - uses: actions/checkout@v4 @@ -31,7 +30,9 @@ jobs: build: name: build runs-on: ubuntu-latest - + permissions: + contents: read + id-token: write steps: - uses: actions/checkout@v4 @@ -45,10 +46,24 @@ jobs: - name: Check build run: ./scripts/build + + - name: Get GitHub OIDC Token + if: github.repository == 'stainless-sdks/openai-typescript' + id: github-oidc + uses: actions/github-script@v6 + with: + script: core.setOutput('github_token', await core.getIDToken()); + + - name: Upload tarball + if: github.repository == 'stainless-sdks/openai-typescript' + env: + URL: https://pkg.stainless.com/s + AUTH: ${{ steps.github-oidc.outputs.github_token }} + SHA: ${{ github.sha }} + run: ./scripts/utils/upload-artifact.sh test: name: test runs-on: ubuntu-latest - steps: - uses: actions/checkout@v4 diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh new file mode 100755 index 000000000..0ee2e688a --- /dev/null +++ b/scripts/utils/upload-artifact.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -exuo pipefail + +RESPONSE=$(curl -X POST "$URL" \ + -H "Authorization: Bearer $AUTH" \ + -H 
"Content-Type: application/json") + +SIGNED_URL=$(echo "$RESPONSE" | jq -r '.url') + +if [[ "$SIGNED_URL" == "null" ]]; then + echo -e "\033[31mFailed to get signed URL.\033[0m" + exit 1 +fi + +UPLOAD_RESPONSE=$(tar -cz dist | curl -v -X PUT \ + -H "Content-Type: application/gzip" \ + --data-binary @- "$SIGNED_URL" 2>&1) + +if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then + echo -e "\033[32mUploaded build to Stainless storage.\033[0m" + echo -e "\033[32mInstallation: npm install 'https://pkg.stainless.com/s/openai-typescript/$SHA'\033[0m" +else + echo -e "\033[31mFailed to upload artifact.\033[0m" + exit 1 +fi From 0b47a849152b46dbd8b77a6ec685bcd0e942b5ec Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 9 Apr 2025 18:02:16 +0000 Subject: [PATCH 50/73] chore(internal): improve node 18 shims --- package.json | 4 ++ scripts/build | 2 - src/internal/shims/crypto.node.d.mts | 1 - src/internal/shims/crypto.node.d.ts | 10 ---- src/internal/shims/crypto.node.js | 11 ---- src/internal/shims/crypto.node.mjs | 2 - src/internal/shims/crypto.ts | 18 ++++++ src/internal/shims/file.node.d.mts | 1 - src/internal/shims/file.node.d.ts | 20 ------- src/internal/shims/file.node.js | 11 ---- src/internal/shims/file.node.mjs | 2 - src/internal/shims/file.ts | 32 +++++++++++ src/internal/shims/getBuiltinModule.ts | 64 ++++++++++++++++++++++ src/internal/shims/nullGetBuiltinModule.ts | 1 + src/internal/to-file.ts | 4 +- src/internal/uploads.ts | 9 +-- src/internal/utils/uuid.ts | 16 ++++-- tests/uploads.test.ts | 2 +- 18 files changed, 136 insertions(+), 74 deletions(-) delete mode 100644 src/internal/shims/crypto.node.d.mts delete mode 100644 src/internal/shims/crypto.node.d.ts delete mode 100644 src/internal/shims/crypto.node.js delete mode 100644 src/internal/shims/crypto.node.mjs create mode 100644 src/internal/shims/crypto.ts delete mode 100644 src/internal/shims/file.node.d.mts delete mode 100644 src/internal/shims/file.node.d.ts delete mode 100644 src/internal/shims/file.node.js delete mode 100644 src/internal/shims/file.node.mjs create mode 100644 src/internal/shims/file.ts create mode 100644 src/internal/shims/getBuiltinModule.ts create mode 100644 src/internal/shims/nullGetBuiltinModule.ts diff --git a/package.json b/package.json index 15e5c4143..1361fd047 100644 --- a/package.json +++ b/package.json @@ -54,6 +54,10 @@ "resolutions": { "synckit": "0.8.8" }, + "browser": { + "./internal/shims/getBuiltinModule.mjs": "./internal/shims/nullGetBuiltinModule.mjs", + "./internal/shims/getBuiltinModule.js": "./internal/shims/nullGetBuiltinModule.js" + }, "imports": { "openai": ".", "openai/*": "./src/*" diff --git a/scripts/build b/scripts/build index e9ebac741..5cec10804 100755 --- a/scripts/build +++ b/scripts/build @@ -35,8 +35,6 @@ node scripts/utils/fix-index-exports.cjs cp tsconfig.dist-src.json dist/src/tsconfig.json cp src/internal/shim-types.d.ts dist/internal/shim-types.d.ts cp src/internal/shim-types.d.ts dist/internal/shim-types.d.mts -mkdir -p dist/internal/shims -cp src/internal/shims/*.{mjs,js,d.ts,d.mts} dist/internal/shims node scripts/utils/postprocess-files.cjs diff --git a/src/internal/shims/crypto.node.d.mts b/src/internal/shims/crypto.node.d.mts deleted file mode 100644 index 5cc196301..000000000 --- a/src/internal/shims/crypto.node.d.mts +++ /dev/null @@ -1 +0,0 @@ -export { crypto } from './crypto.node.js'; diff --git a/src/internal/shims/crypto.node.d.ts b/src/internal/shims/crypto.node.d.ts deleted file 
mode 100644 index dc7caac8d..000000000 --- a/src/internal/shims/crypto.node.d.ts +++ /dev/null @@ -1,10 +0,0 @@ -export declare const crypto: { - /** [MDN Reference](https://developer.mozilla.org/docs/Web/API/Crypto/getRandomValues) */ - getRandomValues(array: T): T; - /** - * Available only in secure contexts. - * - * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Crypto/randomUUID) - */ - randomUUID?: () => string; -}; diff --git a/src/internal/shims/crypto.node.js b/src/internal/shims/crypto.node.js deleted file mode 100644 index 83062a3d7..000000000 --- a/src/internal/shims/crypto.node.js +++ /dev/null @@ -1,11 +0,0 @@ -if (typeof require !== 'undefined') { - if (globalThis.crypto) { - exports.crypto = globalThis.crypto; - } else { - try { - // Use [require][0](...) and not require(...) so bundlers don't try to bundle the - // crypto module. - exports.crypto = [require][0]('node:crypto').webcrypto; - } catch (e) {} - } -} diff --git a/src/internal/shims/crypto.node.mjs b/src/internal/shims/crypto.node.mjs deleted file mode 100644 index 24c6f3b9e..000000000 --- a/src/internal/shims/crypto.node.mjs +++ /dev/null @@ -1,2 +0,0 @@ -import * as mod from './crypto.node.js'; -export const crypto = globalThis.crypto || mod.crypto; diff --git a/src/internal/shims/crypto.ts b/src/internal/shims/crypto.ts new file mode 100644 index 000000000..905f81c6f --- /dev/null +++ b/src/internal/shims/crypto.ts @@ -0,0 +1,18 @@ +import { getBuiltinModule } from './getBuiltinModule'; + +type Crypto = { + /** [MDN Reference](https://developer.mozilla.org/docs/Web/API/Crypto/getRandomValues) */ + getRandomValues(array: T): T; + /** + * Available only in secure contexts. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Crypto/randomUUID) + */ + randomUUID?: () => string; +}; +export let getCrypto: () => Crypto | undefined = function lazyGetCrypto() { + if (getCrypto !== lazyGetCrypto) return getCrypto(); + const crypto: Crypto = (globalThis as any).crypto || (getBuiltinModule?.('node:crypto') as any)?.webcrypto; + getCrypto = () => crypto; + return crypto; +}; diff --git a/src/internal/shims/file.node.d.mts b/src/internal/shims/file.node.d.mts deleted file mode 100644 index 38cc9ff7a..000000000 --- a/src/internal/shims/file.node.d.mts +++ /dev/null @@ -1 +0,0 @@ -export { File } from './file.node.js'; diff --git a/src/internal/shims/file.node.d.ts b/src/internal/shims/file.node.d.ts deleted file mode 100644 index 9dc6b2fcc..000000000 --- a/src/internal/shims/file.node.d.ts +++ /dev/null @@ -1,20 +0,0 @@ -// The infer is to make TS show it as a nice union type, -// instead of literally `ConstructorParameters[0]` -type FallbackBlobSource = ConstructorParameters[0] extends infer T ? T : never; -/** - * A [`File`](https://developer.mozilla.org/en-US/docs/Web/API/File) provides information about files. - */ -declare class FallbackFile extends Blob { - constructor(sources: FallbackBlobSource, fileName: string, options?: any); - /** - * The name of the `File`. - */ - readonly name: string; - /** - * The last modified date of the `File`. - */ - readonly lastModified: number; -} -export type File = InstanceType; -export const File: typeof globalThis extends { File: infer fileConstructor } ? 
fileConstructor -: typeof FallbackFile; diff --git a/src/internal/shims/file.node.js b/src/internal/shims/file.node.js deleted file mode 100644 index 3f8c2ed68..000000000 --- a/src/internal/shims/file.node.js +++ /dev/null @@ -1,11 +0,0 @@ -if (typeof require !== 'undefined') { - if (globalThis.File) { - exports.File = globalThis.File; - } else { - try { - // Use [require][0](...) and not require(...) so bundlers don't try to bundle the - // buffer module. - exports.File = [require][0]('node:buffer').File; - } catch (e) {} - } -} diff --git a/src/internal/shims/file.node.mjs b/src/internal/shims/file.node.mjs deleted file mode 100644 index 1f103f5d3..000000000 --- a/src/internal/shims/file.node.mjs +++ /dev/null @@ -1,2 +0,0 @@ -import * as mod from './file.node.js'; -export const File = globalThis.File || mod.File; diff --git a/src/internal/shims/file.ts b/src/internal/shims/file.ts new file mode 100644 index 000000000..d5dc82091 --- /dev/null +++ b/src/internal/shims/file.ts @@ -0,0 +1,32 @@ +import { getBuiltinModule } from './getBuiltinModule'; + +export let getFile = function lazyGetFile(): FileConstructor { + if (getFile !== lazyGetFile) return getFile(); + // We can drop getBuiltinModule once we no longer support Node < 20.0.0 + const File = (globalThis as any).File ?? (getBuiltinModule?.('node:buffer') as any)?.File; + if (!File) throw new Error('`File` is not defined as a global, which is required for file uploads.'); + getFile = () => File; + return File; +}; + +type FileConstructor = + typeof globalThis extends { File: infer fileConstructor } ? fileConstructor : typeof FallbackFile; +export type File = InstanceType; + +// The infer is to make TS show it as a nice union type, +// instead of literally `ConstructorParameters[0]` +type FallbackBlobSource = ConstructorParameters[0] extends infer T ? T : never; +/** + * A [`File`](https://developer.mozilla.org/en-US/docs/Web/API/File) provides information about files. + */ +declare class FallbackFile extends Blob { + constructor(sources: FallbackBlobSource, fileName: string, options?: any); + /** + * The name of the `File`. + */ + readonly name: string; + /** + * The last modified date of the `File`. + */ + readonly lastModified: number; +} diff --git a/src/internal/shims/getBuiltinModule.ts b/src/internal/shims/getBuiltinModule.ts new file mode 100644 index 000000000..a202f2097 --- /dev/null +++ b/src/internal/shims/getBuiltinModule.ts @@ -0,0 +1,64 @@ +/** + * Load a Node built-in module. ID may or may not be prefixed by `node:` and + * will be normalized. If we used static imports then our bundle size would be bloated by + * injected polyfills, and if we used dynamic require then in addition to bundlers logging warnings, + * our code would not work when bundled to ESM and run in Node 18. + * @param {string} id ID of the built-in to be loaded. + * @returns {object|undefined} exports of the built-in. Undefined if the built-in + * does not exist. 
+ */ +export let getBuiltinModule: null | ((id: string) => object | undefined) = function getBuiltinModuleLazy( + id: string, +): object | undefined { + try { + if (getBuiltinModule !== getBuiltinModuleLazy) return getBuiltinModule!(id); + if ((process as any).getBuiltinModule) { + getBuiltinModule = (process as any).getBuiltinModule; + } else { + /* Fallback implementation for Node 18 */ + function createFallbackGetBuiltinModule(BuiltinModule: any) { + return function getBuiltinModule(id: string): object | undefined { + id = BuiltinModule.normalizeRequirableId(String(id)); + if (!BuiltinModule.canBeRequiredByUsers(id)) { + return; + } + const mod = BuiltinModule.map.get(id); + mod.compileForPublicLoader(); + return mod.exports; + }; + } + const magicKey = Math.random() + ''; + let module: { BuiltinModule: any } | undefined; + try { + const kClone = Object.getOwnPropertySymbols(Blob.prototype).find( + (e) => e.description?.includes('clone'), + )!; + Object.defineProperty(Object.prototype, magicKey, { + get() { + module = this; + throw null; + }, + configurable: true, + }); + structuredClone( + new (class extends Blob { + [kClone]() { + return { + deserializeInfo: 'internal/bootstrap/realm:' + magicKey, + }; + } + })([]), + ); + } catch {} + delete (Object.prototype as any)[magicKey]; + if (module) { + getBuiltinModule = createFallbackGetBuiltinModule(module.BuiltinModule); + } else { + getBuiltinModule = () => undefined; + } + } + return getBuiltinModule!(id); + } catch { + return undefined; + } +}; diff --git a/src/internal/shims/nullGetBuiltinModule.ts b/src/internal/shims/nullGetBuiltinModule.ts new file mode 100644 index 000000000..8bd2280d3 --- /dev/null +++ b/src/internal/shims/nullGetBuiltinModule.ts @@ -0,0 +1 @@ +export const getBuiltinModule = null; diff --git a/src/internal/to-file.ts b/src/internal/to-file.ts index 69b76d3a6..e92ac6944 100644 --- a/src/internal/to-file.ts +++ b/src/internal/to-file.ts @@ -1,4 +1,4 @@ -import { File } from './shims/file.node.js'; +import { type File, getFile } from './shims/file'; import { BlobPart, getName, makeFile, isAsyncIterable } from './uploads'; import type { FilePropertyBag } from './builtin-types'; @@ -90,7 +90,7 @@ export async function toFile( // If we've been given a `File` we don't need to do anything if (isFileLike(value)) { - if (File && value instanceof File) { + if (value instanceof getFile()) { return value; } return makeFile([await value.arrayBuffer()], value.name); diff --git a/src/internal/uploads.ts b/src/internal/uploads.ts index 2c286497c..116a5ab4e 100644 --- a/src/internal/uploads.ts +++ b/src/internal/uploads.ts @@ -1,7 +1,7 @@ import { type RequestOptions } from './request-options'; import type { FilePropertyBag, Fetch } from './builtin-types'; import type { OpenAI } from '../client'; -import { File } from './shims/file.node.js'; +import { type File, getFile } from './shims/file'; import { ReadableStreamFrom } from './shims'; export type BlobPart = string | ArrayBuffer | ArrayBufferView | Blob | DataView; @@ -32,10 +32,7 @@ export function makeFile( fileName: string | undefined, options?: FilePropertyBag, ): File { - if (typeof File === 'undefined') { - throw new Error('`File` is not defined as a global which is required for file uploads'); - } - + const File = getFile(); return new File(fileBits as any, fileName ?? 
'unknown_file', options); } @@ -129,7 +126,7 @@ export const createForm = async >( // We check for Blob not File because Bun.File doesn't inherit from File, // but they both inherit from Blob and have a `name` property at runtime. const isNamedBlob = (value: object) => - (File && value instanceof File) || (value instanceof Blob && 'name' in value); + value instanceof getFile() || (value instanceof Blob && 'name' in value); const isUploadable = (value: unknown) => typeof value === 'object' && diff --git a/src/internal/utils/uuid.ts b/src/internal/utils/uuid.ts index 1349c42c3..5a262c6d3 100644 --- a/src/internal/utils/uuid.ts +++ b/src/internal/utils/uuid.ts @@ -1,13 +1,19 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { crypto } from '../shims/crypto.node.js'; +import { getCrypto } from '../shims/crypto'; /** * https://stackoverflow.com/a/2117523 */ -export function uuid4() { - if (crypto.randomUUID) return crypto.randomUUID(); +export let uuid4 = function () { + const crypto = getCrypto(); + if (crypto?.randomUUID) { + uuid4 = crypto.randomUUID.bind(crypto); + return crypto.randomUUID(); + } + const u8 = new Uint8Array(1); + const randomByte = crypto ? () => crypto.getRandomValues(u8)[0]! : () => (Math.random() * 0xff) & 0xff; return '10000000-1000-4000-8000-100000000000'.replace(/[018]/g, (c) => - (+c ^ (crypto.getRandomValues(new Uint8Array(1))[0]! & (15 >> (+c / 4)))).toString(16), + (+c ^ (randomByte() & (15 >> (+c / 4)))).toString(16), ); -} +}; diff --git a/tests/uploads.test.ts b/tests/uploads.test.ts index 902a788a4..d66ed412f 100644 --- a/tests/uploads.test.ts +++ b/tests/uploads.test.ts @@ -101,7 +101,7 @@ describe('missing File error message', () => { await expect( uploads.toFile(mockResponse({ url: 'https://example.com/my/audio.mp3' })), ).rejects.toMatchInlineSnapshot( - `[Error: \`File\` is not defined as a global which is required for file uploads]`, + `[Error: \`File\` is not defined as a global, which is required for file uploads.]`, ); }); }); From f86298181a858e184e4b51400ba2e652d6d4c105 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 9 Apr 2025 21:08:41 +0000 Subject: [PATCH 51/73] chore(internal): reduce CI branch coverage --- .github/workflows/ci.yml | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7f3d44157..6bb27f06c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,13 +1,12 @@ name: CI on: push: - branches-ignore: - - 'generated' - - 'codegen/**' - - 'integrated/**' - - 'preview-head/**' - - 'preview-base/**' - - 'preview/**' + branches: + - main + pull_request: + branches: + - main + - next jobs: lint: From d10fe3110fca7f7b4e364283de659466434a8842 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 10 Apr 2025 17:56:21 +0000 Subject: [PATCH 52/73] fix(internal): fix file uploads in node 18 jest --- src/internal/shims/getBuiltinModule.ts | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/internal/shims/getBuiltinModule.ts b/src/internal/shims/getBuiltinModule.ts index a202f2097..64daa2ce0 100644 --- a/src/internal/shims/getBuiltinModule.ts +++ b/src/internal/shims/getBuiltinModule.ts @@ -29,11 +29,13 @@ export let getBuiltinModule: null | ((id: string) => object | undefined) = funct } const magicKey = Math.random() + ''; let module: { 
BuiltinModule: any } | undefined; + let ObjectPrototype: {} = Blob; + for (let next; (next = Reflect.getPrototypeOf(ObjectPrototype)); ObjectPrototype = next); try { const kClone = Object.getOwnPropertySymbols(Blob.prototype).find( (e) => e.description?.includes('clone'), )!; - Object.defineProperty(Object.prototype, magicKey, { + Object.defineProperty(ObjectPrototype, magicKey, { get() { module = this; throw null; @@ -50,7 +52,7 @@ export let getBuiltinModule: null | ((id: string) => object | undefined) = funct })([]), ); } catch {} - delete (Object.prototype as any)[magicKey]; + delete (ObjectPrototype as any)[magicKey]; if (module) { getBuiltinModule = createFallbackGetBuiltinModule(module.BuiltinModule); } else { From 8750a532ccf71839e1ed9f32919bf50239d01a5b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 14 Apr 2025 14:30:50 +0000 Subject: [PATCH 53/73] chore(client): minor internal fixes --- src/client.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/client.ts b/src/client.ts index 4469a5628..0d3887384 100644 --- a/src/client.ts +++ b/src/client.ts @@ -738,17 +738,17 @@ export class OpenAI { } buildRequest( - options: FinalRequestOptions, + inputOptions: FinalRequestOptions, { retryCount = 0 }: { retryCount?: number } = {}, ): { req: FinalizedRequestInit; url: string; timeout: number } { - options = { ...options }; + const options = { ...inputOptions }; const { method, path, query } = options; const url = this.buildURL(path!, query as Record); if ('timeout' in options) validatePositiveInteger('timeout', options.timeout); options.timeout = options.timeout ?? this.timeout; const { bodyHeaders, body } = this.buildBody({ options }); - const reqHeaders = this.buildHeaders({ options, method, bodyHeaders, retryCount }); + const reqHeaders = this.buildHeaders({ options: inputOptions, method, bodyHeaders, retryCount }); const req: FinalizedRequestInit = { method, From e254d9c33c30a4cd6871c78ac8e1340b27f97d53 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 14 Apr 2025 16:40:49 +0000 Subject: [PATCH 54/73] feat(api): adding gpt-4.1 family of model IDs --- .stats.yml | 4 ++-- src/resources/beta/assistants.ts | 6 ++++++ src/resources/shared.ts | 6 ++++++ 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/.stats.yml b/.stats.yml index 9d8d07c6a..b40485bd0 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 97 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-44b20fa9d24544217fe6bb48852037537030a1ad29b202936425110744fe66fb.yml -openapi_spec_hash: ea86343b5e9858a74e85da8ab2c532f6 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-a555f81249cb084f463dcefa4aba069f9341fdaf3dd6ac27d7f237fc90e8f488.yml +openapi_spec_hash: 8e590296cd1a54b9508510b0c7a2c45a config_hash: 5ea32de61ff42fcf5e66cff8d9e247ea diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 217782aa7..40cc82384 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -1327,6 +1327,12 @@ export interface AssistantUpdateParams { */ model?: | (string & {}) + | 'gpt-4.1' + | 'gpt-4.1-mini' + | 'gpt-4.1-nano' + | 'gpt-4.1-2025-04-14' + | 'gpt-4.1-mini-2025-04-14' + | 'gpt-4.1-nano-2025-04-14' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o1' diff --git a/src/resources/shared.ts 
b/src/resources/shared.ts index 3e8ded763..94ef50585 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -9,6 +9,12 @@ export type AllModels = | 'computer-use-preview-2025-03-11'; export type ChatModel = + | 'gpt-4.1' + | 'gpt-4.1-mini' + | 'gpt-4.1-nano' + | 'gpt-4.1-2025-04-14' + | 'gpt-4.1-mini-2025-04-14' + | 'gpt-4.1-nano-2025-04-14' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o1' From e717a3d7510e5b2ec70865b442e0887c2c928607 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 16 Apr 2025 16:42:26 +0000 Subject: [PATCH 55/73] feat(api): add o3 and o4-mini model IDs --- .stats.yml | 6 +- src/resources/chat/completions/completions.ts | 55 ++++++++++++++++--- src/resources/completions.ts | 2 + src/resources/responses/responses.ts | 46 +++++++++++++++- src/resources/shared.ts | 19 +++++-- .../api-resources/responses/responses.test.ts | 3 +- 6 files changed, 112 insertions(+), 19 deletions(-) diff --git a/.stats.yml b/.stats.yml index b40485bd0..848c5b5ad 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 97 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-a555f81249cb084f463dcefa4aba069f9341fdaf3dd6ac27d7f237fc90e8f488.yml -openapi_spec_hash: 8e590296cd1a54b9508510b0c7a2c45a -config_hash: 5ea32de61ff42fcf5e66cff8d9e247ea +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-5633633cc38734869cf7d993f7b549bb8e4d10e0ec45381ec2cd91507cd8eb8f.yml +openapi_spec_hash: c855121b2b2324b99499c9244c21d24d +config_hash: d20837393b73efdb19cd08e04c1cc9a1 diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts index 720e8a583..94b9108e5 100644 --- a/src/resources/chat/completions/completions.ts +++ b/src/resources/chat/completions/completions.ts @@ -129,9 +129,25 @@ export interface ChatCompletion { object: 'chat.completion'; /** - * The service tier used for processing the request. + * Specifies the latency tier to use for processing the request. This parameter is + * relevant for customers subscribed to the scale tier service: + * + * - If set to 'auto', and the Project is Scale tier enabled, the system will + * utilize scale tier credits until they are exhausted. + * - If set to 'auto', and the Project is not Scale tier enabled, the request will + * be processed using the default service tier with a lower uptime SLA and no + * latency guarentee. + * - If set to 'default', the request will be processed using the default service + * tier with a lower uptime SLA and no latency guarentee. + * - If set to 'flex', the request will be processed with the Flex Processing + * service tier. + * [Learn more](https://platform.openai.com/docs/guides/flex-processing). + * - When not set, the default behavior is 'auto'. + * + * When this parameter is set, the response body will include the `service_tier` + * utilized. */ - service_tier?: 'scale' | 'default' | null; + service_tier?: 'auto' | 'default' | 'flex' | null; /** * This fingerprint represents the backend configuration that the model runs with. @@ -308,11 +324,11 @@ export interface ChatCompletionAudioParam { * Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`, * or `pcm16`. */ - format: 'wav' | 'mp3' | 'flac' | 'opus' | 'pcm16'; + format: 'wav' | 'aac' | 'mp3' | 'flac' | 'opus' | 'pcm16'; /** * The voice the model uses to respond. 
Supported voices are `alloy`, `ash`, - * `ballad`, `coral`, `echo`, `sage`, and `shimmer`. + * `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, and `shimmer`. */ voice: | (string & {}) @@ -364,9 +380,25 @@ export interface ChatCompletionChunk { object: 'chat.completion.chunk'; /** - * The service tier used for processing the request. + * Specifies the latency tier to use for processing the request. This parameter is + * relevant for customers subscribed to the scale tier service: + * + * - If set to 'auto', and the Project is Scale tier enabled, the system will + * utilize scale tier credits until they are exhausted. + * - If set to 'auto', and the Project is not Scale tier enabled, the request will + * be processed using the default service tier with a lower uptime SLA and no + * latency guarentee. + * - If set to 'default', the request will be processed using the default service + * tier with a lower uptime SLA and no latency guarentee. + * - If set to 'flex', the request will be processed with the Flex Processing + * service tier. + * [Learn more](https://platform.openai.com/docs/guides/flex-processing). + * - When not set, the default behavior is 'auto'. + * + * When this parameter is set, the response body will include the `service_tier` + * utilized. */ - service_tier?: 'scale' | 'default' | null; + service_tier?: 'auto' | 'default' | 'flex' | null; /** * This fingerprint represents the backend configuration that the model runs with. @@ -1098,7 +1130,7 @@ export interface ChatCompletionCreateParamsBase { messages: Array; /** - * Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + * Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a * wide range of models with different capabilities, performance characteristics, * and price points. Refer to the * [model guide](https://platform.openai.com/docs/models) to browse and compare @@ -1178,7 +1210,7 @@ export interface ChatCompletionCreateParamsBase { * * This value is now deprecated in favor of `max_completion_tokens`, and is not * compatible with - * [o1 series models](https://platform.openai.com/docs/guides/reasoning). + * [o-series models](https://platform.openai.com/docs/guides/reasoning). */ max_tokens?: number | null; @@ -1280,14 +1312,19 @@ export interface ChatCompletionCreateParamsBase { * latency guarentee. * - If set to 'default', the request will be processed using the default service * tier with a lower uptime SLA and no latency guarentee. + * - If set to 'flex', the request will be processed with the Flex Processing + * service tier. + * [Learn more](https://platform.openai.com/docs/guides/flex-processing). * - When not set, the default behavior is 'auto'. * * When this parameter is set, the response body will include the `service_tier` * utilized. */ - service_tier?: 'auto' | 'default' | null; + service_tier?: 'auto' | 'default' | 'flex' | null; /** + * Not supported with latest reasoning models `o3` and `o4-mini`. + * * Up to 4 sequences where the API will stop generating further tokens. The * returned text will not contain the stop sequence. */ diff --git a/src/resources/completions.ts b/src/resources/completions.ts index 4777a359d..1930a0bc0 100644 --- a/src/resources/completions.ts +++ b/src/resources/completions.ts @@ -290,6 +290,8 @@ export interface CompletionCreateParamsBase { seed?: number | null; /** + * Not supported with latest reasoning models `o3` and `o4-mini`. + * * Up to 4 sequences where the API will stop generating further tokens. 
The * returned text will not contain the stop sequence. */ diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 83835a0f7..3bcca579e 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -249,7 +249,7 @@ export interface Response { metadata: Shared.Metadata | null; /** - * Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + * Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a * wide range of models with different capabilities, performance characteristics, * and price points. Refer to the * [model guide](https://platform.openai.com/docs/models) to browse and compare @@ -342,6 +342,27 @@ export interface Response { */ reasoning?: Shared.Reasoning | null; + /** + * Specifies the latency tier to use for processing the request. This parameter is + * relevant for customers subscribed to the scale tier service: + * + * - If set to 'auto', and the Project is Scale tier enabled, the system will + * utilize scale tier credits until they are exhausted. + * - If set to 'auto', and the Project is not Scale tier enabled, the request will + * be processed using the default service tier with a lower uptime SLA and no + * latency guarentee. + * - If set to 'default', the request will be processed using the default service + * tier with a lower uptime SLA and no latency guarentee. + * - If set to 'flex', the request will be processed with the Flex Processing + * service tier. + * [Learn more](https://platform.openai.com/docs/guides/flex-processing). + * - When not set, the default behavior is 'auto'. + * + * When this parameter is set, the response body will include the `service_tier` + * utilized. + */ + service_tier?: 'auto' | 'default' | 'flex' | null; + /** * The status of the response generation. One of `completed`, `failed`, * `in_progress`, or `incomplete`. @@ -2601,7 +2622,7 @@ export interface ResponseCreateParamsBase { input: string | ResponseInput; /** - * Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + * Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a * wide range of models with different capabilities, performance characteristics, * and price points. Refer to the * [model guide](https://platform.openai.com/docs/models) to browse and compare @@ -2668,6 +2689,27 @@ export interface ResponseCreateParamsBase { */ reasoning?: Shared.Reasoning | null; + /** + * Specifies the latency tier to use for processing the request. This parameter is + * relevant for customers subscribed to the scale tier service: + * + * - If set to 'auto', and the Project is Scale tier enabled, the system will + * utilize scale tier credits until they are exhausted. + * - If set to 'auto', and the Project is not Scale tier enabled, the request will + * be processed using the default service tier with a lower uptime SLA and no + * latency guarentee. + * - If set to 'default', the request will be processed using the default service + * tier with a lower uptime SLA and no latency guarentee. + * - If set to 'flex', the request will be processed with the Flex Processing + * service tier. + * [Learn more](https://platform.openai.com/docs/guides/flex-processing). + * - When not set, the default behavior is 'auto'. + * + * When this parameter is set, the response body will include the `service_tier` + * utilized. 
+ */ + service_tier?: 'auto' | 'default' | 'flex' | null; + /** * Whether to store the generated model response for later retrieval via API. */ diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 94ef50585..1c0006b18 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -15,6 +15,10 @@ export type ChatModel = | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano-2025-04-14' + | 'o4-mini' + | 'o4-mini-2025-04-16' + | 'o3' + | 'o3-2025-04-16' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o1' @@ -187,13 +191,20 @@ export interface Reasoning { effort?: ReasoningEffort | null; /** - * **computer_use_preview only** + * @deprecated **Deprecated:** use `summary` instead. * * A summary of the reasoning performed by the model. This can be useful for - * debugging and understanding the model's reasoning process. One of `concise` or - * `detailed`. + * debugging and understanding the model's reasoning process. One of `auto`, + * `concise`, or `detailed`. */ - generate_summary?: 'concise' | 'detailed' | null; + generate_summary?: 'auto' | 'concise' | 'detailed' | null; + + /** + * A summary of the reasoning performed by the model. This can be useful for + * debugging and understanding the model's reasoning process. One of `auto`, + * `concise`, or `detailed`. + */ + summary?: 'auto' | 'concise' | 'detailed' | null; } /** diff --git a/tests/api-resources/responses/responses.test.ts b/tests/api-resources/responses/responses.test.ts index e025facc4..40b39ab55 100644 --- a/tests/api-resources/responses/responses.test.ts +++ b/tests/api-resources/responses/responses.test.ts @@ -29,7 +29,8 @@ describe('resource responses', () => { metadata: { foo: 'string' }, parallel_tool_calls: true, previous_response_id: 'previous_response_id', - reasoning: { effort: 'low', generate_summary: 'concise' }, + reasoning: { effort: 'low', generate_summary: 'auto', summary: 'auto' }, + service_tier: 'auto', store: true, stream: false, temperature: 1, From 37b1a1dbb944f0123ba02f40575a4415bd2324dd Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 4 Mar 2025 21:17:33 +0000 Subject: [PATCH 56/73] chore(internal): run example files in CI (#1357) --- .github/workflows/ci.yml | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6bb27f06c..62dddec02 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -77,6 +77,26 @@ jobs: - name: Run tests run: ./scripts/test + examples: + name: examples + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Set up Node + uses: actions/setup-node@v4 + with: + node-version: '18' + - name: Install dependencies + run: | + yarn install + + - env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + run: | + yarn tsn examples/demo.ts + ecosystem_tests: name: ecosystem tests (v${{ matrix.node-version }}) runs-on: ubuntu-latest From 21592fb6da3a93ffa0caea2a3b794f68de5547e5 Mon Sep 17 00:00:00 2001 From: Em Date: Mon, 21 Apr 2025 14:19:09 -0400 Subject: [PATCH 57/73] feat(api): add custom code for responses and built-in tools --- README.md | 289 +++---------------- src/azure.ts | 6 +- src/core/streaming.ts | 2 +- src/helpers/zod.ts | 46 +++ src/lib/ResponsesParser.ts | 262 +++++++++++++++++ src/lib/parser.ts | 28 ++ src/lib/responses/EventTypes.ts | 76 +++++ src/lib/responses/ResponseStream.ts | 298 ++++++++++++++++++++ src/resources/audio/transcriptions.ts | 4 +- 
src/resources/chat/completions/index.ts | 1 + src/resources/responses/responses.ts | 71 ++++- src/resources/shared.ts | 5 + src/resources/vector-stores/file-batches.ts | 3 + 13 files changed, 841 insertions(+), 250 deletions(-) create mode 100644 src/lib/ResponsesParser.ts create mode 100644 src/lib/responses/EventTypes.ts create mode 100644 src/lib/responses/ResponseStream.ts diff --git a/README.md b/README.md index 76aa800f2..00e6dece6 100644 --- a/README.md +++ b/README.md @@ -21,9 +21,7 @@ deno add jsr:@openai/openai npx jsr add @openai/openai ``` -These commands will make the module importable from the `@openai/openai` scope: - -You can also [import directly from JSR](https://jsr.io/docs/using-packages#importing-with-jsr-specifiers) without an install step if you're using the Deno JavaScript runtime: +These commands will make the module importable from the `@openai/openai` scope. You can also [import directly from JSR](https://jsr.io/docs/using-packages#importing-with-jsr-specifiers) without an install step if you're using the Deno JavaScript runtime: ```ts import OpenAI from 'jsr:@openai/openai'; @@ -31,7 +29,9 @@ import OpenAI from 'jsr:@openai/openai'; ## Usage -The full API of this library can be found in [api.md file](api.md) along with many [code examples](https://github.com/openai/openai-node/tree/master/examples). The code below shows how to get started using the chat completions API. +The full API of this library can be found in [api.md file](api.md) along with many [code examples](https://github.com/openai/openai-node/tree/master/examples). + +The primary API for interacting with OpenAI models is the [Responses API](https://platform.openai.com/docs/api-reference/responses). You can generate text from the model with the code below. ```js @@ -41,255 +41,57 @@ const client = new OpenAI({ apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted }); -async function main() { - const chatCompletion = await client.chat.completions.create({ - messages: [{ role: 'user', content: 'Say this is a test' }], - model: 'gpt-4o', - }); -} +const response = await client.responses.create({ + model: 'gpt-4o', + instructions: 'You are a coding assistant that talks like a pirate', + input: 'Are semicolons optional in JavaScript?', +}); -main(); +console.log(response.output_text); ``` -## Streaming responses - -We provide support for streaming responses using Server Sent Events (SSE). +The previous standard (supported indefinitely) for generating text is the [Chat Completions API](https://platform.openai.com/docs/api-reference/chat). You can use that API to generate text from the model with the code below. ```ts import OpenAI from 'openai'; -const client = new OpenAI(); - -async function main() { - const stream = await client.chat.completions.create({ - model: 'gpt-4o', - messages: [{ role: 'user', content: 'Say this is a test' }], - stream: true, - }); - for await (const chunk of stream) { - process.stdout.write(chunk.choices[0]?.delta?.content || ''); - } -} - -main(); -``` - -If you need to cancel a stream, you can `break` from the loop -or call `stream.controller.abort()`. - -### Request & Response types - -This library includes TypeScript definitions for all request params and response fields. 
You may import and use them like so: - - -```ts -import OpenAI from 'openai'; - const client = new OpenAI({ apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted }); -async function main() { - const params: OpenAI.Chat.ChatCompletionCreateParams = { - messages: [{ role: 'user', content: 'Say this is a test' }], - model: 'gpt-4o', - }; - const chatCompletion: OpenAI.Chat.ChatCompletion = await client.chat.completions.create(params); -} - -main(); -``` - -Documentation for each method, request param, and response field are available in docstrings and will appear on hover in most modern editors. - -> [!IMPORTANT] -> Previous versions of this SDK used a `Configuration` class. See the [v3 to v4 migration guide](https://github.com/openai/openai-node/discussions/217). - -### Polling Helpers - -When interacting with the API some actions such as starting a Run and adding files to vector stores are asynchronous and take time to complete. The SDK includes -helper functions which will poll the status until it reaches a terminal state and then return the resulting object. -If an API method results in an action which could benefit from polling there will be a corresponding version of the -method ending in 'AndPoll'. - -For instance to create a Run and poll until it reaches a terminal state you can run: - -```ts -const run = await openai.beta.threads.runs.createAndPoll(thread.id, { - assistant_id: assistantId, +const completion = await client.chat.completions.create({ + model: 'gpt-4o', + messages: [ + { role: 'developer', content: 'Talk like a pirate.' }, + { role: 'user', content: 'Are semicolons optional in JavaScript?' }, + ], }); -``` - -More information on the lifecycle of a Run can be found in the [Run Lifecycle Documentation](https://platform.openai.com/docs/assistants/deep-dive/run-lifecycle) - -### Bulk Upload Helpers - -When creating and interacting with vector stores, you can use the polling helpers to monitor the status of operations. -For convenience, we also provide a bulk upload helper to allow you to simultaneously upload several files at once. - -```ts -const fileList = [ - createReadStream('/home/data/example.pdf'), - ... -]; - -const batch = await openai.vectorStores.fileBatches.uploadAndPoll(vectorStore.id, {files: fileList}); -``` - -### Streaming Helpers - -The SDK also includes helpers to process streams and handle the incoming events. 
- -```ts -const run = openai.beta.threads.runs - .stream(thread.id, { - assistant_id: assistant.id, - }) - .on('textCreated', (text) => process.stdout.write('\nassistant > ')) - .on('textDelta', (textDelta, snapshot) => process.stdout.write(textDelta.value)) - .on('toolCallCreated', (toolCall) => process.stdout.write(`\nassistant > ${toolCall.type}\n\n`)) - .on('toolCallDelta', (toolCallDelta, snapshot) => { - if (toolCallDelta.type === 'code_interpreter') { - if (toolCallDelta.code_interpreter.input) { - process.stdout.write(toolCallDelta.code_interpreter.input); - } - if (toolCallDelta.code_interpreter.outputs) { - process.stdout.write('\noutput >\n'); - toolCallDelta.code_interpreter.outputs.forEach((output) => { - if (output.type === 'logs') { - process.stdout.write(`\n${output.logs}\n`); - } - }); - } - } - }); -``` - -More information on streaming helpers can be found in the dedicated documentation: [helpers.md](helpers.md) - -### Streaming responses - -This library provides several conveniences for streaming chat completions, for example: - -```ts -import OpenAI from 'openai'; - -const openai = new OpenAI(); - -async function main() { - const stream = await openai.beta.chat.completions.stream({ - model: 'gpt-4o', - messages: [{ role: 'user', content: 'Say this is a test' }], - stream: true, - }); - stream.on('content', (delta, snapshot) => { - process.stdout.write(delta); - }); - - // or, equivalently: - for await (const chunk of stream) { - process.stdout.write(chunk.choices[0]?.delta?.content || ''); - } - - const chatCompletion = await stream.finalChatCompletion(); - console.log(chatCompletion); // {id: "…", choices: […], …} -} - -main(); +console.log(completion.choices[0].message.content); ``` -Streaming with `openai.beta.chat.completions.stream({…})` exposes -[various helpers for your convenience](helpers.md#chat-events) including event handlers and promises. - -Alternatively, you can use `openai.chat.completions.create({ stream: true, … })` -which only returns an async iterable of the chunks in the stream and thus uses less memory -(it does not build up a final chat completion object for you). - -If you need to cancel a stream, you can `break` from a `for await` loop or call `stream.abort()`. - -### Automated function calls - -We provide the `openai.beta.chat.completions.runTools({…})` -convenience helper for using function tool calls with the `/chat/completions` endpoint -which automatically call the JavaScript functions you provide -and sends their results back to the `/chat/completions` endpoint, -looping as long as the model requests tool calls. - -If you pass a `parse` function, it will automatically parse the `arguments` for you -and returns any parsing errors to the model to attempt auto-recovery. -Otherwise, the args will be passed to the function you provide as a string. +## Streaming responses -If you pass `tool_choice: {function: {name: …}}` instead of `auto`, -it returns immediately after calling that function (and only loops to auto-recover parsing errors). +We provide support for streaming responses using Server Sent Events (SSE). ```ts import OpenAI from 'openai'; -const client = new OpenAI(); - -async function main() { - const runner = client.beta.chat.completions - .runTools({ - model: 'gpt-4o', - messages: [{ role: 'user', content: 'How is the weather this week?' 
}], - tools: [ - { - type: 'function', - function: { - function: getCurrentLocation, - parameters: { type: 'object', properties: {} }, - }, - }, - { - type: 'function', - function: { - function: getWeather, - parse: JSON.parse, // or use a validation library like zod for typesafe parsing. - parameters: { - type: 'object', - properties: { - location: { type: 'string' }, - }, - }, - }, - }, - ], - }) - .on('message', (message) => console.log(message)); - - const finalContent = await runner.finalContent(); - console.log(); - console.log('Final content:', finalContent); -} +const client = new OpenAI({ + apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted +}); -async function getCurrentLocation() { - return 'Boston'; // Simulate lookup -} +const stream = await client.responses.create({ + model: 'gpt-4o', + input: 'Say "Sheep sleep deep" ten times fast!', + stream: true, +}); -async function getWeather(args: { location: string }) { - const { location } = args; - // … do lookup … - return { temperature, precipitation }; +for await (const event of stream) { + console.log(event); } - -main(); - -// {role: "user", content: "How's the weather this week?"} -// {role: "assistant", tool_calls: [{type: "function", function: {name: "getCurrentLocation", arguments: "{}"}, id: "123"} -// {role: "tool", name: "getCurrentLocation", content: "Boston", tool_call_id: "123"} -// {role: "assistant", tool_calls: [{type: "function", function: {name: "getWeather", arguments: '{"location": "Boston"}'}, id: "1234"}]} -// {role: "tool", name: "getWeather", content: '{"temperature": "50degF", "preciptation": "high"}', tool_call_id: "1234"} -// {role: "assistant", content: "It's looking cold and rainy - you might want to wear a jacket!"} -// -// Final content: "It's looking cold and rainy - you might want to wear a jacket!" ``` -Like with `.stream()`, we provide a variety of [helpers and events](helpers.md#chat-events). - -Read more about various examples such as with integrating with [zod](helpers.md#integrate-with-zod), -[next.js](helpers.md#integrate-with-nextjs), and [proxying a stream to the browser](helpers.md#proxy-streaming-to-a-browser). - ## File uploads Request parameters that correspond to file uploads can be passed in many different forms: @@ -370,17 +172,20 @@ Error codes are as followed: All object responses in the SDK provide a `_request_id` property which is added from the `x-request-id` response header so that you can quickly log failing requests and report them back to OpenAI. ```ts -const completion = await client.chat.completions.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4o' }); -console.log(completion._request_id) // req_123 +const completion = await client.responses.create({ + model: 'gpt-4o', + input: 'Say this is a test', +}); +console.log(completion._request_id); // req_123 ``` You can also access the Request ID using the `.withResponse()` method: ```ts -const { data: stream, request_id } = await openai.chat.completions +const { data: stream, request_id } = await openai.responses .create({ - model: 'gpt-4', - messages: [{ role: 'user', content: 'Say this is a test' }], + model: 'gpt-4o', + input: 'Say this is a test', stream: true, }) .withResponse(); @@ -427,7 +232,7 @@ const result = await openai.chat.completions.create({ console.log(result.choices[0]!.message?.content); ``` -### Retries +## Retries Certain errors will be automatically retried 2 times by default, with a short exponential backoff. 
Connection errors (for example, due to a network connectivity problem), 408 Request Timeout, 409 Conflict, @@ -443,12 +248,12 @@ const client = new OpenAI({ }); // Or, configure per-request: -await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I get the name of the current day in JavaScript?' }], model: 'gpt-4o' }, { +await client.responses.create({ model: 'gpt-4o', input: 'How can I get the name of the current day in JavaScript?' }, { maxRetries: 5, }); ``` -### Timeouts +## Timeouts Requests time out after 10 minutes by default. You can configure this with a `timeout` option: @@ -460,7 +265,7 @@ const client = new OpenAI({ }); // Override per-request: -await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I list all files in a directory using Python?' }], model: 'gpt-4o' }, { +await client.responses.create({ model: 'gpt-4o', input: 'How can I list all files in a directory using Python?' }, { timeout: 5 * 1000, }); ``` @@ -514,17 +319,17 @@ Unlike `.asResponse()` this method consumes the body, returning once it is parse ```ts const client = new OpenAI(); -const response = await client.chat.completions - .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4o' }) +const response = await client.responses + .create({ model: 'gpt-4o', input: 'say this is a test.' }) .asResponse(); console.log(response.headers.get('X-My-Header')); console.log(response.statusText); // access the underlying Response object -const { data: chatCompletion, response: raw } = await client.chat.completions - .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4o' }) +const { data, response: raw } = await client.responses + .create({ model: 'gpt-4o', input: 'say this is a test.' }) .withResponse(); console.log(raw.headers.get('X-My-Header')); -console.log(chatCompletion); +console.log(data); ``` ### Logging diff --git a/src/azure.ts b/src/azure.ts index 5d8e9a98a..7c1193ba0 100644 --- a/src/azure.ts +++ b/src/azure.ts @@ -3,7 +3,7 @@ import * as Errors from './error'; import { FinalRequestOptions } from './internal/request-options'; import { isObj, readEnv } from './internal/utils'; import { ClientOptions, OpenAI } from './client'; -import { buildHeaders } from './internal/headers'; +import { buildHeaders, NullableHeaders } from './internal/headers'; /** API Client for interfacing with the Azure OpenAI API. 
*/ export interface AzureClientOptions extends ClientOptions { @@ -157,8 +157,8 @@ export class AzureOpenAI extends OpenAI { return undefined; } - protected override authHeaders(opts: FinalRequestOptions): Headers { - return new Headers(); + protected override authHeaders(opts: FinalRequestOptions): NullableHeaders { + return buildHeaders([]); } protected override async prepareOptions(opts: FinalRequestOptions): Promise { diff --git a/src/core/streaming.ts b/src/core/streaming.ts index e47c088d2..49fff52aa 100644 --- a/src/core/streaming.ts +++ b/src/core/streaming.ts @@ -43,7 +43,7 @@ export class Stream implements AsyncIterable { continue; } - if (sse.event === null) { + if (sse.event === null || sse.event.startsWith('response.')) { let data; try { diff --git a/src/helpers/zod.ts b/src/helpers/zod.ts index 99b9eb4b0..064c56314 100644 --- a/src/helpers/zod.ts +++ b/src/helpers/zod.ts @@ -2,11 +2,15 @@ import { ResponseFormatJSONSchema } from '../resources/index'; import type { infer as zodInfer, ZodType } from 'zod'; import { AutoParseableResponseFormat, + AutoParseableTextFormat, AutoParseableTool, makeParseableResponseFormat, + makeParseableTextFormat, makeParseableTool, } from '../lib/parser'; import { zodToJsonSchema as _zodToJsonSchema } from '../_vendor/zod-to-json-schema'; +import { ResponseFormatTextJSONSchemaConfig } from '../resources/responses/responses'; +import { AutoParseableResponseTool, makeParseableResponseTool } from '../lib/ResponsesParser'; function zodToJsonSchema(schema: ZodType, options: { name: string }): Record { return _zodToJsonSchema(schema, { @@ -74,6 +78,23 @@ export function zodResponseFormat( ); } +export function zodTextFormat( + zodObject: ZodInput, + name: string, + props?: Omit, +): AutoParseableTextFormat> { + return makeParseableTextFormat( + { + type: 'json_schema', + ...props, + name, + strict: true, + schema: zodToJsonSchema(zodObject, { name }), + }, + (content) => zodObject.parse(JSON.parse(content)), + ); +} + /** * Creates a chat completion `function` tool that can be invoked * automatically by the chat completion `.runTools()` method or automatically @@ -106,3 +127,28 @@ export function zodFunction(options: { }, ); } + +export function zodResponsesFunction(options: { + name: string; + parameters: Parameters; + function?: ((args: zodInfer) => unknown | Promise) | undefined; + description?: string | undefined; +}): AutoParseableResponseTool<{ + arguments: Parameters; + name: string; + function: (args: zodInfer) => unknown; +}> { + return makeParseableResponseTool( + { + type: 'function', + name: options.name, + parameters: zodToJsonSchema(options.parameters, { name: options.name }), + strict: true, + ...(options.description ? 
{ description: options.description } : undefined), + }, + { + callback: options.function, + parser: (args) => options.parameters.parse(JSON.parse(args)), + }, + ); +} diff --git a/src/lib/ResponsesParser.ts b/src/lib/ResponsesParser.ts new file mode 100644 index 000000000..8d762d5bb --- /dev/null +++ b/src/lib/ResponsesParser.ts @@ -0,0 +1,262 @@ +import { OpenAIError } from '../error'; +import type { ChatCompletionTool } from '../resources/chat/completions'; +import { + type FunctionTool, + type ParsedContent, + type ParsedResponse, + type ParsedResponseFunctionToolCall, + type ParsedResponseOutputItem, + type Response, + type ResponseCreateParamsBase, + type ResponseCreateParamsNonStreaming, + type ResponseFunctionToolCall, + type Tool, +} from '../resources/responses/responses'; +import { type AutoParseableTextFormat, isAutoParsableResponseFormat } from '../lib/parser'; + +type ParseableToolsParams = Array | ChatCompletionTool | null; + +export type ResponseCreateParamsWithTools = ResponseCreateParamsBase & { + tools?: ParseableToolsParams; +}; + +export type ExtractParsedContentFromParams = + NonNullable['format'] extends AutoParseableTextFormat ? P : null; + +export function maybeParseResponse< + Params extends ResponseCreateParamsBase | null, + ParsedT = Params extends null ? null : ExtractParsedContentFromParams>, +>(response: Response, params: Params): ParsedResponse { + if (!params || !hasAutoParseableInput(params)) { + return { + ...response, + output_parsed: null, + output: response.output.map((item) => { + if (item.type === 'function_call') { + return { + ...item, + parsed_arguments: null, + }; + } + + if (item.type === 'message') { + return { + ...item, + content: item.content.map((content) => ({ + ...content, + parsed: null, + })), + }; + } else { + return item; + } + }), + }; + } + + return parseResponse(response, params); +} + +export function parseResponse< + Params extends ResponseCreateParamsBase, + ParsedT = ExtractParsedContentFromParams, +>(response: Response, params: Params): ParsedResponse { + const output: Array> = response.output.map( + (item): ParsedResponseOutputItem => { + if (item.type === 'function_call') { + return { + ...item, + parsed_arguments: parseToolCall(params, item), + }; + } + if (item.type === 'message') { + const content: Array> = item.content.map((content) => { + if (content.type === 'output_text') { + return { + ...content, + parsed: parseTextFormat(params, content.text), + }; + } + + return content; + }); + + return { + ...item, + content, + }; + } + + return item; + }, + ); + + const parsed: Omit, 'output_parsed'> = Object.assign({}, response, { output }); + if (!Object.getOwnPropertyDescriptor(response, 'output_text')) { + addOutputText(parsed); + } + + Object.defineProperty(parsed, 'output_parsed', { + enumerable: true, + get() { + for (const output of parsed.output) { + if (output.type !== 'message') { + continue; + } + + for (const content of output.content) { + if (content.type === 'output_text' && content.parsed !== null) { + return content.parsed; + } + } + } + + return null; + }, + }); + + return parsed as ParsedResponse; +} + +function parseTextFormat< + Params extends ResponseCreateParamsBase, + ParsedT = ExtractParsedContentFromParams, +>(params: Params, content: string): ParsedT | null { + if (params.text?.format?.type !== 'json_schema') { + return null; + } + + if ('$parseRaw' in params.text?.format) { + const text_format = params.text?.format as unknown as AutoParseableTextFormat; + return text_format.$parseRaw(content); + } + + 
return JSON.parse(content); +} + +export function hasAutoParseableInput(params: ResponseCreateParamsWithTools): boolean { + if (isAutoParsableResponseFormat(params.text?.format)) { + return true; + } + + return false; +} + +type ToolOptions = { + name: string; + arguments: any; + function?: ((args: any) => any) | undefined; +}; + +export type AutoParseableResponseTool< + OptionsT extends ToolOptions, + HasFunction = OptionsT['function'] extends Function ? true : false, +> = FunctionTool & { + __arguments: OptionsT['arguments']; // type-level only + __name: OptionsT['name']; // type-level only + + $brand: 'auto-parseable-tool'; + $callback: ((args: OptionsT['arguments']) => any) | undefined; + $parseRaw(args: string): OptionsT['arguments']; +}; + +export function makeParseableResponseTool( + tool: FunctionTool, + { + parser, + callback, + }: { + parser: (content: string) => OptionsT['arguments']; + callback: ((args: any) => any) | undefined; + }, +): AutoParseableResponseTool { + const obj = { ...tool }; + + Object.defineProperties(obj, { + $brand: { + value: 'auto-parseable-tool', + enumerable: false, + }, + $parseRaw: { + value: parser, + enumerable: false, + }, + $callback: { + value: callback, + enumerable: false, + }, + }); + + return obj as AutoParseableResponseTool; +} + +export function isAutoParsableTool(tool: any): tool is AutoParseableResponseTool { + return tool?.['$brand'] === 'auto-parseable-tool'; +} + +function getInputToolByName(input_tools: Array, name: string): FunctionTool | undefined { + return input_tools.find((tool) => tool.type === 'function' && tool.name === name) as + | FunctionTool + | undefined; +} + +function parseToolCall( + params: Params, + toolCall: ResponseFunctionToolCall, +): ParsedResponseFunctionToolCall { + const inputTool = getInputToolByName(params.tools ?? [], toolCall.name); + + return { + ...toolCall, + ...toolCall, + parsed_arguments: + isAutoParsableTool(inputTool) ? inputTool.$parseRaw(toolCall.arguments) + : inputTool?.strict ? JSON.parse(toolCall.arguments) + : null, + }; +} + +export function shouldParseToolCall( + params: ResponseCreateParamsNonStreaming | null | undefined, + toolCall: ResponseFunctionToolCall, +): boolean { + if (!params) { + return false; + } + + const inputTool = getInputToolByName(params.tools ?? [], toolCall.name); + return isAutoParsableTool(inputTool) || inputTool?.strict || false; +} + +export function validateInputTools(tools: ChatCompletionTool[] | undefined) { + for (const tool of tools ?? []) { + if (tool.type !== 'function') { + throw new OpenAIError( + `Currently only \`function\` tool types support auto-parsing; Received \`${tool.type}\``, + ); + } + + if (tool.function.strict !== true) { + throw new OpenAIError( + `The \`${tool.function.name}\` tool is not marked with \`strict: true\`. 
Only strict function tools can be auto-parsed`, + ); + } + } +} + +export function addOutputText(rsp: Response): void { + const texts: string[] = []; + for (const output of rsp.output) { + if (output.type !== 'message') { + continue; + } + + for (const content of output.content) { + if (content.type === 'output_text') { + texts.push(content.text); + } + } + } + + rsp.output_text = texts.join(''); +} diff --git a/src/lib/parser.ts b/src/lib/parser.ts index a750375dc..61df1019c 100644 --- a/src/lib/parser.ts +++ b/src/lib/parser.ts @@ -14,6 +14,7 @@ import { } from '../resources/beta/chat/completions'; import { ResponseFormatJSONSchema } from '../resources/shared'; import { ContentFilterFinishReasonError, LengthFinishReasonError, OpenAIError } from '../error'; +import { type ResponseFormatTextJSONSchemaConfig } from '../resources/responses/responses'; type AnyChatCompletionCreateParams = | ChatCompletionCreateParams @@ -24,6 +25,33 @@ type AnyChatCompletionCreateParams = export type ExtractParsedContentFromParams = Params['response_format'] extends AutoParseableResponseFormat ? P : null; +export type AutoParseableTextFormat = ResponseFormatTextJSONSchemaConfig & { + __output: ParsedT; // type-level only + + $brand: 'auto-parseable-response-format'; + $parseRaw(content: string): ParsedT; +}; + +export function makeParseableTextFormat( + response_format: ResponseFormatTextJSONSchemaConfig, + parser: (content: string) => ParsedT, +): AutoParseableTextFormat { + const obj = { ...response_format }; + + Object.defineProperties(obj, { + $brand: { + value: 'auto-parseable-response-format', + enumerable: false, + }, + $parseRaw: { + value: parser, + enumerable: false, + }, + }); + + return obj as AutoParseableTextFormat; +} + export type AutoParseableResponseFormat = ResponseFormatJSONSchema & { __output: ParsedT; // type-level only diff --git a/src/lib/responses/EventTypes.ts b/src/lib/responses/EventTypes.ts new file mode 100644 index 000000000..fc1620988 --- /dev/null +++ b/src/lib/responses/EventTypes.ts @@ -0,0 +1,76 @@ +import { + ResponseAudioDeltaEvent, + ResponseAudioDoneEvent, + ResponseAudioTranscriptDeltaEvent, + ResponseAudioTranscriptDoneEvent, + ResponseCodeInterpreterCallCodeDeltaEvent, + ResponseCodeInterpreterCallCodeDoneEvent, + ResponseCodeInterpreterCallCompletedEvent, + ResponseCodeInterpreterCallInProgressEvent, + ResponseCodeInterpreterCallInterpretingEvent, + ResponseCompletedEvent, + ResponseContentPartAddedEvent, + ResponseContentPartDoneEvent, + ResponseCreatedEvent, + ResponseErrorEvent, + ResponseFailedEvent, + ResponseFileSearchCallCompletedEvent, + ResponseFileSearchCallInProgressEvent, + ResponseFileSearchCallSearchingEvent, + ResponseFunctionCallArgumentsDeltaEvent as RawResponseFunctionCallArgumentsDeltaEvent, + ResponseFunctionCallArgumentsDoneEvent, + ResponseInProgressEvent, + ResponseOutputItemAddedEvent, + ResponseOutputItemDoneEvent, + ResponseRefusalDeltaEvent, + ResponseRefusalDoneEvent, + ResponseTextAnnotationDeltaEvent, + ResponseTextDeltaEvent as RawResponseTextDeltaEvent, + ResponseTextDoneEvent, + ResponseIncompleteEvent, + ResponseWebSearchCallCompletedEvent, + ResponseWebSearchCallInProgressEvent, + ResponseWebSearchCallSearchingEvent, +} from '../../resources/responses/responses'; + +export type ResponseFunctionCallArgumentsDeltaEvent = RawResponseFunctionCallArgumentsDeltaEvent & { + snapshot: string; +}; + +export type ResponseTextDeltaEvent = RawResponseTextDeltaEvent & { + snapshot: string; +}; + +export type ParsedResponseStreamEvent = + | 
ResponseAudioDeltaEvent + | ResponseAudioDoneEvent + | ResponseAudioTranscriptDeltaEvent + | ResponseAudioTranscriptDoneEvent + | ResponseCodeInterpreterCallCodeDeltaEvent + | ResponseCodeInterpreterCallCodeDoneEvent + | ResponseCodeInterpreterCallCompletedEvent + | ResponseCodeInterpreterCallInProgressEvent + | ResponseCodeInterpreterCallInterpretingEvent + | ResponseCompletedEvent + | ResponseContentPartAddedEvent + | ResponseContentPartDoneEvent + | ResponseCreatedEvent + | ResponseErrorEvent + | ResponseFileSearchCallCompletedEvent + | ResponseFileSearchCallInProgressEvent + | ResponseFileSearchCallSearchingEvent + | ResponseFunctionCallArgumentsDeltaEvent + | ResponseFunctionCallArgumentsDoneEvent + | ResponseInProgressEvent + | ResponseFailedEvent + | ResponseIncompleteEvent + | ResponseOutputItemAddedEvent + | ResponseOutputItemDoneEvent + | ResponseRefusalDeltaEvent + | ResponseRefusalDoneEvent + | ResponseTextAnnotationDeltaEvent + | ResponseTextDeltaEvent + | ResponseTextDoneEvent + | ResponseWebSearchCallCompletedEvent + | ResponseWebSearchCallInProgressEvent + | ResponseWebSearchCallSearchingEvent; diff --git a/src/lib/responses/ResponseStream.ts b/src/lib/responses/ResponseStream.ts new file mode 100644 index 000000000..c724805ab --- /dev/null +++ b/src/lib/responses/ResponseStream.ts @@ -0,0 +1,298 @@ +import { + type ParsedResponse, + type Response, + type ResponseCreateParamsBase, + type ResponseCreateParamsStreaming, + type ResponseStreamEvent, +} from '../../resources/responses/responses'; +import { APIUserAbortError, OpenAIError } from '../../error'; +import OpenAI from '../../index'; +import { type BaseEvents, EventStream } from '../EventStream'; +import { type ResponseFunctionCallArgumentsDeltaEvent, type ResponseTextDeltaEvent } from './EventTypes'; +import { maybeParseResponse } from '../ResponsesParser'; +import { RequestOptions } from '../../internal/request-options'; + +export type ResponseStreamParams = Omit & { + stream?: true; +}; + +type ResponseEvents = BaseEvents & + Omit< + { + [K in ResponseStreamEvent['type']]: (event: Extract) => void; + }, + 'response.output_text.delta' | 'response.function_call_arguments.delta' + > & { + event: (event: ResponseStreamEvent) => void; + 'response.output_text.delta': (event: ResponseTextDeltaEvent) => void; + 'response.function_call_arguments.delta': (event: ResponseFunctionCallArgumentsDeltaEvent) => void; + }; + +export type ResponseStreamingParams = Omit & { + stream?: true; +}; + +export class ResponseStream + extends EventStream + implements AsyncIterable +{ + #params: ResponseStreamingParams | null; + #currentResponseSnapshot: Response | undefined; + #finalResponse: ParsedResponse | undefined; + + constructor(params: ResponseStreamingParams | null) { + super(); + this.#params = params; + } + + static createResponse( + client: OpenAI, + params: ResponseStreamParams, + options?: RequestOptions, + ): ResponseStream { + const runner = new ResponseStream(params as ResponseCreateParamsStreaming); + runner._run(() => + runner._createResponse(client, params, { + ...options, + headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' }, + }), + ); + return runner; + } + + #beginRequest() { + if (this.ended) return; + this.#currentResponseSnapshot = undefined; + } + + #addEvent(this: ResponseStream, event: ResponseStreamEvent) { + if (this.ended) return; + + const response = this.#accumulateResponse(event); + this._emit('event', event); + + switch (event.type) { + case 'response.output_text.delta': { + const output 
= response.output[event.output_index]; + if (!output) { + throw new OpenAIError(`missing output at index ${event.output_index}`); + } + if (output.type === 'message') { + const content = output.content[event.content_index]; + if (!content) { + throw new OpenAIError(`missing content at index ${event.content_index}`); + } + if (content.type !== 'output_text') { + throw new OpenAIError(`expected content to be 'output_text', got ${content.type}`); + } + + this._emit('response.output_text.delta', { + ...event, + snapshot: content.text, + }); + } + break; + } + case 'response.function_call_arguments.delta': { + const output = response.output[event.output_index]; + if (!output) { + throw new OpenAIError(`missing output at index ${event.output_index}`); + } + if (output.type === 'function_call') { + this._emit('response.function_call_arguments.delta', { + ...event, + snapshot: output.arguments, + }); + } + break; + } + default: + // @ts-ignore + this._emit(event.type, event); + break; + } + } + + #endRequest(): ParsedResponse { + if (this.ended) { + throw new OpenAIError(`stream has ended, this shouldn't happen`); + } + const snapshot = this.#currentResponseSnapshot; + if (!snapshot) { + throw new OpenAIError(`request ended without sending any events`); + } + this.#currentResponseSnapshot = undefined; + const parsedResponse = finalizeResponse(snapshot, this.#params); + this.#finalResponse = parsedResponse; + + return parsedResponse; + } + + protected async _createResponse( + client: OpenAI, + params: ResponseStreamingParams, + options?: RequestOptions, + ): Promise> { + const signal = options?.signal; + if (signal) { + if (signal.aborted) this.controller.abort(); + signal.addEventListener('abort', () => this.controller.abort()); + } + this.#beginRequest(); + + const stream = await client.responses.create( + { ...params, stream: true }, + { ...options, signal: this.controller.signal }, + ); + this._connected(); + for await (const event of stream) { + this.#addEvent(event); + } + if (stream.controller.signal?.aborted) { + throw new APIUserAbortError(); + } + return this.#endRequest(); + } + + #accumulateResponse(event: ResponseStreamEvent): Response { + let snapshot = this.#currentResponseSnapshot; + if (!snapshot) { + if (event.type !== 'response.created') { + throw new OpenAIError( + `When snapshot hasn't been set yet, expected 'response.created' event, got ${event.type}`, + ); + } + snapshot = this.#currentResponseSnapshot = event.response; + return snapshot; + } + + switch (event.type) { + case 'response.output_item.added': { + snapshot.output.push(event.item); + break; + } + case 'response.content_part.added': { + const output = snapshot.output[event.output_index]; + if (!output) { + throw new OpenAIError(`missing output at index ${event.output_index}`); + } + if (output.type === 'message') { + output.content.push(event.part); + } + break; + } + case 'response.output_text.delta': { + const output = snapshot.output[event.output_index]; + if (!output) { + throw new OpenAIError(`missing output at index ${event.output_index}`); + } + if (output.type === 'message') { + const content = output.content[event.content_index]; + if (!content) { + throw new OpenAIError(`missing content at index ${event.content_index}`); + } + if (content.type !== 'output_text') { + throw new OpenAIError(`expected content to be 'output_text', got ${content.type}`); + } + content.text += event.delta; + } + break; + } + case 'response.function_call_arguments.delta': { + const output = snapshot.output[event.output_index]; + if 
(!output) { + throw new OpenAIError(`missing output at index ${event.output_index}`); + } + if (output.type === 'function_call') { + output.arguments += event.delta; + } + break; + } + case 'response.completed': { + this.#currentResponseSnapshot = event.response; + break; + } + } + + return snapshot; + } + + [Symbol.asyncIterator](this: ResponseStream): AsyncIterator { + const pushQueue: ResponseStreamEvent[] = []; + const readQueue: { + resolve: (event: ResponseStreamEvent | undefined) => void; + reject: (err: unknown) => void; + }[] = []; + let done = false; + + this.on('event', (event) => { + const reader = readQueue.shift(); + if (reader) { + reader.resolve(event); + } else { + pushQueue.push(event); + } + }); + + this.on('end', () => { + done = true; + for (const reader of readQueue) { + reader.resolve(undefined); + } + readQueue.length = 0; + }); + + this.on('abort', (err) => { + done = true; + for (const reader of readQueue) { + reader.reject(err); + } + readQueue.length = 0; + }); + + this.on('error', (err) => { + done = true; + for (const reader of readQueue) { + reader.reject(err); + } + readQueue.length = 0; + }); + + return { + next: async (): Promise> => { + if (!pushQueue.length) { + if (done) { + return { value: undefined, done: true }; + } + return new Promise((resolve, reject) => + readQueue.push({ resolve, reject }), + ).then((event) => (event ? { value: event, done: false } : { value: undefined, done: true })); + } + const event = pushQueue.shift()!; + return { value: event, done: false }; + }, + return: async () => { + this.abort(); + return { value: undefined, done: true }; + }, + }; + } + + /** + * @returns a promise that resolves with the final Response, or rejects + * if an error occurred or the stream ended prematurely without producing a REsponse. + */ + async finalResponse(): Promise> { + await this.done(); + const response = this.#finalResponse; + if (!response) throw new OpenAIError('stream ended without producing a ChatCompletion'); + return response; + } +} + +function finalizeResponse( + snapshot: Response, + params: ResponseStreamingParams | null, +): ParsedResponse { + return maybeParseResponse(snapshot, params); +} diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index f8e3c24dd..a00a48d84 100644 --- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -285,7 +285,9 @@ export type TranscriptionCreateParams = | TranscriptionCreateParamsNonStreaming | TranscriptionCreateParamsStreaming; -export interface TranscriptionCreateParamsBase { +export interface TranscriptionCreateParamsBase< + ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = AudioAPI.AudioResponseFormat | undefined, +> { /** * The audio file object (not file name) to transcribe, in one of these formats: * flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. 
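
The `zodTextFormat` helper and the `ResponseStream` class added above are surfaced on the `Responses` resource as `parse()` and `stream()` methods later in this patch (`src/resources/responses/responses.ts`). A minimal usage sketch of those helpers, assuming `OPENAI_API_KEY` is set in the environment and treating the model name, schema, and prompts as placeholder values (illustrative only; not part of the patch):

```ts
import OpenAI from 'openai';
import { z } from 'zod';
import { zodTextFormat } from 'openai/helpers/zod';

const client = new OpenAI();

// responses.parse() attaches `output_parsed`, validated against the zod schema.
const Step = z.object({ explanation: z.string(), answer: z.string() });
const parsed = await client.responses.parse({
  model: 'gpt-4o',
  input: 'Solve 8x + 31 = 2. Reply as JSON with an explanation and an answer.',
  text: { format: zodTextFormat(Step, 'step') },
});
console.log(parsed.output_parsed);

// responses.stream() exposes typed events; the helper adds a `snapshot` of the text so far
// on top of each raw `response.output_text.delta` event.
const stream = client.responses.stream({
  model: 'gpt-4o',
  input: 'Write a haiku about type safety.',
});
stream.on('response.output_text.delta', (event) => process.stdout.write(event.delta));
const final = await stream.finalResponse();
console.log('\nid:', final.id, 'status:', final.status);
```

The stream's `finalResponse()` runs the same parsing step (`maybeParseResponse`) over the accumulated snapshot, so `output_parsed` is also available on the streamed result when a parseable `text.format` is provided.
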
diff --git a/src/resources/chat/completions/index.ts b/src/resources/chat/completions/index.ts index 32d0eb408..0cc65b6a7 100644 --- a/src/resources/chat/completions/index.ts +++ b/src/resources/chat/completions/index.ts @@ -32,6 +32,7 @@ export { type ChatCompletionToolMessageParam, type ChatCompletionUserMessageParam, type ChatCompletionCreateParams, + type ChatCompletionCreateParamsBase, type ChatCompletionCreateParamsNonStreaming, type ChatCompletionCreateParamsStreaming, type ChatCompletionUpdateParams, diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 3bcca579e..2694996ad 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -11,7 +11,43 @@ import { Stream } from '../../core/streaming'; import { buildHeaders } from '../../internal/headers'; import { RequestOptions } from '../../internal/request-options'; import { path } from '../../internal/utils/path'; +import { + type ExtractParsedContentFromParams, + parseResponse, + type ResponseCreateParamsWithTools, + addOutputText, +} from '../../lib/ResponsesParser'; +import { ResponseStream, ResponseStreamParams } from '../../lib/responses/ResponseStream'; +export interface ParsedResponseOutputText extends ResponseOutputText { + parsed: ParsedT | null; +} + +export type ParsedContent = ParsedResponseOutputText | ResponseOutputRefusal; + +export interface ParsedResponseOutputMessage extends ResponseOutputMessage { + content: ParsedContent[]; +} + +export interface ParsedResponseFunctionToolCall extends ResponseFunctionToolCall { + parsed_arguments: any; +} + +export type ParsedResponseOutputItem = + | ParsedResponseOutputMessage + | ParsedResponseFunctionToolCall + | ResponseFileSearchToolCall + | ResponseFunctionWebSearch + | ResponseComputerToolCall + | ResponseReasoningItem; + +export interface ParsedResponse extends Response { + output: Array>; + + output_parsed: ParsedT | null; +} + +export type ResponseParseParams = ResponseCreateParamsNonStreaming; export class Responses extends APIResource { inputItems: InputItemsAPI.InputItems = new InputItemsAPI.InputItems(this._client); @@ -41,9 +77,17 @@ export class Responses extends APIResource { body: ResponseCreateParams, options?: RequestOptions, ): APIPromise | APIPromise> { - return this._client.post('/responses', { body, ...options, stream: body.stream ?? false }) as - | APIPromise - | APIPromise>; + return ( + this._client.post('/responses', { body, ...options, stream: body.stream ?? false }) as + | APIPromise + | APIPromise> + )._thenUnwrap((rsp) => { + if ('object' in rsp && rsp.object === 'response') { + addOutputText(rsp as Response); + } + + return rsp; + }) as APIPromise | APIPromise>; } /** @@ -66,6 +110,25 @@ export class Responses extends APIResource { headers: buildHeaders([{ Accept: '*/*' }, options?.headers]), }); } + + parse>( + body: Params, + options?: RequestOptions, + ): APIPromise> { + return this._client.responses + .create(body, options) + ._thenUnwrap((response) => parseResponse(response as Response, body)); + } + + /** + * Creates a chat completion stream + */ + stream>( + body: Params, + options?: RequestOptions, + ): ResponseStream { + return ResponseStream.createResponse(this._client, body, options); + } } export type ResponseItemsPage = CursorPage; @@ -218,6 +281,8 @@ export interface Response { */ created_at: number; + output_text: string; + /** * An error object returned when the model fails to generate a Response. 
*/ diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 1c0006b18..44881545f 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -27,6 +27,11 @@ export type ChatModel = | 'o1-preview-2024-09-12' | 'o1-mini' | 'o1-mini-2024-09-12' + | 'computer-use-preview' + | 'computer-use-preview-2025-02-04' + | 'computer-use-preview-2025-03-11' + | 'gpt-4.5-preview' + | 'gpt-4.5-preview-2025-02-27' | 'gpt-4o' | 'gpt-4o-2024-11-20' | 'gpt-4o-2024-08-06' diff --git a/src/resources/vector-stores/file-batches.ts b/src/resources/vector-stores/file-batches.ts index 7471b5d43..e7428000e 100644 --- a/src/resources/vector-stores/file-batches.ts +++ b/src/resources/vector-stores/file-batches.ts @@ -9,6 +9,9 @@ import { CursorPage, type CursorPageParams, PagePromise } from '../../core/pagin import { buildHeaders } from '../../internal/headers'; import { RequestOptions } from '../../internal/request-options'; import { path } from '../../internal/utils/path'; +import { allSettledWithThrow } from '../../lib/util'; +import { sleep } from '../../internal/utils/sleep'; +import { Uploadable } from '../../internal/uploads'; export class FileBatches extends APIResource { /** From 256ebfcb1bbd4666fe0805e86298082822cc6da1 Mon Sep 17 00:00:00 2001 From: Em Date: Mon, 21 Apr 2025 14:25:11 -0400 Subject: [PATCH 58/73] chore(internal): run CI on update-specs branch --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 62dddec02..1927329d2 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -3,6 +3,7 @@ on: push: branches: - main + - update-specs pull_request: branches: - main From 3aa6da28bbe28f2e13b91206143902f2eaf671ba Mon Sep 17 00:00:00 2001 From: Em Date: Mon, 21 Apr 2025 14:35:32 -0400 Subject: [PATCH 59/73] chore(internal): update next.js --- ecosystem-tests/vercel-edge/package-lock.json | 2779 +++++++++++------ ecosystem-tests/vercel-edge/package.json | 2 +- examples/package.json | 2 +- 3 files changed, 1749 insertions(+), 1034 deletions(-) diff --git a/ecosystem-tests/vercel-edge/package-lock.json b/ecosystem-tests/vercel-edge/package-lock.json index bc820a010..c16da1275 100644 --- a/ecosystem-tests/vercel-edge/package-lock.json +++ b/ecosystem-tests/vercel-edge/package-lock.json @@ -9,7 +9,7 @@ "version": "0.1.0", "dependencies": { "ai": "2.1.34", - "next": "14.1.1", + "next": "14.2.25", "react": "18.2.0", "react-dom": "18.2.0" }, @@ -27,127 +27,61 @@ } }, "node_modules/@ampproject/remapping": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz", - "integrity": "sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==", + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", + "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", + "license": "Apache-2.0", "dependencies": { - "@jridgewell/gen-mapping": "^0.3.0", - "@jridgewell/trace-mapping": "^0.3.9" + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" }, "engines": { "node": ">=6.0.0" } }, "node_modules/@babel/code-frame": { - "version": "7.22.13", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", - "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==", + "version": "7.26.2", + "resolved": 
"https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.26.2.tgz", + "integrity": "sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==", "dev": true, + "license": "MIT", "dependencies": { - "@babel/highlight": "^7.22.13", - "chalk": "^2.4.2" + "@babel/helper-validator-identifier": "^7.25.9", + "js-tokens": "^4.0.0", + "picocolors": "^1.0.0" }, "engines": { "node": ">=6.9.0" } }, - "node_modules/@babel/code-frame/node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dev": true, - "dependencies": { - "color-convert": "^1.9.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/code-frame/node_modules/chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "dev": true, - "dependencies": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/code-frame/node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dev": true, - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/@babel/code-frame/node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", - "dev": true - }, - "node_modules/@babel/code-frame/node_modules/escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", - "dev": true, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/@babel/code-frame/node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/code-frame/node_modules/supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "dev": true, - "dependencies": { - "has-flag": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, "node_modules/@babel/compat-data": { - "version": "7.22.9", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.22.9.tgz", - "integrity": "sha512-5UamI7xkUcJ3i9qVDS+KFDEK8/7oJ55/sJMB1Ge7IEapr7KfdfV/HErR+koZwOfd+SgtFKOKRhRakdg++DcJpQ==", + "version": "7.26.8", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.26.8.tgz", + "integrity": "sha512-oH5UPLMWR3L2wEFLnFJ1TZXqHufiTKAiLfqw5zkhS4dKXLJ10yVztfil/twG8EDTA4F/tvVNw9nOl4ZMslB8rQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=6.9.0" } }, 
"node_modules/@babel/core": { - "version": "7.22.15", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.22.15.tgz", - "integrity": "sha512-PtZqMmgRrvj8ruoEOIwVA3yoF91O+Hgw9o7DAUTNBA6Mo2jpu31clx9a7Nz/9JznqetTR6zwfC4L3LAjKQXUwA==", + "version": "7.26.10", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.26.10.tgz", + "integrity": "sha512-vMqyb7XCDMPvJFFOaT9kxtiRh42GwlZEg1/uIgtZshS5a/8OaduUfCi7kynKgc3Tw/6Uo2D+db9qBttghhmxwQ==", "dev": true, + "license": "MIT", "dependencies": { "@ampproject/remapping": "^2.2.0", - "@babel/code-frame": "^7.22.13", - "@babel/generator": "^7.22.15", - "@babel/helper-compilation-targets": "^7.22.15", - "@babel/helper-module-transforms": "^7.22.15", - "@babel/helpers": "^7.22.15", - "@babel/parser": "^7.22.15", - "@babel/template": "^7.22.15", - "@babel/traverse": "^7.22.15", - "@babel/types": "^7.22.15", - "convert-source-map": "^1.7.0", + "@babel/code-frame": "^7.26.2", + "@babel/generator": "^7.26.10", + "@babel/helper-compilation-targets": "^7.26.5", + "@babel/helper-module-transforms": "^7.26.0", + "@babel/helpers": "^7.26.10", + "@babel/parser": "^7.26.10", + "@babel/template": "^7.26.9", + "@babel/traverse": "^7.26.10", + "@babel/types": "^7.26.10", + "convert-source-map": "^2.0.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", "json5": "^2.2.3", @@ -161,45 +95,33 @@ "url": "https://opencollective.com/babel" } }, - "node_modules/@babel/core/node_modules/convert-source-map": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.9.0.tgz", - "integrity": "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==", - "dev": true - }, - "node_modules/@babel/core/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "dev": true, - "bin": { - "semver": "bin/semver.js" - } - }, "node_modules/@babel/generator": { - "version": "7.22.15", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.22.15.tgz", - "integrity": "sha512-Zu9oWARBqeVOW0dZOjXc3JObrzuqothQ3y/n1kUtrjCoCPLkXUwMvOo/F/TCfoHMbWIFlWwpZtkZVb9ga4U2pA==", + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.27.0.tgz", + "integrity": "sha512-VybsKvpiN1gU1sdMZIp7FcqphVVKEwcuj02x73uvcHE0PTihx1nlBcowYWhDwjpoAXRv43+gDzyggGnn1XZhVw==", "dev": true, + "license": "MIT", "dependencies": { - "@babel/types": "^7.22.15", - "@jridgewell/gen-mapping": "^0.3.2", - "@jridgewell/trace-mapping": "^0.3.17", - "jsesc": "^2.5.1" + "@babel/parser": "^7.27.0", + "@babel/types": "^7.27.0", + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25", + "jsesc": "^3.0.2" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-compilation-targets": { - "version": "7.22.15", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.15.tgz", - "integrity": "sha512-y6EEzULok0Qvz8yyLkCvVX+02ic+By2UdOhylwUOvOn9dvYc9mKICJuuU1n1XBI02YWsNsnrY1kc6DVbjcXbtw==", + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.0.tgz", + "integrity": "sha512-LVk7fbXml0H2xH34dFzKQ7TDZ2G4/rVTOrq9V+icbbadjbVxxeFeDsNHv2SrZeWoA+6ZiTyWYWtScEIW07EAcA==", "dev": true, + "license": "MIT", "dependencies": { - "@babel/compat-data": "^7.22.9", - 
"@babel/helper-validator-option": "^7.22.15", - "browserslist": "^4.21.9", + "@babel/compat-data": "^7.26.8", + "@babel/helper-validator-option": "^7.25.9", + "browserslist": "^4.24.0", "lru-cache": "^5.1.1", "semver": "^6.3.1" }, @@ -207,87 +129,30 @@ "node": ">=6.9.0" } }, - "node_modules/@babel/helper-compilation-targets/node_modules/lru-cache": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", - "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", - "dev": true, - "dependencies": { - "yallist": "^3.0.2" - } - }, - "node_modules/@babel/helper-compilation-targets/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "dev": true, - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/helper-compilation-targets/node_modules/yallist": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", - "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", - "dev": true - }, - "node_modules/@babel/helper-environment-visitor": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.5.tgz", - "integrity": "sha512-XGmhECfVA/5sAt+H+xpSg0mfrHq6FzNr9Oxh7PSEBBRUb/mL7Kz3NICXb194rCqAEdxkhPT1a88teizAFyvk8Q==", - "dev": true, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-function-name": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.22.5.tgz", - "integrity": "sha512-wtHSq6jMRE3uF2otvfuD3DIvVhOsSNshQl0Qrd7qC9oQJzHvOL4qQXlQn2916+CXGywIjpGuIkoyZRRxHPiNQQ==", - "dev": true, - "dependencies": { - "@babel/template": "^7.22.5", - "@babel/types": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-hoist-variables": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", - "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", - "dev": true, - "dependencies": { - "@babel/types": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - } - }, "node_modules/@babel/helper-module-imports": { - "version": "7.22.15", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.15.tgz", - "integrity": "sha512-0pYVBnDKZO2fnSPCrgM/6WMc7eS20Fbok+0r88fp+YtWVLZrp4CkafFGIp+W0VKw4a22sgebPT99y+FDNMdP4w==", + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.25.9.tgz", + "integrity": "sha512-tnUA4RsrmflIM6W6RFTLFSXITtl0wKjgpnLgXyowocVPrbYrLUXSBXDgTs8BlbmIzIdlBySRQjINYs2BAkiLtw==", "dev": true, + "license": "MIT", "dependencies": { - "@babel/types": "^7.22.15" + "@babel/traverse": "^7.25.9", + "@babel/types": "^7.25.9" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-module-transforms": { - "version": "7.22.15", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.22.15.tgz", - "integrity": "sha512-l1UiX4UyHSFsYt17iQ3Se5pQQZZHa22zyIXURmvkmLCD4t/aU+dvNWHatKac/D9Vm9UES7nvIqHs4jZqKviUmQ==", + "version": "7.26.0", + "resolved": 
"https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.26.0.tgz", + "integrity": "sha512-xO+xu6B5K2czEnQye6BHA7DolFFmS3LB7stHZFaOLb1pAwO1HWLS8fXA+eh0A2yIvltPVmx3eNNDBJA2SLHXFw==", "dev": true, + "license": "MIT", "dependencies": { - "@babel/helper-environment-visitor": "^7.22.5", - "@babel/helper-module-imports": "^7.22.15", - "@babel/helper-simple-access": "^7.22.5", - "@babel/helper-split-export-declaration": "^7.22.6", - "@babel/helper-validator-identifier": "^7.22.15" + "@babel/helper-module-imports": "^7.25.9", + "@babel/helper-validator-identifier": "^7.25.9", + "@babel/traverse": "^7.25.9" }, "engines": { "node": ">=6.9.0" @@ -297,168 +162,65 @@ } }, "node_modules/@babel/helper-plugin-utils": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz", - "integrity": "sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg==", - "dev": true, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-simple-access": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz", - "integrity": "sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w==", - "dev": true, - "dependencies": { - "@babel/types": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-split-export-declaration": { - "version": "7.22.6", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", - "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", + "version": "7.26.5", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.26.5.tgz", + "integrity": "sha512-RS+jZcRdZdRFzMyr+wcsaqOmld1/EqTghfaBGQQd/WnRdzdlvSZ//kF7U8VQTxf1ynZ4cjUcYgjVGx13ewNPMg==", "dev": true, - "dependencies": { - "@babel/types": "^7.22.5" - }, + "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-string-parser": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz", - "integrity": "sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==", - "dev": true, + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.25.9.tgz", + "integrity": "sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA==", + "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.22.15", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.15.tgz", - "integrity": "sha512-4E/F9IIEi8WR94324mbDUMo074YTheJmd7eZF5vITTeYchqAi6sYXRLHUVsmkdmY4QjfKTcB2jB7dVP3NaBElQ==", - "dev": true, + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz", + "integrity": "sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==", + "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-option": { - "version": "7.22.15", - "resolved": 
"https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.22.15.tgz", - "integrity": "sha512-bMn7RmyFjY/mdECUbgn9eoSY4vqvacUnS9i9vGAGttgFWesO6B4CYWA7XlpbWgBt71iv/hfbPlynohStqnu5hA==", + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.25.9.tgz", + "integrity": "sha512-e/zv1co8pp55dNdEcCynfj9X7nyUKUXoUEwfXqaZt0omVOmDe9oOTdKStH4GmAw6zxMFs50ZayuMfHDKlO7Tfw==", "dev": true, + "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helpers": { - "version": "7.22.15", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.22.15.tgz", - "integrity": "sha512-7pAjK0aSdxOwR+CcYAqgWOGy5dcfvzsTIfFTb2odQqW47MDfv14UaJDY6eng8ylM2EaeKXdxaSWESbkmaQHTmw==", - "dev": true, - "dependencies": { - "@babel/template": "^7.22.15", - "@babel/traverse": "^7.22.15", - "@babel/types": "^7.22.15" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/highlight": { - "version": "7.22.13", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.13.tgz", - "integrity": "sha512-C/BaXcnnvBCmHTpz/VGZ8jgtE2aYlW4hxDhseJAWZb7gqGM/qtCK6iZUb0TyKFf7BOUsBH7Q7fkRsDRhg1XklQ==", + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.27.0.tgz", + "integrity": "sha512-U5eyP/CTFPuNE3qk+WZMxFkp/4zUzdceQlfzf7DdGdhp+Fezd7HD+i8Y24ZuTMKX3wQBld449jijbGq6OdGNQg==", "dev": true, + "license": "MIT", "dependencies": { - "@babel/helper-validator-identifier": "^7.22.5", - "chalk": "^2.4.2", - "js-tokens": "^4.0.0" + "@babel/template": "^7.27.0", + "@babel/types": "^7.27.0" }, "engines": { "node": ">=6.9.0" } }, - "node_modules/@babel/highlight/node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dev": true, - "dependencies": { - "color-convert": "^1.9.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/highlight/node_modules/chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "dev": true, - "dependencies": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/highlight/node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dev": true, - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/@babel/highlight/node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", - "dev": true - }, - "node_modules/@babel/highlight/node_modules/escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", - "dev": true, - "engines": { - "node": ">=0.8.0" - } - }, - 
"node_modules/@babel/highlight/node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/highlight/node_modules/supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "dev": true, + "node_modules/@babel/parser": { + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.27.0.tgz", + "integrity": "sha512-iaepho73/2Pz7w2eMS0Q5f83+0RKI7i4xmiYeBmDzfRVbQtTOG7Ts0S4HzJVsTMGI9keU8rNfuZr8DKfSt7Yyg==", + "license": "MIT", "dependencies": { - "has-flag": "^3.0.0" + "@babel/types": "^7.27.0" }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/parser": { - "version": "7.22.16", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.22.16.tgz", - "integrity": "sha512-+gPfKv8UWeKKeJTUxe59+OobVcrYHETCsORl61EmSkmgymguYk/X5bp7GuUIXaFsc6y++v8ZxPsLSSuujqDphA==", "bin": { "parser": "bin/babel-parser.js" }, @@ -471,6 +233,7 @@ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", "dev": true, + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -483,6 +246,7 @@ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz", "integrity": "sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==", "dev": true, + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -495,6 +259,7 @@ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", "dev": true, + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.12.13" }, @@ -502,11 +267,44 @@ "@babel/core": "^7.0.0-0" } }, + "node_modules/@babel/plugin-syntax-class-static-block": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", + "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-attributes": { + "version": "7.26.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.26.0.tgz", + "integrity": "sha512-e2dttdsJ1ZTpi3B9UYGLw41hifAubg19AtCu/2I/F1QNVclOBr1dYpTdmdyZ84Xiz43BS/tCUkMAZNLv12Pi+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, "node_modules/@babel/plugin-syntax-import-meta": { "version": "7.10.4", "resolved": 
"https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", "dev": true, + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.10.4" }, @@ -519,6 +317,7 @@ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", "dev": true, + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -527,12 +326,13 @@ } }, "node_modules/@babel/plugin-syntax-jsx": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.23.3.tgz", - "integrity": "sha512-EB2MELswq55OHUoRZLGg/zC7QWUKfNLpE57m/S2yr1uEneIgsTgrSzXP3NXEsMkVn76OlaVVnzN+ugObuYGwhg==", + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.25.9.tgz", + "integrity": "sha512-ld6oezHQMZsZfp6pWtbjaNDF2tiiCYYDqQszHt5VV437lewP9aSi2Of99CK0D0XB21k7FLgnLcmQKyKzynfeAA==", "dev": true, + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-plugin-utils": "^7.25.9" }, "engines": { "node": ">=6.9.0" @@ -546,6 +346,7 @@ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", "dev": true, + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.10.4" }, @@ -558,6 +359,7 @@ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", "dev": true, + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -570,6 +372,7 @@ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", "dev": true, + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.10.4" }, @@ -582,6 +385,7 @@ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", "dev": true, + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -594,6 +398,7 @@ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", "dev": true, + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -606,6 +411,7 @@ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", "dev": true, + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -613,11 +419,28 @@ 
"@babel/core": "^7.0.0-0" } }, + "node_modules/@babel/plugin-syntax-private-property-in-object": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", + "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, "node_modules/@babel/plugin-syntax-top-level-await": { "version": "7.14.5", "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", "dev": true, + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.14.5" }, @@ -629,12 +452,13 @@ } }, "node_modules/@babel/plugin-syntax-typescript": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.23.3.tgz", - "integrity": "sha512-9EiNjVJOMwCO+43TqoTrgQ8jMwcAd0sWyXi9RPfIsLTj4R2MADDDQXELhffaUx/uJv2AYcxBgPwH6j4TIA4ytQ==", + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.25.9.tgz", + "integrity": "sha512-hjMgRy5hb8uJJjUcdWunWVcoi9bGpJp8p5Ol1229PoN6aytsLwNMgmdftO23wnCLMfVmTwZDWMPNq/D1SY60JQ==", "dev": true, + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-plugin-utils": "^7.25.9" }, "engines": { "node": ">=6.9.0" @@ -648,39 +472,39 @@ "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.12.1.tgz", "integrity": "sha512-J5AIf3vPj3UwXaAzb5j1xM4WAQDX3EMgemF8rjCP3SoW09LfRKAXQKt6CoVYl230P6iWdRcBbnLDDdnqWxZSCA==", "dev": true, + "license": "MIT", "dependencies": { "regenerator-runtime": "^0.13.4" } }, "node_modules/@babel/template": { - "version": "7.22.15", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz", - "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==", + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.0.tgz", + "integrity": "sha512-2ncevenBqXI6qRMukPlXwHKHchC7RyMuu4xv5JBXRfOGVcTy1mXCD12qrp7Jsoxll1EV3+9sE4GugBVRjT2jFA==", "dev": true, + "license": "MIT", "dependencies": { - "@babel/code-frame": "^7.22.13", - "@babel/parser": "^7.22.15", - "@babel/types": "^7.22.15" + "@babel/code-frame": "^7.26.2", + "@babel/parser": "^7.27.0", + "@babel/types": "^7.27.0" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/traverse": { - "version": "7.22.15", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.22.15.tgz", - "integrity": "sha512-DdHPwvJY0sEeN4xJU5uRLmZjgMMDIvMPniLuYzUVXj/GGzysPl0/fwt44JBkyUIzGJPV8QgHMcQdQ34XFuKTYQ==", - "dev": true, - "dependencies": { - "@babel/code-frame": "^7.22.13", - "@babel/generator": "^7.22.15", - "@babel/helper-environment-visitor": "^7.22.5", - "@babel/helper-function-name": "^7.22.5", - "@babel/helper-hoist-variables": "^7.22.5", - "@babel/helper-split-export-declaration": "^7.22.6", - "@babel/parser": "^7.22.15", - "@babel/types": "^7.22.15", - "debug": "^4.1.0", + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.27.0.tgz", + "integrity": 
"sha512-19lYZFzYVQkkHkl4Cy4WrAVcqBkgvV2YM2TU3xG6DIwO7O3ecbDPfW3yM3bjAGcqcQHi+CCtjMR3dIEHxsd6bA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.26.2", + "@babel/generator": "^7.27.0", + "@babel/parser": "^7.27.0", + "@babel/template": "^7.27.0", + "@babel/types": "^7.27.0", + "debug": "^4.3.1", "globals": "^11.1.0" }, "engines": { @@ -688,14 +512,13 @@ } }, "node_modules/@babel/types": { - "version": "7.22.15", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.22.15.tgz", - "integrity": "sha512-X+NLXr0N8XXmN5ZsaQdm9U2SSC3UbIYq/doL++sueHOTisgZHoKaQtZxGuV2cUPQHMfjKEfg/g6oy7Hm6SKFtA==", - "dev": true, + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.27.0.tgz", + "integrity": "sha512-H45s8fVLYjbhFH62dIJ3WtmJ6RSPt/3DRO0ZcT2SUiYiQyz3BLVb9ADEnLl91m74aQPS3AzzeajZHYOalWe3bg==", + "license": "MIT", "dependencies": { - "@babel/helper-string-parser": "^7.22.5", - "@babel/helper-validator-identifier": "^7.22.15", - "to-fast-properties": "^2.0.0" + "@babel/helper-string-parser": "^7.25.9", + "@babel/helper-validator-identifier": "^7.25.9" }, "engines": { "node": ">=6.9.0" @@ -705,13 +528,15 @@ "version": "0.2.3", "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/@cspotcode/source-map-support": { "version": "0.8.1", "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==", "dev": true, + "license": "MIT", "dependencies": { "@jridgewell/trace-mapping": "0.3.9" }, @@ -724,6 +549,7 @@ "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz", "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==", "dev": true, + "license": "MIT", "dependencies": { "@jridgewell/resolve-uri": "^3.0.3", "@jridgewell/sourcemap-codec": "^1.4.10" @@ -734,6 +560,7 @@ "resolved": "https://registry.npmjs.org/@edge-runtime/format/-/format-2.2.1.tgz", "integrity": "sha512-JQTRVuiusQLNNLe2W9tnzBlV/GvSVcozLl4XZHk5swnRZ/v6jp8TqR8P7sqmJsQqblDZ3EztcWmLDbhRje/+8g==", "dev": true, + "license": "MPL-2.0", "engines": { "node": ">=16" } @@ -743,6 +570,7 @@ "resolved": "https://registry.npmjs.org/@edge-runtime/node-utils/-/node-utils-2.0.3.tgz", "integrity": "sha512-JUSbi5xu/A8+D2t9B9wfirCI1J8n8q0660FfmqZgA+n3RqxD3y7SnamL1sKRE5/AbHsKs9zcqCbK2YDklbc9Bg==", "dev": true, + "license": "MPL-2.0", "engines": { "node": ">=14" } @@ -752,6 +580,7 @@ "resolved": "https://registry.npmjs.org/@edge-runtime/ponyfill/-/ponyfill-2.4.2.tgz", "integrity": "sha512-oN17GjFr69chu6sDLvXxdhg0Qe8EZviGSuqzR9qOiKh4MhFYGdBBcqRNzdmYeAdeRzOW2mM9yil4RftUQ7sUOA==", "dev": true, + "license": "MPL-2.0", "engines": { "node": ">=16" } @@ -761,6 +590,7 @@ "resolved": "https://registry.npmjs.org/@edge-runtime/primitives/-/primitives-4.1.0.tgz", "integrity": "sha512-Vw0lbJ2lvRUqc7/soqygUX216Xb8T3WBZ987oywz6aJqRxcwSVWwr9e+Nqo2m9bxobA9mdbWNNoRY6S9eko1EQ==", "dev": true, + "license": "MPL-2.0", "engines": { "node": ">=16" } @@ -770,6 +600,7 @@ "resolved": "https://registry.npmjs.org/@edge-runtime/vm/-/vm-3.2.0.tgz", "integrity": "sha512-0dEVyRLM/lG4gp1R/Ik5bfPl/1wX00xFwd5KcNH602tzBa09oF7pbTKETEhR1GjZ75K6OJnYFu8II2dyMhONMw==", "dev": true, + 
"license": "MPL-2.0", "dependencies": { "@edge-runtime/primitives": "4.1.0" }, @@ -781,13 +612,15 @@ "version": "9.3.0", "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", "integrity": "sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ==", - "dev": true + "dev": true, + "license": "BSD-3-Clause" }, "node_modules/@hapi/topo": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/@hapi/topo/-/topo-5.1.0.tgz", "integrity": "sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==", "dev": true, + "license": "BSD-3-Clause", "dependencies": { "@hapi/hoek": "^9.0.0" } @@ -797,6 +630,7 @@ "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==", "dev": true, + "license": "ISC", "dependencies": { "camelcase": "^5.3.1", "find-up": "^4.1.0", @@ -813,6 +647,7 @@ "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } @@ -822,6 +657,7 @@ "resolved": "https://registry.npmjs.org/@jest/console/-/console-29.7.0.tgz", "integrity": "sha512-5Ni4CU7XHQi32IJ398EEP4RrB8eV09sXP2ROqD4bksHrnTree52PsxvX8tpL8LvTZ3pFzXyPbNQReSN41CAhOg==", "dev": true, + "license": "MIT", "dependencies": { "@jest/types": "^29.6.3", "@types/node": "*", @@ -839,6 +675,7 @@ "resolved": "https://registry.npmjs.org/@jest/core/-/core-29.7.0.tgz", "integrity": "sha512-n7aeXWKMnGtDA48y8TLWJPJmLmmZ642Ceo78cYWEpiD7FzDgmNDV/GCVRorPABdXLJZ/9wzzgZAlHjXjxDHGsg==", "dev": true, + "license": "MIT", "dependencies": { "@jest/console": "^29.7.0", "@jest/reporters": "^29.7.0", @@ -886,6 +723,7 @@ "resolved": "https://registry.npmjs.org/@jest/environment/-/environment-29.7.0.tgz", "integrity": "sha512-aQIfHDq33ExsN4jP1NWGXhxgQ/wixs60gDiKO+XVMd8Mn0NWPWgc34ZQDTb2jKaUWQ7MuwoitXAsN2XVXNMpAw==", "dev": true, + "license": "MIT", "dependencies": { "@jest/fake-timers": "^29.7.0", "@jest/types": "^29.6.3", @@ -901,6 +739,7 @@ "resolved": "https://registry.npmjs.org/@jest/expect/-/expect-29.7.0.tgz", "integrity": "sha512-8uMeAMycttpva3P1lBHB8VciS9V0XAr3GymPpipdyQXbBcuhkLQOSe8E/p92RyAdToS6ZD1tFkX+CkhoECE0dQ==", "dev": true, + "license": "MIT", "dependencies": { "expect": "^29.7.0", "jest-snapshot": "^29.7.0" @@ -914,6 +753,7 @@ "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.7.0.tgz", "integrity": "sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA==", "dev": true, + "license": "MIT", "dependencies": { "jest-get-type": "^29.6.3" }, @@ -926,6 +766,7 @@ "resolved": "https://registry.npmjs.org/@jest/fake-timers/-/fake-timers-29.7.0.tgz", "integrity": "sha512-q4DH1Ha4TTFPdxLsqDXK1d3+ioSL7yL5oCMJZgDYm6i+6CygW5E5xVr/D1HdsGxjt1ZWSfUAs9OxSB/BNelWrQ==", "dev": true, + "license": "MIT", "dependencies": { "@jest/types": "^29.6.3", "@sinonjs/fake-timers": "^10.0.2", @@ -943,6 +784,7 @@ "resolved": "https://registry.npmjs.org/@jest/globals/-/globals-29.7.0.tgz", "integrity": "sha512-mpiz3dutLbkW2MNFubUGUEVLkTGiqW6yLVTA+JbP6fI6J5iL9Y0Nlg8k95pcF8ctKwCS7WVxteBs29hhfAotzQ==", "dev": true, + "license": "MIT", "dependencies": { "@jest/environment": "^29.7.0", "@jest/expect": "^29.7.0", @@ -958,6 +800,7 @@ "resolved": 
"https://registry.npmjs.org/@jest/reporters/-/reporters-29.7.0.tgz", "integrity": "sha512-DApq0KJbJOEzAFYjHADNNxAE3KbhxQB1y5Kplb5Waqw6zVbuWatSnMjE5gs8FUgEPmNsnZA3NCWl9NG0ia04Pg==", "dev": true, + "license": "MIT", "dependencies": { "@bcoe/v8-coverage": "^0.2.3", "@jest/console": "^29.7.0", @@ -1001,6 +844,7 @@ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", "dev": true, + "license": "MIT", "dependencies": { "@sinclair/typebox": "^0.27.8" }, @@ -1008,17 +852,12 @@ "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@jest/schemas/node_modules/@sinclair/typebox": { - "version": "0.27.8", - "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", - "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", - "dev": true - }, "node_modules/@jest/source-map": { "version": "29.6.3", "resolved": "https://registry.npmjs.org/@jest/source-map/-/source-map-29.6.3.tgz", "integrity": "sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw==", "dev": true, + "license": "MIT", "dependencies": { "@jridgewell/trace-mapping": "^0.3.18", "callsites": "^3.0.0", @@ -1033,6 +872,7 @@ "resolved": "https://registry.npmjs.org/@jest/test-result/-/test-result-29.7.0.tgz", "integrity": "sha512-Fdx+tv6x1zlkJPcWXmMDAG2HBnaR9XPSd5aDWQVsfrZmLVT3lU1cwyxLgRmXR9yrq4NBoEm9BMsfgFzTQAbJYA==", "dev": true, + "license": "MIT", "dependencies": { "@jest/console": "^29.7.0", "@jest/types": "^29.6.3", @@ -1048,6 +888,7 @@ "resolved": "https://registry.npmjs.org/@jest/test-sequencer/-/test-sequencer-29.7.0.tgz", "integrity": "sha512-GQwJ5WZVrKnOJuiYiAF52UNUJXgTZx1NHjFSEB0qEMmSZKAkdMoIzw/Cj6x6NF4AvV23AUqDpFzQkN/eYCYTxw==", "dev": true, + "license": "MIT", "dependencies": { "@jest/test-result": "^29.7.0", "graceful-fs": "^4.2.9", @@ -1063,6 +904,7 @@ "resolved": "https://registry.npmjs.org/@jest/transform/-/transform-29.7.0.tgz", "integrity": "sha512-ok/BTPFzFKVMwO5eOHRrvnBVHdRy9IrsrW1GpMaQ9MCnilNLXQKmAX8s1YXDFaai9xJpac2ySzV0YeRRECr2Vw==", "dev": true, + "license": "MIT", "dependencies": { "@babel/core": "^7.11.6", "@jest/types": "^29.6.3", @@ -1089,6 +931,7 @@ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", "dev": true, + "license": "MIT", "dependencies": { "@jest/schemas": "^29.6.3", "@types/istanbul-lib-coverage": "^2.0.0", @@ -1102,43 +945,48 @@ } }, "node_modules/@jridgewell/gen-mapping": { - "version": "0.3.3", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz", - "integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==", + "version": "0.3.8", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz", + "integrity": "sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==", + "license": "MIT", "dependencies": { - "@jridgewell/set-array": "^1.0.1", + "@jridgewell/set-array": "^1.2.1", "@jridgewell/sourcemap-codec": "^1.4.10", - "@jridgewell/trace-mapping": "^0.3.9" + "@jridgewell/trace-mapping": "^0.3.24" }, "engines": { "node": ">=6.0.0" } }, "node_modules/@jridgewell/resolve-uri": { - "version": "3.1.1", - "resolved": 
"https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.1.tgz", - "integrity": "sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==", + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "license": "MIT", "engines": { "node": ">=6.0.0" } }, "node_modules/@jridgewell/set-array": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", - "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", + "license": "MIT", "engines": { "node": ">=6.0.0" } }, "node_modules/@jridgewell/sourcemap-codec": { - "version": "1.4.15", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", - "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==" + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", + "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", + "license": "MIT" }, "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.19", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.19.tgz", - "integrity": "sha512-kf37QtfW+Hwx/buWGMPcR60iF9ziHa6r/CZJIHbmcm4+0qrXiVdxegAH0F6yddEVQ7zdkjcGCgCzUu+BcbhQxw==", + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "license": "MIT", "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", "@jridgewell/sourcemap-codec": "^1.4.14" @@ -1149,6 +997,7 @@ "resolved": "https://registry.npmjs.org/@mapbox/node-pre-gyp/-/node-pre-gyp-1.0.11.tgz", "integrity": "sha512-Yhlar6v9WQgUp/He7BdgzOz8lqMQ8sU+jkCq7Wx8Myc5YFJLbEe7lgui/V7G1qB1DJykHSGwreceSaD60Y0PUQ==", "dev": true, + "license": "BSD-3-Clause", "dependencies": { "detect-libc": "^2.0.0", "https-proxy-agent": "^5.0.0", @@ -1164,14 +1013,38 @@ "node-pre-gyp": "bin/node-pre-gyp" } }, - "node_modules/@mapbox/node-pre-gyp/node_modules/semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "node_modules/@mapbox/node-pre-gyp/node_modules/make-dir": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", + "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", "dev": true, + "license": "MIT", "dependencies": { - "lru-cache": "^6.0.0" + "semver": "^6.0.0" + }, + "engines": { + "node": ">=8" }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@mapbox/node-pre-gyp/node_modules/make-dir/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": 
"sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@mapbox/node-pre-gyp/node_modules/semver": { + "version": "7.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.1.tgz", + "integrity": "sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==", + "dev": true, + "license": "ISC", "bin": { "semver": "bin/semver.js" }, @@ -1180,17 +1053,19 @@ } }, "node_modules/@next/env": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/env/-/env-14.1.1.tgz", - "integrity": "sha512-7CnQyD5G8shHxQIIg3c7/pSeYFeMhsNbpU/bmvH7ZnDql7mNRgg8O2JZrhrc/soFnfBnKP4/xXNiiSIPn2w8gA==" + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.25.tgz", + "integrity": "sha512-JnzQ2cExDeG7FxJwqAksZ3aqVJrHjFwZQAEJ9gQZSoEhIow7SNoKZzju/AwQ+PLIR4NY8V0rhcVozx/2izDO0w==", + "license": "MIT" }, "node_modules/@next/swc-darwin-arm64": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.1.1.tgz", - "integrity": "sha512-yDjSFKQKTIjyT7cFv+DqQfW5jsD+tVxXTckSe1KIouKk75t1qZmj/mV3wzdmFb0XHVGtyRjDMulfVG8uCKemOQ==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.25.tgz", + "integrity": "sha512-09clWInF1YRd6le00vt750s3m7SEYNehz9C4PUcSu3bAdCTpjIV4aTYQZ25Ehrr83VR1rZeqtKUPWSI7GfuKZQ==", "cpu": [ "arm64" ], + "license": "MIT", "optional": true, "os": [ "darwin" @@ -1200,12 +1075,13 @@ } }, "node_modules/@next/swc-darwin-x64": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.1.1.tgz", - "integrity": "sha512-KCQmBL0CmFmN8D64FHIZVD9I4ugQsDBBEJKiblXGgwn7wBCSe8N4Dx47sdzl4JAg39IkSN5NNrr8AniXLMb3aw==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.25.tgz", + "integrity": "sha512-V+iYM/QR+aYeJl3/FWWU/7Ix4b07ovsQ5IbkwgUK29pTHmq+5UxeDr7/dphvtXEq5pLB/PucfcBNh9KZ8vWbug==", "cpu": [ "x64" ], + "license": "MIT", "optional": true, "os": [ "darwin" @@ -1215,12 +1091,13 @@ } }, "node_modules/@next/swc-linux-arm64-gnu": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.1.1.tgz", - "integrity": "sha512-YDQfbWyW0JMKhJf/T4eyFr4b3tceTorQ5w2n7I0mNVTFOvu6CGEzfwT3RSAQGTi/FFMTFcuspPec/7dFHuP7Eg==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.25.tgz", + "integrity": "sha512-LFnV2899PJZAIEHQ4IMmZIgL0FBieh5keMnriMY1cK7ompR+JUd24xeTtKkcaw8QmxmEdhoE5Mu9dPSuDBgtTg==", "cpu": [ "arm64" ], + "license": "MIT", "optional": true, "os": [ "linux" @@ -1230,12 +1107,13 @@ } }, "node_modules/@next/swc-linux-arm64-musl": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.1.1.tgz", - "integrity": "sha512-fiuN/OG6sNGRN/bRFxRvV5LyzLB8gaL8cbDH5o3mEiVwfcMzyE5T//ilMmaTrnA8HLMS6hoz4cHOu6Qcp9vxgQ==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.25.tgz", + "integrity": "sha512-QC5y5PPTmtqFExcKWKYgUNkHeHE/z3lUsu83di488nyP0ZzQ3Yse2G6TCxz6nNsQwgAx1BehAJTZez+UQxzLfw==", "cpu": [ "arm64" ], + "license": "MIT", "optional": true, "os": [ "linux" @@ -1245,12 +1123,13 @@ } }, "node_modules/@next/swc-linux-x64-gnu": { - "version": 
"14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.1.1.tgz", - "integrity": "sha512-rv6AAdEXoezjbdfp3ouMuVqeLjE1Bin0AuE6qxE6V9g3Giz5/R3xpocHoAi7CufRR+lnkuUjRBn05SYJ83oKNQ==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.25.tgz", + "integrity": "sha512-y6/ML4b9eQ2D/56wqatTJN5/JR8/xdObU2Fb1RBidnrr450HLCKr6IJZbPqbv7NXmje61UyxjF5kvSajvjye5w==", "cpu": [ "x64" ], + "license": "MIT", "optional": true, "os": [ "linux" @@ -1260,12 +1139,13 @@ } }, "node_modules/@next/swc-linux-x64-musl": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.1.1.tgz", - "integrity": "sha512-YAZLGsaNeChSrpz/G7MxO3TIBLaMN8QWMr3X8bt6rCvKovwU7GqQlDu99WdvF33kI8ZahvcdbFsy4jAFzFX7og==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.25.tgz", + "integrity": "sha512-sPX0TSXHGUOZFvv96GoBXpB3w4emMqKeMgemrSxI7A6l55VBJp/RKYLwZIB9JxSqYPApqiREaIIap+wWq0RU8w==", "cpu": [ "x64" ], + "license": "MIT", "optional": true, "os": [ "linux" @@ -1275,12 +1155,13 @@ } }, "node_modules/@next/swc-win32-arm64-msvc": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.1.1.tgz", - "integrity": "sha512-1L4mUYPBMvVDMZg1inUYyPvFSduot0g73hgfD9CODgbr4xiTYe0VOMTZzaRqYJYBA9mana0x4eaAaypmWo1r5A==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.25.tgz", + "integrity": "sha512-ReO9S5hkA1DU2cFCsGoOEp7WJkhFzNbU/3VUF6XxNGUCQChyug6hZdYL/istQgfT/GWE6PNIg9cm784OI4ddxQ==", "cpu": [ "arm64" ], + "license": "MIT", "optional": true, "os": [ "win32" @@ -1290,12 +1171,13 @@ } }, "node_modules/@next/swc-win32-ia32-msvc": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.1.1.tgz", - "integrity": "sha512-jvIE9tsuj9vpbbXlR5YxrghRfMuG0Qm/nZ/1KDHc+y6FpnZ/apsgh+G6t15vefU0zp3WSpTMIdXRUsNl/7RSuw==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.25.tgz", + "integrity": "sha512-DZ/gc0o9neuCDyD5IumyTGHVun2dCox5TfPQI/BJTYwpSNYM3CZDI4i6TOdjeq1JMo+Ug4kPSMuZdwsycwFbAw==", "cpu": [ "ia32" ], + "license": "MIT", "optional": true, "os": [ "win32" @@ -1305,12 +1187,13 @@ } }, "node_modules/@next/swc-win32-x64-msvc": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.1.1.tgz", - "integrity": "sha512-S6K6EHDU5+1KrBDLko7/c1MNy/Ya73pIAmvKeFwsF4RmBFJSO7/7YeD4FnZ4iBdzE69PpQ4sOMU9ORKeNuxe8A==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.25.tgz", + "integrity": "sha512-KSznmS6eFjQ9RJ1nEc66kJvtGIL1iZMYmGEXsZPh2YtnLtqrgdVvKXJY2ScjjoFnG6nGLyPFR0UiEvDwVah4Tw==", "cpu": [ "x64" ], + "license": "MIT", "optional": true, "os": [ "win32" @@ -1324,6 +1207,7 @@ "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", "dev": true, + "license": "MIT", "dependencies": { "@nodelib/fs.stat": "2.0.5", "run-parallel": "^1.1.9" @@ -1337,6 +1221,7 @@ "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", "integrity": 
"sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", "dev": true, + "license": "MIT", "engines": { "node": ">= 8" } @@ -1346,6 +1231,7 @@ "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", "dev": true, + "license": "MIT", "dependencies": { "@nodelib/fs.scandir": "2.1.5", "fastq": "^1.6.0" @@ -1359,6 +1245,7 @@ "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-4.2.1.tgz", "integrity": "sha512-iKnFXr7NkdZAIHiIWE+BX5ULi/ucVFYWD6TbAV+rZctiRTY2PL6tsIKhoIOaoskiWAkgu+VsbXgUVDNLHf+InQ==", "dev": true, + "license": "MIT", "dependencies": { "estree-walker": "^2.0.1", "picomatch": "^2.2.2" @@ -1371,13 +1258,15 @@ "version": "2.0.2", "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/@sideway/address": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/@sideway/address/-/address-4.1.4.tgz", - "integrity": "sha512-7vwq+rOHVWjyXxVlR76Agnvhy8I9rpzjosTESvmhNeXOXdZZB15Fl+TI9x1SiHZH5Jv2wTGduSxFDIaq0m3DUw==", + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/@sideway/address/-/address-4.1.5.tgz", + "integrity": "sha512-IqO/DUQHUkPeixNQ8n0JA6102hT9CmaljNTPmQ1u8MEhBo/R4Q8eKLN/vGZxuebwOroDB4cbpjheD4+/sKFK4Q==", "dev": true, + "license": "BSD-3-Clause", "dependencies": { "@hapi/hoek": "^9.0.0" } @@ -1386,25 +1275,29 @@ "version": "3.0.1", "resolved": "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.1.tgz", "integrity": "sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg==", - "dev": true + "dev": true, + "license": "BSD-3-Clause" }, "node_modules/@sideway/pinpoint": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/@sideway/pinpoint/-/pinpoint-2.0.0.tgz", "integrity": "sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ==", - "dev": true + "dev": true, + "license": "BSD-3-Clause" }, "node_modules/@sinclair/typebox": { - "version": "0.25.24", - "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.25.24.tgz", - "integrity": "sha512-XJfwUVUKDHF5ugKwIcxEgc9k8b7HbznCp6eUfWgu710hMPNIO4aw4/zB5RogDQz8nd6gyCDpU9O/m6qYEWY6yQ==", - "dev": true + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" }, "node_modules/@sinonjs/commons": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-3.0.0.tgz", - "integrity": "sha512-jXBtWAF4vmdNmZgD5FoKsVLv3rPgDnLgPbU84LIJ3otV44vJlDRokVng5v8NFJdCf/da9legHcKaRuZs4L7faA==", + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-3.0.1.tgz", + "integrity": "sha512-K3mCHKQ9sVh8o1C9cxkwxaOmXoAMlDxC1mYyHrjqOWEcBjYr76t96zL2zlj5dUGZ3HSw240X1qgH3Mjf1yJWpQ==", "dev": true, + "license": "BSD-3-Clause", "dependencies": { "type-detect": "4.0.8" } @@ -1414,15 +1307,24 @@ "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-10.3.0.tgz", "integrity": "sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA==", 
"dev": true, + "license": "BSD-3-Clause", "dependencies": { "@sinonjs/commons": "^3.0.0" } }, + "node_modules/@swc/counter": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", + "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==", + "license": "Apache-2.0" + }, "node_modules/@swc/helpers": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.2.tgz", - "integrity": "sha512-E4KcWTpoLHqwPHLxidpOqQbcrZVgi0rsmmZXUle1jXmJfuIf/UWpczUJ7MZZ5tlxytgJXyp0w4PGkkeLiuIdZw==", + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.5.tgz", + "integrity": "sha512-KGYxvIOXcceOAbEk4bi/dVLEK9z8sZ0uBB3Il5b1rhfClSpcX0yfRO0KmTkqR2cnQDymwLB+25ZyMzICg/cm/A==", + "license": "Apache-2.0", "dependencies": { + "@swc/counter": "^0.1.3", "tslib": "^2.4.0" } }, @@ -1431,6 +1333,7 @@ "resolved": "https://registry.npmjs.org/@ts-morph/common/-/common-0.11.1.tgz", "integrity": "sha512-7hWZS0NRpEsNV8vWJzg7FEz6V8MaLNeJOmwmghqUXTpzk16V1LLZhdo+4QvE/+zv4cVci0OviuJFnqhEfoV3+g==", "dev": true, + "license": "MIT", "dependencies": { "fast-glob": "^3.2.7", "minimatch": "^3.0.4", @@ -1439,34 +1342,39 @@ } }, "node_modules/@tsconfig/node10": { - "version": "1.0.9", - "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.9.tgz", - "integrity": "sha512-jNsYVVxU8v5g43Erja32laIDHXeoNvFEpX33OK4d6hljo3jDhCBDhx5dhCCTMWUojscpAagGiRkBKxpdl9fxqA==", - "dev": true + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.11.tgz", + "integrity": "sha512-DcRjDCujK/kCk/cUe8Xz8ZSpm8mS3mNNpta+jGCA6USEDfktlNvm1+IuZ9eTcDbNk41BHwpHHeW+N1lKCz4zOw==", + "dev": true, + "license": "MIT" }, "node_modules/@tsconfig/node12": { "version": "1.0.11", "resolved": "https://registry.npmjs.org/@tsconfig/node12/-/node12-1.0.11.tgz", "integrity": "sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/@tsconfig/node14": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/@tsconfig/node14/-/node14-1.0.3.tgz", "integrity": "sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/@tsconfig/node16": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz", "integrity": "sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/@types/babel__core": { - "version": "7.20.1", - "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.1.tgz", - "integrity": "sha512-aACu/U/omhdk15O4Nfb+fHgH/z3QsfQzpnvRZhYhThms83ZnAOZz7zZAWO7mn2yyNQaA4xTO8GLK3uqFU4bYYw==", + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", "dev": true, + "license": "MIT", "dependencies": { "@babel/parser": "^7.20.7", "@babel/types": "^7.20.7", @@ -1476,37 +1384,41 @@ } }, "node_modules/@types/babel__generator": { - "version": "7.6.4", - "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.6.4.tgz", - "integrity": 
"sha512-tFkciB9j2K755yrTALxD44McOrk+gfpIpvC3sxHjRawj6PfnQxrse4Clq5y/Rq+G3mrBurMax/lG8Qn2t9mSsg==", + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz", + "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==", "dev": true, + "license": "MIT", "dependencies": { "@babel/types": "^7.0.0" } }, "node_modules/@types/babel__template": { - "version": "7.4.1", - "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.1.tgz", - "integrity": "sha512-azBFKemX6kMg5Io+/rdGT0dkGreboUVR0Cdm3fz9QJWpaQGJRQXl7C+6hOTCZcMll7KFyEQpgbYI2lHdsS4U7g==", + "version": "7.4.4", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", "dev": true, + "license": "MIT", "dependencies": { "@babel/parser": "^7.1.0", "@babel/types": "^7.0.0" } }, "node_modules/@types/babel__traverse": { - "version": "7.20.1", - "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.20.1.tgz", - "integrity": "sha512-MitHFXnhtgwsGZWtT68URpOvLN4EREih1u3QtQiN4VdAxWKRVvGCSvw/Qth0M0Qq3pJpnGOu5JaM/ydK7OGbqg==", + "version": "7.20.7", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.20.7.tgz", + "integrity": "sha512-dkO5fhS7+/oos4ciWxyEyjWe48zmG6wbCheo/G2ZnHx4fs3EU6YC6UM8rk56gAjNJ9P3MTH2jo5jb92/K6wbng==", "dev": true, + "license": "MIT", "dependencies": { "@babel/types": "^7.20.7" } }, "node_modules/@types/estree": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.1.tgz", - "integrity": "sha512-LG4opVs2ANWZ1TJoKc937iMmNstM/d0ae1vNbnBvBhqCSezgVUOzcLCqbI5elV8Vy6WKwKjaqR+zO9VKirBBCA==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.7.tgz", + "integrity": "sha512-w28IoSUCJpidD/TGviZwwMJckNESJZXFu7NBZ5YJ4mEUnNraUn9Pm8HSZm/jDF1pDWYKspWE7oVphigUPRakIQ==", + "license": "MIT", "peer": true }, "node_modules/@types/graceful-fs": { @@ -1514,67 +1426,76 @@ "resolved": "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.9.tgz", "integrity": "sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ==", "dev": true, + "license": "MIT", "dependencies": { "@types/node": "*" } }, "node_modules/@types/istanbul-lib-coverage": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.4.tgz", - "integrity": "sha512-z/QT1XN4K4KYuslS23k62yDIDLwLFkzxOuMplDtObz0+y7VqJCaO2o+SPwHCvLFZh7xazvvoor2tA/hPz9ee7g==", - "dev": true + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", + "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==", + "dev": true, + "license": "MIT" }, "node_modules/@types/istanbul-lib-report": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz", - "integrity": "sha512-plGgXAPfVKFoYfa9NpYDAkseG+g6Jr294RqeqcqDixSbU34MZVJRi/P+7Y8GDpzkEwLaGZZOpKIEmeVZNtKsrg==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz", + "integrity": "sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==", 
"dev": true, + "license": "MIT", "dependencies": { "@types/istanbul-lib-coverage": "*" } }, "node_modules/@types/istanbul-reports": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.1.tgz", - "integrity": "sha512-c3mAZEuK0lvBp8tmuL74XRKn1+y2dcwOUpH7x4WrF6gk1GIgiluDRgMYQtw2OFcBvAJWlt6ASU3tSqxp0Uu0Aw==", + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz", + "integrity": "sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==", "dev": true, + "license": "MIT", "dependencies": { "@types/istanbul-lib-report": "*" } }, "node_modules/@types/json-schema": { - "version": "7.0.12", - "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.12.tgz", - "integrity": "sha512-Hr5Jfhc9eYOQNPYO5WLDq/n4jqijdHNlDXjuAQkkt+mWdQR+XJToOHrsD4cPaMXpn6KO7y2+wM8AZEs8VpBLVA==", - "dev": true + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "dev": true, + "license": "MIT" }, "node_modules/@types/node": { "version": "20.3.3", "resolved": "https://registry.npmjs.org/@types/node/-/node-20.3.3.tgz", "integrity": "sha512-wheIYdr4NYML61AjC8MKj/2jrR/kDQri/CIpVoZwldwhnIrD/j9jIU5bJ8yBKuB2VhpFV7Ab6G2XkBjv9r9Zzw==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/@types/node-fetch": { "version": "2.6.3", "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.3.tgz", "integrity": "sha512-ETTL1mOEdq/sxUtgtOhKjyB2Irra4cjxksvcMUR5Zr4n+PxVhsCD9WS46oPbHL3et9Zde7CNRr+WUNlcHvsX+w==", "dev": true, + "license": "MIT", "dependencies": { "@types/node": "*", "form-data": "^3.0.0" } }, "node_modules/@types/prop-types": { - "version": "15.7.5", - "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.5.tgz", - "integrity": "sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w==", - "dev": true + "version": "15.7.14", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.14.tgz", + "integrity": "sha512-gNMvNH49DJ7OJYv+KAKn0Xp45p8PLl6zo2YnvDIbTd4J6MER2BmWN49TG7n9LvkyihINxeKW8+3bfS2yDC9dzQ==", + "dev": true, + "license": "MIT" }, "node_modules/@types/react": { "version": "18.2.74", "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.74.tgz", "integrity": "sha512-9AEqNZZyBx8OdZpxzQlaFEVCSFUM2YXJH46yPOiOpm078k6ZLOCcuAzGum/zK8YBwY+dbahVNbHrbgrAwIRlqw==", "dev": true, + "license": "MIT", "dependencies": { "@types/prop-types": "*", "csstype": "^3.0.2" @@ -1585,6 +1506,7 @@ "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.23.tgz", "integrity": "sha512-ZQ71wgGOTmDYpnav2knkjr3qXdAFu0vsk8Ci5w3pGAIdj7/kKAyn+VsQDhXsmzzzepAiI9leWMmubXz690AI/A==", "dev": true, + "license": "MIT", "dependencies": { "@types/react": "*" } @@ -1593,40 +1515,46 @@ "version": "2.0.3", "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.3.tgz", "integrity": "sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/@types/yargs": { - "version": "17.0.24", - "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.24.tgz", - "integrity": 
"sha512-6i0aC7jV6QzQB8ne1joVZ0eSFIstHsCrobmOtghM11yGlH0j43FKL2UhWdELkyps0zuf7qVTUVCCR+tgSlyLLw==", + "version": "17.0.33", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.33.tgz", + "integrity": "sha512-WpxBCKWPLr4xSsHgz511rFJAM+wS28w2zEO1QDNY5zM/S8ok70NNfztH0xwhqKyaK0OHCbN98LDAZuy1ctxDkA==", "dev": true, + "license": "MIT", "dependencies": { "@types/yargs-parser": "*" } }, "node_modules/@types/yargs-parser": { - "version": "21.0.0", - "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.0.tgz", - "integrity": "sha512-iO9ZQHkZxHn4mSakYV0vFHAVDyEOIJQrV2uZ06HxEPcx+mt8swXoZHIbaaJ2crJYFfErySgktuTZ3BeLz+XmFA==", - "dev": true + "version": "21.0.3", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.3.tgz", + "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==", + "dev": true, + "license": "MIT" }, "node_modules/@vercel/build-utils": { "version": "6.8.3", "resolved": "https://registry.npmjs.org/@vercel/build-utils/-/build-utils-6.8.3.tgz", "integrity": "sha512-C86OPuPAvG/pSr27DPKecmptkYYsgyhOKdHTLv9jI3Pv1yvru78k+JjrAyn7N+0ev75KNV0Prv4P3p76168ePw==", - "dev": true + "dev": true, + "license": "Apache-2.0" }, "node_modules/@vercel/error-utils": { "version": "1.0.10", "resolved": "https://registry.npmjs.org/@vercel/error-utils/-/error-utils-1.0.10.tgz", "integrity": "sha512-nsKy2sy+pjUWyKI1V/XXKspVzHMYgSalmj5+EsKWFXZbnNZicqxNtMR94J8Hs7SB4TQxh0s4KhczJtL59AVGMg==", - "dev": true + "dev": true, + "license": "Apache-2.0" }, "node_modules/@vercel/gatsby-plugin-vercel-analytics": { "version": "1.0.10", "resolved": "https://registry.npmjs.org/@vercel/gatsby-plugin-vercel-analytics/-/gatsby-plugin-vercel-analytics-1.0.10.tgz", "integrity": "sha512-v329WHdtIce+y7oAmaWRvEx59Xfo0FxlQqK4BJG0u6VWYoKWPaflohDAiehIZf/YHCRVb59ZxnzmMOcm/LR8YQ==", "dev": true, + "license": "Apache-2.0", "dependencies": { "@babel/runtime": "7.12.1", "web-vitals": "0.2.4" @@ -1647,29 +1575,40 @@ "fs-extra": "11.1.0" } }, + "node_modules/@vercel/gatsby-plugin-vercel-builder/node_modules/@sinclair/typebox": { + "version": "0.25.24", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.25.24.tgz", + "integrity": "sha512-XJfwUVUKDHF5ugKwIcxEgc9k8b7HbznCp6eUfWgu710hMPNIO4aw4/zB5RogDQz8nd6gyCDpU9O/m6qYEWY6yQ==", + "dev": true, + "license": "MIT" + }, "node_modules/@vercel/go": { "version": "2.5.1", "resolved": "https://registry.npmjs.org/@vercel/go/-/go-2.5.1.tgz", "integrity": "sha512-yZGzzGmVXt2Rsy1cR0EDbst0fMhdELQY8c3jXy6/FTWJFG1e/40JYksu+WiRCxRBp8e7zfcxMrv0dN8JWRmbPQ==", - "dev": true + "dev": true, + "license": "Apache-2.0" }, "node_modules/@vercel/hydrogen": { "version": "0.0.64", "resolved": "https://registry.npmjs.org/@vercel/hydrogen/-/hydrogen-0.0.64.tgz", "integrity": "sha512-1rzFB664G6Yzp7j4ezW9hvVjqnaU2BhyUdhchbsxtRuxkMpGgPBZKhjzRQHFvlmkz37XLC658T5Nb1P91b4sBw==", - "dev": true + "dev": true, + "license": "Apache-2.0" }, "node_modules/@vercel/next": { "version": "3.9.4", "resolved": "https://registry.npmjs.org/@vercel/next/-/next-3.9.4.tgz", "integrity": "sha512-6qH/dNSEEN2pQW5iVi6RUfjro6v9mxdXLtiRf65gQim89CXfPR9CKcCW3AxcKSkYPX9Q7fPiaEGwTr68fPklCw==", - "dev": true + "dev": true, + "license": "Apache-2.0" }, "node_modules/@vercel/nft": { "version": "0.22.5", "resolved": "https://registry.npmjs.org/@vercel/nft/-/nft-0.22.5.tgz", "integrity": "sha512-mug57Wd1BL7GMj9gXMgMeKUjdqO0e4u+0QLPYMFE1rwdJ+55oPy6lp3nIBCS8gOvigT62UI4QKUL2sGqcoW4Hw==", "dev": true, + "license": "MIT", 
"dependencies": { "@mapbox/node-pre-gyp": "^1.0.5", "@rollup/pluginutils": "^4.0.0", @@ -1694,13 +1633,15 @@ "version": "2.0.2", "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/@vercel/node": { "version": "2.15.10", "resolved": "https://registry.npmjs.org/@vercel/node/-/node-2.15.10.tgz", "integrity": "sha512-IfnqnKAJlL1+0FSDJgxoe9J3kfYAgPGDjz4aO/H5FSjvqP7cKJnns1F9GsQq4pM499+TY8T8mKAdos7/m+WOEw==", "dev": true, + "license": "Apache-2.0", "dependencies": { "@edge-runtime/node-utils": "2.0.3", "@edge-runtime/primitives": "2.1.2", @@ -1727,6 +1668,7 @@ "resolved": "https://registry.npmjs.org/@edge-runtime/format/-/format-2.1.0.tgz", "integrity": "sha512-gc2qbYEIIJRczBApBPznVI1c5vZgzrZQOsFZnAxxFiYah9qldHiu1YEitzSvXI8X8ZgvAguuIiyIbpWz17nlXA==", "dev": true, + "license": "MPL-2.0", "engines": { "node": ">=14" } @@ -1736,6 +1678,7 @@ "resolved": "https://registry.npmjs.org/@edge-runtime/primitives/-/primitives-2.1.2.tgz", "integrity": "sha512-SR04SMDybALlhIYIi0hiuEUwIl0b7Sn+RKwQkX6hydg4+AKMzBNDFhj2nqHDD1+xkHArV9EhmJIb6iGjShwSzg==", "dev": true, + "license": "MPL-2.0", "engines": { "node": ">=14" } @@ -1745,6 +1688,7 @@ "resolved": "https://registry.npmjs.org/@edge-runtime/vm/-/vm-3.0.1.tgz", "integrity": "sha512-69twXLIcqVx0iNlc1vFqnXgka2CZi2c/QBAmMzXBk0M6mPG+ICCBh2dd+cv1K+HW2pfLuSW+EskkFXWGeCf1Vw==", "dev": true, + "license": "MPL-2.0", "dependencies": { "@edge-runtime/primitives": "3.0.1" }, @@ -1757,6 +1701,7 @@ "resolved": "https://registry.npmjs.org/@edge-runtime/primitives/-/primitives-3.0.1.tgz", "integrity": "sha512-l5NNDcPkKW4N6qRmB8zzpCF6uRW1S808V/zm72z7b/aWwZUYbmEPPkzyhGAW0aQxLU1pGdZ8u2gNjamdaU6RXw==", "dev": true, + "license": "MPL-2.0", "engines": { "node": ">=14" } @@ -1765,13 +1710,22 @@ "version": "14.18.33", "resolved": "https://registry.npmjs.org/@types/node/-/node-14.18.33.tgz", "integrity": "sha512-qelS/Ra6sacc4loe/3MSjXNL1dNQ/GjxNHVzuChwMfmk7HuycRLVQN2qNY3XahK+fZc5E2szqQSKUyAF0E+2bg==", - "dev": true + "dev": true, + "license": "MIT" + }, + "node_modules/@vercel/node/node_modules/arg": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz", + "integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==", + "dev": true, + "license": "MIT" }, "node_modules/@vercel/node/node_modules/async-listen": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/async-listen/-/async-listen-3.0.0.tgz", "integrity": "sha512-V+SsTpDqkrWTimiotsyl33ePSjA5/KrithwupuvJ6ztsqPvGv6ge4OredFhPffVXiLN/QUWvE0XcqJaYgt6fOg==", "dev": true, + "license": "MIT", "engines": { "node": ">= 14" } @@ -1781,6 +1735,7 @@ "resolved": "https://registry.npmjs.org/edge-runtime/-/edge-runtime-2.4.4.tgz", "integrity": "sha512-uq1YdIxkMDsBYLdSSp/w62PciCL46ic4m1Z/2G6N8RcAPI8p35O8u6hJQT83j28Dnt4U5iyvmwFMYouHMK51uA==", "dev": true, + "license": "MPL-2.0", "dependencies": { "@edge-runtime/format": "2.1.0", "@edge-runtime/vm": "3.0.3", @@ -1804,6 +1759,7 @@ "resolved": "https://registry.npmjs.org/@edge-runtime/primitives/-/primitives-3.0.3.tgz", "integrity": "sha512-YnfMWMRQABAH8IsnFMJWMW+SyB4ZeYBPnR7V0aqdnew7Pq60cbH5DyFjS/FhiLwvHQk9wBREmXD7PP0HooEQ1A==", "dev": true, + "license": "MPL-2.0", "engines": { "node": ">=14" } @@ -1813,6 +1769,7 @@ "resolved": "https://registry.npmjs.org/@edge-runtime/vm/-/vm-3.0.3.tgz", "integrity": 
"sha512-SPfI1JeIRNs/4EEE2Oc0X6gG3RqjD1TnKu2lwmwFXq0435xgZGKhc3UiKkYAdoMn2dNFD73nlabMKHBRoMRpxg==", "dev": true, + "license": "MPL-2.0", "dependencies": { "@edge-runtime/primitives": "3.0.3" }, @@ -1820,11 +1777,56 @@ "node": ">=14" } }, + "node_modules/@vercel/node/node_modules/ts-node": { + "version": "10.9.1", + "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.1.tgz", + "integrity": "sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@cspotcode/source-map-support": "^0.8.0", + "@tsconfig/node10": "^1.0.7", + "@tsconfig/node12": "^1.0.7", + "@tsconfig/node14": "^1.0.0", + "@tsconfig/node16": "^1.0.2", + "acorn": "^8.4.1", + "acorn-walk": "^8.1.1", + "arg": "^4.1.0", + "create-require": "^1.1.0", + "diff": "^4.0.1", + "make-error": "^1.1.1", + "v8-compile-cache-lib": "^3.0.1", + "yn": "3.1.1" + }, + "bin": { + "ts-node": "dist/bin.js", + "ts-node-cwd": "dist/bin-cwd.js", + "ts-node-esm": "dist/bin-esm.js", + "ts-node-script": "dist/bin-script.js", + "ts-node-transpile-only": "dist/bin-transpile.js", + "ts-script": "dist/bin-script-deprecated.js" + }, + "peerDependencies": { + "@swc/core": ">=1.2.50", + "@swc/wasm": ">=1.2.50", + "@types/node": "*", + "typescript": ">=2.7" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "@swc/wasm": { + "optional": true + } + } + }, "node_modules/@vercel/node/node_modules/typescript": { "version": "4.9.5", "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.9.5.tgz", "integrity": "sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==", "dev": true, + "license": "Apache-2.0", "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" @@ -1837,24 +1839,37 @@ "version": "3.1.60", "resolved": "https://registry.npmjs.org/@vercel/python/-/python-3.1.60.tgz", "integrity": "sha512-1aYinyTfejS8Us+sOum+RQPYcre0vF3XoL7ohL170ZCcHA0l35qV0b1slGAmLt3pqaHKYy3g/nkzUhuR8XXIrQ==", - "dev": true + "dev": true, + "license": "Apache-2.0" }, "node_modules/@vercel/redwood": { "version": "1.1.15", "resolved": "https://registry.npmjs.org/@vercel/redwood/-/redwood-1.1.15.tgz", "integrity": "sha512-j0XaXe4ZpGVHG7XQSmZ3kza6s+ZtOBfRhnSxA70yCkrvPNN3tZgF3fevSKXizfL9fzVDd7Tdj++SCGWMdGfsyA==", "dev": true, + "license": "Apache-2.0", "dependencies": { "@vercel/nft": "0.22.5", "@vercel/routing-utils": "2.2.1", "semver": "6.1.1" } }, + "node_modules/@vercel/redwood/node_modules/semver": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.1.1.tgz", + "integrity": "sha512-rWYq2e5iYW+fFe/oPPtYJxYgjBm8sC4rmoGdUOgBB7VnwKt6HrL793l2voH1UlsyYZpJ4g0wfjnTEO1s1NP2eQ==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver" + } + }, "node_modules/@vercel/remix-builder": { "version": "1.10.1", "resolved": "https://registry.npmjs.org/@vercel/remix-builder/-/remix-builder-1.10.1.tgz", "integrity": "sha512-qkK8Lv9KR4BVmLreKpwtJ9iaKh0NKF9SMZSsT5rLdX8F6EpkayUwSN3EEv4QN/9wFfEb8s1Nf2RY5Pj0zo8Itw==", "dev": true, + "license": "Apache-2.0", "dependencies": { "@vercel/build-utils": "6.8.3", "@vercel/nft": "0.22.5", @@ -1864,11 +1879,25 @@ "ts-morph": "12.0.0" } }, + "node_modules/@vercel/remix-builder/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dev": true, + "license": "ISC", + 
"dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/@vercel/remix-builder/node_modules/semver": { "version": "7.3.8", "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.8.tgz", "integrity": "sha512-NB1ctGL5rlHrPJtFDVIVzTyQylMLu9N9VICA6HSFJo8MCGVTMW6gfpicwKmmK/dAjTOrqu5l63JJOpDSrAis3A==", "dev": true, + "license": "ISC", "dependencies": { "lru-cache": "^6.0.0" }, @@ -1879,11 +1908,19 @@ "node": ">=10" } }, + "node_modules/@vercel/remix-builder/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true, + "license": "ISC" + }, "node_modules/@vercel/routing-utils": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/@vercel/routing-utils/-/routing-utils-2.2.1.tgz", "integrity": "sha512-kzMZsvToDCDskNRZD71B9UAgstec7ujmlGH8cBEo6F/07VaFeji6GQdgd6Zwnrj+TvzQBggKoPQR64VkVY8Lzw==", "dev": true, + "license": "Apache-2.0", "dependencies": { "path-to-regexp": "6.1.0" }, @@ -1895,19 +1932,22 @@ "version": "6.1.0", "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.1.0.tgz", "integrity": "sha512-h9DqehX3zZZDCEm+xbfU0ZmwCGFCAAraPJWMXJ4+v32NjZJilVg3k1TcKsRgIb8IQ/izZSaydDc1OhJCZvs2Dw==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/@vercel/ruby": { "version": "1.3.76", "resolved": "https://registry.npmjs.org/@vercel/ruby/-/ruby-1.3.76.tgz", "integrity": "sha512-J8I0B7wAn8piGoPhBroBfJWgMEJTMEL/2o8MCoCyWdaE7MRtpXhI10pj8IvcUvAECoGJ+SM1Pm+SvBqtbtZ5FQ==", - "dev": true + "dev": true, + "license": "Apache-2.0" }, "node_modules/@vercel/static-build": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/@vercel/static-build/-/static-build-1.4.0.tgz", "integrity": "sha512-rCFVBve9nFaXrqP0pGiPaDciTTJ8CHeage8blF8xOEYMYdFRCg5nzFAOPERwUvl80RNpZrnGC7eJKxTHxfY2Ew==", "dev": true, + "license": "Apache-2.0", "dependencies": { "@vercel/gatsby-plugin-vercel-analytics": "1.0.10", "@vercel/gatsby-plugin-vercel-builder": "1.3.18" @@ -1918,6 +1958,7 @@ "resolved": "https://registry.npmjs.org/@vercel/static-config/-/static-config-2.0.17.tgz", "integrity": "sha512-2f50OTVrN07x7pH+XNW0e7cj7T+Ufg+19+a2N3/XZBjQmV+FaMlmSLiaQ4tBxp2H8lWWHzENua7ZSSQPtRZ3/A==", "dev": true, + "license": "Apache-2.0", "dependencies": { "ajv": "8.6.3", "json-schema-to-ts": "1.6.4", @@ -1929,6 +1970,7 @@ "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.6.3.tgz", "integrity": "sha512-SMJOdDP6LqTkD0Uq8qLi+gMwSt0imXLSV080qFVwJCpH9U6Mb+SUGHAXM0KNbcBPguytWyvFxcHgMLe2D2XSpw==", "dev": true, + "license": "MIT", "dependencies": { "fast-deep-equal": "^3.1.1", "json-schema-traverse": "^1.0.0", @@ -1944,148 +1986,199 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/@vue/compiler-core": { - "version": "3.3.4", - "resolved": "https://registry.npmjs.org/@vue/compiler-core/-/compiler-core-3.3.4.tgz", - "integrity": "sha512-cquyDNvZ6jTbf/+x+AgM2Arrp6G4Dzbb0R64jiG804HRMfRiFXWI6kqUVqZ6ZR0bQhIoQjB4+2bhNtVwndW15g==", + "version": "3.5.13", + "resolved": "https://registry.npmjs.org/@vue/compiler-core/-/compiler-core-3.5.13.tgz", + "integrity": 
"sha512-oOdAkwqUfW1WqpwSYJce06wvt6HljgY3fGeM9NcVA1HaYOij3mZG9Rkysn0OHuyUAGMbEbARIpsG+LPVlBJ5/Q==", + "license": "MIT", "peer": true, "dependencies": { - "@babel/parser": "^7.21.3", - "@vue/shared": "3.3.4", + "@babel/parser": "^7.25.3", + "@vue/shared": "3.5.13", + "entities": "^4.5.0", "estree-walker": "^2.0.2", - "source-map-js": "^1.0.2" + "source-map-js": "^1.2.0" } }, "node_modules/@vue/compiler-core/node_modules/estree-walker": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", + "license": "MIT", "peer": true }, "node_modules/@vue/compiler-dom": { - "version": "3.3.4", - "resolved": "https://registry.npmjs.org/@vue/compiler-dom/-/compiler-dom-3.3.4.tgz", - "integrity": "sha512-wyM+OjOVpuUukIq6p5+nwHYtj9cFroz9cwkfmP9O1nzH68BenTTv0u7/ndggT8cIQlnBeOo6sUT/gvHcIkLA5w==", + "version": "3.5.13", + "resolved": "https://registry.npmjs.org/@vue/compiler-dom/-/compiler-dom-3.5.13.tgz", + "integrity": "sha512-ZOJ46sMOKUjO3e94wPdCzQ6P1Lx/vhp2RSvfaab88Ajexs0AHeV0uasYhi99WPaogmBlRHNRuly8xV75cNTMDA==", + "license": "MIT", "peer": true, "dependencies": { - "@vue/compiler-core": "3.3.4", - "@vue/shared": "3.3.4" + "@vue/compiler-core": "3.5.13", + "@vue/shared": "3.5.13" } }, "node_modules/@vue/compiler-sfc": { - "version": "3.3.4", - "resolved": "https://registry.npmjs.org/@vue/compiler-sfc/-/compiler-sfc-3.3.4.tgz", - "integrity": "sha512-6y/d8uw+5TkCuzBkgLS0v3lSM3hJDntFEiUORM11pQ/hKvkhSKZrXW6i69UyXlJQisJxuUEJKAWEqWbWsLeNKQ==", + "version": "3.5.13", + "resolved": "https://registry.npmjs.org/@vue/compiler-sfc/-/compiler-sfc-3.5.13.tgz", + "integrity": "sha512-6VdaljMpD82w6c2749Zhf5T9u5uLBWKnVue6XWxprDobftnletJ8+oel7sexFfM3qIxNmVE7LSFGTpv6obNyaQ==", + "license": "MIT", "peer": true, "dependencies": { - "@babel/parser": "^7.20.15", - "@vue/compiler-core": "3.3.4", - "@vue/compiler-dom": "3.3.4", - "@vue/compiler-ssr": "3.3.4", - "@vue/reactivity-transform": "3.3.4", - "@vue/shared": "3.3.4", + "@babel/parser": "^7.25.3", + "@vue/compiler-core": "3.5.13", + "@vue/compiler-dom": "3.5.13", + "@vue/compiler-ssr": "3.5.13", + "@vue/shared": "3.5.13", "estree-walker": "^2.0.2", - "magic-string": "^0.30.0", - "postcss": "^8.1.10", - "source-map-js": "^1.0.2" + "magic-string": "^0.30.11", + "postcss": "^8.4.48", + "source-map-js": "^1.2.0" } }, "node_modules/@vue/compiler-sfc/node_modules/estree-walker": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", + "license": "MIT", "peer": true }, - "node_modules/@vue/compiler-ssr": { - "version": "3.3.4", - "resolved": "https://registry.npmjs.org/@vue/compiler-ssr/-/compiler-ssr-3.3.4.tgz", - "integrity": "sha512-m0v6oKpup2nMSehwA6Uuu+j+wEwcy7QmwMkVNVfrV9P2qE5KshC6RwOCq8fjGS/Eak/uNb8AaWekfiXxbBB6gQ==", + "node_modules/@vue/compiler-sfc/node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", "peer": true, - "dependencies": { - "@vue/compiler-dom": "3.3.4", - "@vue/shared": "3.3.4" + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 
|| ^13.7 || ^14 || >=15.0.1" } }, - "node_modules/@vue/reactivity": { - "version": "3.3.4", - "resolved": "https://registry.npmjs.org/@vue/reactivity/-/reactivity-3.3.4.tgz", - "integrity": "sha512-kLTDLwd0B1jG08NBF3R5rqULtv/f8x3rOFByTDz4J53ttIQEDmALqKqXY0J+XQeN0aV2FBxY8nJDf88yvOPAqQ==", + "node_modules/@vue/compiler-sfc/node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "license": "ISC", + "peer": true + }, + "node_modules/@vue/compiler-sfc/node_modules/postcss": { + "version": "8.5.3", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.3.tgz", + "integrity": "sha512-dle9A3yYxlBSrt8Fu+IpjGT8SY8hN0mlaA6GY8t0P5PjIOZemULz/E2Bnm/2dcUOena75OTNkHI76uZBNUUq3A==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", "peer": true, "dependencies": { - "@vue/shared": "3.3.4" + "nanoid": "^3.3.8", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" } }, - "node_modules/@vue/reactivity-transform": { - "version": "3.3.4", - "resolved": "https://registry.npmjs.org/@vue/reactivity-transform/-/reactivity-transform-3.3.4.tgz", - "integrity": "sha512-MXgwjako4nu5WFLAjpBnCj/ieqcjE2aJBINUNQzkZQfzIZA4xn+0fV1tIYBJvvva3N3OvKGofRLvQIwEQPpaXw==", + "node_modules/@vue/compiler-ssr": { + "version": "3.5.13", + "resolved": "https://registry.npmjs.org/@vue/compiler-ssr/-/compiler-ssr-3.5.13.tgz", + "integrity": "sha512-wMH6vrYHxQl/IybKJagqbquvxpWCuVYpoUJfCqFZwa/JY1GdATAQ+TgVtgrwwMZ0D07QhA99rs/EAAWfvG6KpA==", + "license": "MIT", "peer": true, "dependencies": { - "@babel/parser": "^7.20.15", - "@vue/compiler-core": "3.3.4", - "@vue/shared": "3.3.4", - "estree-walker": "^2.0.2", - "magic-string": "^0.30.0" + "@vue/compiler-dom": "3.5.13", + "@vue/shared": "3.5.13" } }, - "node_modules/@vue/reactivity-transform/node_modules/estree-walker": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", - "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", - "peer": true + "node_modules/@vue/reactivity": { + "version": "3.5.13", + "resolved": "https://registry.npmjs.org/@vue/reactivity/-/reactivity-3.5.13.tgz", + "integrity": "sha512-NaCwtw8o48B9I6L1zl2p41OHo/2Z4wqYGGIK1Khu5T7yxrn+ATOixn/Udn2m+6kZKB/J7cuT9DbWWhRxqixACg==", + "license": "MIT", + "peer": true, + "dependencies": { + "@vue/shared": "3.5.13" + } }, "node_modules/@vue/runtime-core": { - "version": "3.3.4", - "resolved": "https://registry.npmjs.org/@vue/runtime-core/-/runtime-core-3.3.4.tgz", - "integrity": "sha512-R+bqxMN6pWO7zGI4OMlmvePOdP2c93GsHFM/siJI7O2nxFRzj55pLwkpCedEY+bTMgp5miZ8CxfIZo3S+gFqvA==", + "version": "3.5.13", + "resolved": "https://registry.npmjs.org/@vue/runtime-core/-/runtime-core-3.5.13.tgz", + "integrity": "sha512-Fj4YRQ3Az0WTZw1sFe+QDb0aXCerigEpw418pw1HBUKFtnQHWzwojaukAs2X/c9DQz4MQ4bsXTGlcpGxU/RCIw==", + "license": "MIT", "peer": true, "dependencies": { - "@vue/reactivity": "3.3.4", - "@vue/shared": "3.3.4" + "@vue/reactivity": "3.5.13", + "@vue/shared": "3.5.13" } }, "node_modules/@vue/runtime-dom": { - "version": "3.3.4", - "resolved": 
"https://registry.npmjs.org/@vue/runtime-dom/-/runtime-dom-3.3.4.tgz", - "integrity": "sha512-Aj5bTJ3u5sFsUckRghsNjVTtxZQ1OyMWCr5dZRAPijF/0Vy4xEoRCwLyHXcj4D0UFbJ4lbx3gPTgg06K/GnPnQ==", + "version": "3.5.13", + "resolved": "https://registry.npmjs.org/@vue/runtime-dom/-/runtime-dom-3.5.13.tgz", + "integrity": "sha512-dLaj94s93NYLqjLiyFzVs9X6dWhTdAlEAciC3Moq7gzAc13VJUdCnjjRurNM6uTLFATRHexHCTu/Xp3eW6yoog==", + "license": "MIT", "peer": true, "dependencies": { - "@vue/runtime-core": "3.3.4", - "@vue/shared": "3.3.4", - "csstype": "^3.1.1" + "@vue/reactivity": "3.5.13", + "@vue/runtime-core": "3.5.13", + "@vue/shared": "3.5.13", + "csstype": "^3.1.3" } }, "node_modules/@vue/server-renderer": { - "version": "3.3.4", - "resolved": "https://registry.npmjs.org/@vue/server-renderer/-/server-renderer-3.3.4.tgz", - "integrity": "sha512-Q6jDDzR23ViIb67v+vM1Dqntu+HUexQcsWKhhQa4ARVzxOY2HbC7QRW/ggkDBd5BU+uM1sV6XOAP0b216o34JQ==", + "version": "3.5.13", + "resolved": "https://registry.npmjs.org/@vue/server-renderer/-/server-renderer-3.5.13.tgz", + "integrity": "sha512-wAi4IRJV/2SAW3htkTlB+dHeRmpTiVIK1OGLWV1yeStVSebSQQOwGwIq0D3ZIoBj2C2qpgz5+vX9iEBkTdk5YA==", + "license": "MIT", "peer": true, "dependencies": { - "@vue/compiler-ssr": "3.3.4", - "@vue/shared": "3.3.4" + "@vue/compiler-ssr": "3.5.13", + "@vue/shared": "3.5.13" }, "peerDependencies": { - "vue": "3.3.4" + "vue": "3.5.13" } }, "node_modules/@vue/shared": { - "version": "3.3.4", - "resolved": "https://registry.npmjs.org/@vue/shared/-/shared-3.3.4.tgz", - "integrity": "sha512-7OjdcV8vQ74eiz1TZLzZP4JwqM5fA94K6yntPS5Z25r9HDuGNzaGdgvwKYq6S+MxwF0TFRwe50fIR/MYnakdkQ==", + "version": "3.5.13", + "resolved": "https://registry.npmjs.org/@vue/shared/-/shared-3.5.13.tgz", + "integrity": "sha512-/hnE/qP5ZoGpol0a5mDi45bOd7t3tjYJBjsgCsivow7D48cJeV5l05RD82lPqi7gRiphZM37rnhW1l6ZoCNNnQ==", + "license": "MIT", "peer": true }, "node_modules/abbrev": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==", - "dev": true + "dev": true, + "license": "ISC" }, "node_modules/acorn": { - "version": "8.10.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.10.0.tgz", - "integrity": "sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==", + "version": "8.14.1", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.1.tgz", + "integrity": "sha512-OvQ/2pUDKmgfCg++xsTX1wGxfTaszcHVcTctW4UJB4hibJx2HXxxO5UmVgyjMa+ZDsiaf5wWLXYpRWMmBI0QHg==", + "license": "MIT", "bin": { "acorn": "bin/acorn" }, @@ -2094,10 +2187,14 @@ } }, "node_modules/acorn-walk": { - "version": "8.2.0", - "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz", - "integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==", + "version": "8.3.4", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.4.tgz", + "integrity": "sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==", "dev": true, + "license": "MIT", + "dependencies": { + "acorn": "^8.11.0" + }, "engines": { "node": ">=0.4.0" } @@ -2107,6 +2204,7 @@ "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", "dev": true, + "license": "MIT", "dependencies": { "debug": "4" }, @@ -2118,6 +2216,7 @@ "version": 
"2.1.34", "resolved": "https://registry.npmjs.org/ai/-/ai-2.1.34.tgz", "integrity": "sha512-gZawUnYhZHJ1PiE+x7iDuy2GQg67AKs0uHgdS8Jw3o/3NouGeJ/5ytyqbgHqczgvoquSpykumR+5TyRieF8x/w==", + "license": "Apache-2.0", "dependencies": { "eventsource-parser": "1.0.0", "nanoid": "3.3.6", @@ -2156,6 +2255,7 @@ "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "dev": true, + "license": "MIT", "optional": true, "dependencies": { "fast-deep-equal": "^3.1.1", @@ -2173,6 +2273,7 @@ "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", "dev": true, + "license": "MIT", "dependencies": { "type-fest": "^0.21.3" }, @@ -2188,6 +2289,7 @@ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } @@ -2197,6 +2299,7 @@ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -2212,6 +2315,7 @@ "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", "dev": true, + "license": "ISC", "dependencies": { "normalize-path": "^3.0.0", "picomatch": "^2.0.4" @@ -2224,13 +2328,16 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/aproba/-/aproba-2.0.0.tgz", "integrity": "sha512-lYe4Gx7QT+MKGbDsA+Z+he/Wtef0BiwDOlK/XkBrdfsh9J/jPPXbX0tE9x9cl27Tmu5gg3QUbUrQYa/y+KOHPQ==", - "dev": true + "dev": true, + "license": "ISC" }, "node_modules/are-we-there-yet": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/are-we-there-yet/-/are-we-there-yet-2.0.0.tgz", "integrity": "sha512-Ci/qENmwHnsYo9xKIcUJN5LeDKdJ6R1Z1j9V/J5wyq8nh/mYPEpIKJbBZXtZjG04HiK7zV/p6Vs9952MrMeUIw==", + "deprecated": "This package is no longer supported.", "dev": true, + "license": "ISC", "dependencies": { "delegates": "^1.0.0", "readable-stream": "^3.6.0" @@ -2240,34 +2347,45 @@ } }, "node_modules/arg": { - "version": "4.1.3", - "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz", - "integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==", - "dev": true + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", + "dev": true, + "license": "MIT" }, "node_modules/argparse": { "version": "1.0.10", "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", "dev": true, + "license": "MIT", "dependencies": { "sprintf-js": "~1.0.2" } }, "node_modules/aria-query": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.0.tgz", - "integrity": "sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A==", + "version": "5.3.2", + "resolved": 
"https://registry.npmjs.org/aria-query/-/aria-query-5.3.2.tgz", + "integrity": "sha512-COROpnaoap1E2F000S62r6A60uHZnmlvomhfyT2DlTcrY1OrBKn2UhH7qn5wTC9zMvD0AY7csdPSNwKP+7WiQw==", + "license": "Apache-2.0", "peer": true, - "dependencies": { - "dequal": "^2.0.3" + "engines": { + "node": ">= 0.4" } }, + "node_modules/async": { + "version": "3.2.6", + "resolved": "https://registry.npmjs.org/async/-/async-3.2.6.tgz", + "integrity": "sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA==", + "dev": true, + "license": "MIT" + }, "node_modules/async-listen": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/async-listen/-/async-listen-3.0.1.tgz", "integrity": "sha512-cWMaNwUJnf37C/S5TfCkk/15MwbPRwVYALA2jtjkbHjCmAPiDXyNJy2q3p1KAZzDLHAWyarUWSujUoHR4pEgrA==", "dev": true, + "license": "MIT", "engines": { "node": ">= 14" } @@ -2276,33 +2394,38 @@ "version": "3.1.1", "resolved": "https://registry.npmjs.org/async-sema/-/async-sema-3.1.1.tgz", "integrity": "sha512-tLRNUXati5MFePdAk8dw7Qt7DpxPB60ofAgn8WRhW6a2rcimZnYBP9oxHiv0OHy+Wz7kPMG+t4LGdt31+4EmGg==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/asynckit": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/axios": { - "version": "1.6.2", - "resolved": "https://registry.npmjs.org/axios/-/axios-1.6.2.tgz", - "integrity": "sha512-7i24Ri4pmDRfJTR7LDBhsOTtcm+9kjX5WiY1X3wIisx6G9So3pfMkEiU7emUBe46oceVImccTEM3k6C5dbVW8A==", + "version": "1.8.4", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.8.4.tgz", + "integrity": "sha512-eBSYY4Y68NNlHbHBMdeDmKNtDgXWhQsJcGqzO3iLUM0GraQFSS9cVgPX5I9b3lbdFKyYoAEGAZF1DwhTaljNAw==", "dev": true, + "license": "MIT", "dependencies": { - "follow-redirects": "^1.15.0", + "follow-redirects": "^1.15.6", "form-data": "^4.0.0", "proxy-from-env": "^1.1.0" } }, "node_modules/axios/node_modules/form-data": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", - "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.2.tgz", + "integrity": "sha512-hGfm/slu0ZabnNt4oaRZ6uREyfCj6P4fT/n6A1rGV+Z0VdGXjfOhVUpkn6qVQONHGIFwmveGXyDs75+nr6FM8w==", "dev": true, + "license": "MIT", "dependencies": { "asynckit": "^0.4.0", "combined-stream": "^1.0.8", + "es-set-tostringtag": "^2.1.0", "mime-types": "^2.1.12" }, "engines": { @@ -2310,12 +2433,13 @@ } }, "node_modules/axobject-query": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/axobject-query/-/axobject-query-3.2.1.tgz", - "integrity": "sha512-jsyHu61e6N4Vbz/v18DHwWYKK0bSWLqn47eeDSKPB7m8tqMHF9YJ+mhIk2lVteyZrY8tnSj/jHOv4YiTCuCJgg==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/axobject-query/-/axobject-query-4.1.0.tgz", + "integrity": "sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ==", + "license": "Apache-2.0", "peer": true, - "dependencies": { - "dequal": "^2.0.3" + "engines": { + "node": ">= 0.4" } }, "node_modules/babel-jest": { @@ -2323,6 +2447,7 @@ "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-29.7.0.tgz", "integrity": 
"sha512-BrvGY3xZSwEcCzKvKsCi2GgHqDqsYkOP4/by5xCgIwGXQxIEh+8ew3gmrE1y7XRR6LHZIj6yLYnUi/mm2KXKBg==", "dev": true, + "license": "MIT", "dependencies": { "@jest/transform": "^29.7.0", "@types/babel__core": "^7.1.14", @@ -2344,6 +2469,7 @@ "resolved": "https://registry.npmjs.org/babel-plugin-istanbul/-/babel-plugin-istanbul-6.1.1.tgz", "integrity": "sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==", "dev": true, + "license": "BSD-3-Clause", "dependencies": { "@babel/helper-plugin-utils": "^7.0.0", "@istanbuljs/load-nyc-config": "^1.0.0", @@ -2360,6 +2486,7 @@ "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz", "integrity": "sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==", "dev": true, + "license": "BSD-3-Clause", "dependencies": { "@babel/core": "^7.12.3", "@babel/parser": "^7.14.7", @@ -2371,20 +2498,12 @@ "node": ">=8" } }, - "node_modules/babel-plugin-istanbul/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "dev": true, - "bin": { - "semver": "bin/semver.js" - } - }, "node_modules/babel-plugin-jest-hoist": { "version": "29.6.3", "resolved": "https://registry.npmjs.org/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.6.3.tgz", "integrity": "sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg==", "dev": true, + "license": "MIT", "dependencies": { "@babel/template": "^7.3.3", "@babel/types": "^7.3.3", @@ -2396,23 +2515,27 @@ } }, "node_modules/babel-preset-current-node-syntax": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.0.1.tgz", - "integrity": "sha512-M7LQ0bxarkxQoN+vz5aJPsLBn77n8QgTFmo8WK0/44auK2xlCXrYcUxHFxgU7qW5Yzw/CjmLRK2uJzaCd7LvqQ==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.1.0.tgz", + "integrity": "sha512-ldYss8SbBlWva1bs28q78Ju5Zq1F+8BrqBZZ0VFhLBvhh6lCpC2o3gDJi/5DRLs9FgYZCnmPYIVFU4lRXCkyUw==", "dev": true, + "license": "MIT", "dependencies": { "@babel/plugin-syntax-async-generators": "^7.8.4", "@babel/plugin-syntax-bigint": "^7.8.3", - "@babel/plugin-syntax-class-properties": "^7.8.3", - "@babel/plugin-syntax-import-meta": "^7.8.3", + "@babel/plugin-syntax-class-properties": "^7.12.13", + "@babel/plugin-syntax-class-static-block": "^7.14.5", + "@babel/plugin-syntax-import-attributes": "^7.24.7", + "@babel/plugin-syntax-import-meta": "^7.10.4", "@babel/plugin-syntax-json-strings": "^7.8.3", - "@babel/plugin-syntax-logical-assignment-operators": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", - "@babel/plugin-syntax-numeric-separator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.10.4", "@babel/plugin-syntax-object-rest-spread": "^7.8.3", "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", "@babel/plugin-syntax-optional-chaining": "^7.8.3", - "@babel/plugin-syntax-top-level-await": "^7.8.3" + "@babel/plugin-syntax-private-property-in-object": "^7.14.5", + "@babel/plugin-syntax-top-level-await": "^7.14.5" }, "peerDependencies": { "@babel/core": "^7.0.0" @@ -2423,6 +2546,7 @@ "resolved": 
"https://registry.npmjs.org/babel-preset-jest/-/babel-preset-jest-29.6.3.tgz", "integrity": "sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA==", "dev": true, + "license": "MIT", "dependencies": { "babel-plugin-jest-hoist": "^29.6.3", "babel-preset-current-node-syntax": "^1.0.0" @@ -2438,13 +2562,15 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/bindings": { "version": "1.5.0", "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz", "integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==", "dev": true, + "license": "MIT", "dependencies": { "file-uri-to-path": "1.0.0" } @@ -2453,34 +2579,37 @@ "version": "3.7.2", "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz", "integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/brace-expansion": { "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", "dev": true, + "license": "MIT", "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" } }, "node_modules/braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", "dev": true, + "license": "MIT", "dependencies": { - "fill-range": "^7.0.1" + "fill-range": "^7.1.1" }, "engines": { "node": ">=8" } }, "node_modules/browserslist": { - "version": "4.21.10", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.21.10.tgz", - "integrity": "sha512-bipEBdZfVH5/pwrvqc+Ub0kUPVfGUhlKxbvfD+z1BDnPEO/X98ruXGA1WP5ASpAFKan7Qr6j736IacbZQuAlKQ==", + "version": "4.24.4", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.24.4.tgz", + "integrity": "sha512-KDi1Ny1gSePi1vm0q4oxSF8b4DR44GF4BbmS2YdhPLOEqd8pDviZOGH/GsmRwoWJ2+5Lr085X7naowMwKHDG1A==", "dev": true, "funding": [ { @@ -2496,11 +2625,12 @@ "url": "https://github.com/sponsors/ai" } ], + "license": "MIT", "dependencies": { - "caniuse-lite": "^1.0.30001517", - "electron-to-chromium": "^1.4.477", - "node-releases": "^2.0.13", - "update-browserslist-db": "^1.0.11" + "caniuse-lite": "^1.0.30001688", + "electron-to-chromium": "^1.5.73", + "node-releases": "^2.0.19", + "update-browserslist-db": "^1.1.1" }, "bin": { "browserslist": "cli.js" @@ -2514,6 +2644,7 @@ "resolved": "https://registry.npmjs.org/bs-logger/-/bs-logger-0.2.6.tgz", "integrity": "sha512-pd8DCoxmbgc7hyPKOvxtqNcjYoOsABPQdcCUjGp3d42VR2CX1ORhk2A87oqqu5R1kk+76nsxZupkmyd+MVtCog==", "dev": true, + "license": "MIT", "dependencies": { "fast-json-stable-stringify": "2.x" }, @@ -2526,6 +2657,7 @@ "resolved": "https://registry.npmjs.org/bser/-/bser-2.1.1.tgz", "integrity": 
"sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==", "dev": true, + "license": "Apache-2.0", "dependencies": { "node-int64": "^0.4.0" } @@ -2534,7 +2666,8 @@ "version": "1.1.2", "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/busboy": { "version": "1.6.0", @@ -2547,11 +2680,26 @@ "node": ">=10.16.0" } }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/callsites": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=6" } @@ -2561,14 +2709,15 @@ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", "dev": true, + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/caniuse-lite": { - "version": "1.0.30001617", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001617.tgz", - "integrity": "sha512-mLyjzNI9I+Pix8zwcrpxEbGlfqOkF9kM3ptzmKNw5tizSyYwMe+nGLTqMK9cO+0E+Bh6TsBxNAaHWEM8xwSsmA==", + "version": "1.0.30001715", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001715.tgz", + "integrity": "sha512-7ptkFGMm2OAOgvZpwgA4yjQ5SQbrNVGdRjzH0pBdy1Fasvcr+KAeECmbCAECzTuDuoX0FCY8KzUxjf9+9kfZEw==", "funding": [ { "type": "opencollective", @@ -2582,13 +2731,15 @@ "type": "github", "url": "https://github.com/sponsors/ai" } - ] + ], + "license": "CC-BY-4.0" }, "node_modules/chalk": { "version": "4.1.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -2605,6 +2756,7 @@ "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", "dev": true, + "license": "MIT", "engines": { "node": ">=10" } @@ -2614,6 +2766,7 @@ "resolved": "https://registry.npmjs.org/check-more-types/-/check-more-types-2.24.0.tgz", "integrity": "sha512-Pj779qHxV2tuapviy1bSZNEL1maXr13bPYpsvSDB68HlYcYuhlDrmGd63i0JHMCLKzc7rUSNIrpdJlhVlNwrxA==", "dev": true, + "license": "MIT", "engines": { "node": ">= 0.8.0" } @@ -2623,14 +2776,15 @@ "resolved": "https://registry.npmjs.org/chownr/-/chownr-2.0.0.tgz", "integrity": "sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==", "dev": true, + "license": "ISC", "engines": { "node": ">=10" } }, "node_modules/ci-info": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.8.0.tgz", - "integrity": 
"sha512-eXTggHWSooYhq49F2opQhuHWgzucfF2YgODK4e1566GQs5BIfP30B0oenwBJHfWxAs2fyPB1s7Mg949zLf61Yw==", + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", + "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", "dev": true, "funding": [ { @@ -2638,26 +2792,30 @@ "url": "https://github.com/sponsors/sibiraj-s" } ], + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/cjs-module-lexer": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.2.3.tgz", - "integrity": "sha512-0TNiGstbQmCFwt4akjjBg5pLRTSyj/PkWQ1ZoO2zntmg9yLqSRxwEa4iCfQLGjqhiqBfOJa7W/E8wfGrTDmlZQ==", - "dev": true + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.4.3.tgz", + "integrity": "sha512-9z8TZaGM1pfswYeXrUpzPrkx8UnWYdhJclsiYMm6x/w5+nN+8Tf/LnAgfLGQCm59qAOxU8WwHEq2vNwF6i4j+Q==", + "dev": true, + "license": "MIT" }, "node_modules/client-only": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz", - "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==" + "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==", + "license": "MIT" }, "node_modules/cliui": { "version": "8.0.1", "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", "dev": true, + "license": "ISC", "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.1", @@ -2672,6 +2830,7 @@ "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", "integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==", "dev": true, + "license": "MIT", "engines": { "iojs": ">= 1.0.0", "node": ">= 0.12.0" @@ -2681,12 +2840,14 @@ "version": "10.1.1", "resolved": "https://registry.npmjs.org/code-block-writer/-/code-block-writer-10.1.1.tgz", "integrity": "sha512-67ueh2IRGst/51p0n6FvPrnRjAGHY5F8xdjkgrYE7DDzpJe6qA07RYQ9VcoUeo5ATOjSOiWpSL3SWBRRbempMw==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/code-red": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/code-red/-/code-red-1.0.4.tgz", "integrity": "sha512-7qJWqItLA8/VPVlKJlFXU+NBlo/qyfs39aJcuMT/2ere32ZqvF5OSxgdM5xOfJJ7O429gg2HM47y8v9P+9wrNw==", + "license": "MIT", "peer": true, "dependencies": { "@jridgewell/sourcemap-codec": "^1.4.15", @@ -2700,13 +2861,15 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/collect-v8-coverage/-/collect-v8-coverage-1.0.2.tgz", "integrity": "sha512-lHl4d5/ONEbLlJvaJNtsF/Lz+WvB07u2ycqTYbdrq7UypDXailES4valYb2eWiJFxZlVmpGekfqoxQhzyFdT4Q==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/color-convert": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": true, + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -2718,13 +2881,15 @@ "version": "1.1.4", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/color-support": { "version": 
"1.1.3", "resolved": "https://registry.npmjs.org/color-support/-/color-support-1.1.3.tgz", "integrity": "sha512-qiBjkpbMLO/HL68y+lh4q0/O1MZFj2RX6X/KmMa3+gJD3z+WwI1ZzDHysvqHGS3mP6mznPckpXmw1nI9cJjyRg==", "dev": true, + "license": "ISC", "bin": { "color-support": "bin.js" } @@ -2734,6 +2899,7 @@ "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", "dev": true, + "license": "MIT", "dependencies": { "delayed-stream": "~1.0.0" }, @@ -2745,19 +2911,22 @@ "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/console-control-strings": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/console-control-strings/-/console-control-strings-1.1.0.tgz", "integrity": "sha512-ty/fTekppD2fIwRvnZAVdeOiGd1c7YXEixbgJTNzqcxJWKQnjJ/V1bNEEE6hygpM3WjwHFUVK6HTjWSzV4a8sQ==", - "dev": true + "dev": true, + "license": "ISC" }, "node_modules/content-type": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", "dev": true, + "license": "MIT", "engines": { "node": ">= 0.6" } @@ -2767,6 +2936,7 @@ "resolved": "https://registry.npmjs.org/convert-hrtime/-/convert-hrtime-3.0.0.tgz", "integrity": "sha512-7V+KqSvMiHp8yWDuwfww06XleMWVVB9b9tURBx+G7UTADuo5hYPuowKloz4OzOqbPezxgo+fdQ1522WzPG4OeA==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } @@ -2775,13 +2945,15 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/create-jest": { "version": "29.7.0", "resolved": "https://registry.npmjs.org/create-jest/-/create-jest-29.7.0.tgz", "integrity": "sha512-Adz2bdH0Vq3F53KEMJOoftQFutWCukm6J24wbPWRO4k1kMY7gS7ds/uoJkNuV8wDCtWWnuwGcJwpWcih+zEW1Q==", "dev": true, + "license": "MIT", "dependencies": { "@jest/types": "^29.6.3", "chalk": "^4.0.0", @@ -2802,13 +2974,15 @@ "version": "1.1.1", "resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz", "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", "dev": true, + "license": "MIT", "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", @@ -2822,6 +2996,7 @@ "version": "2.3.1", "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-2.3.1.tgz", "integrity": "sha512-6Fv1DV/TYw//QF5IzQdqsNDjx/wc8TrMBZsqjL9eW01tWb7R7k/mq+/VXfJCl7SoD5emsJop9cOByJZfs8hYIw==", + "license": "MIT", "peer": true, "dependencies": { "mdn-data": 
"2.0.30", @@ -2832,17 +3007,19 @@ } }, "node_modules/csstype": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.2.tgz", - "integrity": "sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==" + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", + "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==", + "license": "MIT" }, "node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", + "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", "dev": true, + "license": "MIT", "dependencies": { - "ms": "2.1.2" + "ms": "^2.1.3" }, "engines": { "node": ">=6.0" @@ -2854,10 +3031,11 @@ } }, "node_modules/dedent": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/dedent/-/dedent-1.5.1.tgz", - "integrity": "sha512-+LxW+KLWxu3HW3M2w2ympwtqPrqYRzU8fqi6Fhd18fBALe15blJPI/I4+UHveMVG6lJqB4JNd4UG0S5cnVHwIg==", + "version": "1.5.3", + "resolved": "https://registry.npmjs.org/dedent/-/dedent-1.5.3.tgz", + "integrity": "sha512-NHQtfOOW68WD8lgypbLA5oT+Bt0xXJhiYvoR6SmmNXZfpzOGXwdKWmcwG8N7PwVVWV3eF/68nmD9BaJSsTBhyQ==", "dev": true, + "license": "MIT", "peerDependencies": { "babel-plugin-macros": "^3.1.0" }, @@ -2872,6 +3050,7 @@ "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", "dev": true, + "license": "MIT", "engines": { "node": ">=0.10.0" } @@ -2881,6 +3060,7 @@ "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=0.4.0" } @@ -2889,21 +3069,24 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz", "integrity": "sha512-bd2L678uiWATM6m5Z1VzNCErI3jiGzt6HGY8OVICs40JQq/HALfbyNJmp0UDakEY4pMMaN0Ly5om/B1VI/+xfQ==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/dequal": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/detect-libc": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.2.tgz", - "integrity": "sha512-UX6sGumvvqSaXgdKGUsgZWqcUyIXZ/vZTrlRT/iobiKhGL0zL4d3osHj3uqllWJK+i+sixDS/3COVEOFbupFyw==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.3.tgz", + "integrity": "sha512-bwy0MGW55bG41VqxxypOsdSdGqLwXPI/focwgTYCFMbdUiBAxLg9CFzG08sz2aqzknwiX7Hkl0bQENjg8iLByw==", "dev": true, + "license": "Apache-2.0", "engines": { "node": ">=8" } @@ -2913,6 +3096,7 @@ "resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz", "integrity": "sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } @@ -2922,6 +3106,7 
@@ "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", "integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==", "dev": true, + "license": "BSD-3-Clause", "engines": { "node": ">=0.3.1" } @@ -2931,21 +3116,39 @@ "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz", "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==", "dev": true, + "license": "MIT", "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/duplexer": { "version": "0.1.2", "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/edge-runtime": { - "version": "2.5.9", - "resolved": "https://registry.npmjs.org/edge-runtime/-/edge-runtime-2.5.9.tgz", - "integrity": "sha512-pk+k0oK0PVXdlT4oRp4lwh+unuKB7Ng4iZ2HB+EZ7QCEQizX360Rp/F4aRpgpRgdP2ufB35N+1KppHmYjqIGSg==", + "version": "2.5.10", + "resolved": "https://registry.npmjs.org/edge-runtime/-/edge-runtime-2.5.10.tgz", + "integrity": "sha512-oe6JjFbU1MbISzeSBMHqmzBhNEwmy2AYDY0LxStl8FAIWSGdGO+CqzWub9nbgmANuJYPXZA0v3XAlbxeKV/Omw==", "dev": true, + "license": "MPL-2.0", "dependencies": { "@edge-runtime/format": "2.2.1", "@edge-runtime/ponyfill": "2.4.2", @@ -2964,17 +3167,35 @@ "node": ">=16" } }, + "node_modules/ejs": { + "version": "3.1.10", + "resolved": "https://registry.npmjs.org/ejs/-/ejs-3.1.10.tgz", + "integrity": "sha512-UeJmFfOrAQS8OJWPZ4qtgHyWExa088/MtK5UEyoJGFH67cDEXkZSviOiKRCZ4Xij0zxI3JECgYs3oKx+AizQBA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "jake": "^10.8.5" + }, + "bin": { + "ejs": "bin/cli.js" + }, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/electron-to-chromium": { - "version": "1.4.509", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.509.tgz", - "integrity": "sha512-G5KlSWY0zzhANtX15tkikHl4WB7zil2Y65oT52EZUL194abjUXBZym12Ht7Bhuwm/G3LJFEqMADyv2Cks56dmg==", - "dev": true + "version": "1.5.139", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.139.tgz", + "integrity": "sha512-GGnRYOTdN5LYpwbIr0rwP/ZHOQSvAF6TG0LSzp28uCBb9JiXHJGmaaKw29qjNJc5bGnnp6kXJqRnGMQoELwi5w==", + "dev": true, + "license": "ISC" }, "node_modules/emittery": { "version": "0.13.1", "resolved": "https://registry.npmjs.org/emittery/-/emittery-0.13.1.tgz", "integrity": "sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=12" }, @@ -2986,23 +3207,88 @@ "version": "8.0.0", "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true + "dev": true, + "license": "MIT" + }, + "node_modules/entities": { + "version": "4.5.0", + "resolved": 
"https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", + "license": "BSD-2-Clause", + "peer": true, + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } }, "node_modules/error-ex": { "version": "1.3.2", "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", "dev": true, + "license": "MIT", "dependencies": { "is-arrayish": "^0.2.1" } }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", + "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/esbuild": { "version": "0.14.47", "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.14.47.tgz", "integrity": "sha512-wI4ZiIfFxpkuxB8ju4MHrGwGLyp1+awEHAHVpx6w7a+1pmYIq8T9FGEVVwFo0iFierDoMj++Xq69GXWYn2EiwA==", "dev": true, "hasInstallScript": true, + "license": "MIT", "bin": { "esbuild": "bin/esbuild" }, @@ -3040,6 +3326,7 @@ "x64" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "android" @@ -3056,6 +3343,7 @@ "arm64" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "android" @@ -3072,6 +3360,7 @@ "x64" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "darwin" @@ -3088,6 +3377,7 @@ "arm64" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "darwin" @@ -3104,6 +3394,7 @@ "x64" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "freebsd" @@ -3120,6 +3411,7 @@ "arm64" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "freebsd" @@ -3136,6 +3428,7 @@ "ia32" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "linux" @@ -3152,6 +3445,7 @@ "x64" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "linux" @@ -3168,6 +3462,7 @@ "arm" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "linux" @@ -3184,6 +3479,7 @@ "arm64" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "linux" @@ -3200,6 +3496,7 @@ "mips64el" ], "dev": true, + 
"license": "MIT", "optional": true, "os": [ "linux" @@ -3216,6 +3513,7 @@ "ppc64" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "linux" @@ -3232,6 +3530,7 @@ "riscv64" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "linux" @@ -3248,6 +3547,7 @@ "s390x" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "linux" @@ -3264,6 +3564,7 @@ "x64" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "netbsd" @@ -3280,6 +3581,7 @@ "x64" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "openbsd" @@ -3296,6 +3598,7 @@ "x64" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "sunos" @@ -3312,6 +3615,7 @@ "ia32" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "win32" @@ -3328,6 +3632,7 @@ "x64" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "win32" @@ -3344,6 +3649,7 @@ "arm64" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "win32" @@ -3353,10 +3659,11 @@ } }, "node_modules/escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", "dev": true, + "license": "MIT", "engines": { "node": ">=6" } @@ -3366,6 +3673,7 @@ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } @@ -3375,6 +3683,7 @@ "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", "dev": true, + "license": "BSD-2-Clause", "bin": { "esparse": "bin/esparse.js", "esvalidate": "bin/esvalidate.js" @@ -3387,6 +3696,7 @@ "version": "3.0.3", "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "license": "MIT", "peer": true, "dependencies": { "@types/estree": "^1.0.0" @@ -3397,6 +3707,7 @@ "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", "dev": true, + "license": "MIT", "engines": { "node": ">= 0.6" } @@ -3406,6 +3717,7 @@ "resolved": "https://registry.npmjs.org/event-stream/-/event-stream-3.3.4.tgz", "integrity": "sha512-QHpkERcGsR0T7Qm3HNJSyXKEEj8AHNxkY3PK8TS2KJvQ7NiSHe3DDpwVKKtoYprL/AreyzFBeIkBIWChAqn60g==", "dev": true, + "license": "MIT", "dependencies": { "duplexer": "~0.1.1", "from": "~0", @@ -3420,6 +3732,7 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/eventsource-parser/-/eventsource-parser-1.0.0.tgz", "integrity": "sha512-9jgfSCa3dmEme2ES3mPByGXfgZ87VbP97tng1G2nWwWx6bV2nYxm2AWCrbQjXToSe+yYlqaZNtxffR9IeQr95g==", + "license": "MIT", "engines": { "node": ">=14.18" } @@ -3429,6 +3742,7 @@ "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", "dev": true, + "license": "MIT", "dependencies": { "cross-spawn": "^7.0.3", 
"get-stream": "^6.0.0", @@ -3451,7 +3765,8 @@ "version": "3.0.7", "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", - "dev": true + "dev": true, + "license": "ISC" }, "node_modules/exit": { "version": "0.1.2", @@ -3467,6 +3782,7 @@ "resolved": "https://registry.npmjs.org/exit-hook/-/exit-hook-2.2.1.tgz", "integrity": "sha512-eNTPlAD67BmP31LDINZ3U7HSF8l57TxOY2PmBJ1shpCvpnxBF93mWCE8YHBnXs8qiUZJc9WDcWIeC3a2HIAMfw==", "dev": true, + "license": "MIT", "engines": { "node": ">=6" }, @@ -3479,6 +3795,7 @@ "resolved": "https://registry.npmjs.org/expect/-/expect-29.7.0.tgz", "integrity": "sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw==", "dev": true, + "license": "MIT", "dependencies": { "@jest/expect-utils": "^29.7.0", "jest-get-type": "^29.6.3", @@ -3494,19 +3811,21 @@ "version": "3.1.3", "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/fast-glob": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.1.tgz", - "integrity": "sha512-kNFPyjhh5cKjrUltxs+wFx+ZkbRaxxmZ+X0ZU31SOsxCEtP9VPgtq2teZw1DebupL5GmDaNQ6yKMMVcM41iqDg==", + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", + "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", "dev": true, + "license": "MIT", "dependencies": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", "glob-parent": "^5.1.2", "merge2": "^1.3.0", - "micromatch": "^4.0.4" + "micromatch": "^4.0.8" }, "engines": { "node": ">=8.6.0" @@ -3516,22 +3835,25 @@ "version": "2.1.0", "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/fastest-levenshtein": { "version": "1.0.16", "resolved": "https://registry.npmjs.org/fastest-levenshtein/-/fastest-levenshtein-1.0.16.tgz", "integrity": "sha512-eRnCtTTtGZFpQCwhJiUOuxPQWRXVKYDn0b2PeHfXL6/Zi53SLAzAHfVhVWK2AryC/WH05kGfxhFIPvTF0SXQzg==", "dev": true, + "license": "MIT", "engines": { "node": ">= 4.9.1" } }, "node_modules/fastq": { - "version": "1.15.0", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz", - "integrity": "sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==", + "version": "1.19.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.19.1.tgz", + "integrity": "sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ==", "dev": true, + "license": "ISC", "dependencies": { "reusify": "^1.0.4" } @@ -3541,6 +3863,7 @@ "resolved": "https://registry.npmjs.org/fb-watchman/-/fb-watchman-2.0.2.tgz", "integrity": "sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==", "dev": true, + "license": "Apache-2.0", "dependencies": { "bser": "2.1.1" } @@ -3549,13 +3872,48 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz", "integrity": 
"sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==", - "dev": true + "dev": true, + "license": "MIT" + }, + "node_modules/filelist": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/filelist/-/filelist-1.0.4.tgz", + "integrity": "sha512-w1cEuf3S+DrLCQL7ET6kz+gmlJdbq9J7yXCSjK/OZCPA+qEN1WyF4ZAf0YYJa4/shHJra2t/d/r8SV4Ji+x+8Q==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "minimatch": "^5.0.1" + } + }, + "node_modules/filelist/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/filelist/node_modules/minimatch": { + "version": "5.1.6", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", + "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=10" + } }, "node_modules/fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", "dev": true, + "license": "MIT", "dependencies": { "to-regex-range": "^5.0.1" }, @@ -3568,6 +3926,7 @@ "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", "dev": true, + "license": "MIT", "dependencies": { "locate-path": "^5.0.0", "path-exists": "^4.0.0" @@ -3577,9 +3936,9 @@ } }, "node_modules/follow-redirects": { - "version": "1.15.3", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.3.tgz", - "integrity": "sha512-1VzOtuEM8pC9SFU1E+8KfTjZyMztRsgEfwQl44z8A25uy13jSzTj6dyK2Df52iV0vgHCfBwLhDWevLn95w5v6Q==", + "version": "1.15.9", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.9.tgz", + "integrity": "sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==", "dev": true, "funding": [ { @@ -3587,6 +3946,7 @@ "url": "https://github.com/sponsors/RubenVerborgh" } ], + "license": "MIT", "engines": { "node": ">=4.0" }, @@ -3597,14 +3957,16 @@ } }, "node_modules/form-data": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-3.0.1.tgz", - "integrity": "sha512-RHkBKtLWUVwd7SqRIvCZMEvAMoGUp0XU+seQiZejj0COz3RI3hWP4sCv3gZWWLjJTd7rGwcsF5eKZGii0r/hbg==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-3.0.3.tgz", + "integrity": "sha512-q5YBMeWy6E2Un0nMGWMgI65MAKtaylxfNJGJxpGh45YDciZB4epbWpaAfImil6CPAPTYB4sh0URQNDRIZG5F2w==", "dev": true, + "license": "MIT", "dependencies": { "asynckit": "^0.4.0", "combined-stream": "^1.0.8", - "mime-types": "^2.1.12" + "es-set-tostringtag": "^2.1.0", + "mime-types": "^2.1.35" }, "engines": { "node": ">= 6" @@ -3614,13 +3976,15 @@ "version": "0.1.7", "resolved": 
"https://registry.npmjs.org/from/-/from-0.1.7.tgz", "integrity": "sha512-twe20eF1OxVxp/ML/kq2p1uc6KvFK/+vs8WjEbeKmV2He22MKm7YF2ANIt+EOqhJ5L3K/SuuPhk0hWQDjOM23g==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/fs-extra": { "version": "11.1.0", "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.1.0.tgz", "integrity": "sha512-0rcTq621PD5jM/e0a3EJoGC/1TC5ZBCERW82LQuwfGnCa1V8w7dpYH1yNu+SLb6E5dkeCBzKEyLGlFrnr+dUyw==", "dev": true, + "license": "MIT", "dependencies": { "graceful-fs": "^4.2.0", "jsonfile": "^6.0.1", @@ -3635,6 +3999,7 @@ "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", "dev": true, + "license": "ISC", "dependencies": { "minipass": "^3.0.0" }, @@ -3647,6 +4012,7 @@ "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", "dev": true, + "license": "ISC", "dependencies": { "yallist": "^4.0.0" }, @@ -3654,11 +4020,19 @@ "node": ">=8" } }, + "node_modules/fs-minipass/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true, + "license": "ISC" + }, "node_modules/fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", - "dev": true + "dev": true, + "license": "ISC" }, "node_modules/fsevents": { "version": "2.3.3", @@ -3666,6 +4040,7 @@ "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", "dev": true, "hasInstallScript": true, + "license": "MIT", "optional": true, "os": [ "darwin" @@ -3679,6 +4054,7 @@ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", "dev": true, + "license": "MIT", "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -3687,7 +4063,9 @@ "version": "3.0.2", "resolved": "https://registry.npmjs.org/gauge/-/gauge-3.0.2.tgz", "integrity": "sha512-+5J6MS/5XksCuXq++uFRsnUd7Ovu1XenbeuIuNRJxYWjgQbPuFhT14lAvsWfqfAmnwluf1OwMjz39HjfLPci0Q==", + "deprecated": "This package is no longer supported.", "dev": true, + "license": "ISC", "dependencies": { "aproba": "^1.0.3 || ^2.0.0", "color-support": "^1.1.2", @@ -3707,13 +4085,15 @@ "version": "3.0.7", "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", - "dev": true + "dev": true, + "license": "ISC" }, "node_modules/gensync": { "version": "1.0.0-beta.2", "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", "dev": true, + "license": "MIT", "engines": { "node": ">=6.9.0" } @@ -3723,24 +4103,66 @@ "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", "dev": true, + "license": 
"ISC", "engines": { "node": "6.* || 8.* || >= 10.*" } }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/get-package-type": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz", "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==", "dev": true, + "license": "MIT", "engines": { "node": ">=8.0.0" } }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "dev": true, + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/get-stream": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", "dev": true, + "license": "MIT", "engines": { "node": ">=10" }, @@ -3752,7 +4174,9 @@ "version": "7.2.3", "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", "dev": true, + "license": "ISC", "dependencies": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -3773,6 +4197,7 @@ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", "dev": true, + "license": "ISC", "dependencies": { "is-glob": "^4.0.1" }, @@ -3785,35 +4210,82 @@ "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", "dev": true, + "license": "MIT", "engines": { "node": ">=4" } }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/graceful-fs": { "version": "4.2.11", "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", - "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "license": "ISC" }, "node_modules/has-flag": { "version": "4.0.0", "resolved": 
"https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/has-unicode": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/has-unicode/-/has-unicode-2.0.1.tgz", "integrity": "sha512-8Rf9Y83NBReMnx0gFzA8JImQACstCYWUplepDa9xprwwtmgEZUF0h/i5xSA625zB/I37EtrswSST6OXxwaaIJQ==", - "dev": true + "dev": true, + "license": "ISC" }, "node_modules/hasown": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.0.tgz", - "integrity": "sha512-vUptKVTpIJhcczKBbgnS+RtcuYMB8+oNzPK2/Hp3hanz8JmpATdmmgLgSaadVREkDm+e2giHwY3ZRkyjSIDDFA==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", "dev": true, + "license": "MIT", "dependencies": { "function-bind": "^1.1.2" }, @@ -3825,13 +4297,15 @@ "version": "2.0.2", "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/https-proxy-agent": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz", "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==", "dev": true, + "license": "MIT", "dependencies": { "agent-base": "6", "debug": "4" @@ -3845,15 +4319,17 @@ "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", "dev": true, + "license": "Apache-2.0", "engines": { "node": ">=10.17.0" } }, "node_modules/import-local": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.1.0.tgz", - "integrity": "sha512-ASB07uLtnDs1o6EHjKpX34BKYDSqnFerfTOJL2HvMqF70LnxpjkzDB8J44oT9pu4AMPkQwf8jl6szgvNd2tRIg==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.2.0.tgz", + "integrity": "sha512-2SPlun1JUPWoM6t3F0dw0FkCF/jWY8kttcY4f599GLTSjh2OCuuhdTkJQsEcZzBqbXZGKMK2OqW1oZsjtf/gQA==", "dev": true, + "license": "MIT", "dependencies": { "pkg-dir": "^4.2.0", "resolve-cwd": "^3.0.0" @@ -3873,6 +4349,7 @@ "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", "integrity": 
"sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", "dev": true, + "license": "MIT", "engines": { "node": ">=0.8.19" } @@ -3881,7 +4358,9 @@ "version": "1.0.6", "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", "dev": true, + "license": "ISC", "dependencies": { "once": "^1.3.0", "wrappy": "1" @@ -3891,21 +4370,27 @@ "version": "2.0.4", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", - "dev": true + "dev": true, + "license": "ISC" }, "node_modules/is-arrayish": { "version": "0.2.1", "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/is-core-module": { - "version": "2.13.1", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.1.tgz", - "integrity": "sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==", + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", "dev": true, + "license": "MIT", "dependencies": { - "hasown": "^2.0.0" + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -3916,6 +4401,7 @@ "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=0.10.0" } @@ -3925,6 +4411,7 @@ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } @@ -3934,6 +4421,7 @@ "resolved": "https://registry.npmjs.org/is-generator-fn/-/is-generator-fn-2.1.0.tgz", "integrity": "sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=6" } @@ -3943,6 +4431,7 @@ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", "dev": true, + "license": "MIT", "dependencies": { "is-extglob": "^2.1.1" }, @@ -3955,17 +4444,19 @@ "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", "dev": true, + "license": "MIT", "engines": { "node": ">=0.12.0" } }, "node_modules/is-reference": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/is-reference/-/is-reference-3.0.1.tgz", - "integrity": 
"sha512-baJJdQLiYaJdvFbJqXrcGv3WU3QCzBlUcI5QhbesIm6/xPsvmO+2CDoi/GMOFBQEQm+PXkwOPrp9KK5ozZsp2w==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-reference/-/is-reference-3.0.3.tgz", + "integrity": "sha512-ixkJoqQvAP88E6wLydLGGqCJsrFUnqoH6HnaczB8XmDH1oaWU+xxdptvikTgaEhtZ53Ky6YXiBuUI2WXLMCwjw==", + "license": "MIT", "peer": true, "dependencies": { - "@types/estree": "*" + "@types/estree": "^1.0.6" } }, "node_modules/is-stream": { @@ -3973,6 +4464,7 @@ "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" }, @@ -3984,26 +4476,29 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", - "dev": true + "dev": true, + "license": "ISC" }, "node_modules/istanbul-lib-coverage": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.0.tgz", - "integrity": "sha512-eOeJ5BHCmHYvQK7xt9GkdHuzuCGS1Y6g9Gvnx3Ym33fz/HpLRYxiS0wHNr+m/MBC8B647Xt608vCDEvhl9c6Mw==", + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz", + "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==", "dev": true, + "license": "BSD-3-Clause", "engines": { "node": ">=8" } }, "node_modules/istanbul-lib-instrument": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.1.tgz", - "integrity": "sha512-EAMEJBsYuyyztxMxW3g7ugGPkrZsV57v0Hmv3mm1uQsmB+QnZuepg731CRaIgeUVSdmsTngOkSnauNF8p7FIhA==", + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.3.tgz", + "integrity": "sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q==", "dev": true, + "license": "BSD-3-Clause", "dependencies": { - "@babel/core": "^7.12.3", - "@babel/parser": "^7.14.7", - "@istanbuljs/schema": "^0.1.2", + "@babel/core": "^7.23.9", + "@babel/parser": "^7.23.9", + "@istanbuljs/schema": "^0.1.3", "istanbul-lib-coverage": "^3.2.0", "semver": "^7.5.4" }, @@ -4012,13 +4507,11 @@ } }, "node_modules/istanbul-lib-instrument/node_modules/semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "version": "7.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.1.tgz", + "integrity": "sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==", "dev": true, - "dependencies": { - "lru-cache": "^6.0.0" - }, + "license": "ISC", "bin": { "semver": "bin/semver.js" }, @@ -4031,40 +4524,11 @@ "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", "dev": true, + "license": "BSD-3-Clause", "dependencies": { "istanbul-lib-coverage": "^3.0.0", "make-dir": "^4.0.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/istanbul-lib-report/node_modules/make-dir": { - "version": "4.0.0", - "resolved": 
"https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", - "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", - "dev": true, - "dependencies": { - "semver": "^7.5.3" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/istanbul-lib-report/node_modules/semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", - "dev": true, - "dependencies": { - "lru-cache": "^6.0.0" - }, - "bin": { - "semver": "bin/semver.js" + "supports-color": "^7.1.0" }, "engines": { "node": ">=10" @@ -4075,6 +4539,7 @@ "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz", "integrity": "sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==", "dev": true, + "license": "BSD-3-Clause", "dependencies": { "debug": "^4.1.1", "istanbul-lib-coverage": "^3.0.0", @@ -4085,10 +4550,11 @@ } }, "node_modules/istanbul-reports": { - "version": "3.1.6", - "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.6.tgz", - "integrity": "sha512-TLgnMkKg3iTDsQ9PbPTdpfAK2DzjF9mqUG7RMgcQl8oFjad8ob4laGxv5XV5U9MAfx8D6tSJiUyuAwzLicaxlg==", + "version": "3.1.7", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.7.tgz", + "integrity": "sha512-BewmUXImeuRk2YY0PVbxgKAysvhRPUQE0h5QRM++nVWyubKGV0l8qQ5op8+B2DOmwSe63Jivj0BjkPQVf8fP5g==", "dev": true, + "license": "BSD-3-Clause", "dependencies": { "html-escaper": "^2.0.0", "istanbul-lib-report": "^3.0.0" @@ -4097,11 +4563,31 @@ "node": ">=8" } }, + "node_modules/jake": { + "version": "10.9.2", + "resolved": "https://registry.npmjs.org/jake/-/jake-10.9.2.tgz", + "integrity": "sha512-2P4SQ0HrLQ+fw6llpLnOaGAvN2Zu6778SJMrCUwns4fOoG9ayrTiZk3VV8sCPkVZF8ab0zksVpS8FDY5pRCNBA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "async": "^3.2.3", + "chalk": "^4.0.2", + "filelist": "^1.0.4", + "minimatch": "^3.1.2" + }, + "bin": { + "jake": "bin/cli.js" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/jest": { "version": "29.7.0", "resolved": "https://registry.npmjs.org/jest/-/jest-29.7.0.tgz", "integrity": "sha512-NIy3oAFp9shda19hy4HK0HRTWKtPJmGdnvywu01nOqNC2vZg+Z+fvJDxpMQA88eb2I9EcafcdjYgsDthnYTvGw==", "dev": true, + "license": "MIT", "dependencies": { "@jest/core": "^29.7.0", "@jest/types": "^29.6.3", @@ -4128,6 +4614,7 @@ "resolved": "https://registry.npmjs.org/jest-changed-files/-/jest-changed-files-29.7.0.tgz", "integrity": "sha512-fEArFiwf1BpQ+4bXSprcDc3/x4HSzL4al2tozwVpDFpsxALjLYdyiIK4e5Vz66GQJIbXJ82+35PtysofptNX2w==", "dev": true, + "license": "MIT", "dependencies": { "execa": "^5.0.0", "jest-util": "^29.7.0", @@ -4142,6 +4629,7 @@ "resolved": "https://registry.npmjs.org/jest-circus/-/jest-circus-29.7.0.tgz", "integrity": "sha512-3E1nCMgipcTkCocFwM90XXQab9bS+GMsjdpmPrlelaxwD93Ad8iVEjX/vvHPdLPnFf+L40u+5+iutRdA1N9myw==", "dev": true, + "license": "MIT", "dependencies": { "@jest/environment": "^29.7.0", "@jest/expect": "^29.7.0", @@ -4173,6 +4661,7 @@ "resolved": "https://registry.npmjs.org/jest-cli/-/jest-cli-29.7.0.tgz", "integrity": "sha512-OVVobw2IubN/GSYsxETi+gOe7Ka59EFMR/twOU3Jb2GnKKeMGJB5SGUUrEz3SFVmJASUdZUzy83sLNNQ2gZslg==", "dev": true, + "license": "MIT", "dependencies": { "@jest/core": "^29.7.0", "@jest/test-result": "^29.7.0", 
@@ -4206,6 +4695,7 @@ "resolved": "https://registry.npmjs.org/jest-config/-/jest-config-29.7.0.tgz", "integrity": "sha512-uXbpfeQ7R6TZBqI3/TxCU4q4ttk3u0PJeC+E0zbfSoSjq6bJ7buBPxzQPL0ifrkY4DNu4JUdk0ImlBUYi840eQ==", "dev": true, + "license": "MIT", "dependencies": { "@babel/core": "^7.11.6", "@jest/test-sequencer": "^29.7.0", @@ -4251,6 +4741,7 @@ "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.7.0.tgz", "integrity": "sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==", "dev": true, + "license": "MIT", "dependencies": { "chalk": "^4.0.0", "diff-sequences": "^29.6.3", @@ -4266,6 +4757,7 @@ "resolved": "https://registry.npmjs.org/jest-docblock/-/jest-docblock-29.7.0.tgz", "integrity": "sha512-q617Auw3A612guyaFgsbFeYpNP5t2aoUNLwBUbc/0kD1R4t9ixDbyFTHd1nok4epoVFpr7PmeWHrhvuV3XaJ4g==", "dev": true, + "license": "MIT", "dependencies": { "detect-newline": "^3.0.0" }, @@ -4278,6 +4770,7 @@ "resolved": "https://registry.npmjs.org/jest-each/-/jest-each-29.7.0.tgz", "integrity": "sha512-gns+Er14+ZrEoC5fhOfYCY1LOHHr0TI+rQUHZS8Ttw2l7gl+80eHc/gFf2Ktkw0+SIACDTeWvpFcv3B04VembQ==", "dev": true, + "license": "MIT", "dependencies": { "@jest/types": "^29.6.3", "chalk": "^4.0.0", @@ -4294,6 +4787,7 @@ "resolved": "https://registry.npmjs.org/jest-environment-node/-/jest-environment-node-29.7.0.tgz", "integrity": "sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw==", "dev": true, + "license": "MIT", "dependencies": { "@jest/environment": "^29.7.0", "@jest/fake-timers": "^29.7.0", @@ -4311,6 +4805,7 @@ "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.6.3.tgz", "integrity": "sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==", "dev": true, + "license": "MIT", "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } @@ -4320,6 +4815,7 @@ "resolved": "https://registry.npmjs.org/jest-haste-map/-/jest-haste-map-29.7.0.tgz", "integrity": "sha512-fP8u2pyfqx0K1rGn1R9pyE0/KTn+G7PxktWidOBTqFPLYX0b9ksaMFkhK5vrS3DVun09pckLdlx90QthlW7AmA==", "dev": true, + "license": "MIT", "dependencies": { "@jest/types": "^29.6.3", "@types/graceful-fs": "^4.1.3", @@ -4345,6 +4841,7 @@ "resolved": "https://registry.npmjs.org/jest-leak-detector/-/jest-leak-detector-29.7.0.tgz", "integrity": "sha512-kYA8IJcSYtST2BY9I+SMC32nDpBT3J2NvWJx8+JCuCdl/CR1I4EKUJROiP8XtCcxqgTTBGJNdbB1A8XRKbTetw==", "dev": true, + "license": "MIT", "dependencies": { "jest-get-type": "^29.6.3", "pretty-format": "^29.7.0" @@ -4358,6 +4855,7 @@ "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz", "integrity": "sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==", "dev": true, + "license": "MIT", "dependencies": { "chalk": "^4.0.0", "jest-diff": "^29.7.0", @@ -4373,6 +4871,7 @@ "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz", "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==", "dev": true, + "license": "MIT", "dependencies": { "@babel/code-frame": "^7.12.13", "@jest/types": "^29.6.3", @@ -4393,6 +4892,7 @@ "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-29.7.0.tgz", "integrity": "sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw==", "dev": true, + "license": "MIT", "dependencies": { "@jest/types": "^29.6.3", "@types/node": "*", @@ -4407,6 +4907,7 @@ "resolved": 
"https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.3.tgz", "integrity": "sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==", "dev": true, + "license": "MIT", "engines": { "node": ">=6" }, @@ -4424,6 +4925,7 @@ "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz", "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==", "dev": true, + "license": "MIT", "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } @@ -4433,6 +4935,7 @@ "resolved": "https://registry.npmjs.org/jest-resolve/-/jest-resolve-29.7.0.tgz", "integrity": "sha512-IOVhZSrg+UvVAshDSDtHyFCCBUl/Q3AAJv8iZ6ZjnZ74xzvwuzLXid9IIIPgTnY62SJjfuupMKZsZQRsCvxEgA==", "dev": true, + "license": "MIT", "dependencies": { "chalk": "^4.0.0", "graceful-fs": "^4.2.9", @@ -4453,6 +4956,7 @@ "resolved": "https://registry.npmjs.org/jest-resolve-dependencies/-/jest-resolve-dependencies-29.7.0.tgz", "integrity": "sha512-un0zD/6qxJ+S0et7WxeI3H5XSe9lTBBR7bOHCHXkKR6luG5mwDDlIzVQ0V5cZCuoTgEdcdwzTghYkTWfubi+nA==", "dev": true, + "license": "MIT", "dependencies": { "jest-regex-util": "^29.6.3", "jest-snapshot": "^29.7.0" @@ -4466,6 +4970,7 @@ "resolved": "https://registry.npmjs.org/jest-runner/-/jest-runner-29.7.0.tgz", "integrity": "sha512-fsc4N6cPCAahybGBfTRcq5wFR6fpLznMg47sY5aDpsoejOcVYFb07AHuSnR0liMcPTgBsA3ZJL6kFOjPdoNipQ==", "dev": true, + "license": "MIT", "dependencies": { "@jest/console": "^29.7.0", "@jest/environment": "^29.7.0", @@ -4498,6 +5003,7 @@ "resolved": "https://registry.npmjs.org/jest-runtime/-/jest-runtime-29.7.0.tgz", "integrity": "sha512-gUnLjgwdGqW7B4LvOIkbKs9WGbn+QLqRQQ9juC6HndeDiezIwhDP+mhMwHWCEcfQ5RUXa6OPnFF8BJh5xegwwQ==", "dev": true, + "license": "MIT", "dependencies": { "@jest/environment": "^29.7.0", "@jest/fake-timers": "^29.7.0", @@ -4531,6 +5037,7 @@ "resolved": "https://registry.npmjs.org/jest-snapshot/-/jest-snapshot-29.7.0.tgz", "integrity": "sha512-Rm0BMWtxBcioHr1/OX5YCP8Uov4riHvKPknOGs804Zg9JGZgmIBkbtlxJC/7Z4msKYVbIJtfU+tKb8xlYNfdkw==", "dev": true, + "license": "MIT", "dependencies": { "@babel/core": "^7.11.6", "@babel/generator": "^7.7.2", @@ -4558,13 +5065,11 @@ } }, "node_modules/jest-snapshot/node_modules/semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "version": "7.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.1.tgz", + "integrity": "sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==", "dev": true, - "dependencies": { - "lru-cache": "^6.0.0" - }, + "license": "ISC", "bin": { "semver": "bin/semver.js" }, @@ -4577,6 +5082,7 @@ "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", "dev": true, + "license": "MIT", "dependencies": { "@jest/types": "^29.6.3", "@types/node": "*", @@ -4594,6 +5100,7 @@ "resolved": "https://registry.npmjs.org/jest-validate/-/jest-validate-29.7.0.tgz", "integrity": "sha512-ZB7wHqaRGVw/9hST/OuFUReG7M8vKeq0/J2egIGLdvjHCmYqGARhzXmtgi+gVeZ5uXFF219aOc3Ls2yLg27tkw==", "dev": true, + "license": "MIT", "dependencies": { "@jest/types": "^29.6.3", "camelcase": "^6.2.0", @@ -4611,6 +5118,7 @@ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", "integrity": 
"sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", "dev": true, + "license": "MIT", "engines": { "node": ">=10" }, @@ -4623,6 +5131,7 @@ "resolved": "https://registry.npmjs.org/jest-watcher/-/jest-watcher-29.7.0.tgz", "integrity": "sha512-49Fg7WXkU3Vl2h6LbLtMQ/HyB6rXSIX7SqvBLQmssRBGN9I0PNvPmAmCWSOY6SOvrjhI/F7/bGAv9RtnsPA03g==", "dev": true, + "license": "MIT", "dependencies": { "@jest/test-result": "^29.7.0", "@jest/types": "^29.6.3", @@ -4642,6 +5151,7 @@ "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", "dev": true, + "license": "MIT", "dependencies": { "@types/node": "*", "jest-util": "^29.7.0", @@ -4657,6 +5167,7 @@ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", "dev": true, + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -4668,14 +5179,15 @@ } }, "node_modules/joi": { - "version": "17.11.0", - "resolved": "https://registry.npmjs.org/joi/-/joi-17.11.0.tgz", - "integrity": "sha512-NgB+lZLNoqISVy1rZocE9PZI36bL/77ie924Ri43yEvi9GUUMPeyVIr8KdFTMUlby1p0PBYMk9spIxEUQYqrJQ==", + "version": "17.13.3", + "resolved": "https://registry.npmjs.org/joi/-/joi-17.13.3.tgz", + "integrity": "sha512-otDA4ldcIx+ZXsKHWmp0YizCweVRZG96J10b0FevjfuncLO1oX59THoAmHkNubYJ+9gWsYsp5k8v4ib6oDv1fA==", "dev": true, + "license": "BSD-3-Clause", "dependencies": { - "@hapi/hoek": "^9.0.0", - "@hapi/topo": "^5.0.0", - "@sideway/address": "^4.1.3", + "@hapi/hoek": "^9.3.0", + "@hapi/topo": "^5.1.0", + "@sideway/address": "^4.1.5", "@sideway/formula": "^3.0.1", "@sideway/pinpoint": "^2.0.0" } @@ -4683,13 +5195,15 @@ "node_modules/js-tokens": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "license": "MIT" }, "node_modules/js-yaml": { "version": "3.14.1", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", "dev": true, + "license": "MIT", "dependencies": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -4699,28 +5213,31 @@ } }, "node_modules/jsesc": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", - "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", "dev": true, + "license": "MIT", "bin": { "jsesc": "bin/jsesc" }, "engines": { - "node": ">=4" + "node": ">=6" } }, "node_modules/json-parse-even-better-errors": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/json-schema-to-ts": { "version": "1.6.4", "resolved": 
"https://registry.npmjs.org/json-schema-to-ts/-/json-schema-to-ts-1.6.4.tgz", "integrity": "sha512-pR4yQ9DHz6itqswtHCm26mw45FSNfQ9rEQjosaZErhn5J3J2sIViQiz8rDaezjKAhFGpmsoczYVBgGHzFw/stA==", "dev": true, + "license": "MIT", "dependencies": { "@types/json-schema": "^7.0.6", "ts-toolbelt": "^6.15.5" @@ -4731,6 +5248,7 @@ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", "dev": true, + "license": "MIT", "optional": true }, "node_modules/json5": { @@ -4738,6 +5256,7 @@ "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", "dev": true, + "license": "MIT", "bin": { "json5": "lib/cli.js" }, @@ -4750,6 +5269,7 @@ "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", "dev": true, + "license": "MIT", "dependencies": { "universalify": "^2.0.0" }, @@ -4762,6 +5282,7 @@ "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", "dev": true, + "license": "MIT", "engines": { "node": ">=6" } @@ -4771,6 +5292,7 @@ "resolved": "https://registry.npmjs.org/lazy-ass/-/lazy-ass-1.6.0.tgz", "integrity": "sha512-cc8oEVoctTvsFZ/Oje/kGnHbpWHYBe8IAJe4C0QNc3t8uM/0Y8+erSz/7Y1ALuXTEZTMvxXwO6YbX1ey3ujiZw==", "dev": true, + "license": "MIT", "engines": { "node": "> 0.8" } @@ -4780,6 +5302,7 @@ "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", "dev": true, + "license": "MIT", "engines": { "node": ">=6" } @@ -4788,12 +5311,14 @@ "version": "1.2.4", "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/locate-character": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/locate-character/-/locate-character-3.0.0.tgz", "integrity": "sha512-SW13ws7BjaeJ6p7Q6CO2nchbYEc3X3J6WrmTTDto7yMPqVSZTUyY5Tjbid+Ab8gLnATtygYtiDIJGQRRn2ZOiA==", + "license": "MIT", "peer": true }, "node_modules/locate-path": { @@ -4801,6 +5326,7 @@ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", "dev": true, + "license": "MIT", "dependencies": { "p-locate": "^4.1.0" }, @@ -4812,18 +5338,21 @@ "version": "4.17.21", "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/lodash.memoize": { "version": "4.1.2", "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/loose-envify": { "version": "1.4.0", "resolved": 
"https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "license": "MIT", "dependencies": { "js-tokens": "^3.0.0 || ^4.0.0" }, @@ -4832,55 +5361,67 @@ } }, "node_modules/lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", "dev": true, + "license": "ISC", "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=10" + "yallist": "^3.0.2" } }, "node_modules/magic-string": { - "version": "0.30.3", - "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.3.tgz", - "integrity": "sha512-B7xGbll2fG/VjP+SWg4sX3JynwIU0mjoTc6MPpKNuIvftk6u6vqhDnk1R80b8C2GBR6ywqy+1DcKBrevBg+bmw==", + "version": "0.30.17", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.17.tgz", + "integrity": "sha512-sNPKHvyjVf7gyjwS4xGTaW/mCnF8wnjtifKBEhxfZ7E/S8tQ0rssrwGNn6q8JH/ohItJfSQp9mBtQYuTlH5QnA==", + "license": "MIT", "peer": true, "dependencies": { - "@jridgewell/sourcemap-codec": "^1.4.15" - }, - "engines": { - "node": ">=12" + "@jridgewell/sourcemap-codec": "^1.5.0" } }, "node_modules/make-dir": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", - "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", + "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", "dev": true, + "license": "MIT", "dependencies": { - "semver": "^6.0.0" + "semver": "^7.5.3" }, "engines": { - "node": ">=8" + "node": ">=10" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/make-dir/node_modules/semver": { + "version": "7.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.1.tgz", + "integrity": "sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/make-error": { "version": "1.3.6", "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==", - "dev": true + "dev": true, + "license": "ISC" }, "node_modules/makeerror": { "version": "1.0.12", "resolved": "https://registry.npmjs.org/makeerror/-/makeerror-1.0.12.tgz", "integrity": "sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==", "dev": true, + "license": "BSD-3-Clause", "dependencies": { "tmpl": "1.0.5" } @@ -4891,34 +5432,48 @@ "integrity": "sha512-CkYQrPYZfWnu/DAmVCpTSX/xHpKZ80eKh2lAkyA6AJTef6bW+6JpbQZN5rofum7da+SyN1bi5ctTm+lTfcCW3g==", "dev": true }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": 
"sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, "node_modules/mdn-data": { "version": "2.0.30", "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.30.tgz", "integrity": "sha512-GaqWWShW4kv/G9IEucWScBx9G1/vsFZZJUO+tD26M8J8z3Kw5RDQjaoZe03YAClgeS/SWPOcb4nkFBTEi5DUEA==", + "license": "CC0-1.0", "peer": true }, "node_modules/merge-stream": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/merge2": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", "dev": true, + "license": "MIT", "engines": { "node": ">= 8" } }, "node_modules/micromatch": { - "version": "4.0.5", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", - "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", "dev": true, + "license": "MIT", "dependencies": { - "braces": "^3.0.2", + "braces": "^3.0.3", "picomatch": "^2.3.1" }, "engines": { @@ -4930,6 +5485,7 @@ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", "dev": true, + "license": "MIT", "engines": { "node": ">= 0.6" } @@ -4939,6 +5495,7 @@ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", "dev": true, + "license": "MIT", "dependencies": { "mime-db": "1.52.0" }, @@ -4951,6 +5508,7 @@ "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", "dev": true, + "license": "MIT", "engines": { "node": ">=6" } @@ -4960,6 +5518,7 @@ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", "dev": true, + "license": "ISC", "dependencies": { "brace-expansion": "^1.1.7" }, @@ -4972,6 +5531,7 @@ "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", "dev": true, + "license": "MIT", "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -4981,6 +5541,7 @@ "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", "dev": true, + "license": "ISC", "engines": { "node": ">=8" } @@ -4990,6 +5551,7 @@ "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", "dev": true, + "license": "MIT", 
"dependencies": { "minipass": "^3.0.0", "yallist": "^4.0.0" @@ -5003,6 +5565,7 @@ "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", "dev": true, + "license": "ISC", "dependencies": { "yallist": "^4.0.0" }, @@ -5010,11 +5573,19 @@ "node": ">=8" } }, + "node_modules/minizlib/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true, + "license": "ISC" + }, "node_modules/mkdirp": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", "dev": true, + "license": "MIT", "bin": { "mkdirp": "bin/cmd.js" }, @@ -5027,15 +5598,17 @@ "resolved": "https://registry.npmjs.org/mri/-/mri-1.2.0.tgz", "integrity": "sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA==", "dev": true, + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" }, "node_modules/nanoid": { "version": "3.3.6", @@ -5047,6 +5620,7 @@ "url": "https://github.com/sponsors/ai" } ], + "license": "MIT", "bin": { "nanoid": "bin/nanoid.cjs" }, @@ -5058,15 +5632,17 @@ "version": "1.4.0", "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/next": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/next/-/next-14.1.1.tgz", - "integrity": "sha512-McrGJqlGSHeaz2yTRPkEucxQKe5Zq7uPwyeHNmJaZNY4wx9E9QdxmTp310agFRoMuIYgQrCrT3petg13fSVOww==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/next/-/next-14.2.25.tgz", + "integrity": "sha512-N5M7xMc4wSb4IkPvEV5X2BRRXUmhVHNyaXwEM86+voXthSZz8ZiRyQW4p9mwAoAPIm6OzuVZtn7idgEJeAJN3Q==", + "license": "MIT", "dependencies": { - "@next/env": "14.1.1", - "@swc/helpers": "0.5.2", + "@next/env": "14.2.25", + "@swc/helpers": "0.5.5", "busboy": "1.6.0", "caniuse-lite": "^1.0.30001579", "graceful-fs": "^4.2.11", @@ -5080,18 +5656,19 @@ "node": ">=18.17.0" }, "optionalDependencies": { - "@next/swc-darwin-arm64": "14.1.1", - "@next/swc-darwin-x64": "14.1.1", - "@next/swc-linux-arm64-gnu": "14.1.1", - "@next/swc-linux-arm64-musl": "14.1.1", - "@next/swc-linux-x64-gnu": "14.1.1", - "@next/swc-linux-x64-musl": "14.1.1", - "@next/swc-win32-arm64-msvc": "14.1.1", - "@next/swc-win32-ia32-msvc": "14.1.1", - "@next/swc-win32-x64-msvc": "14.1.1" + "@next/swc-darwin-arm64": "14.2.25", + "@next/swc-darwin-x64": "14.2.25", + "@next/swc-linux-arm64-gnu": "14.2.25", + "@next/swc-linux-arm64-musl": "14.2.25", + "@next/swc-linux-x64-gnu": "14.2.25", + "@next/swc-linux-x64-musl": "14.2.25", + "@next/swc-win32-arm64-msvc": "14.2.25", + 
"@next/swc-win32-ia32-msvc": "14.2.25", + "@next/swc-win32-x64-msvc": "14.2.25" }, "peerDependencies": { "@opentelemetry/api": "^1.1.0", + "@playwright/test": "^1.41.2", "react": "^18.2.0", "react-dom": "^18.2.0", "sass": "^1.3.0" @@ -5100,6 +5677,9 @@ "@opentelemetry/api": { "optional": true }, + "@playwright/test": { + "optional": true + }, "sass": { "optional": true } @@ -5110,6 +5690,7 @@ "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.9.tgz", "integrity": "sha512-DJm/CJkZkRjKKj4Zi4BsKVZh3ValV5IR5s7LVZnW+6YMh0W1BfNA8XSs6DLMGYlId5F3KnA70uu2qepcR08Qqg==", "dev": true, + "license": "MIT", "dependencies": { "whatwg-url": "^5.0.0" }, @@ -5126,10 +5707,11 @@ } }, "node_modules/node-gyp-build": { - "version": "4.6.1", - "resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.6.1.tgz", - "integrity": "sha512-24vnklJmyRS8ViBNI8KbtK/r/DmXQMRiOMXTNz2nrTnAYUwjmEEbnnpB/+kt+yWRv73bPsSPRFddrcIbAxSiMQ==", + "version": "4.8.4", + "resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.8.4.tgz", + "integrity": "sha512-LA4ZjwlnUblHVgq0oBF3Jl/6h/Nvs5fzBLwdEF4nuxnFdsfajde4WfxtJr3CaiH+F6ewcIB/q4jQ4UzPyid+CQ==", "dev": true, + "license": "MIT", "bin": { "node-gyp-build": "bin.js", "node-gyp-build-optional": "optional.js", @@ -5140,19 +5722,22 @@ "version": "0.4.0", "resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz", "integrity": "sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/node-releases": { - "version": "2.0.13", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.13.tgz", - "integrity": "sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ==", - "dev": true + "version": "2.0.19", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.19.tgz", + "integrity": "sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw==", + "dev": true, + "license": "MIT" }, "node_modules/nopt": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/nopt/-/nopt-5.0.0.tgz", "integrity": "sha512-Tbj67rffqceeLpcRXrT7vKAN8CwfPeIBgM7E6iBkmKLV7bEMwpGgYLGv0jACUsECaa/vuxP0IjEont6umdMgtQ==", "dev": true, + "license": "ISC", "dependencies": { "abbrev": "1" }, @@ -5168,6 +5753,7 @@ "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", "dev": true, + "license": "MIT", "engines": { "node": ">=0.10.0" } @@ -5177,6 +5763,7 @@ "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", "dev": true, + "license": "MIT", "dependencies": { "path-key": "^3.0.0" }, @@ -5188,7 +5775,9 @@ "version": "5.0.1", "resolved": "https://registry.npmjs.org/npmlog/-/npmlog-5.0.1.tgz", "integrity": "sha512-AqZtDUWOMKs1G/8lwylVjrdYgqA4d9nu8hc+0gzRxlDb1I10+FHBGMXs6aiQHFdCUUlqH99MUMuLfzWDNDtfxw==", + "deprecated": "This package is no longer supported.", "dev": true, + "license": "ISC", "dependencies": { "are-we-there-yet": "^2.0.0", "console-control-strings": "^1.1.0", @@ -5201,6 +5790,7 @@ "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", "integrity": 
"sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", "dev": true, + "license": "MIT", "engines": { "node": ">=0.10.0" } @@ -5210,6 +5800,7 @@ "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", "dev": true, + "license": "ISC", "dependencies": { "wrappy": "1" } @@ -5219,6 +5810,7 @@ "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", "dev": true, + "license": "MIT", "dependencies": { "mimic-fn": "^2.1.0" }, @@ -5234,6 +5826,7 @@ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", "dev": true, + "license": "MIT", "dependencies": { "yocto-queue": "^0.1.0" }, @@ -5249,6 +5842,7 @@ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", "dev": true, + "license": "MIT", "dependencies": { "p-limit": "^2.2.0" }, @@ -5261,6 +5855,7 @@ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", "dev": true, + "license": "MIT", "dependencies": { "p-try": "^2.0.0" }, @@ -5276,6 +5871,7 @@ "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=6" } @@ -5285,6 +5881,7 @@ "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", "dev": true, + "license": "MIT", "dependencies": { "@babel/code-frame": "^7.0.0", "error-ex": "^1.3.1", @@ -5303,6 +5900,7 @@ "resolved": "https://registry.npmjs.org/parse-ms/-/parse-ms-2.1.0.tgz", "integrity": "sha512-kHt7kzLoS9VBZfUsiKjv43mr91ea+U05EyKkEtqp7vNbHxmaVuEqN7XxeEVnGrMtYOAxGrDElSi96K7EgO1zCA==", "dev": true, + "license": "MIT", "engines": { "node": ">=6" } @@ -5311,13 +5909,15 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-browserify/-/path-browserify-1.0.1.tgz", "integrity": "sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/path-exists": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } @@ -5327,6 +5927,7 @@ "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", "dev": true, + "license": "MIT", "engines": { "node": ">=0.10.0" } @@ -5336,6 +5937,7 @@ "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } @@ 
-5344,19 +5946,25 @@ "version": "1.0.7", "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/path-to-regexp": { "version": "6.2.1", "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.2.1.tgz", "integrity": "sha512-JLyh7xT1kizaEvcaXOQwOc2/Yhw6KZOvPf1S8401UyLk86CU79LN3vl7ztXGm/pZ+YjoyAJ4rxmHwbkBXJX+yw==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/pause-stream": { "version": "0.0.11", "resolved": "https://registry.npmjs.org/pause-stream/-/pause-stream-0.0.11.tgz", "integrity": "sha512-e3FBlXLmN/D1S+zHzanP4E/4Z60oFAa3O051qt1pxa7DEJWKAyil6upYVXCWadEnuoqa4Pkc9oUx9zsxYeRv8A==", "dev": true, + "license": [ + "MIT", + "Apache2" + ], "dependencies": { "through": "~2.3" } @@ -5365,6 +5973,7 @@ "version": "3.1.0", "resolved": "https://registry.npmjs.org/periscopic/-/periscopic-3.1.0.tgz", "integrity": "sha512-vKiQ8RRtkl9P+r/+oefh25C3fhybptkHKCZSPlcXiJux2tJF55GnEj3BVn4A5gKfq9NWWXXrxkHBwVPUfH0opw==", + "license": "MIT", "peer": true, "dependencies": { "@types/estree": "^1.0.0", @@ -5375,13 +5984,15 @@ "node_modules/picocolors": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", - "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==" + "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==", + "license": "ISC" }, "node_modules/picomatch": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", "dev": true, + "license": "MIT", "engines": { "node": ">=8.6" }, @@ -5390,10 +6001,11 @@ } }, "node_modules/pirates": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.6.tgz", - "integrity": "sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==", + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz", + "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==", "dev": true, + "license": "MIT", "engines": { "node": ">= 6" } @@ -5403,6 +6015,7 @@ "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", "dev": true, + "license": "MIT", "dependencies": { "find-up": "^4.0.0" }, @@ -5428,6 +6041,7 @@ "url": "https://github.com/sponsors/ai" } ], + "license": "MIT", "dependencies": { "nanoid": "^3.3.6", "picocolors": "^1.0.0", @@ -5442,6 +6056,7 @@ "resolved": "https://registry.npmjs.org/pretty-bytes/-/pretty-bytes-5.6.0.tgz", "integrity": "sha512-FFw039TmrBqFK8ma/7OL3sDz/VytdtJr044/QUJtH0wK9lb9jLq9tJyIxUwtQJHwar2BqtiA4iCWSwo9JLkzFg==", "dev": true, + "license": "MIT", "engines": { "node": ">=6" }, @@ -5454,6 +6069,7 @@ "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", "dev": true, + "license": "MIT", "dependencies": { "@jest/schemas": "^29.6.3", "ansi-styles": "^5.0.0", @@ -5468,6 +6084,7 @@ "resolved": 
"https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", "dev": true, + "license": "MIT", "engines": { "node": ">=10" }, @@ -5480,6 +6097,7 @@ "resolved": "https://registry.npmjs.org/pretty-ms/-/pretty-ms-7.0.1.tgz", "integrity": "sha512-973driJZvxiGOQ5ONsFhOF/DtzPMOMtgC11kCpUrPGMTgqp2q/1gwzCquocrN33is0VZ5GFHXZYMM9l6h67v2Q==", "dev": true, + "license": "MIT", "dependencies": { "parse-ms": "^2.1.0" }, @@ -5495,6 +6113,7 @@ "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", "dev": true, + "license": "MIT", "dependencies": { "kleur": "^3.0.3", "sisteransi": "^1.0.5" @@ -5507,13 +6126,15 @@ "version": "1.1.0", "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/ps-tree": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/ps-tree/-/ps-tree-1.2.0.tgz", "integrity": "sha512-0VnamPPYHl4uaU/nSFeZZpR21QAWRz+sRv4iW9+v/GS/J5U5iZB5BNN6J0RMoOvdx2gWM2+ZFMIm58q24e4UYA==", "dev": true, + "license": "MIT", "dependencies": { "event-stream": "=3.3.4" }, @@ -5525,18 +6146,19 @@ } }, "node_modules/punycode": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz", - "integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", "dev": true, + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/pure-rand": { - "version": "6.0.4", - "resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.0.4.tgz", - "integrity": "sha512-LA0Y9kxMYv47GIPJy6MI84fqTd2HmYZI83W/kM/SkKfDlajnZYfmXFTxkbY+xSBPkLJxltMa9hIkmdc29eguMA==", + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.1.0.tgz", + "integrity": "sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA==", "dev": true, "funding": [ { @@ -5547,7 +6169,8 @@ "type": "opencollective", "url": "https://opencollective.com/fast-check" } - ] + ], + "license": "MIT" }, "node_modules/queue-microtask": { "version": "1.2.3", @@ -5567,12 +6190,14 @@ "type": "consulting", "url": "https://feross.org/support" } - ] + ], + "license": "MIT" }, "node_modules/react": { "version": "18.2.0", "resolved": "https://registry.npmjs.org/react/-/react-18.2.0.tgz", "integrity": "sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==", + "license": "MIT", "dependencies": { "loose-envify": "^1.1.0" }, @@ -5584,6 +6209,7 @@ "version": "18.2.0", "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz", "integrity": "sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==", + "license": "MIT", "dependencies": { "loose-envify": "^1.1.0", "scheduler": "^0.23.0" @@ -5593,16 +6219,18 @@ } }, "node_modules/react-is": { - "version": "18.2.0", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz", - "integrity": 
"sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==", - "dev": true + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "dev": true, + "license": "MIT" }, "node_modules/readable-stream": { "version": "3.6.2", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", "dev": true, + "license": "MIT", "dependencies": { "inherits": "^2.0.3", "string_decoder": "^1.1.1", @@ -5616,13 +6244,15 @@ "version": "0.13.11", "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz", "integrity": "sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/require-directory": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", "dev": true, + "license": "MIT", "engines": { "node": ">=0.10.0" } @@ -5632,23 +6262,28 @@ "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", "dev": true, + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/resolve": { - "version": "1.22.8", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz", - "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==", + "version": "1.22.10", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.10.tgz", + "integrity": "sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w==", "dev": true, + "license": "MIT", "dependencies": { - "is-core-module": "^2.13.0", + "is-core-module": "^2.16.0", "path-parse": "^1.0.7", "supports-preserve-symlinks-flag": "^1.0.0" }, "bin": { "resolve": "bin/resolve" }, + "engines": { + "node": ">= 0.4" + }, "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -5658,6 +6293,7 @@ "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz", "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==", "dev": true, + "license": "MIT", "dependencies": { "resolve-from": "^5.0.0" }, @@ -5670,24 +6306,27 @@ "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/resolve.exports": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.2.tgz", - "integrity": "sha512-X2UW6Nw3n/aMgDVy+0rSqgHlv39WZAlZrXCdnbyEiKm17DSqHX4MmQMaST3FbeWR5FTuRcUwYAziZajji0Y7mg==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.3.tgz", + "integrity": "sha512-OcXjMsGdhL4XnbShKpAcSqPMzQoYkYyhbEaeSko47MjRP9NfEQMhZkXL1DoFlt9LWQn4YttrdnV6X2OiyzBi+A==", "dev": true, + "license": "MIT", "engines": { "node": ">=10" } }, 
"node_modules/reusify": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", - "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", + "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", "dev": true, + "license": "MIT", "engines": { "iojs": ">=1.0.0", "node": ">=0.10.0" @@ -5697,7 +6336,9 @@ "version": "3.0.2", "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", "dev": true, + "license": "ISC", "dependencies": { "glob": "^7.1.3" }, @@ -5727,15 +6368,17 @@ "url": "https://feross.org/support" } ], + "license": "MIT", "dependencies": { "queue-microtask": "^1.2.2" } }, "node_modules/rxjs": { - "version": "7.8.1", - "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.1.tgz", - "integrity": "sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==", + "version": "7.8.2", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.2.tgz", + "integrity": "sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA==", "dev": true, + "license": "Apache-2.0", "dependencies": { "tslib": "^2.1.0" } @@ -5758,45 +6401,64 @@ "type": "consulting", "url": "https://feross.org/support" } - ] + ], + "license": "MIT" }, "node_modules/scheduler": { - "version": "0.23.0", - "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.0.tgz", - "integrity": "sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==", + "version": "0.23.2", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz", + "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==", + "license": "MIT", "dependencies": { "loose-envify": "^1.1.0" } }, "node_modules/semver": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.1.1.tgz", - "integrity": "sha512-rWYq2e5iYW+fFe/oPPtYJxYgjBm8sC4rmoGdUOgBB7VnwKt6HrL793l2voH1UlsyYZpJ4g0wfjnTEO1s1NP2eQ==", + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "dev": true, + "license": "ISC", "bin": { - "semver": "bin/semver" + "semver": "bin/semver.js" } }, "node_modules/seroval": { - "version": "0.5.1", - "resolved": "https://registry.npmjs.org/seroval/-/seroval-0.5.1.tgz", - "integrity": "sha512-ZfhQVB59hmIauJG5Ydynupy8KHyr5imGNtdDhbZG68Ufh1Ynkv9KOYOAABf71oVbQxJ8VkWnMHAjEHE7fWkH5g==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/seroval/-/seroval-1.2.1.tgz", + "integrity": "sha512-yBxFFs3zmkvKNmR0pFSU//rIsYjuX418TnlDmc2weaq5XFDqDIV/NOMPBoLrbxjLH42p4UzRuXHryXh9dYcKcw==", + "license": "MIT", + "peer": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/seroval-plugins": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/seroval-plugins/-/seroval-plugins-1.2.1.tgz", + "integrity": "sha512-H5vs53+39+x4Udwp4J5rNZfgFuA+Lt+uU+09w1gYBVWomtAl98B+E9w7yC05Xc81/HgLvJdlyqJbU0fJCKCmdw==", + "license": "MIT", "peer": true, "engines": { "node": ">=10" + }, + 
"peerDependencies": { + "seroval": "^1.0" } }, "node_modules/set-blocking": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", "integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==", - "dev": true + "dev": true, + "license": "ISC" }, "node_modules/shebang-command": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", "dev": true, + "license": "MIT", "dependencies": { "shebang-regex": "^3.0.0" }, @@ -5809,6 +6471,7 @@ "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } @@ -5818,6 +6481,7 @@ "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.0.2.tgz", "integrity": "sha512-MY2/qGx4enyjprQnFaZsHib3Yadh3IXyV2C321GY0pjGfVBu4un0uDJkwgdxqO+Rdx8JMT8IfJIRwbYVz3Ob3Q==", "dev": true, + "license": "ISC", "engines": { "node": ">=14" }, @@ -5829,31 +6493,36 @@ "version": "1.0.5", "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/slash": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/solid-js": { - "version": "1.7.11", - "resolved": "https://registry.npmjs.org/solid-js/-/solid-js-1.7.11.tgz", - "integrity": "sha512-JkuvsHt8jqy7USsy9xJtT18aF9r2pFO+GB8JQ2XGTvtF49rGTObB46iebD25sE3qVNvIbwglXOXdALnJq9IHtQ==", + "version": "1.9.5", + "resolved": "https://registry.npmjs.org/solid-js/-/solid-js-1.9.5.tgz", + "integrity": "sha512-ogI3DaFcyn6UhYhrgcyRAMbu/buBJitYQASZz5WzfQVPP10RD2AbCoRZ517psnezrasyCbWzIxZ6kVqet768xw==", + "license": "MIT", "peer": true, "dependencies": { "csstype": "^3.1.0", - "seroval": "^0.5.0" + "seroval": "^1.1.0", + "seroval-plugins": "^1.1.0" } }, "node_modules/solid-swr-store": { "version": "0.10.7", "resolved": "https://registry.npmjs.org/solid-swr-store/-/solid-swr-store-0.10.7.tgz", "integrity": "sha512-A6d68aJmRP471aWqKKPE2tpgOiR5fH4qXQNfKIec+Vap+MGQm3tvXlT8n0I8UgJSlNAsSAUuw2VTviH2h3Vv5g==", + "license": "MIT", "engines": { "node": ">=10" }, @@ -5867,14 +6536,16 @@ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", "dev": true, + "license": "BSD-3-Clause", "engines": { "node": ">=0.10.0" } }, "node_modules/source-map-js": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz", - "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "license": "BSD-3-Clause", "engines": { "node": ">=0.10.0" } @@ -5884,6 +6555,7 @@ 
"resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.13.tgz", "integrity": "sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==", "dev": true, + "license": "MIT", "dependencies": { "buffer-from": "^1.0.0", "source-map": "^0.6.0" @@ -5894,6 +6566,7 @@ "resolved": "https://registry.npmjs.org/split/-/split-0.3.3.tgz", "integrity": "sha512-wD2AeVmxXRBoX44wAycgjVpMhvbwdI2aZjCkvfNcH1YqHQvJVa1duWc73OyVGJUc05fhFaTZeQ/PYsrmyH0JVA==", "dev": true, + "license": "MIT", "dependencies": { "through": "2" }, @@ -5905,12 +6578,14 @@ "version": "1.0.3", "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", - "dev": true + "dev": true, + "license": "BSD-3-Clause" }, "node_modules/sswr": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/sswr/-/sswr-2.0.0.tgz", "integrity": "sha512-mV0kkeBHcjcb0M5NqKtKVg/uTIYNlIIniyDfSGrSfxpEdM9C365jK0z55pl9K0xAkNTJi2OAOVFQpgMPUk+V0w==", + "license": "MIT", "dependencies": { "swrev": "^4.0.0" }, @@ -5923,6 +6598,7 @@ "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz", "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==", "dev": true, + "license": "MIT", "dependencies": { "escape-string-regexp": "^2.0.0" }, @@ -5931,19 +6607,20 @@ } }, "node_modules/start-server-and-test": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/start-server-and-test/-/start-server-and-test-2.0.3.tgz", - "integrity": "sha512-QsVObjfjFZKJE6CS6bSKNwWZCKBG6975/jKRPPGFfFh+yOQglSeGXiNWjzgQNXdphcBI9nXbyso9tPfX4YAUhg==", + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/start-server-and-test/-/start-server-and-test-2.0.11.tgz", + "integrity": "sha512-TN39gLzPhHAflxyOkE/oMfQGj+pj3JgF6qVicFH/JrXt7xXktidKXwqfRga+ve7lVA8+RgPZVc25VrEPRScaDw==", "dev": true, + "license": "MIT", "dependencies": { "arg": "^5.0.2", "bluebird": "3.7.2", "check-more-types": "2.24.0", - "debug": "4.3.4", + "debug": "4.4.0", "execa": "5.1.1", "lazy-ass": "1.6.0", "ps-tree": "1.2.0", - "wait-on": "7.2.0" + "wait-on": "8.0.3" }, "bin": { "server-test": "src/bin/start.js", @@ -5954,17 +6631,12 @@ "node": ">=16" } }, - "node_modules/start-server-and-test/node_modules/arg": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", - "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", - "dev": true - }, "node_modules/stream-combiner": { "version": "0.0.4", "resolved": "https://registry.npmjs.org/stream-combiner/-/stream-combiner-0.0.4.tgz", "integrity": "sha512-rT00SPnTVyRsaSz5zgSPma/aHSOic5U1prhYdRy5HS2kTZviFpmDgzilbtsJsxiroqACmayynDN/9VzIbX5DOw==", "dev": true, + "license": "MIT", "dependencies": { "duplexer": "~0.1.1" } @@ -5982,6 +6654,7 @@ "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", "dev": true, + "license": "MIT", "dependencies": { "safe-buffer": "~5.2.0" } @@ -5991,6 +6664,7 @@ "resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz", "integrity": "sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==", "dev": true, + "license": "MIT", "dependencies": { "char-regex": "^1.0.2", "strip-ansi": "^6.0.0" @@ 
-6004,6 +6678,7 @@ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", "dev": true, + "license": "MIT", "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", @@ -6018,6 +6693,7 @@ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", "dev": true, + "license": "MIT", "dependencies": { "ansi-regex": "^5.0.1" }, @@ -6030,6 +6706,7 @@ "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz", "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } @@ -6039,6 +6716,7 @@ "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", "dev": true, + "license": "MIT", "engines": { "node": ">=6" } @@ -6048,6 +6726,7 @@ "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" }, @@ -6059,6 +6738,7 @@ "version": "5.1.1", "resolved": "https://registry.npmjs.org/styled-jsx/-/styled-jsx-5.1.1.tgz", "integrity": "sha512-pW7uC1l4mBZ8ugbiZrcIsiIvVx1UmTfw7UkC3Um2tmfUq9Bhk8IiyEIPl6F8agHgjzku6j0xQEZbfA5uSgSaCw==", + "license": "MIT", "dependencies": { "client-only": "0.0.1" }, @@ -6082,6 +6762,7 @@ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -6094,6 +6775,7 @@ "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", "dev": true, + "license": "MIT", "engines": { "node": ">= 0.4" }, @@ -6102,23 +6784,25 @@ } }, "node_modules/svelte": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/svelte/-/svelte-4.2.0.tgz", - "integrity": "sha512-kVsdPjDbLrv74SmLSUzAsBGquMs4MPgWGkGLpH+PjOYnFOziAvENVzgJmyOCV2gntxE32aNm8/sqNKD6LbIpeQ==", + "version": "4.2.19", + "resolved": "https://registry.npmjs.org/svelte/-/svelte-4.2.19.tgz", + "integrity": "sha512-IY1rnGr6izd10B0A8LqsBfmlT5OILVuZ7XsI0vdGPEvuonFV7NYEUK4dAkm9Zg2q0Um92kYjTpS1CAP3Nh/KWw==", + "license": "MIT", "peer": true, "dependencies": { "@ampproject/remapping": "^2.2.1", "@jridgewell/sourcemap-codec": "^1.4.15", "@jridgewell/trace-mapping": "^0.3.18", + "@types/estree": "^1.0.1", "acorn": "^8.9.0", "aria-query": "^5.3.0", - "axobject-query": "^3.2.1", + "axobject-query": "^4.0.0", "code-red": "^1.0.3", "css-tree": "^2.3.1", "estree-walker": "^3.0.3", "is-reference": "^3.0.1", "locate-character": "^3.0.0", - "magic-string": "^0.30.0", + "magic-string": "^0.30.4", "periscopic": "^3.1.0" }, "engines": { @@ -6129,6 +6813,7 @@ "version": "2.2.0", "resolved": "https://registry.npmjs.org/swr/-/swr-2.2.0.tgz", "integrity": 
"sha512-AjqHOv2lAhkuUdIiBu9xbuettzAzWXmCEcLONNKJRba87WAefz8Ca9d6ds/SzrPc235n1IxWYdhJ2zF3MNUaoQ==", + "license": "MIT", "dependencies": { "use-sync-external-store": "^1.2.0" }, @@ -6140,6 +6825,7 @@ "version": "0.10.6", "resolved": "https://registry.npmjs.org/swr-store/-/swr-store-0.10.6.tgz", "integrity": "sha512-xPjB1hARSiRaNNlUQvWSVrG5SirCjk2TmaUyzzvk69SZQan9hCJqw/5rG9iL7xElHU784GxRPISClq4488/XVw==", + "license": "MIT", "dependencies": { "dequal": "^2.0.3" }, @@ -6150,21 +6836,24 @@ "node_modules/swrev": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/swrev/-/swrev-4.0.0.tgz", - "integrity": "sha512-LqVcOHSB4cPGgitD1riJ1Hh4vdmITOp+BkmfmXRh4hSF/t7EnS4iD+SOTmq7w5pPm/SiPeto4ADbKS6dHUDWFA==" + "integrity": "sha512-LqVcOHSB4cPGgitD1riJ1Hh4vdmITOp+BkmfmXRh4hSF/t7EnS4iD+SOTmq7w5pPm/SiPeto4ADbKS6dHUDWFA==", + "license": "MIT" }, "node_modules/swrv": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/swrv/-/swrv-1.0.4.tgz", "integrity": "sha512-zjEkcP8Ywmj+xOJW3lIT65ciY/4AL4e/Or7Gj0MzU3zBJNMdJiT8geVZhINavnlHRMMCcJLHhraLTAiDOTmQ9g==", + "license": "Apache-2.0", "peerDependencies": { "vue": ">=3.2.26 < 4" } }, "node_modules/tar": { - "version": "6.1.15", - "resolved": "https://registry.npmjs.org/tar/-/tar-6.1.15.tgz", - "integrity": "sha512-/zKt9UyngnxIT/EAGYuxaMYgOIJiP81ab9ZfkILq4oNLPFX50qyYmu7jRj9qeXoxmJHjGlbH0+cm2uy1WCs10A==", + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz", + "integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==", "dev": true, + "license": "ISC", "dependencies": { "chownr": "^2.0.0", "fs-minipass": "^2.0.0", @@ -6177,11 +6866,19 @@ "node": ">=10" } }, + "node_modules/tar/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true, + "license": "ISC" + }, "node_modules/test-exclude": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz", "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==", "dev": true, + "license": "ISC", "dependencies": { "@istanbuljs/schema": "^0.1.2", "glob": "^7.1.4", @@ -6195,13 +6892,15 @@ "version": "2.3.8", "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", "integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/time-span": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/time-span/-/time-span-4.0.0.tgz", "integrity": "sha512-MyqZCTGLDZ77u4k+jqg4UlrzPTPZ49NDlaekU6uuFaJLzPIN1woaRXCbGeqOfxwc3Y37ZROGAJ614Rdv7Olt+g==", "dev": true, + "license": "MIT", "dependencies": { "convert-hrtime": "^3.0.0" }, @@ -6216,22 +6915,15 @@ "version": "1.0.5", "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz", "integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==", - "dev": true - }, - "node_modules/to-fast-properties": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", - "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", "dev": true, - "engines": { - "node": ">=4" - } + "license": "BSD-3-Clause" }, "node_modules/to-regex-range": 
{ "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", "dev": true, + "license": "MIT", "dependencies": { "is-number": "^7.0.0" }, @@ -6243,31 +6935,36 @@ "version": "0.0.3", "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/ts-jest": { - "version": "29.1.2", - "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.2.tgz", - "integrity": "sha512-br6GJoH/WUX4pu7FbZXuWGKGNDuU7b8Uj77g/Sp7puZV6EXzuByl6JrECvm0MzVzSTkSHWTihsXt+5XYER5b+g==", + "version": "29.3.2", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.3.2.tgz", + "integrity": "sha512-bJJkrWc6PjFVz5g2DGCNUo8z7oFEYaz1xP1NpeDU7KNLMWPpEyV8Chbpkn8xjzgRDpQhnGMyvyldoL7h8JXyug==", "dev": true, + "license": "MIT", "dependencies": { - "bs-logger": "0.x", - "fast-json-stable-stringify": "2.x", + "bs-logger": "^0.2.6", + "ejs": "^3.1.10", + "fast-json-stable-stringify": "^2.1.0", "jest-util": "^29.0.0", "json5": "^2.2.3", - "lodash.memoize": "4.x", - "make-error": "1.x", - "semver": "^7.5.3", - "yargs-parser": "^21.0.1" + "lodash.memoize": "^4.1.2", + "make-error": "^1.3.6", + "semver": "^7.7.1", + "type-fest": "^4.39.1", + "yargs-parser": "^21.1.1" }, "bin": { "ts-jest": "cli.js" }, "engines": { - "node": "^16.10.0 || ^18.0.0 || >=20.0.0" + "node": "^14.15.0 || ^16.10.0 || ^18.0.0 || >=20.0.0" }, "peerDependencies": { "@babel/core": ">=7.0.0-beta.0 <8", + "@jest/transform": "^29.0.0", "@jest/types": "^29.0.0", "babel-jest": "^29.0.0", "jest": "^29.0.0", @@ -6277,6 +6974,9 @@ "@babel/core": { "optional": true }, + "@jest/transform": { + "optional": true + }, "@jest/types": { "optional": true }, @@ -6289,13 +6989,11 @@ } }, "node_modules/ts-jest/node_modules/semver": { - "version": "7.6.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz", - "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==", + "version": "7.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.1.tgz", + "integrity": "sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==", "dev": true, - "dependencies": { - "lru-cache": "^6.0.0" - }, + "license": "ISC", "bin": { "semver": "bin/semver.js" }, @@ -6303,75 +7001,49 @@ "node": ">=10" } }, + "node_modules/ts-jest/node_modules/type-fest": { + "version": "4.40.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.40.0.tgz", + "integrity": "sha512-ABHZ2/tS2JkvH1PEjxFDTUWC8dB5OsIGZP4IFLhR293GqT5Y5qB1WwL2kMPYhQW9DVgVD8Hd7I8gjwPIf5GFkw==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/ts-morph": { "version": "12.0.0", "resolved": "https://registry.npmjs.org/ts-morph/-/ts-morph-12.0.0.tgz", "integrity": "sha512-VHC8XgU2fFW7yO1f/b3mxKDje1vmyzFXHWzOYmKEkCEwcLjDtbdLgBQviqj4ZwP4MJkQtRo6Ha2I29lq/B+VxA==", "dev": true, + "license": "MIT", "dependencies": { "@ts-morph/common": "~0.11.0", "code-block-writer": "^10.1.1" } }, - "node_modules/ts-node": { - "version": "10.9.1", - "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.1.tgz", - "integrity": 
"sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==", - "dev": true, - "dependencies": { - "@cspotcode/source-map-support": "^0.8.0", - "@tsconfig/node10": "^1.0.7", - "@tsconfig/node12": "^1.0.7", - "@tsconfig/node14": "^1.0.0", - "@tsconfig/node16": "^1.0.2", - "acorn": "^8.4.1", - "acorn-walk": "^8.1.1", - "arg": "^4.1.0", - "create-require": "^1.1.0", - "diff": "^4.0.1", - "make-error": "^1.1.1", - "v8-compile-cache-lib": "^3.0.1", - "yn": "3.1.1" - }, - "bin": { - "ts-node": "dist/bin.js", - "ts-node-cwd": "dist/bin-cwd.js", - "ts-node-esm": "dist/bin-esm.js", - "ts-node-script": "dist/bin-script.js", - "ts-node-transpile-only": "dist/bin-transpile.js", - "ts-script": "dist/bin-script-deprecated.js" - }, - "peerDependencies": { - "@swc/core": ">=1.2.50", - "@swc/wasm": ">=1.2.50", - "@types/node": "*", - "typescript": ">=2.7" - }, - "peerDependenciesMeta": { - "@swc/core": { - "optional": true - }, - "@swc/wasm": { - "optional": true - } - } - }, "node_modules/ts-toolbelt": { "version": "6.15.5", "resolved": "https://registry.npmjs.org/ts-toolbelt/-/ts-toolbelt-6.15.5.tgz", "integrity": "sha512-FZIXf1ksVyLcfr7M317jbB67XFJhOO1YqdTcuGaq9q5jLUoTikukZ+98TPjKiP2jC5CgmYdWWYs0s2nLSU0/1A==", - "dev": true + "dev": true, + "license": "Apache-2.0" }, "node_modules/tslib": { - "version": "2.6.2", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", - "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD" }, "node_modules/type-detect": { "version": "4.0.8", "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", "dev": true, + "license": "MIT", "engines": { "node": ">=4" } @@ -6381,6 +7053,7 @@ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", "dev": true, + "license": "(MIT OR CC0-1.0)", "engines": { "node": ">=10" }, @@ -6392,7 +7065,8 @@ "version": "4.7.4", "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.7.4.tgz", "integrity": "sha512-C0WQT0gezHuw6AdY1M2jxUO83Rjf0HP7Sk1DtXj6j1EwkQNZrHAg2XPWlq62oqEhYvONq5pkC2Y9oPljWToLmQ==", - "dev": true, + "devOptional": true, + "license": "Apache-2.0", "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" @@ -6402,18 +7076,19 @@ } }, "node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", "dev": true, + "license": "MIT", "engines": { "node": ">= 10.0.0" } }, "node_modules/update-browserslist-db": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz", - "integrity": "sha512-dCwEFf0/oT85M1fHBg4F0jtLwJrutGoHSQXCh7u4o2t1drG+c0a9Flnqww6XUKSfQMPpJBRjU8d4RXB09qtvaA==", + 
"version": "1.1.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.3.tgz", + "integrity": "sha512-UxhIZQ+QInVdunkDAaiazvvT/+fXL5Osr0JZlJulepYu6Jd7qJtDZjlur0emRlT71EN3ScPoE7gvsuIKKNavKw==", "dev": true, "funding": [ { @@ -6429,9 +7104,10 @@ "url": "https://github.com/sponsors/ai" } ], + "license": "MIT", "dependencies": { - "escalade": "^3.1.1", - "picocolors": "^1.0.0" + "escalade": "^3.2.0", + "picocolors": "^1.1.1" }, "bin": { "update-browserslist-db": "cli.js" @@ -6440,40 +7116,52 @@ "browserslist": ">= 4.21.0" } }, + "node_modules/update-browserslist-db/node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, "node_modules/uri-js": { "version": "4.4.1", "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", "dev": true, + "license": "BSD-2-Clause", "dependencies": { "punycode": "^2.1.0" } }, "node_modules/use-sync-external-store": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.2.0.tgz", - "integrity": "sha512-eEgnFxGQ1Ife9bzYs6VLi8/4X6CObHMw9Qr9tPY43iKwsPw8xE8+EFsf/2cFZ5S3esXgpWgtSCtLNS41F+sKPA==", + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.5.0.tgz", + "integrity": "sha512-Rb46I4cGGVBmjamjphe8L/UnvJD+uPPtTkNvX5mZgqdbavhI4EbgIWJiIHXJ8bc/i9EQGPRh4DwEURJ552Do0A==", + "license": "MIT", "peerDependencies": { - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, "node_modules/util-deprecate": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/v8-compile-cache-lib": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz", "integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/v8-to-istanbul": { - "version": "9.2.0", - "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.2.0.tgz", - "integrity": "sha512-/EH/sDgxU2eGxajKdwLCDmQ4FWq+kpi3uCmBGpw1xJtnAxEjlD8j8PEiGWpCIMIs3ciNAgH0d3TTJiUkYzyZjA==", + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz", + "integrity": "sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA==", "dev": true, + "license": "ISC", "dependencies": { "@jridgewell/trace-mapping": "^0.3.12", "@types/istanbul-lib-coverage": "^2.0.1", @@ -6488,6 +7176,7 @@ "resolved": "https://registry.npmjs.org/vercel/-/vercel-31.4.0.tgz", "integrity": "sha512-jRzA3GyPiNckPN9aOiN63ulzgqEduTzALf4N8nh9UvCEzyEisCgtUxj2e+3xVWljdcGkj22VVij/DV4SnAXO6Q==", "dev": true, + "license": "Apache-2.0", "dependencies": { "@vercel/build-utils": "6.8.3", "@vercel/go": "2.5.1", @@ -6509,29 +7198,39 @@ } }, "node_modules/vue": { - "version": "3.3.4", - "resolved": "https://registry.npmjs.org/vue/-/vue-3.3.4.tgz", - 
"integrity": "sha512-VTyEYn3yvIeY1Py0WaYGZsXnz3y5UnGi62GjVEqvEGPl6nxbOrCXbVOTQWBEJUqAyTUk2uJ5JLVnYJ6ZzGbrSw==", + "version": "3.5.13", + "resolved": "https://registry.npmjs.org/vue/-/vue-3.5.13.tgz", + "integrity": "sha512-wmeiSMxkZCSc+PM2w2VRsOYAZC8GdipNFRTsLSfodVqI9mbejKeXEGr8SckuLnrQPGe3oJN5c3K0vpoU9q/wCQ==", + "license": "MIT", "peer": true, "dependencies": { - "@vue/compiler-dom": "3.3.4", - "@vue/compiler-sfc": "3.3.4", - "@vue/runtime-dom": "3.3.4", - "@vue/server-renderer": "3.3.4", - "@vue/shared": "3.3.4" + "@vue/compiler-dom": "3.5.13", + "@vue/compiler-sfc": "3.5.13", + "@vue/runtime-dom": "3.5.13", + "@vue/server-renderer": "3.5.13", + "@vue/shared": "3.5.13" + }, + "peerDependencies": { + "typescript": "*" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } } }, "node_modules/wait-on": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/wait-on/-/wait-on-7.2.0.tgz", - "integrity": "sha512-wCQcHkRazgjG5XoAq9jbTMLpNIjoSlZslrJ2+N9MxDsGEv1HnFoVjOCexL0ESva7Y9cu350j+DWADdk54s4AFQ==", + "version": "8.0.3", + "resolved": "https://registry.npmjs.org/wait-on/-/wait-on-8.0.3.tgz", + "integrity": "sha512-nQFqAFzZDeRxsu7S3C7LbuxslHhk+gnJZHyethuGKAn2IVleIbTB9I3vJSQiSR+DifUqmdzfPMoMPJfLqMF2vw==", "dev": true, + "license": "MIT", "dependencies": { - "axios": "^1.6.1", - "joi": "^17.11.0", + "axios": "^1.8.2", + "joi": "^17.13.3", "lodash": "^4.17.21", "minimist": "^1.2.8", - "rxjs": "^7.8.1" + "rxjs": "^7.8.2" }, "bin": { "wait-on": "bin/wait-on" @@ -6545,6 +7244,7 @@ "resolved": "https://registry.npmjs.org/walker/-/walker-1.0.8.tgz", "integrity": "sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==", "dev": true, + "license": "Apache-2.0", "dependencies": { "makeerror": "1.0.12" } @@ -6553,19 +7253,22 @@ "version": "0.2.4", "resolved": "https://registry.npmjs.org/web-vitals/-/web-vitals-0.2.4.tgz", "integrity": "sha512-6BjspCO9VriYy12z356nL6JBS0GYeEcA457YyRzD+dD6XYCQ75NKhcOHUMHentOE7OcVCIXXDvOm0jKFfQG2Gg==", - "dev": true + "dev": true, + "license": "Apache-2.0" }, "node_modules/webidl-conversions": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", - "dev": true + "dev": true, + "license": "BSD-2-Clause" }, "node_modules/whatwg-url": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", "dev": true, + "license": "MIT", "dependencies": { "tr46": "~0.0.3", "webidl-conversions": "^3.0.0" @@ -6576,6 +7279,7 @@ "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", "dev": true, + "license": "ISC", "dependencies": { "isexe": "^2.0.0" }, @@ -6591,6 +7295,7 @@ "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.5.tgz", "integrity": "sha512-eDMORYaPNZ4sQIuuYPDHdQvf4gyCF9rEEV/yPxGfwPkRodwEgiMUUXTx/dex+Me0wxx53S+NgUHaP7y3MGlDmg==", "dev": true, + "license": "ISC", "dependencies": { "string-width": "^1.0.2 || 2 || 3 || 4" } @@ -6600,6 +7305,7 @@ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", "dev": true, + "license": "MIT", 
"dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", @@ -6616,13 +7322,15 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", - "dev": true + "dev": true, + "license": "ISC" }, "node_modules/write-file-atomic": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-4.0.2.tgz", "integrity": "sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==", "dev": true, + "license": "ISC", "dependencies": { "imurmurhash": "^0.1.4", "signal-exit": "^3.0.7" @@ -6635,28 +7343,32 @@ "version": "3.0.7", "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", - "dev": true + "dev": true, + "license": "ISC" }, "node_modules/y18n": { "version": "5.0.8", "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", "dev": true, + "license": "ISC", "engines": { "node": ">=10" } }, "node_modules/yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true, + "license": "ISC" }, "node_modules/yargs": { "version": "17.7.2", "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", "dev": true, + "license": "MIT", "dependencies": { "cliui": "^8.0.1", "escalade": "^3.1.1", @@ -6675,6 +7387,7 @@ "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", "dev": true, + "license": "ISC", "engines": { "node": ">=12" } @@ -6684,6 +7397,7 @@ "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz", "integrity": "sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==", "dev": true, + "license": "MIT", "engines": { "node": ">=6" } @@ -6693,6 +7407,7 @@ "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", "dev": true, + "license": "MIT", "engines": { "node": ">=10" }, diff --git a/ecosystem-tests/vercel-edge/package.json b/ecosystem-tests/vercel-edge/package.json index 4c75dd4fd..e00fa1322 100644 --- a/ecosystem-tests/vercel-edge/package.json +++ b/ecosystem-tests/vercel-edge/package.json @@ -15,7 +15,7 @@ }, "dependencies": { "ai": "2.1.34", - "next": "14.1.1", + "next": "14.2.25", "react": "18.2.0", "react-dom": "18.2.0" }, diff --git a/examples/package.json b/examples/package.json index 65b3216b2..9262ea239 100644 --- a/examples/package.json +++ b/examples/package.json @@ -9,7 +9,7 @@ "@azure/identity": "^4.2.0", "dotenv": "^16.4.7", "express": "^4.18.2", - "next": "^14.1.1", + "next": "^14.2.25", "openai": 
"file:..", "zod-to-json-schema": "^3.21.4", "@azure/identity": "^4.2.0" From 3d8efaf013ce818818df5df1701e18f60fdc81e8 Mon Sep 17 00:00:00 2001 From: Em Date: Mon, 21 Apr 2025 14:36:14 -0400 Subject: [PATCH 60/73] fix(docs): correct docstring on responses.stream --- src/resources/responses/responses.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 2694996ad..d8dc6ab84 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -121,7 +121,7 @@ export class Responses extends APIResource { } /** - * Creates a chat completion stream + * Creates a model response stream */ stream>( body: Params, From 1b8eeb77be4d517980c4a5a0b9df0c479926be6c Mon Sep 17 00:00:00 2001 From: Em Date: Mon, 21 Apr 2025 14:39:00 -0400 Subject: [PATCH 61/73] fix(types): export AssistantStream --- src/resources/beta/assistants.ts | 3 +++ src/resources/beta/threads/threads.ts | 1 + 2 files changed, 4 insertions(+) diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 40cc82384..167c117f5 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -11,6 +11,7 @@ import { CursorPage, type CursorPageParams, PagePromise } from '../../core/pagin import { buildHeaders } from '../../internal/headers'; import { RequestOptions } from '../../internal/request-options'; import { path } from '../../internal/utils/path'; +import { AssistantStream } from '../../lib/AssistantStream'; export class Assistants extends APIResource { /** @@ -1505,4 +1506,6 @@ export declare namespace Assistants { type AssistantUpdateParams as AssistantUpdateParams, type AssistantListParams as AssistantListParams, }; + + export type { AssistantStream }; } diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index b06ac458f..10b806b05 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -1381,4 +1381,5 @@ export declare namespace Threads { type MessageListParams as MessageListParams, type MessageDeleteParams as MessageDeleteParams, }; + export type { AssistantStream }; } From d795a1e36d5de5c4d7e9b933f0e95c1ef91172c8 Mon Sep 17 00:00:00 2001 From: Em Date: Mon, 21 Apr 2025 15:56:31 -0400 Subject: [PATCH 62/73] fix(audio): correctly handle transcription streaming --- src/core/streaming.ts | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/core/streaming.ts b/src/core/streaming.ts index 49fff52aa..41c52cdac 100644 --- a/src/core/streaming.ts +++ b/src/core/streaming.ts @@ -43,7 +43,11 @@ export class Stream implements AsyncIterable { continue; } - if (sse.event === null || sse.event.startsWith('response.')) { + if ( + sse.event === null || + sse.event.startsWith('response.') || + sse.event.startsWith('transcript.') + ) { let data; try { From 906e48a1a1f4f511319c4e0145a07175a434cc93 Mon Sep 17 00:00:00 2001 From: Em Date: Mon, 21 Apr 2025 16:10:32 -0400 Subject: [PATCH 63/73] fix(zod): warn on optional field usage --- .../zod-to-json-schema/parsers/object.ts | 13 ++++- tests/helpers/zod.test.ts | 53 +++++++++++++++++++ 2 files changed, 64 insertions(+), 2 deletions(-) diff --git a/src/_vendor/zod-to-json-schema/parsers/object.ts b/src/_vendor/zod-to-json-schema/parsers/object.ts index f2120c8fe..fbc563d6c 100644 --- a/src/_vendor/zod-to-json-schema/parsers/object.ts +++ b/src/_vendor/zod-to-json-schema/parsers/object.ts @@ -39,12 +39,21 @@ export function 
parseObjectDef(def: ZodObjectDef, refs: Refs) { [propName, propDef], ) => { if (propDef === undefined || propDef._def === undefined) return acc; + const propertyPath = [...refs.currentPath, 'properties', propName]; const parsedDef = parseDef(propDef._def, { ...refs, - currentPath: [...refs.currentPath, 'properties', propName], - propertyPath: [...refs.currentPath, 'properties', propName], + currentPath: propertyPath, + propertyPath, }); if (parsedDef === undefined) return acc; + + if (refs.openaiStrictMode && propDef.isOptional() && !propDef.isNullable()) { + console.warn( + `Zod field at \`${propertyPath.join( + '/', + )}\` uses \`.optional()\` without \`.nullable()\` which is not supported by the API. See: https://platform.openai.com/docs/guides/structured-outputs?api-mode=responses#all-fields-must-be-required\nThis will become an error in a future version of the SDK.`, + ); + } return { properties: { ...acc.properties, diff --git a/tests/helpers/zod.test.ts b/tests/helpers/zod.test.ts index 493b4c0c8..f77bf822f 100644 --- a/tests/helpers/zod.test.ts +++ b/tests/helpers/zod.test.ts @@ -279,3 +279,56 @@ describe('zodResponseFormat', () => { `); }); }); + +it('warns on optional fields', () => { + const consoleSpy = jest.spyOn(console, 'warn'); + consoleSpy.mockClear(); + + zodResponseFormat( + z.object({ + required: z.string(), + optional: z.string().optional(), + optional_and_nullable: z.string().optional().nullable(), + }), + 'schema', + ); + + expect(consoleSpy).toHaveBeenCalledWith( + 'Zod field at `#/definitions/schema/properties/optional` uses `.optional()` without `.nullable()` which is not supported by the API. See: https://platform.openai.com/docs/guides/structured-outputs?api-mode=responses#all-fields-must-be-required\nThis will become an error in a future version of the SDK.', + ); + expect(consoleSpy).toHaveBeenCalledTimes(1); +}); + +it('warns on nested optional fields', () => { + const consoleSpy = jest.spyOn(console, 'warn'); + consoleSpy.mockClear(); + + zodResponseFormat( + z.object({ + foo: z.object({ bar: z.array(z.object({ can_be_missing: z.boolean().optional() })) }), + }), + 'schema', + ); + + expect(consoleSpy).toHaveBeenCalledWith( + expect.stringContaining( + 'Zod field at `#/definitions/schema/properties/foo/properties/bar/items/properties/can_be_missing` uses `.optional()`', + ), + ); + expect(consoleSpy).toHaveBeenCalledTimes(1); +}); + +it('does not warn on union nullable fields', () => { + const consoleSpy = jest.spyOn(console, 'warn'); + consoleSpy.mockClear(); + + zodResponseFormat( + z.object({ + union: z.union([z.string(), z.null()]).optional(), + }), + + 'schema', + ); + + expect(consoleSpy).toHaveBeenCalledTimes(0); +}); From 73b343e754180e4b789961516c9d2986a5a23f50 Mon Sep 17 00:00:00 2001 From: Em Date: Mon, 21 Apr 2025 18:40:31 -0400 Subject: [PATCH 64/73] feat: add audio helpers --- examples/package.json | 3 +- examples/speech-to-text.ts | 19 +++++ examples/text-to-speech.ts | 23 ++++++ src/helpers/audio.ts | 148 +++++++++++++++++++++++++++++++++++++ 4 files changed, 191 insertions(+), 2 deletions(-) create mode 100644 examples/speech-to-text.ts create mode 100644 examples/text-to-speech.ts create mode 100644 src/helpers/audio.ts diff --git a/examples/package.json b/examples/package.json index 9262ea239..db01a2c10 100644 --- a/examples/package.json +++ b/examples/package.json @@ -11,8 +11,7 @@ "express": "^4.18.2", "next": "^14.2.25", "openai": "file:..", - "zod-to-json-schema": "^3.21.4", - "@azure/identity": "^4.2.0" + "zod-to-json-schema": 
"^3.21.4" }, "devDependencies": { "@types/body-parser": "^1.19.3", diff --git a/examples/speech-to-text.ts b/examples/speech-to-text.ts new file mode 100644 index 000000000..aa44bf872 --- /dev/null +++ b/examples/speech-to-text.ts @@ -0,0 +1,19 @@ +import OpenAI from 'openai'; +import { recordAudio } from 'openai/helpers/audio'; + +const openai = new OpenAI(); + +async function main(): Promise { + console.log('Recording for 5 seconds...'); + const response = await recordAudio({ timeout: 5000 }); + + console.log('Transcribing...'); + const transcription = await openai.audio.transcriptions.create({ + file: response, + model: 'whisper-1', + }); + + console.log(transcription.text); +} + +main().catch(console.error); diff --git a/examples/text-to-speech.ts b/examples/text-to-speech.ts new file mode 100644 index 000000000..5a87adf91 --- /dev/null +++ b/examples/text-to-speech.ts @@ -0,0 +1,23 @@ +import OpenAI from 'openai'; +import { playAudio } from 'openai/helpers/audio'; + +const openai = new OpenAI(); + +const exampleText = ` +I see skies of blue and clouds of white +The bright blessed days, the dark sacred nights +And I think to myself +What a wonderful world +`.trim(); + +async function main(): Promise { + const response = await openai.audio.speech.create({ + model: 'tts-1', + voice: 'nova', + input: exampleText, + }); + + await playAudio(response); +} + +main().catch(console.error); diff --git a/src/helpers/audio.ts b/src/helpers/audio.ts new file mode 100644 index 000000000..413ef66a3 --- /dev/null +++ b/src/helpers/audio.ts @@ -0,0 +1,148 @@ +import { spawn } from 'node:child_process'; +import { Readable } from 'node:stream'; +import { platform, versions } from 'node:process'; + +const DEFAULT_SAMPLE_RATE = 24000; +const DEFAULT_CHANNELS = 1; + +const isNode = Boolean(versions?.node); + +const recordingProviders: Record = { + win32: 'dshow', + darwin: 'avfoundation', + linux: 'alsa', + aix: 'alsa', + android: 'alsa', + freebsd: 'alsa', + haiku: 'alsa', + sunos: 'alsa', + netbsd: 'alsa', + openbsd: 'alsa', + cygwin: 'dshow', +}; + +function isResponse(stream: NodeJS.ReadableStream | Response | File): stream is Response { + return stream instanceof Response; +} + +function isFile(stream: NodeJS.ReadableStream | Response | File): stream is File { + return stream instanceof File; +} + +async function nodejsPlayAudio(stream: NodeJS.ReadableStream | Response | File): Promise { + return new Promise((resolve, reject) => { + try { + const ffplay = spawn('ffplay', ['-autoexit', '-nodisp', '-i', 'pipe:0']); + + if (isResponse(stream)) { + Readable.fromWeb(stream.body! as any).pipe(ffplay.stdin); + } else if (isFile(stream)) { + Readable.fromWeb(stream.stream() as any).pipe(ffplay.stdin); + } else { + stream.pipe(ffplay.stdin); + } + + ffplay.on('close', (code: number) => { + if (code !== 0) { + reject(new Error(`ffplay process exited with code ${code}`)); + } + resolve(); + }); + } catch (error) { + reject(error); + } + }); +} + +export async function playAudio(input: NodeJS.ReadableStream | Response | File): Promise { + if (isNode) { + return nodejsPlayAudio(input); + } + + throw new Error( + 'Play audio is not supported in the browser yet. 
Check out https://npm.im/wavtools as an alternative.', + ); +} + +type RecordAudioOptions = { + signal?: AbortSignal; + device?: number; + timeout?: number; +}; + +function nodejsRecordAudio({ signal, device, timeout }: RecordAudioOptions = {}): Promise { + return new Promise((resolve, reject) => { + const data: Uint8Array[] = []; + const errorData: Uint8Array[] = []; + const provider = recordingProviders[platform]; + try { + const ffmpeg = spawn( + 'ffmpeg', + [ + '-hide_banner', + '-loglevel', + 'error', + '-f', + provider, + '-i', + `:${device ?? 0}`, // default audio input device; adjust as needed + '-ar', + DEFAULT_SAMPLE_RATE.toString(), + '-ac', + DEFAULT_CHANNELS.toString(), + '-f', + 'wav', + 'pipe:1', + ], + { + stdio: ['ignore', 'pipe', 'pipe'], + }, + ); + + ffmpeg.stdout.on('data', (chunk) => { + data.push(chunk); + }); + + ffmpeg.stderr.on('data', (chunk) => { + errorData.push(chunk); + }); + + ffmpeg.on('error', (error) => { + reject(error); + }); + + ffmpeg.on('close', (code) => { + if (code === 1) { + reject(new Error('FFmpeg Error: ' + Buffer.concat(errorData).toString('utf-8'))); + } else { + resolve(new File(data, 'audio.wav', { type: 'audio/wav' })); + } + }); + + if (typeof timeout === 'number' && timeout > 0) { + const internalSignal = AbortSignal.timeout(timeout); + internalSignal.addEventListener('abort', () => { + ffmpeg.kill('SIGTERM'); + }); + } + + if (signal) { + signal.addEventListener('abort', () => { + ffmpeg.kill('SIGTERM'); + }); + } + } catch (error) { + reject(error); + } + }); +} + +export async function recordAudio(options: RecordAudioOptions = {}) { + if (isNode) { + return nodejsRecordAudio(options); + } + + throw new Error( + 'Record audio is not supported in the browser. Check out https://npm.im/wavtools as an alternative.', + ); +} From 109f7d566deea990bcd176146cf3eb2035773273 Mon Sep 17 00:00:00 2001 From: Em Date: Mon, 21 Apr 2025 19:14:53 -0400 Subject: [PATCH 65/73] perf(embedding): default embedding creation to base64 --- src/resources/embeddings.ts | 32 +++++++++++++++++++++++++- tests/api-resources/embeddings.test.ts | 30 ++++++++++++++++++++++++ 2 files changed, 61 insertions(+), 1 deletion(-) diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts index b9c48efad..338daa16c 100644 --- a/src/resources/embeddings.ts +++ b/src/resources/embeddings.ts @@ -3,13 +3,43 @@ import { APIResource } from '../core/resource'; import { APIPromise } from '../core/api-promise'; import { RequestOptions } from '../internal/request-options'; +import { fromBase64 } from '../internal/utils'; export class Embeddings extends APIResource { /** * Creates an embedding vector representing the input text. */ create(body: EmbeddingCreateParams, options?: RequestOptions): APIPromise { - return this._client.post('/embeddings', { body, ...options }); + const response: APIPromise = this._client.post('/embeddings', { + body: { + ...body, + encoding_format: body.encoding_format ?? 
'base64', + }, + ...options, + }); + + // if the user specified an encoding_format, return the response as-is + if (body.encoding_format) { + return response; + } + + // otherwise, decode the base64 embeddings + return (response as APIPromise)._thenUnwrap((response) => { + if (response && response.data) { + response.data.forEach((embeddingBase64Obj) => { + const embeddingBytes = fromBase64(embeddingBase64Obj.embedding as unknown as string); + embeddingBase64Obj.embedding = Array.from( + new Uint32Array( + embeddingBytes.buffer, + embeddingBytes.byteOffset, + embeddingBytes.byteLength / Uint32Array.BYTES_PER_ELEMENT, + ), + ); + }); + } + + return response; + }); } } diff --git a/tests/api-resources/embeddings.test.ts b/tests/api-resources/embeddings.test.ts index c449f9825..42b01e02d 100644 --- a/tests/api-resources/embeddings.test.ts +++ b/tests/api-resources/embeddings.test.ts @@ -31,4 +31,34 @@ describe('resource embeddings', () => { user: 'user-1234', }); }); + + test('create: encoding_format: "float" should create float32 embeddings', async () => { + const response = await client.embeddings.create({ + input: 'The quick brown fox jumped over the lazy dog', + model: 'text-embedding-3-small', + }); + + expect(response.data?.[0]?.embedding).toBeInstanceOf(Array); + expect(Number.isFinite(response.data?.[0]?.embedding[0])).toBe(true); + }); + + test('create: encoding_format: "base64" should create string embeddings', async () => { + const response = await client.embeddings.create({ + input: 'The quick brown fox jumped over the lazy dog', + model: 'text-embedding-3-small', + encoding_format: 'base64', + }); + + expect(typeof response.data?.[0]?.embedding).toBe('string'); + }); + + test('create: default encoding_format should create float32 embeddings', async () => { + const response = await client.embeddings.create({ + input: 'The quick brown fox jumped over the lazy dog', + model: 'text-embedding-3-small', + }); + + expect(response.data?.[0]?.embedding).toBeInstanceOf(Array); + expect(Number.isFinite(response.data?.[0]?.embedding[0])).toBe(true); + }); }); From f7a555fc7676c080541dcf1d81cc52fbfe7c9e39 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Thu, 24 Apr 2025 16:45:09 -0400 Subject: [PATCH 66/73] fix(embeddings): correctly construct datas --- src/resources/embeddings.ts | 4 +- .../embeddings-base64-response.json | 1 + .../embeddings-float-response.json | 1 + tests/api-resources/embeddings.test.ts | 60 ++++++++++++++++--- 4 files changed, 55 insertions(+), 11 deletions(-) create mode 100644 tests/api-resources/embeddings-base64-response.json create mode 100644 tests/api-resources/embeddings-float-response.json diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts index 338daa16c..6213c7c3d 100644 --- a/src/resources/embeddings.ts +++ b/src/resources/embeddings.ts @@ -29,10 +29,10 @@ export class Embeddings extends APIResource { response.data.forEach((embeddingBase64Obj) => { const embeddingBytes = fromBase64(embeddingBase64Obj.embedding as unknown as string); embeddingBase64Obj.embedding = Array.from( - new Uint32Array( + new Float32Array( embeddingBytes.buffer, embeddingBytes.byteOffset, - embeddingBytes.byteLength / Uint32Array.BYTES_PER_ELEMENT, + embeddingBytes.byteLength / Float32Array.BYTES_PER_ELEMENT, ), ); }); diff --git a/tests/api-resources/embeddings-base64-response.json b/tests/api-resources/embeddings-base64-response.json new file mode 100644 index 000000000..6eaa75d1b --- /dev/null +++ b/tests/api-resources/embeddings-base64-response.json @@ -0,0 +1 
@@ +{"object":"list","data":[{"object":"embedding","index":0,"embedding":"A1fLvaC4Bb0QB7w8yEvrPOm9Xj2r0yA8EW4sPRq75j3Fbiq81/chPumAGb0afqG8R6AFvpzsQT35SPO7Hi39PEMAir1lf0A92McfvRoVlLxQv9o9tHqIvQYlrL0fwlK8sufPPYz2gjzH5Ho93GebvN+eCTxjRjW8PJRKvXMtFD4+n3C9ByMPO39Gkjs1Jm49A1fLPdNXpjv8RLm92McfveKpLz01VNO9SUIevhAHvD0flG09+9srvW5j7Txp8dY8LW4Ju08bJb1GdL29g+aNPWlLBD1p8dY8LkCkvfPLtjxcBj4+1/ehPebv/bz/Ifo8SqkOvREFHzyAr588HbUPPbFS+r00gri825WAPQlcGj1qHZ+8o8EOPo880Tn5dli9zRUSPc2APD0b5RG9mhxEvTyUSj3FQMU95u/9vE20tD3wwBC94NmxvXSUhL3Ofh8904WLPRbeJb2Paja8BClmvhwgOj2e6Ic9em0LPdj1BD3lSau7dJQEPJi107yB6kc97sTKO6lAaD2YDwE9YDuPPSFVC735dtg9SK1IOysJNrwtQkE8BmJxPb2ZXT0hVYs9g+YNvLfuuz2nyhe9z7nHN5UVWDxea5E77F1avTIbyL256oG9ft+hPVWJAbwNoug82TCtvUrm072wgN86JPWGO3TRyTwOY4a8xJwPvkx5DL1f1B68RwkTvja7Q72BrQI9Pfs6PTdfeb3RxG09jJxVvfl22D3eCbQ9FbR6vTPtYrn0mzS+kqGkPDxXhbwyG8i98M9wveayuL1EpL88lNqvve3yL70RQmQ7VcZGPaPBjr1wyEA9fKaWOskMibwNomi8J9Rku9EeGz016Si8O1mivQ38lb0EgxO88P1VvcilmLuNA0a9lj8DvHCceD3lSSs9uFWsve6HBT6XEZ68ShS5PFJSE70dTIK86OvDvSNgsbzS8DU8bPz8PAuVpTxKQIE9/NmOPBhFFj7LsL67PJRKvIxu8LwSqVS8D8yTPSOOlj1g0gG8A+69vYz2AjxPhLK80fLSPbrL/LztWz09LAcZvqfKF73B/JO8lnzIvCk5OLxwMU69dmQCvQtp3bs6hwe9WZKKume4S7x3CLg9zK4hPLsjDT16P6a7MbTXPRp+IT0dtQ89GayGvcngwD2F8bO70R4bu8tFlDxcBr67xAWdvdnWfzzQTIC9zn6fPYSKwz3alx28h8GxPW74wj3eNxk+xUBFvIpjyj0WdRi9AkoIPXhvqLugx+U8F0ezvUlCHjx3NAC9uvlhPEOmXD36oAM9D56uvddgrz2giiC9GhWUvHrWGLv0yRk8fOPbvMc+KLs7//S8v5UjPJUV2D0KLjW6YKa5PDciNDuJznQ9USZLPQ=="}],"model":"text-embedding-3-large","usage":{"prompt_tokens":1,"total_tokens":1}} diff --git a/tests/api-resources/embeddings-float-response.json b/tests/api-resources/embeddings-float-response.json new file mode 100644 index 000000000..9f04a185c --- /dev/null +++ b/tests/api-resources/embeddings-float-response.json @@ -0,0 +1 @@ +{"object":"list","data":[{"object":"embedding","index":0,"embedding":[-0.099287055,-0.032646775,0.022952586,0.028722659,0.05438033,0.009816091,0.042097155,0.112661555,-0.010402386,0.158172,-0.037476454,-0.01971345,-0.13049422,0.04734479,-0.0074244705,0.030905303,-0.06738331,0.046996493,-0.039008945,-0.018076468,0.10681021,-0.06664029,-0.08405499,-0.012863665,0.10151614,0.015986703,0.061253335,-0.018970422,0.008399694,-0.011064145,-0.049457774,0.14470463,-0.058745615,0.0021840946,0.00446397,0.058141906,0.099287055,0.0050763874,-0.09046361,-0.039008945,0.042886622,-0.103187956,-0.15454973,0.091810346,0.058002587,-0.041957837,0.028978076,0.02623816,-0.002097021,-0.040309247,-0.09250693,0.06928732,0.03229848,0.02623816,-0.08020054,0.022314047,0.18557113,0.079086,-0.030998182,0.030533789,-0.034829415,0.009705798,0.019492865,0.035084832,-0.122228034,-0.022523023,0.06278583,0.037685428,-0.019423205,0.13941054,0.00039908706,-0.052847836,0.035665322,0.04602127,-0.035618883,-0.04787884,0.049457774,0.096314944,-0.030998182,0.08823452,-0.03534025,-0.086841345,-0.06473628,0.03893929,0.06812634,-0.040495,-0.011133804,-0.22476584,0.045440778,0.06636165,0.03403995,0.032461017,-0.005227315,0.008092035,-0.025843427,0.048807625,0.0061880266,0.05670229,0.031509012,0.06993747,-0.034016732,0.10569567,0.0030620862,-0.011110584,0.011795563,0.058931373,0.054101694,0.068033464,-0.008660915,0.091763906,-0.0370585,0.000023809172,0.013188739,0.004437848,-0.053312227,-0.09770812,-0.06343598,0.07903956,-0.007906278,0.028397584,-0.084565826,-0.103466585,0.0017051902,0.0041185785,0.024636008,-0.016404655,-0.14024645,-0.034295365,-0.009694188,-0.14359008,-0.04778596,0.031903747,0.045649756,-0.06088182,0.058049027,-0.052151248,0.10569567,0.087909445,-0.061206896,-0.00021641403,-0.176
37616,0.020096574,-0.016276948,-0.09770812,-0.058792055,-0.09018497,0.023393758,-0.08586612,-0.04295628,0.0034829418,0.048528988,-0.06970527,0.047066152,0.0011493708,-0.01672973,-0.014198792,-0.0034916492,0.037871186,-0.010309507,-0.079271756,-0.073234655,-0.0090034045,-0.052244127,-0.0046584345,-0.04834323,-0.008010766,0.060696065,0.04181852,-0.08414787,0.13040134,-0.019295497,0.022592682,-0.03596718,-0.015905434,-0.0956648,-0.021652287,0.011104779,0.030882083,0.02021267,0.0631109,0.017437927,0.14674795,-0.005819415,-0.012364443,-0.029349588,-0.012979763,0.072166555,0.07351329,-0.007923692,-0.09273913,0.007993352,-0.021791605,0.1030022,-0.030858863,0.046230245,-0.14944142,-0.0370585,-0.018064858,-0.02447347,-0.011244097,-0.050340116,-0.03183409,-0.006756907,-0.033087946,-0.001057218,-0.012434102,0.089859895,0.009868335,0.034457903,-0.005073485,0.10532416,0.0394269,0.035084832,-0.06575794,0.09417874,-0.005491438,-0.002366949,0.018099686,-0.005799098,-0.07667115,0.0156151885,-0.06264651,0.07787858,0.09547904,-0.009618724,0.086794905,0.095200405,0.14962718,-0.012039368,0.09882267,-0.037221037,0.033273704,-0.0051402412,0.02804929,-0.08753794,0.009659358,-0.031300034,0.01379245,0.053869497,0.03213594,-0.08526241,0.085633926,-0.039194703,-0.018076468,-0.0023321197,0.009386528,-0.026841871,-0.0025672184,-0.02990686,0.009984433,0.105509914,-0.00069114624,0.022662342,0.0027486214,0.05976728,0.04959709]}],"model":"text-embedding-3-large","usage":{"prompt_tokens":1,"total_tokens":1}} diff --git a/tests/api-resources/embeddings.test.ts b/tests/api-resources/embeddings.test.ts index 42b01e02d..23c8a1290 100644 --- a/tests/api-resources/embeddings.test.ts +++ b/tests/api-resources/embeddings.test.ts @@ -1,6 +1,9 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
import OpenAI from 'openai'; +import { mockFetch } from '../utils/mock-fetch'; +import fs from 'fs/promises'; +import Path from 'path'; const client = new OpenAI({ apiKey: 'My API Key', @@ -32,33 +35,72 @@ describe('resource embeddings', () => { }); }); - test('create: encoding_format: "float" should create float32 embeddings', async () => { + test('create: encoding_format=default should create float32 embeddings', async () => { + const client = makeClient(); const response = await client.embeddings.create({ input: 'The quick brown fox jumped over the lazy dog', model: 'text-embedding-3-small', }); - expect(response.data?.[0]?.embedding).toBeInstanceOf(Array); - expect(Number.isFinite(response.data?.[0]?.embedding[0])).toBe(true); + expect(response.data?.at(0)?.embedding).toBeInstanceOf(Array); + expect(response.data?.at(0)?.embedding.at(0)).toBe(-0.09928705543279648); }); - test('create: encoding_format: "base64" should create string embeddings', async () => { + test('create: encoding_format=float should create float32 embeddings', async () => { + const client = makeClient(); const response = await client.embeddings.create({ input: 'The quick brown fox jumped over the lazy dog', model: 'text-embedding-3-small', - encoding_format: 'base64', + encoding_format: 'float', }); - expect(typeof response.data?.[0]?.embedding).toBe('string'); + expect(Array.isArray(response.data?.at(0)?.embedding)).toBeTruthy(); + expect(response.data?.at(0)?.embedding.at(0)).toBe(-0.099287055); }); - test('create: default encoding_format should create float32 embeddings', async () => { + test('create: encoding_format=base64 should return base64 embeddings', async () => { + const client = makeClient(); const response = await client.embeddings.create({ input: 'The quick brown fox jumped over the lazy dog', model: 'text-embedding-3-small', + encoding_format: 'base64', }); - expect(response.data?.[0]?.embedding).toBeInstanceOf(Array); - expect(Number.isFinite(response.data?.[0]?.embedding[0])).toBe(true); + expect(typeof response.data?.at(0)?.embedding).toBe('string'); }); }); + +function makeClient(): OpenAI { + const { fetch, handleRequest } = mockFetch(); + + handleRequest(async (_, init) => { + const format = (JSON.parse(init!.body as string) as OpenAI.EmbeddingCreateParams).encoding_format; + return new Response( + await fs.readFile( + Path.join( + __dirname, + + // these responses were taken from the live API with: + // + // model: 'text-embedding-3-large', + // input: 'h', + // dimensions: 256, + + format === 'base64' ? 'embeddings-base64-response.json' : 'embeddings-float-response.json', + ), + ), + { + status: 200, + headers: { + 'Content-Type': 'application/json', + }, + }, + ); + }); + + return new OpenAI({ + fetch, + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010', + }); +} From 6707687cd22419ed99ee7331bd9dc391bf67a596 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Thu, 24 Apr 2025 16:45:16 -0400 Subject: [PATCH 67/73] chore(internal): fix lint --- src/resources/vector-stores/file-batches.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/resources/vector-stores/file-batches.ts b/src/resources/vector-stores/file-batches.ts index e7428000e..3ac598d75 100644 --- a/src/resources/vector-stores/file-batches.ts +++ b/src/resources/vector-stores/file-batches.ts @@ -9,7 +9,7 @@ import { CursorPage, type CursorPageParams, PagePromise } from '../../core/pagin import { buildHeaders } from '../../internal/headers'; import { RequestOptions } from '../../internal/request-options'; import { path } from '../../internal/utils/path'; -import { allSettledWithThrow } from '../../lib/util'; +import { allSettledWithThrow } from '../../lib/Util'; import { sleep } from '../../internal/utils/sleep'; import { Uploadable } from '../../internal/uploads'; From dc9a5078a6936661d2d08d37c8577ddbafb7f013 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Thu, 24 Apr 2025 16:59:46 -0400 Subject: [PATCH 68/73] chore(internal): run CI on alpha --- .github/workflows/ci.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1927329d2..68e63ba8d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -8,6 +8,7 @@ on: branches: - main - next + - alpha jobs: lint: @@ -93,10 +94,10 @@ jobs: run: | yarn install - - env: + - env: OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} run: | - yarn tsn examples/demo.ts + yarn tsn examples/demo.ts ecosystem_tests: name: ecosystem tests (v${{ matrix.node-version }}) From 9dedb93913cd7d2eca5ad9aeae6e04afed6930cc Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 22 Apr 2025 17:01:11 +0000 Subject: [PATCH 69/73] chore(perf): faster base64 decoding --- src/internal/utils/base64.ts | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/internal/utils/base64.ts b/src/internal/utils/base64.ts index 84854e241..aa7b420e2 100644 --- a/src/internal/utils/base64.ts +++ b/src/internal/utils/base64.ts @@ -22,15 +22,17 @@ export const toBase64 = (data: string | Uint8Array | null | undefined): string = export const fromBase64 = (str: string): Uint8Array => { if (typeof (globalThis as any).Buffer !== 'undefined') { - return new Uint8Array((globalThis as any).Buffer.from(str, 'base64')); + const buf = (globalThis as any).Buffer.from(str, 'base64'); + return new Uint8Array(buf.buffer, buf.byteOffset, buf.byteLength); } if (typeof atob !== 'undefined') { - return new Uint8Array( - atob(str) - .split('') - .map((c) => c.charCodeAt(0)), - ); + const bstr = atob(str); + const buf = new Uint8Array(bstr.length); + for (let i = 0; i < bstr.length; i++) { + buf[i] = bstr.charCodeAt(i); + } + return buf; } throw new OpenAIError('Cannot decode base64 string; Expected `Buffer` or `atob` to be defined'); From 73b38e62350cdd4ab1fc6d8800cc987a507e4089 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 22 Apr 2025 20:12:52 +0000 Subject: [PATCH 70/73] chore(ci): add timeout thresholds for CI jobs --- .github/workflows/ci.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 68e63ba8d..6281f037d 100644 --- 
a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -12,6 +12,7 @@ on: jobs: lint: + timeout-minutes: 10 name: lint runs-on: ubuntu-latest steps: @@ -29,6 +30,7 @@ jobs: run: ./scripts/lint build: + timeout-minutes: 5 name: build runs-on: ubuntu-latest permissions: @@ -63,6 +65,7 @@ jobs: SHA: ${{ github.sha }} run: ./scripts/utils/upload-artifact.sh test: + timeout-minutes: 10 name: test runs-on: ubuntu-latest steps: From 4976f5936655eef1b9ac69cb0295bf37ad7bd691 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 23 Apr 2025 16:29:54 +0000 Subject: [PATCH 71/73] feat(api): adding new image model support --- .stats.yml | 6 +- MIGRATION.md | 1 + api.md | 6 +- src/resources/beta/realtime/realtime.ts | 98 +- src/resources/beta/threads/threads.ts | 4 +- src/resources/evals/evals.ts | 749 +++++++- src/resources/evals/runs/runs.ts | 1699 ++++++++++++++--- .../fine-tuning/checkpoints/checkpoints.ts | 2 + .../fine-tuning/checkpoints/index.ts | 1 + .../fine-tuning/checkpoints/permissions.ts | 17 +- src/resources/images.ts | 202 +- src/resources/responses/responses.ts | 162 ++ tests/api-resources/evals/evals.test.ts | 1 - .../checkpoints/permissions.test.ts | 15 +- tests/api-resources/images.test.ts | 7 +- 15 files changed, 2550 insertions(+), 420 deletions(-) diff --git a/.stats.yml b/.stats.yml index 848c5b5ad..d92408173 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 97 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-5633633cc38734869cf7d993f7b549bb8e4d10e0ec45381ec2cd91507cd8eb8f.yml -openapi_spec_hash: c855121b2b2324b99499c9244c21d24d -config_hash: d20837393b73efdb19cd08e04c1cc9a1 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-8b68ae6b807dca92e914da1dd9e835a20f69b075e79102a264367fd7fddddb33.yml +openapi_spec_hash: b6ade5b1a6327339e6669e1134de2d03 +config_hash: b597cd9a31e9e5ec709e2eefb4c54122 diff --git a/MIGRATION.md b/MIGRATION.md index 448d99b6e..2e63d6445 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -112,6 +112,7 @@ client.parents.children.retrieve('c_456', { parent_id: 'p_123' }); This affects the following methods +- `client.fineTuning.checkpoints.permissions.delete()` - `client.vectorStores.files.retrieve()` - `client.vectorStores.files.update()` - `client.vectorStores.files.delete()` diff --git a/api.md b/api.md index b983b5a3b..6e15d5c3d 100644 --- a/api.md +++ b/api.md @@ -247,7 +247,7 @@ Methods: - client.fineTuning.checkpoints.permissions.create(fineTunedModelCheckpoint, { ...params }) -> PermissionCreateResponsesPage - client.fineTuning.checkpoints.permissions.retrieve(fineTunedModelCheckpoint, { ...params }) -> PermissionRetrieveResponse -- client.fineTuning.checkpoints.permissions.delete(fineTunedModelCheckpoint) -> PermissionDeleteResponse +- client.fineTuning.checkpoints.permissions.delete(permissionID, { ...params }) -> PermissionDeleteResponse # VectorStores @@ -626,6 +626,10 @@ Types: - ResponseOutputRefusal - ResponseOutputText - ResponseReasoningItem +- ResponseReasoningSummaryPartAddedEvent +- ResponseReasoningSummaryPartDoneEvent +- ResponseReasoningSummaryTextDeltaEvent +- ResponseReasoningSummaryTextDoneEvent - ResponseRefusalDeltaEvent - ResponseRefusalDoneEvent - ResponseStatus diff --git a/src/resources/beta/realtime/realtime.ts b/src/resources/beta/realtime/realtime.ts index f4e1becfc..b60c945e8 100644 --- a/src/resources/beta/realtime/realtime.ts +++ 
b/src/resources/beta/realtime/realtime.ts @@ -915,12 +915,34 @@ export type RealtimeClientEvent = | ConversationItemTruncateEvent | InputAudioBufferAppendEvent | InputAudioBufferClearEvent + | RealtimeClientEvent.OutputAudioBufferClear | InputAudioBufferCommitEvent | ResponseCancelEvent | ResponseCreateEvent | SessionUpdateEvent | TranscriptionSessionUpdate; +export namespace RealtimeClientEvent { + /** + * **WebRTC Only:** Emit to cut off the current audio response. This will trigger + * the server to stop generating audio and emit a `output_audio_buffer.cleared` + * event. This event should be preceded by a `response.cancel` client event to stop + * the generation of the current response. + * [Learn more](https://platform.openai.com/docs/guides/realtime-model-capabilities#client-and-server-events-for-audio-in-webrtc). + */ + export interface OutputAudioBufferClear { + /** + * The event type, must be `output_audio_buffer.clear`. + */ + type: 'output_audio_buffer.clear'; + + /** + * The unique ID of the client event used for error handling. + */ + event_id?: string; + } +} + /** * The response resource. */ @@ -1174,7 +1196,10 @@ export type RealtimeServerEvent = | ResponseTextDoneEvent | SessionCreatedEvent | SessionUpdatedEvent - | TranscriptionSessionUpdatedEvent; + | TranscriptionSessionUpdatedEvent + | RealtimeServerEvent.OutputAudioBufferStarted + | RealtimeServerEvent.OutputAudioBufferStopped + | RealtimeServerEvent.OutputAudioBufferCleared; export namespace RealtimeServerEvent { /** @@ -1197,6 +1222,77 @@ export namespace RealtimeServerEvent { */ type: 'conversation.item.retrieved'; } + + /** + * **WebRTC Only:** Emitted when the server begins streaming audio to the client. + * This event is emitted after an audio content part has been added + * (`response.content_part.added`) to the response. + * [Learn more](https://platform.openai.com/docs/guides/realtime-model-capabilities#client-and-server-events-for-audio-in-webrtc). + */ + export interface OutputAudioBufferStarted { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The unique ID of the response that produced the audio. + */ + response_id: string; + + /** + * The event type, must be `output_audio_buffer.started`. + */ + type: 'output_audio_buffer.started'; + } + + /** + * **WebRTC Only:** Emitted when the output audio buffer has been completely + * drained on the server, and no more audio is forthcoming. This event is emitted + * after the full response data has been sent to the client (`response.done`). + * [Learn more](https://platform.openai.com/docs/guides/realtime-model-capabilities#client-and-server-events-for-audio-in-webrtc). + */ + export interface OutputAudioBufferStopped { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The unique ID of the response that produced the audio. + */ + response_id: string; + + /** + * The event type, must be `output_audio_buffer.stopped`. + */ + type: 'output_audio_buffer.stopped'; + } + + /** + * **WebRTC Only:** Emitted when the output audio buffer is cleared. This happens + * either in VAD mode when the user has interrupted + * (`input_audio_buffer.speech_started`), or when the client has emitted the + * `output_audio_buffer.clear` event to manually cut off the current audio + * response. + * [Learn more](https://platform.openai.com/docs/guides/realtime-model-capabilities#client-and-server-events-for-audio-in-webrtc). + */ + export interface OutputAudioBufferCleared { + /** + * The unique ID of the server event. 
+ */ + event_id: string; + + /** + * The unique ID of the response that produced the audio. + */ + response_id: string; + + /** + * The event type, must be `output_audio_buffer.cleared`. + */ + type: 'output_audio_buffer.cleared'; + } } /** diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 10b806b05..2661832b0 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -677,9 +677,7 @@ export interface ThreadCreateAndRunParamsBase { * Override the tools the assistant can use for this run. This is useful for * modifying the behavior on a per-run basis. */ - tools?: Array< - AssistantsAPI.CodeInterpreterTool | AssistantsAPI.FileSearchTool | AssistantsAPI.FunctionTool - > | null; + tools?: Array | null; /** * An alternative to sampling with temperature, called nucleus sampling, where the diff --git a/src/resources/evals/evals.ts b/src/resources/evals/evals.ts index ca5d82b42..172a1c47b 100644 --- a/src/resources/evals/evals.ts +++ b/src/resources/evals/evals.ts @@ -2,6 +2,7 @@ import { APIResource } from '../../core/resource'; import * as Shared from '../shared'; +import * as ResponsesAPI from '../responses/responses'; import * as RunsAPI from './runs/runs'; import { CreateEvalCompletionsRunDataSource, @@ -99,7 +100,7 @@ export interface EvalCustomDataSourceConfig { * the evaluation. */ export interface EvalLabelModelGrader { - input: Array; + input: Array; /** * The labels to assign to each item in the evaluation. @@ -128,57 +129,43 @@ export interface EvalLabelModelGrader { } export namespace EvalLabelModelGrader { - export interface InputMessage { - content: InputMessage.Content; - + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { /** - * The role of the message. One of `user`, `system`, or `developer`. + * Text inputs to the model - can contain template strings. */ - role: 'user' | 'system' | 'developer'; + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; /** - * The type of item, which is always `message`. + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. */ - type: 'message'; - } - - export namespace InputMessage { - export interface Content { - /** - * The text content. - */ - text: string; - - /** - * The type of content, which is always `input_text`. - */ - type: 'input_text'; - } - } - - export interface Assistant { - content: Assistant.Content; + role: 'user' | 'assistant' | 'system' | 'developer'; /** - * The role of the message. Must be `assistant` for output. + * The type of the message input. Always `message`. */ - role: 'assistant'; + type?: 'message'; + } + export namespace Input { /** - * The type of item, which is always `message`. + * A text output from the model. */ - type: 'message'; - } - - export namespace Assistant { - export interface Content { + export interface OutputText { /** - * The text content. + * The text output from the model. */ text: string; /** - * The type of content, which is always `output_text`. + * The type of the output text. Always `output_text`. 
*/ type: 'output_text'; } @@ -251,8 +238,8 @@ export interface EvalStringCheckGrader { */ export interface EvalTextSimilarityGrader { /** - * The evaluation metric to use. One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, - * `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. + * The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, + * `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. */ evaluation_metric: | 'fuzzy_match' @@ -264,8 +251,7 @@ export interface EvalTextSimilarityGrader { | 'rouge_3' | 'rouge_4' | 'rouge_5' - | 'rouge_l' - | 'cosine'; + | 'rouge_l'; /** * The text being graded. @@ -338,14 +324,131 @@ export interface EvalCreateResponse { object: 'eval'; /** - * Indicates whether the evaluation is shared with OpenAI. + * A list of testing criteria. */ - share_with_openai: boolean; + testing_criteria: Array< + | EvalLabelModelGrader + | EvalStringCheckGrader + | EvalTextSimilarityGrader + | EvalCreateResponse.Python + | EvalCreateResponse.ScoreModel + >; +} +export namespace EvalCreateResponse { /** - * A list of testing criteria. + * A PythonGrader object that runs a python script on the input. */ - testing_criteria: Array; + export interface Python { + /** + * The name of the grader. + */ + name: string; + + /** + * The source code of the python script. + */ + source: string; + + /** + * The object type, which is always `python`. + */ + type: 'python'; + + /** + * The image tag to use for the python script. + */ + image_tag?: string; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + } + + /** + * A ScoreModelGrader object that uses a model to assign a score to the input. + */ + export interface ScoreModel { + /** + * The input text. This may include template strings. + */ + input: Array; + + /** + * The model to use for the evaluation. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The object type, which is always `score_model`. + */ + type: 'score_model'; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + + /** + * The range of the score. Defaults to `[0, 1]`. + */ + range?: Array; + + /** + * The sampling parameters for the model. + */ + sampling_params?: unknown; + } + + export namespace ScoreModel { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace Input { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } } /** @@ -393,14 +496,131 @@ export interface EvalRetrieveResponse { object: 'eval'; /** - * Indicates whether the evaluation is shared with OpenAI. + * A list of testing criteria. 
*/ - share_with_openai: boolean; + testing_criteria: Array< + | EvalLabelModelGrader + | EvalStringCheckGrader + | EvalTextSimilarityGrader + | EvalRetrieveResponse.Python + | EvalRetrieveResponse.ScoreModel + >; +} +export namespace EvalRetrieveResponse { /** - * A list of testing criteria. + * A PythonGrader object that runs a python script on the input. + */ + export interface Python { + /** + * The name of the grader. + */ + name: string; + + /** + * The source code of the python script. + */ + source: string; + + /** + * The object type, which is always `python`. + */ + type: 'python'; + + /** + * The image tag to use for the python script. + */ + image_tag?: string; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + } + + /** + * A ScoreModelGrader object that uses a model to assign a score to the input. */ - testing_criteria: Array; + export interface ScoreModel { + /** + * The input text. This may include template strings. + */ + input: Array; + + /** + * The model to use for the evaluation. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The object type, which is always `score_model`. + */ + type: 'score_model'; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + + /** + * The range of the score. Defaults to `[0, 1]`. + */ + range?: Array; + + /** + * The sampling parameters for the model. + */ + sampling_params?: unknown; + } + + export namespace ScoreModel { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace Input { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } } /** @@ -448,14 +668,131 @@ export interface EvalUpdateResponse { object: 'eval'; /** - * Indicates whether the evaluation is shared with OpenAI. + * A list of testing criteria. + */ + testing_criteria: Array< + | EvalLabelModelGrader + | EvalStringCheckGrader + | EvalTextSimilarityGrader + | EvalUpdateResponse.Python + | EvalUpdateResponse.ScoreModel + >; +} + +export namespace EvalUpdateResponse { + /** + * A PythonGrader object that runs a python script on the input. */ - share_with_openai: boolean; + export interface Python { + /** + * The name of the grader. + */ + name: string; + + /** + * The source code of the python script. + */ + source: string; + + /** + * The object type, which is always `python`. + */ + type: 'python'; + + /** + * The image tag to use for the python script. + */ + image_tag?: string; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + } /** - * A list of testing criteria. + * A ScoreModelGrader object that uses a model to assign a score to the input. 
*/ - testing_criteria: Array; + export interface ScoreModel { + /** + * The input text. This may include template strings. + */ + input: Array; + + /** + * The model to use for the evaluation. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The object type, which is always `score_model`. + */ + type: 'score_model'; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + + /** + * The range of the score. Defaults to `[0, 1]`. + */ + range?: Array; + + /** + * The sampling parameters for the model. + */ + sampling_params?: unknown; + } + + export namespace ScoreModel { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace Input { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } } /** @@ -503,14 +840,131 @@ export interface EvalListResponse { object: 'eval'; /** - * Indicates whether the evaluation is shared with OpenAI. + * A list of testing criteria. */ - share_with_openai: boolean; + testing_criteria: Array< + | EvalLabelModelGrader + | EvalStringCheckGrader + | EvalTextSimilarityGrader + | EvalListResponse.Python + | EvalListResponse.ScoreModel + >; +} +export namespace EvalListResponse { /** - * A list of testing criteria. + * A PythonGrader object that runs a python script on the input. + */ + export interface Python { + /** + * The name of the grader. + */ + name: string; + + /** + * The source code of the python script. + */ + source: string; + + /** + * The object type, which is always `python`. + */ + type: 'python'; + + /** + * The image tag to use for the python script. + */ + image_tag?: string; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + } + + /** + * A ScoreModelGrader object that uses a model to assign a score to the input. */ - testing_criteria: Array; + export interface ScoreModel { + /** + * The input text. This may include template strings. + */ + input: Array; + + /** + * The model to use for the evaluation. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The object type, which is always `score_model`. + */ + type: 'score_model'; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + + /** + * The range of the score. Defaults to `[0, 1]`. + */ + range?: Array; + + /** + * The sampling parameters for the model. + */ + sampling_params?: unknown; + } + + export namespace ScoreModel { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. 
Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace Input { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } } export interface EvalDeleteResponse { @@ -525,12 +979,18 @@ export interface EvalCreateParams { /** * The configuration for the data source used for the evaluation runs. */ - data_source_config: EvalCreateParams.Custom | EvalCreateParams.StoredCompletions; + data_source_config: EvalCreateParams.Custom | EvalCreateParams.Logs; /** * A list of graders for all eval runs in this group. */ - testing_criteria: Array; + testing_criteria: Array< + | EvalCreateParams.LabelModel + | EvalStringCheckGrader + | EvalTextSimilarityGrader + | EvalCreateParams.Python + | EvalCreateParams.ScoreModel + >; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -546,11 +1006,6 @@ export interface EvalCreateParams { * The name of the evaluation. */ name?: string; - - /** - * Indicates whether the evaluation is shared with OpenAI. - */ - share_with_openai?: boolean; } export namespace EvalCreateParams { @@ -564,7 +1019,7 @@ export namespace EvalCreateParams { */ export interface Custom { /** - * The json schema for the run data source items. + * The json schema for each row in the data source. */ item_schema: Record; @@ -574,7 +1029,8 @@ export namespace EvalCreateParams { type: 'custom'; /** - * Whether to include the sample schema in the data source. + * Whether the eval should expect you to populate the sample namespace (ie, by + * generating responses off of your data source) */ include_sample_schema?: boolean; } @@ -584,21 +1040,16 @@ export namespace EvalCreateParams { * completions query. This is usually metadata like `usecase=chatbot` or * `prompt-version=v2`, etc. */ - export interface StoredCompletions { + export interface Logs { /** - * The type of data source. Always `stored_completions`. + * The type of data source. Always `logs`. */ - type: 'stored_completions'; + type: 'logs'; /** - * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format, and - * querying for objects via API or the dashboard. - * - * Keys are strings with a maximum length of 64 characters. Values are strings with - * a maximum length of 512 characters. + * Metadata filters for the logs data source. */ - metadata?: Shared.Metadata | null; + metadata?: Record; } /** @@ -606,7 +1057,11 @@ export namespace EvalCreateParams { * the evaluation. */ export interface LabelModel { - input: Array; + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + input: Array; /** * The labels to classify to each item in the evaluation. 
@@ -647,57 +1102,157 @@ export namespace EvalCreateParams { role: string; } - export interface InputMessage { - content: InputMessage.Content; + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; /** - * The role of the message. One of `user`, `system`, or `developer`. + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. */ - role: 'user' | 'system' | 'developer'; + role: 'user' | 'assistant' | 'system' | 'developer'; /** - * The type of item, which is always `message`. + * The type of the message input. Always `message`. */ - type: 'message'; + type?: 'message'; } - export namespace InputMessage { - export interface Content { + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { /** - * The text content. + * The text output from the model. */ text: string; /** - * The type of content, which is always `input_text`. + * The type of the output text. Always `output_text`. */ - type: 'input_text'; + type: 'output_text'; } } + } + + /** + * A PythonGrader object that runs a python script on the input. + */ + export interface Python { + /** + * The name of the grader. + */ + name: string; - export interface OutputMessage { - content: OutputMessage.Content; + /** + * The source code of the python script. + */ + source: string; + + /** + * The object type, which is always `python`. + */ + type: 'python'; + + /** + * The image tag to use for the python script. + */ + image_tag?: string; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + } + + /** + * A ScoreModelGrader object that uses a model to assign a score to the input. + */ + export interface ScoreModel { + /** + * The input text. This may include template strings. + */ + input: Array; + + /** + * The model to use for the evaluation. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The object type, which is always `score_model`. + */ + type: 'score_model'; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + + /** + * The range of the score. Defaults to `[0, 1]`. + */ + range?: Array; + /** + * The sampling parameters for the model. + */ + sampling_params?: unknown; + } + + export namespace ScoreModel { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { /** - * The role of the message. Must be `assistant` for output. + * Text inputs to the model - can contain template strings. */ - role: 'assistant'; + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; /** - * The type of item, which is always `message`. + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. 
*/ - type: 'message'; + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; } - export namespace OutputMessage { - export interface Content { + export namespace Input { + /** + * A text output from the model. + */ + export interface OutputText { /** - * The text content. + * The text output from the model. */ text: string; /** - * The type of content, which is always `output_text`. + * The type of the output text. Always `output_text`. */ type: 'output_text'; } diff --git a/src/resources/evals/runs/runs.ts b/src/resources/evals/runs/runs.ts index ca5c265d9..4f12435e4 100644 --- a/src/resources/evals/runs/runs.ts +++ b/src/resources/evals/runs/runs.ts @@ -2,6 +2,7 @@ import { APIResource } from '../../../core/resource'; import * as Shared from '../../shared'; +import * as ResponsesAPI from '../../responses/responses'; import * as OutputItemsAPI from './output-items'; import { OutputItemListParams, @@ -75,15 +76,6 @@ export type RunListResponsesPage = CursorPage; * A CompletionsRunDataSource object describing a model sampling configuration. */ export interface CreateEvalCompletionsRunDataSource { - input_messages: - | CreateEvalCompletionsRunDataSource.Template - | CreateEvalCompletionsRunDataSource.ItemReference; - - /** - * The name of the model to use for generating completions (e.g. "o3-mini"). - */ - model: string; - /** * A StoredCompletionsRunDataSource configuration describing a set of filters */ @@ -97,105 +89,19 @@ export interface CreateEvalCompletionsRunDataSource { */ type: 'completions'; + input_messages?: + | CreateEvalCompletionsRunDataSource.Template + | CreateEvalCompletionsRunDataSource.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model?: string; + sampling_params?: CreateEvalCompletionsRunDataSource.SamplingParams; } export namespace CreateEvalCompletionsRunDataSource { - export interface Template { - /** - * A list of chat messages forming the prompt or context. May include variable - * references to the "item" namespace, ie {{item.name}}. - */ - template: Array; - - /** - * The type of input messages. Always `template`. - */ - type: 'template'; - } - - export namespace Template { - export interface ChatMessage { - /** - * The content of the message. - */ - content: string; - - /** - * The role of the message (e.g. "system", "assistant", "user"). - */ - role: string; - } - - export interface InputMessage { - content: InputMessage.Content; - - /** - * The role of the message. One of `user`, `system`, or `developer`. - */ - role: 'user' | 'system' | 'developer'; - - /** - * The type of item, which is always `message`. - */ - type: 'message'; - } - - export namespace InputMessage { - export interface Content { - /** - * The text content. - */ - text: string; - - /** - * The type of content, which is always `input_text`. - */ - type: 'input_text'; - } - } - - export interface OutputMessage { - content: OutputMessage.Content; - - /** - * The role of the message. Must be `assistant` for output. - */ - role: 'assistant'; - - /** - * The type of item, which is always `message`. - */ - type: 'message'; - } - - export namespace OutputMessage { - export interface Content { - /** - * The text content. - */ - text: string; - - /** - * The type of content, which is always `output_text`. - */ - type: 'output_text'; - } - } - } - - export interface ItemReference { - /** - * A reference to a variable in the "item" namespace. 
Ie, "item.name" - */ - item_reference: string; - - /** - * The type of input messages. Always `item_reference`. - */ - type: 'item_reference'; - } - export interface FileContent { /** * The content of the jsonl file. @@ -232,20 +138,25 @@ export namespace CreateEvalCompletionsRunDataSource { * A StoredCompletionsRunDataSource configuration describing a set of filters */ export interface StoredCompletions { + /** + * The type of source. Always `stored_completions`. + */ + type: 'stored_completions'; + /** * An optional Unix timestamp to filter items created after this time. */ - created_after: number | null; + created_after?: number | null; /** * An optional Unix timestamp to filter items created before this time. */ - created_before: number | null; + created_before?: number | null; /** * An optional maximum number of items to return. */ - limit: number | null; + limit?: number | null; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -255,17 +166,81 @@ export namespace CreateEvalCompletionsRunDataSource { * Keys are strings with a maximum length of 64 characters. Values are strings with * a maximum length of 512 characters. */ - metadata: Shared.Metadata | null; + metadata?: Shared.Metadata | null; /** * An optional model to filter by (e.g., 'gpt-4o'). */ - model: string | null; + model?: string | null; + } + export interface Template { /** - * The type of source. Always `stored_completions`. + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. */ - type: 'stored_completions'; + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Message { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | Message.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace Message { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; } export interface SamplingParams { @@ -370,7 +345,10 @@ export interface RunCreateResponse { /** * Information about the run's data source. */ - data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + data_source: + | CreateEvalJSONLRunDataSource + | CreateEvalCompletionsRunDataSource + | RunCreateResponse.Completions; /** * An object representing an error response from the Eval API. 
@@ -434,162 +412,240 @@ export interface RunCreateResponse { } export namespace RunCreateResponse { - export interface PerModelUsage { - /** - * The number of tokens retrieved from cache. - */ - cached_tokens: number; - + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface Completions { /** - * The number of completion tokens generated. + * A EvalResponsesSource object describing a run data source configuration. */ - completion_tokens: number; + source: Completions.FileContent | Completions.FileID | Completions.Responses; /** - * The number of invocations. + * The type of run data source. Always `completions`. */ - invocation_count: number; + type: 'completions'; - /** - * The name of the model. - */ - model_name: string; + input_messages?: Completions.Template | Completions.ItemReference; /** - * The number of prompt tokens used. + * The name of the model to use for generating completions (e.g. "o3-mini"). */ - prompt_tokens: number; + model?: string; - /** - * The total number of tokens used. - */ - total_tokens: number; + sampling_params?: Completions.SamplingParams; } - export interface PerTestingCriteriaResult { - /** - * Number of tests failed for this criteria. - */ - failed: number; + export namespace Completions { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; - /** - * Number of tests passed for this criteria. - */ - passed: number; + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } - /** - * A description of the testing criteria. - */ - testing_criteria: string; - } + export namespace FileContent { + export interface Content { + item: Record; - /** - * Counters summarizing the outcomes of the evaluation run. - */ - export interface ResultCounts { - /** - * Number of output items that resulted in an error. - */ - errored: number; + sample?: Record; + } + } - /** - * Number of output items that failed to pass the evaluation. - */ - failed: number; + export interface FileID { + /** + * The identifier of the file. + */ + id: string; - /** - * Number of output items that passed the evaluation. - */ - passed: number; + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } /** - * Total number of executed output items. + * A EvalResponsesSource object describing a run data source configuration. */ - total: number; - } -} + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; -/** - * A schema representing an evaluation run. - */ -export interface RunRetrieveResponse { - /** - * Unique identifier for the evaluation run. - */ - id: string; + /** + * Whether to allow parallel tool calls. This is a query parameter used to select + * responses. + */ + allow_parallel_tool_calls?: boolean | null; - /** - * Unix timestamp (in seconds) when the evaluation run was created. - */ - created_at: number; + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; - /** - * Information about the run's data source. - */ - data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; - /** - * An object representing an error response from the Eval API. 
- */ - error: EvalAPIError; + /** + * Whether the response has tool calls. This is a query parameter used to select + * responses. + */ + has_tool_calls?: boolean | null; - /** - * The identifier of the associated evaluation. - */ - eval_id: string; + /** + * Optional search string for instructions. This is a query parameter used to + * select responses. + */ + instructions_search?: string | null; - /** - * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format, and - * querying for objects via API or the dashboard. - * - * Keys are strings with a maximum length of 64 characters. Values are strings with - * a maximum length of 512 characters. - */ - metadata: Shared.Metadata | null; + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; - /** - * The model that is evaluated, if applicable. - */ - model: string; + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; - /** - * The name of the evaluation run. - */ - name: string; + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; - /** - * The type of the object. Always "eval.run". - */ - object: 'eval.run'; + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; - /** - * Usage statistics for each model during the evaluation run. - */ - per_model_usage: Array; + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; - /** - * Results per testing criteria applied during the evaluation run. - */ - per_testing_criteria_results: Array; + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } - /** - * The URL to the rendered evaluation run report on the UI dashboard. - */ - report_url: string; + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; - /** - * Counters summarizing the outcomes of the evaluation run. - */ - result_counts: RunRetrieveResponse.ResultCounts; + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } - /** - * The status of the evaluation run. - */ - status: string; -} + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. 
+ */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } -export namespace RunRetrieveResponse { export interface PerModelUsage { /** * The number of tokens retrieved from cache. @@ -668,7 +724,7 @@ export namespace RunRetrieveResponse { /** * A schema representing an evaluation run. */ -export interface RunListResponse { +export interface RunRetrieveResponse { /** * Unique identifier for the evaluation run. */ @@ -682,7 +738,10 @@ export interface RunListResponse { /** * Information about the run's data source. */ - data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + data_source: + | CreateEvalJSONLRunDataSource + | CreateEvalCompletionsRunDataSource + | RunRetrieveResponse.Completions; /** * An object representing an error response from the Eval API. @@ -722,12 +781,12 @@ export interface RunListResponse { /** * Usage statistics for each model during the evaluation run. */ - per_model_usage: Array; + per_model_usage: Array; /** * Results per testing criteria applied during the evaluation run. */ - per_testing_criteria_results: Array; + per_testing_criteria_results: Array; /** * The URL to the rendered evaluation run report on the UI dashboard. @@ -737,7 +796,7 @@ export interface RunListResponse { /** * Counters summarizing the outcomes of the evaluation run. */ - result_counts: RunListResponse.ResultCounts; + result_counts: RunRetrieveResponse.ResultCounts; /** * The status of the evaluation run. @@ -745,7 +804,241 @@ export interface RunListResponse { status: string; } -export namespace RunListResponse { +export namespace RunRetrieveResponse { + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface Completions { + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + source: Completions.FileContent | Completions.FileID | Completions.Responses; + + /** + * The type of run data source. Always `completions`. + */ + type: 'completions'; + + input_messages?: Completions.Template | Completions.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model?: string; + + sampling_params?: Completions.SamplingParams; + } + + export namespace Completions { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. 
+ */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Whether to allow parallel tool calls. This is a query parameter used to select + * responses. + */ + allow_parallel_tool_calls?: boolean | null; + + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; + + /** + * Whether the response has tool calls. This is a query parameter used to select + * responses. + */ + has_tool_calls?: boolean | null; + + /** + * Optional search string for instructions. This is a query parameter used to + * select responses. + */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; + + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; + + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. 
+ */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } + export interface PerModelUsage { /** * The number of tokens retrieved from cache. @@ -821,18 +1114,10 @@ export namespace RunListResponse { } } -export interface RunDeleteResponse { - deleted?: boolean; - - object?: string; - - run_id?: string; -} - /** * A schema representing an evaluation run. */ -export interface RunCancelResponse { +export interface RunListResponse { /** * Unique identifier for the evaluation run. */ @@ -846,7 +1131,10 @@ export interface RunCancelResponse { /** * Information about the run's data source. */ - data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + data_source: + | CreateEvalJSONLRunDataSource + | CreateEvalCompletionsRunDataSource + | RunListResponse.Completions; /** * An object representing an error response from the Eval API. @@ -886,12 +1174,12 @@ export interface RunCancelResponse { /** * Usage statistics for each model during the evaluation run. */ - per_model_usage: Array; + per_model_usage: Array; /** * Results per testing criteria applied during the evaluation run. */ - per_testing_criteria_results: Array; + per_testing_criteria_results: Array; /** * The URL to the rendered evaluation run report on the UI dashboard. @@ -901,7 +1189,7 @@ export interface RunCancelResponse { /** * Counters summarizing the outcomes of the evaluation run. */ - result_counts: RunCancelResponse.ResultCounts; + result_counts: RunListResponse.ResultCounts; /** * The status of the evaluation run. @@ -909,25 +1197,660 @@ export interface RunCancelResponse { status: string; } -export namespace RunCancelResponse { - export interface PerModelUsage { +export namespace RunListResponse { + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface Completions { /** - * The number of tokens retrieved from cache. + * A EvalResponsesSource object describing a run data source configuration. */ - cached_tokens: number; + source: Completions.FileContent | Completions.FileID | Completions.Responses; /** - * The number of completion tokens generated. + * The type of run data source. Always `completions`. */ - completion_tokens: number; + type: 'completions'; - /** - * The number of invocations. - */ - invocation_count: number; + input_messages?: Completions.Template | Completions.ItemReference; /** - * The name of the model. + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model?: string; + + sampling_params?: Completions.SamplingParams; + } + + export namespace Completions { + export interface FileContent { + /** + * The content of the jsonl file. 
+ */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Whether to allow parallel tool calls. This is a query parameter used to select + * responses. + */ + allow_parallel_tool_calls?: boolean | null; + + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; + + /** + * Whether the response has tool calls. This is a query parameter used to select + * responses. + */ + has_tool_calls?: boolean | null; + + /** + * Optional search string for instructions. This is a query parameter used to + * select responses. + */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; + + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; + + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. 
+ */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } + + export interface PerModelUsage { + /** + * The number of tokens retrieved from cache. + */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of invocations. + */ + invocation_count: number; + + /** + * The name of the model. + */ + model_name: string; + + /** + * The number of prompt tokens used. + */ + prompt_tokens: number; + + /** + * The total number of tokens used. + */ + total_tokens: number; + } + + export interface PerTestingCriteriaResult { + /** + * Number of tests failed for this criteria. + */ + failed: number; + + /** + * Number of tests passed for this criteria. + */ + passed: number; + + /** + * A description of the testing criteria. + */ + testing_criteria: string; + } + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + export interface ResultCounts { + /** + * Number of output items that resulted in an error. + */ + errored: number; + + /** + * Number of output items that failed to pass the evaluation. + */ + failed: number; + + /** + * Number of output items that passed the evaluation. + */ + passed: number; + + /** + * Total number of executed output items. + */ + total: number; + } +} + +export interface RunDeleteResponse { + deleted?: boolean; + + object?: string; + + run_id?: string; +} + +/** + * A schema representing an evaluation run. + */ +export interface RunCancelResponse { + /** + * Unique identifier for the evaluation run. + */ + id: string; + + /** + * Unix timestamp (in seconds) when the evaluation run was created. + */ + created_at: number; + + /** + * Information about the run's data source. + */ + data_source: + | CreateEvalJSONLRunDataSource + | CreateEvalCompletionsRunDataSource + | RunCancelResponse.Completions; + + /** + * An object representing an error response from the Eval API. + */ + error: EvalAPIError; + + /** + * The identifier of the associated evaluation. + */ + eval_id: string; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The model that is evaluated, if applicable. + */ + model: string; + + /** + * The name of the evaluation run. + */ + name: string; + + /** + * The type of the object. 
Always "eval.run". + */ + object: 'eval.run'; + + /** + * Usage statistics for each model during the evaluation run. + */ + per_model_usage: Array; + + /** + * Results per testing criteria applied during the evaluation run. + */ + per_testing_criteria_results: Array; + + /** + * The URL to the rendered evaluation run report on the UI dashboard. + */ + report_url: string; + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + result_counts: RunCancelResponse.ResultCounts; + + /** + * The status of the evaluation run. + */ + status: string; +} + +export namespace RunCancelResponse { + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface Completions { + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + source: Completions.FileContent | Completions.FileID | Completions.Responses; + + /** + * The type of run data source. Always `completions`. + */ + type: 'completions'; + + input_messages?: Completions.Template | Completions.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model?: string; + + sampling_params?: Completions.SamplingParams; + } + + export namespace Completions { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Whether to allow parallel tool calls. This is a query parameter used to select + * responses. + */ + allow_parallel_tool_calls?: boolean | null; + + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; + + /** + * Whether the response has tool calls. This is a query parameter used to select + * responses. + */ + has_tool_calls?: boolean | null; + + /** + * Optional search string for instructions. This is a query parameter used to + * select responses. + */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; + + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; + + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. 
+ */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } + + export interface PerModelUsage { + /** + * The number of tokens retrieved from cache. + */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of invocations. + */ + invocation_count: number; + + /** + * The name of the model. */ model_name: string; @@ -989,7 +1912,10 @@ export interface RunCreateParams { /** * Details about the run's data source. */ - data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + data_source: + | CreateEvalJSONLRunDataSource + | CreateEvalCompletionsRunDataSource + | RunCreateParams.CreateEvalResponsesRunDataSource; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -1007,6 +1933,247 @@ export interface RunCreateParams { name?: string; } +export namespace RunCreateParams { + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface CreateEvalResponsesRunDataSource { + /** + * A EvalResponsesSource object describing a run data source configuration. 
+ */ + source: + | CreateEvalResponsesRunDataSource.FileContent + | CreateEvalResponsesRunDataSource.FileID + | CreateEvalResponsesRunDataSource.Responses; + + /** + * The type of run data source. Always `completions`. + */ + type: 'completions'; + + input_messages?: + | CreateEvalResponsesRunDataSource.Template + | CreateEvalResponsesRunDataSource.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model?: string; + + sampling_params?: CreateEvalResponsesRunDataSource.SamplingParams; + } + + export namespace CreateEvalResponsesRunDataSource { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Whether to allow parallel tool calls. This is a query parameter used to select + * responses. + */ + allow_parallel_tool_calls?: boolean | null; + + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; + + /** + * Whether the response has tool calls. This is a query parameter used to select + * responses. + */ + has_tool_calls?: boolean | null; + + /** + * Optional search string for instructions. This is a query parameter used to + * select responses. + */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; + + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; + + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. 
Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } +} + export interface RunRetrieveParams { /** * The ID of the evaluation to retrieve runs for. @@ -1022,8 +2189,8 @@ export interface RunListParams extends CursorPageParams { order?: 'asc' | 'desc'; /** - * Filter runs by status. Use "queued" | "in_progress" | "failed" | "completed" | - * "canceled". + * Filter runs by status. One of `queued` | `in_progress` | `failed` | `completed` + * | `canceled`. 
*/ status?: 'queued' | 'in_progress' | 'completed' | 'canceled' | 'failed'; } diff --git a/src/resources/fine-tuning/checkpoints/checkpoints.ts b/src/resources/fine-tuning/checkpoints/checkpoints.ts index 91cab6fc9..da055b0e4 100644 --- a/src/resources/fine-tuning/checkpoints/checkpoints.ts +++ b/src/resources/fine-tuning/checkpoints/checkpoints.ts @@ -6,6 +6,7 @@ import { PermissionCreateParams, PermissionCreateResponse, PermissionCreateResponsesPage, + PermissionDeleteParams, PermissionDeleteResponse, PermissionRetrieveParams, PermissionRetrieveResponse, @@ -27,5 +28,6 @@ export declare namespace Checkpoints { type PermissionCreateResponsesPage as PermissionCreateResponsesPage, type PermissionCreateParams as PermissionCreateParams, type PermissionRetrieveParams as PermissionRetrieveParams, + type PermissionDeleteParams as PermissionDeleteParams, }; } diff --git a/src/resources/fine-tuning/checkpoints/index.ts b/src/resources/fine-tuning/checkpoints/index.ts index c5b018cea..7e04fc667 100644 --- a/src/resources/fine-tuning/checkpoints/index.ts +++ b/src/resources/fine-tuning/checkpoints/index.ts @@ -8,5 +8,6 @@ export { type PermissionDeleteResponse, type PermissionCreateParams, type PermissionRetrieveParams, + type PermissionDeleteParams, type PermissionCreateResponsesPage, } from './permissions'; diff --git a/src/resources/fine-tuning/checkpoints/permissions.ts b/src/resources/fine-tuning/checkpoints/permissions.ts index ba1e79ca8..87a0743eb 100644 --- a/src/resources/fine-tuning/checkpoints/permissions.ts +++ b/src/resources/fine-tuning/checkpoints/permissions.ts @@ -48,9 +48,14 @@ export class Permissions extends APIResource { * Organization owners can use this endpoint to delete a permission for a * fine-tuned model checkpoint. */ - delete(fineTunedModelCheckpoint: string, options?: RequestOptions): APIPromise { + delete( + permissionID: string, + params: PermissionDeleteParams, + options?: RequestOptions, + ): APIPromise { + const { fine_tuned_model_checkpoint } = params; return this._client.delete( - path`/fine_tuning/checkpoints/${fineTunedModelCheckpoint}/permissions`, + path`/fine_tuning/checkpoints/${fine_tuned_model_checkpoint}/permissions/${permissionID}`, options, ); } @@ -171,6 +176,13 @@ export interface PermissionRetrieveParams { project_id?: string; } +export interface PermissionDeleteParams { + /** + * The ID of the fine-tuned model checkpoint to delete a permission for. + */ + fine_tuned_model_checkpoint: string; +} + export declare namespace Permissions { export { type PermissionCreateResponse as PermissionCreateResponse, @@ -179,5 +191,6 @@ export declare namespace Permissions { type PermissionCreateResponsesPage as PermissionCreateResponsesPage, type PermissionCreateParams as PermissionCreateParams, type PermissionRetrieveParams as PermissionRetrieveParams, + type PermissionDeleteParams as PermissionDeleteParams, }; } diff --git a/src/resources/images.ts b/src/resources/images.ts index 491255563..20955d3ab 100644 --- a/src/resources/images.ts +++ b/src/resources/images.ts @@ -8,7 +8,7 @@ import { multipartFormRequestOptions } from '../internal/uploads'; export class Images extends APIResource { /** - * Creates a variation of a given image. + * Creates a variation of a given image. This endpoint only supports `dall-e-2`. 
*/ createVariation(body: ImageCreateVariationParams, options?: RequestOptions): APIPromise { return this._client.post( @@ -18,7 +18,8 @@ export class Images extends APIResource { } /** - * Creates an edited or extended image given an original image and a prompt. + * Creates an edited or extended image given one or more source images and a + * prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`. */ edit(body: ImageEditParams, options?: RequestOptions): APIPromise { return this._client.post( @@ -29,6 +30,7 @@ export class Images extends APIResource { /** * Creates an image given a prompt. + * [Learn more](https://platform.openai.com/docs/guides/images). */ generate(body: ImageGenerateParams, options?: RequestOptions): APIPromise { return this._client.post('/images/generations', { body, ...options }); @@ -36,33 +38,93 @@ export class Images extends APIResource { } /** - * Represents the url or the content of an image generated by the OpenAI API. + * Represents the content or the URL of an image generated by the OpenAI API. */ export interface Image { /** - * The base64-encoded JSON of the generated image, if `response_format` is - * `b64_json`. + * The base64-encoded JSON of the generated image. Default value for `gpt-image-1`, + * and only present if `response_format` is set to `b64_json` for `dall-e-2` and + * `dall-e-3`. */ b64_json?: string; /** - * The prompt that was used to generate the image, if there was any revision to the - * prompt. + * For `dall-e-3` only, the revised prompt that was used to generate the image. */ revised_prompt?: string; /** - * The URL of the generated image, if `response_format` is `url` (default). + * When using `dall-e-2` or `dall-e-3`, the URL of the generated image if + * `response_format` is set to `url` (default value). Unsupported for + * `gpt-image-1`. */ url?: string; } -export type ImageModel = 'dall-e-2' | 'dall-e-3'; +export type ImageModel = 'dall-e-2' | 'dall-e-3' | 'gpt-image-1'; +/** + * The response from the image generation endpoint. + */ export interface ImagesResponse { + /** + * The Unix timestamp (in seconds) of when the image was created. + */ created: number; - data: Array; + /** + * The list of generated images. + */ + data?: Array; + + /** + * For `gpt-image-1` only, the token usage information for the image generation. + */ + usage?: ImagesResponse.Usage; +} + +export namespace ImagesResponse { + /** + * For `gpt-image-1` only, the token usage information for the image generation. + */ + export interface Usage { + /** + * The number of tokens (images and text) in the input prompt. + */ + input_tokens: number; + + /** + * The input tokens detailed information for the image generation. + */ + input_tokens_details: Usage.InputTokensDetails; + + /** + * The number of image tokens in the output image. + */ + output_tokens: number; + + /** + * The total number of tokens (images and text) used for the image generation. + */ + total_tokens: number; + } + + export namespace Usage { + /** + * The input tokens detailed information for the image generation. + */ + export interface InputTokensDetails { + /** + * The number of image tokens in the input prompt. + */ + image_tokens: number; + + /** + * The number of text tokens in the input prompt. + */ + text_tokens: number; + } + } } export interface ImageCreateVariationParams { @@ -79,8 +141,7 @@ export interface ImageCreateVariationParams { model?: (string & {}) | ImageModel | null; /** - * The number of images to generate. Must be between 1 and 10. 
For `dall-e-3`, only - * `n=1` is supported. + * The number of images to generate. Must be between 1 and 10. */ n?: number | null; @@ -107,27 +168,31 @@ export interface ImageCreateVariationParams { export interface ImageEditParams { /** - * The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask - * is not provided, image must have transparency, which will be used as the mask. + * The image(s) to edit. Must be a supported image file or an array of images. For + * `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + * 25MB. For `dall-e-2`, you can only provide one image, and it should be a square + * `png` file less than 4MB. */ - image: Uploadable; + image: Uploadable | Array; /** * A text description of the desired image(s). The maximum length is 1000 - * characters. + * characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. */ prompt: string; /** * An additional image whose fully transparent areas (e.g. where alpha is zero) - * indicate where `image` should be edited. Must be a valid PNG file, less than + * indicate where `image` should be edited. If there are multiple images provided, + * the mask will be applied on the first image. Must be a valid PNG file, less than * 4MB, and have the same dimensions as `image`. */ mask?: Uploadable; /** - * The model to use for image generation. Only `dall-e-2` is supported at this - * time. + * The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are + * supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` + * is used. */ model?: (string & {}) | ImageModel | null; @@ -136,16 +201,25 @@ export interface ImageEditParams { */ n?: number | null; + /** + * The quality of the image that will be generated. `high`, `medium` and `low` are + * only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. + * Defaults to `auto`. + */ + quality?: 'standard' | 'low' | 'medium' | 'high' | 'auto' | null; + /** * The format in which the generated images are returned. Must be one of `url` or * `b64_json`. URLs are only valid for 60 minutes after the image has been - * generated. + * generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1` + * will always return base64-encoded images. */ response_format?: 'url' | 'b64_json' | null; /** - * The size of the generated images. Must be one of `256x256`, `512x512`, or - * `1024x1024`. + * The size of the generated images. Must be one of `1024x1024`, `1536x1024` + * (landscape), `1024x1536` (portrait), or `auto` (default value) for + * `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. */ size?: '256x256' | '512x512' | '1024x1024' | null; @@ -159,16 +233,36 @@ export interface ImageEditParams { export interface ImageGenerateParams { /** - * A text description of the desired image(s). The maximum length is 1000 - * characters for `dall-e-2` and 4000 characters for `dall-e-3`. + * A text description of the desired image(s). The maximum length is 32000 + * characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters + * for `dall-e-3`. */ prompt: string; /** - * The model to use for image generation. + * Allows to set transparency for the background of the generated image(s). This + * parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + * `opaque` or `auto` (default value). When `auto` is used, the model will + * automatically determine the best background for the image. 
+ * + * If `transparent`, the output format needs to support transparency, so it should + * be set to either `png` (default value) or `webp`. + */ + background?: 'transparent' | 'opaque' | 'auto' | null; + + /** + * The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or + * `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to + * `gpt-image-1` is used. */ model?: (string & {}) | ImageModel | null; + /** + * Control the content-moderation level for images generated by `gpt-image-1`. Must + * be either `low` for less restrictive filtering or `auto` (default value). + */ + moderation?: 'low' | 'auto' | null; + /** * The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only * `n=1` is supported. @@ -176,31 +270,59 @@ export interface ImageGenerateParams { n?: number | null; /** - * The quality of the image that will be generated. `hd` creates images with finer - * details and greater consistency across the image. This param is only supported - * for `dall-e-3`. + * The compression level (0-100%) for the generated images. This parameter is only + * supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + * defaults to 100. */ - quality?: 'standard' | 'hd'; + output_compression?: number | null; /** - * The format in which the generated images are returned. Must be one of `url` or - * `b64_json`. URLs are only valid for 60 minutes after the image has been - * generated. + * The format in which the generated images are returned. This parameter is only + * supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. + */ + output_format?: 'png' | 'jpeg' | 'webp' | null; + + /** + * The quality of the image that will be generated. + * + * - `auto` (default value) will automatically select the best quality for the + * given model. + * - `high`, `medium` and `low` are supported for `gpt-image-1`. + * - `hd` and `standard` are supported for `dall-e-3`. + * - `standard` is the only option for `dall-e-2`. + */ + quality?: 'standard' | 'hd' | 'low' | 'medium' | 'high' | 'auto' | null; + + /** + * The format in which generated images with `dall-e-2` and `dall-e-3` are + * returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes + * after the image has been generated. This parameter isn't supported for + * `gpt-image-1` which will always return base64-encoded images. */ response_format?: 'url' | 'b64_json' | null; /** - * The size of the generated images. Must be one of `256x256`, `512x512`, or - * `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or - * `1024x1792` for `dall-e-3` models. + * The size of the generated images. Must be one of `1024x1024`, `1536x1024` + * (landscape), `1024x1536` (portrait), or `auto` (default value) for + * `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and + * one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. */ - size?: '256x256' | '512x512' | '1024x1024' | '1792x1024' | '1024x1792' | null; + size?: + | 'auto' + | '1024x1024' + | '1536x1024' + | '1024x1536' + | '256x256' + | '512x512' + | '1792x1024' + | '1024x1792' + | null; /** - * The style of the generated images. Must be one of `vivid` or `natural`. Vivid - * causes the model to lean towards generating hyper-real and dramatic images. - * Natural causes the model to produce more natural, less hyper-real looking - * images. This param is only supported for `dall-e-3`. + * The style of the generated images. 
This parameter is only supported for + * `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean + * towards generating hyper-real and dramatic images. Natural causes the model to + * produce more natural, less hyper-real looking images. */ style?: 'vivid' | 'natural' | null; diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index d8dc6ab84..fa3484ae8 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -2151,6 +2151,160 @@ export namespace ResponseReasoningItem { } } +/** + * Emitted when a new reasoning summary part is added. + */ +export interface ResponseReasoningSummaryPartAddedEvent { + /** + * The ID of the item this summary part is associated with. + */ + item_id: string; + + /** + * The index of the output item this summary part is associated with. + */ + output_index: number; + + /** + * The summary part that was added. + */ + part: ResponseReasoningSummaryPartAddedEvent.Part; + + /** + * The index of the summary part within the reasoning summary. + */ + summary_index: number; + + /** + * The type of the event. Always `response.reasoning_summary_part.added`. + */ + type: 'response.reasoning_summary_part.added'; +} + +export namespace ResponseReasoningSummaryPartAddedEvent { + /** + * The summary part that was added. + */ + export interface Part { + /** + * The text of the summary part. + */ + text: string; + + /** + * The type of the summary part. Always `summary_text`. + */ + type: 'summary_text'; + } +} + +/** + * Emitted when a reasoning summary part is completed. + */ +export interface ResponseReasoningSummaryPartDoneEvent { + /** + * The ID of the item this summary part is associated with. + */ + item_id: string; + + /** + * The index of the output item this summary part is associated with. + */ + output_index: number; + + /** + * The completed summary part. + */ + part: ResponseReasoningSummaryPartDoneEvent.Part; + + /** + * The index of the summary part within the reasoning summary. + */ + summary_index: number; + + /** + * The type of the event. Always `response.reasoning_summary_part.done`. + */ + type: 'response.reasoning_summary_part.done'; +} + +export namespace ResponseReasoningSummaryPartDoneEvent { + /** + * The completed summary part. + */ + export interface Part { + /** + * The text of the summary part. + */ + text: string; + + /** + * The type of the summary part. Always `summary_text`. + */ + type: 'summary_text'; + } +} + +/** + * Emitted when a delta is added to a reasoning summary text. + */ +export interface ResponseReasoningSummaryTextDeltaEvent { + /** + * The text delta that was added to the summary. + */ + delta: string; + + /** + * The ID of the item this summary text delta is associated with. + */ + item_id: string; + + /** + * The index of the output item this summary text delta is associated with. + */ + output_index: number; + + /** + * The index of the summary part within the reasoning summary. + */ + summary_index: number; + + /** + * The type of the event. Always `response.reasoning_summary_text.delta`. + */ + type: 'response.reasoning_summary_text.delta'; +} + +/** + * Emitted when a reasoning summary text is completed. + */ +export interface ResponseReasoningSummaryTextDoneEvent { + /** + * The ID of the item this summary text is associated with. + */ + item_id: string; + + /** + * The index of the output item this summary text is associated with. 
+ */ + output_index: number; + + /** + * The index of the summary part within the reasoning summary. + */ + summary_index: number; + + /** + * The full text of the completed reasoning summary. + */ + text: string; + + /** + * The type of the event. Always `response.reasoning_summary_text.done`. + */ + type: 'response.reasoning_summary_text.done'; +} + /** * Emitted when there is a partial refusal text. */ @@ -2245,6 +2399,10 @@ export type ResponseStreamEvent = | ResponseIncompleteEvent | ResponseOutputItemAddedEvent | ResponseOutputItemDoneEvent + | ResponseReasoningSummaryPartAddedEvent + | ResponseReasoningSummaryPartDoneEvent + | ResponseReasoningSummaryTextDeltaEvent + | ResponseReasoningSummaryTextDoneEvent | ResponseRefusalDeltaEvent | ResponseRefusalDoneEvent | ResponseTextAnnotationDeltaEvent @@ -2960,6 +3118,10 @@ export declare namespace Responses { type ResponseOutputRefusal as ResponseOutputRefusal, type ResponseOutputText as ResponseOutputText, type ResponseReasoningItem as ResponseReasoningItem, + type ResponseReasoningSummaryPartAddedEvent as ResponseReasoningSummaryPartAddedEvent, + type ResponseReasoningSummaryPartDoneEvent as ResponseReasoningSummaryPartDoneEvent, + type ResponseReasoningSummaryTextDeltaEvent as ResponseReasoningSummaryTextDeltaEvent, + type ResponseReasoningSummaryTextDoneEvent as ResponseReasoningSummaryTextDoneEvent, type ResponseRefusalDeltaEvent as ResponseRefusalDeltaEvent, type ResponseRefusalDoneEvent as ResponseRefusalDoneEvent, type ResponseStatus as ResponseStatus, diff --git a/tests/api-resources/evals/evals.test.ts b/tests/api-resources/evals/evals.test.ts index 7d896e55f..a852ef8f7 100644 --- a/tests/api-resources/evals/evals.test.ts +++ b/tests/api-resources/evals/evals.test.ts @@ -46,7 +46,6 @@ describe('resource evals', () => { ], metadata: { foo: 'string' }, name: 'name', - share_with_openai: true, }); }); diff --git a/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts b/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts index ce632add1..8427ee957 100644 --- a/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts +++ b/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts @@ -51,11 +51,10 @@ describe('resource permissions', () => { ).rejects.toThrow(OpenAI.NotFoundError); }); - // OpenAPI spec is slightly incorrect - test.skip('delete', async () => { - const responsePromise = client.fineTuning.checkpoints.permissions.delete( - 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', - ); + test('delete: only required params', async () => { + const responsePromise = client.fineTuning.checkpoints.permissions.delete('cp_zc4Q7MP6XxulcVzj4MZdwsAB', { + fine_tuned_model_checkpoint: 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', + }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -64,4 +63,10 @@ describe('resource permissions', () => { expect(dataAndResponse.data).toBe(response); expect(dataAndResponse.response).toBe(rawResponse); }); + + test('delete: required and optional params', async () => { + const response = await client.fineTuning.checkpoints.permissions.delete('cp_zc4Q7MP6XxulcVzj4MZdwsAB', { + fine_tuned_model_checkpoint: 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', + }); + }); }); diff --git a/tests/api-resources/images.test.ts b/tests/api-resources/images.test.ts index 67b608d7e..844ff3768 100644 --- a/tests/api-resources/images.test.ts +++ b/tests/api-resources/images.test.ts @@ -53,6 
+53,7 @@ describe('resource images', () => { mask: await toFile(Buffer.from('# my file contents'), 'README.md'), model: 'string', n: 1, + quality: 'high', response_format: 'url', size: '1024x1024', user: 'user-1234', @@ -73,9 +74,13 @@ describe('resource images', () => { test('generate: required and optional params', async () => { const response = await client.images.generate({ prompt: 'A cute baby sea otter', + background: 'transparent', model: 'string', + moderation: 'low', n: 1, - quality: 'standard', + output_compression: 100, + output_format: 'png', + quality: 'medium', response_format: 'url', size: '1024x1024', style: 'vivid', From 86676d3e8003b34d16006d44441e8625370e6daf Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 23 Apr 2025 17:55:24 +0000 Subject: [PATCH 72/73] chore(ci): run on more branches and use depot runners --- .github/workflows/ci.yml | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6281f037d..0eb4b452a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,20 +1,18 @@ name: CI on: push: - branches: - - main - - update-specs - pull_request: - branches: - - main - - next - - alpha + branches-ignore: + - 'generated' + - 'codegen/**' + - 'integrated/**' + - 'stl-preview-head/**' + - 'stl-preview-base/**' jobs: lint: timeout-minutes: 10 name: lint - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 steps: - uses: actions/checkout@v4 @@ -32,7 +30,7 @@ jobs: build: timeout-minutes: 5 name: build - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 permissions: contents: read id-token: write @@ -67,7 +65,7 @@ jobs: test: timeout-minutes: 10 name: test - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 steps: - uses: actions/checkout@v4 From 7273cd832e3166f2d5a375540201c4cb4dcc6f3b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 23 Apr 2025 19:58:52 +0000 Subject: [PATCH 73/73] chore(ci): only use depot for staging repos --- .github/workflows/ci.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0eb4b452a..16a03488b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -12,7 +12,7 @@ jobs: lint: timeout-minutes: 10 name: lint - runs-on: depot-ubuntu-24.04 + runs-on: ${{ github.repository == 'stainless-sdks/openai-typescript' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} steps: - uses: actions/checkout@v4 @@ -30,7 +30,7 @@ jobs: build: timeout-minutes: 5 name: build - runs-on: depot-ubuntu-24.04 + runs-on: ${{ github.repository == 'stainless-sdks/openai-typescript' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} permissions: contents: read id-token: write @@ -65,7 +65,7 @@ jobs: test: timeout-minutes: 10 name: test - runs-on: depot-ubuntu-24.04 + runs-on: ${{ github.repository == 'stainless-sdks/openai-typescript' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} steps: - uses: actions/checkout@v4 @@ -82,7 +82,7 @@ jobs: examples: name: examples - runs-on: ubuntu-latest + runs-on: ${{ github.repository == 'stainless-sdks/openai-typescript' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} steps: - uses: actions/checkout@v4 @@ -102,7 +102,7 @@ jobs: ecosystem_tests: name: ecosystem tests (v${{ matrix.node-version }}) - runs-on: ubuntu-latest + runs-on: ${{ github.repository == 'stainless-sdks/openai-typescript' && 
'depot-ubuntu-24.04' || 'ubuntu-latest' }} if: github.repository == 'openai/openai-node' timeout-minutes: 20 strategy: