diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
old mode 100644
new mode 100755
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
old mode 100644
new mode 100755
diff --git a/.eslintrc.js b/.eslintrc.js
old mode 100644
new mode 100755
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
old mode 100644
new mode 100755
diff --git a/.github/workflows/publish-npm.yml b/.github/workflows/publish-npm.yml
old mode 100644
new mode 100755
diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml
old mode 100644
new mode 100755
index 0cbe526..75a47c3
--- a/.github/workflows/release-doctor.yml
+++ b/.github/workflows/release-doctor.yml
@@ -1,6 +1,8 @@
name: Release Doctor
on:
pull_request:
+ branches:
+ - main
workflow_dispatch:
jobs:
@@ -17,3 +19,4 @@ jobs:
bash ./bin/check-release-environment
env:
NPM_TOKEN: ${{ secrets.TOGETHER_NPM_TOKEN || secrets.NPM_TOKEN }}
+
diff --git a/.gitignore b/.gitignore
old mode 100644
new mode 100755
diff --git a/.prettierignore b/.prettierignore
old mode 100644
new mode 100755
diff --git a/.prettierrc.json b/.prettierrc.json
old mode 100644
new mode 100755
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
old mode 100644
new mode 100755
index 0369079..d889c08
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.6.0-alpha.4"
+ ".": "0.6.0-alpha.5"
}
diff --git a/.stats.yml b/.stats.yml
old mode 100644
new mode 100755
index 229d59d..1572e80
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
configured_endpoints: 15
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/togetherai%2FTogetherAI-aa19594b663913393bdbc1b56903615e4eb84c6ebc60617ab2f451ede8a730c2.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/togetherai%2FTogetherAI-4d2d6b953f3918f01c022b00942571be9241e37a4e6877444e0353913f21cba0.yml
diff --git a/Brewfile b/Brewfile
old mode 100644
new mode 100755
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5c8b605..3cf50fc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,26 @@
# Changelog
+## 0.6.0-alpha.5 (2024-08-20)
+
+Full Changelog: [v0.6.0-alpha.4...v0.6.0-alpha.5](https://github.com/togethercomputer/together-typescript/compare/v0.6.0-alpha.4...v0.6.0-alpha.5)
+
+### Features
+
+* **api:** manual updates ([#31](https://github.com/togethercomputer/together-typescript/issues/31)) ([52c8005](https://github.com/togethercomputer/together-typescript/commit/52c800544043060963bf09ef10d3596d3855a86e))
+* **api:** OpenAPI spec update via Stainless API ([#28](https://github.com/togethercomputer/together-typescript/issues/28)) ([9544a3f](https://github.com/togethercomputer/together-typescript/commit/9544a3f961b816fd59338db99b5f9b499dbfe832))
+* **api:** OpenAPI spec update via Stainless API ([#36](https://github.com/togethercomputer/together-typescript/issues/36)) ([0154ccf](https://github.com/togethercomputer/together-typescript/commit/0154ccf0758b9ec9117b5a399eade3e51f49c5fd))
+* **api:** OpenAPI spec update via Stainless API ([#39](https://github.com/togethercomputer/together-typescript/issues/39)) ([a141abb](https://github.com/togethercomputer/together-typescript/commit/a141abb2184e99a291eb5ea634fda71fca85f47b))
+
+
+### Chores
+
+* **ci:** bump prism mock server version ([#35](https://github.com/togethercomputer/together-typescript/issues/35)) ([2723c30](https://github.com/togethercomputer/together-typescript/commit/2723c30cd88f69fb77442faeb8a316bf1c5cde88))
+* **ci:** minor changes ([#34](https://github.com/togethercomputer/together-typescript/issues/34)) ([54b4da5](https://github.com/togethercomputer/together-typescript/commit/54b4da54aeec098d154486f09ba52f6e8bc3f715))
+* **examples:** minor formatting changes ([#38](https://github.com/togethercomputer/together-typescript/issues/38)) ([35f5465](https://github.com/togethercomputer/together-typescript/commit/35f546552c433af8d518db0b44f7fcd473c02eab))
+* force eslint to use non flat config ([#33](https://github.com/togethercomputer/together-typescript/issues/33)) ([a327432](https://github.com/togethercomputer/together-typescript/commit/a327432973bc4f245ec272dcd58a7d0a4f117c09))
+* **internal:** codegen related update ([#32](https://github.com/togethercomputer/together-typescript/issues/32)) ([edd1691](https://github.com/togethercomputer/together-typescript/commit/edd16911c877b06679f6f7759486e2ed442c94c5))
+* **internal:** codegen related update ([#37](https://github.com/togethercomputer/together-typescript/issues/37)) ([12be697](https://github.com/togethercomputer/together-typescript/commit/12be697f897f91ce94edd5ec5fca6abe236a5135))
+
## 0.6.0-alpha.4 (2024-07-16)
Full Changelog: [v0.6.0-alpha.3...v0.6.0-alpha.4](https://github.com/togethercomputer/together-typescript/compare/v0.6.0-alpha.3...v0.6.0-alpha.4)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
old mode 100644
new mode 100755
diff --git a/LICENSE b/LICENSE
old mode 100644
new mode 100755
diff --git a/README.md b/README.md
old mode 100644
new mode 100755
index 96fa38b..30171f7
--- a/README.md
+++ b/README.md
@@ -22,12 +22,12 @@ The full API of this library can be found in [api.md](api.md).
```js
import Together from 'together-ai';
-const together = new Together({
+const client = new Together({
apiKey: process.env['TOGETHER_API_KEY'], // This is the default and can be omitted
});
async function main() {
- const chatCompletion = await together.chat.completions.create({
+ const chatCompletion = await client.chat.completions.create({
messages: [{ role: 'user', content: 'Say this is a test!' }],
model: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
});
@@ -45,9 +45,9 @@ We provide support for streaming responses using Server Sent Events (SSE).
```ts
import Together from 'together-ai';
-const together = new Together();
+const client = new Together();
-const stream = await together.chat.completions.create({
+const stream = await client.chat.completions.create({
messages: [{ role: 'user', content: 'Say this is a test' }],
model: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
stream: true,
@@ -68,7 +68,7 @@ This library includes TypeScript definitions for all request params and response
```ts
import Together from 'together-ai';
-const together = new Together({
+const client = new Together({
apiKey: process.env['TOGETHER_API_KEY'], // This is the default and can be omitted
});
@@ -77,7 +77,7 @@ async function main() {
messages: [{ role: 'user', content: 'Say this is a test' }],
model: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
};
- const chatCompletion: Together.Chat.ChatCompletion = await together.chat.completions.create(params);
+ const chatCompletion: Together.Chat.ChatCompletion = await client.chat.completions.create(params);
}
main();
@@ -94,7 +94,7 @@ a subclass of `APIError` will be thrown:
```ts
async function main() {
- const chatCompletion = await together.chat.completions
+ const chatCompletion = await client.chat.completions
.create({
messages: [{ role: 'user', content: 'Say this is a test' }],
model: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
@@ -137,12 +137,12 @@ You can use the `maxRetries` option to configure or disable this:
```js
// Configure the default for all requests:
-const together = new Together({
+const client = new Together({
maxRetries: 0, // default is 2
});
// Or, configure per-request:
-await together.chat.completions.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'mistralai/Mixtral-8x7B-Instruct-v0.1' }, {
+await client.chat.completions.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'mistralai/Mixtral-8x7B-Instruct-v0.1' }, {
maxRetries: 5,
});
```
@@ -154,12 +154,12 @@ Requests time out after 1 minute by default. You can configure this with a `time
```ts
// Configure the default for all requests:
-const together = new Together({
+const client = new Together({
timeout: 20 * 1000, // 20 seconds (default is 1 minute)
});
// Override per-request:
-await together.chat.completions.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'mistralai/Mixtral-8x7B-Instruct-v0.1' }, {
+await client.chat.completions.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'mistralai/Mixtral-8x7B-Instruct-v0.1' }, {
timeout: 5 * 1000,
});
```
@@ -178,9 +178,9 @@ You can also use the `.withResponse()` method to get the raw `Response` along wi
```ts
-const together = new Together();
+const client = new Together();
-const response = await together.chat.completions
+const response = await client.chat.completions
.create({
messages: [{ role: 'user', content: 'Say this is a test' }],
model: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
@@ -189,7 +189,7 @@ const response = await together.chat.completions
console.log(response.headers.get('X-My-Header'));
console.log(response.statusText); // access the underlying Response object
-const { data: chatCompletion, response: raw } = await together.chat.completions
+const { data: chatCompletion, response: raw } = await client.chat.completions
.create({
messages: [{ role: 'user', content: 'Say this is a test' }],
model: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
@@ -295,12 +295,12 @@ import http from 'http';
import { HttpsProxyAgent } from 'https-proxy-agent';
// Configure the default for all requests:
-const together = new Together({
+const client = new Together({
httpAgent: new HttpsProxyAgent(process.env.PROXY_URL),
});
// Override per-request:
-await together.chat.completions.create(
+await client.chat.completions.create(
{
messages: [{ role: 'user', content: 'Say this is a test' }],
model: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
diff --git a/SECURITY.md b/SECURITY.md
old mode 100644
new mode 100755
diff --git a/api.md b/api.md
old mode 100644
new mode 100755
index c9dd638..7fa8be9
--- a/api.md
+++ b/api.md
@@ -5,8 +5,16 @@
Types:
- ChatCompletion
+- ChatCompletionAssistantMessageParam
- ChatCompletionChunk
+- ChatCompletionFunctionMessageParam
+- ChatCompletionMessage
+- ChatCompletionMessageParam
+- ChatCompletionSystemMessageParam
+- ChatCompletionTool
+- ChatCompletionToolMessageParam
- ChatCompletionUsage
+- ChatCompletionUserMessageParam
Methods:
diff --git a/bin/check-release-environment b/bin/check-release-environment
old mode 100644
new mode 100755
index 66692c5..8807123
--- a/bin/check-release-environment
+++ b/bin/check-release-environment
@@ -1,20 +1,9 @@
#!/usr/bin/env bash
-warnings=()
errors=()
if [ -z "${NPM_TOKEN}" ]; then
- warnings+=("The TOGETHER_NPM_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets")
-fi
-
-lenWarnings=${#warnings[@]}
-
-if [[ lenWarnings -gt 0 ]]; then
- echo -e "Found the following warnings in the release environment:\n"
-
- for warning in "${warnings[@]}"; do
- echo -e "- $warning\n"
- done
+ errors+=("The TOGETHER_NPM_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets")
fi
lenErrors=${#errors[@]}
@@ -30,3 +19,4 @@ if [[ lenErrors -gt 0 ]]; then
fi
echo "The environment is ready to push releases!"
+
diff --git a/bin/publish-npm b/bin/publish-npm
old mode 100644
new mode 100755
index 4d6c9f3..4c21181
--- a/bin/publish-npm
+++ b/bin/publish-npm
@@ -2,8 +2,24 @@
set -eux
-npm config set //registry.npmjs.org/:_authToken $NPM_TOKEN
+npm config set '//registry.npmjs.org/:_authToken' "$NPM_TOKEN"
+# Build the project
yarn build
+
+# Navigate to the dist directory
cd dist
-yarn publish --access public
+
+# Get the version from package.json
+VERSION="$(node -p "require('./package.json').version")"
+
+# Extract the pre-release tag if it exists
+if [[ "$VERSION" =~ -([a-zA-Z]+) ]]; then
+ # Extract the part before any dot in the pre-release identifier
+ TAG="${BASH_REMATCH[1]}"
+else
+ TAG="latest"
+fi
+
+# Publish with the appropriate tag
+yarn publish --access public --tag "$TAG"
diff --git a/examples/.keep b/examples/.keep
old mode 100644
new mode 100755
diff --git a/examples/chat-completions.ts b/examples/chat-completions.ts
new file mode 100755
index 0000000..01d435e
--- /dev/null
+++ b/examples/chat-completions.ts
@@ -0,0 +1,24 @@
+#!/usr/bin/env -S npm run tsn -T
+
+import Together from 'together-ai';
+
+const together = new Together();
+async function main() {
+ const runner = together.chat.completions
+ .stream({
+ model: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
+ messages: [{ role: 'user', content: 'Say this is a test' }],
+ })
+ .on('message', (msg) => console.log(msg))
+ .on('content', (diff) => process.stdout.write(diff));
+
+ for await (const chunk of runner) {
+    // Note: uncomment the next line to print chunks as they are streamed from the API
+ // console.log('chunk', chunk);
+ }
+
+ const result = await runner.finalMessage();
+ console.log(result);
+}
+
+main();
diff --git a/examples/embedding.ts b/examples/embedding.ts
index 03d084e..201b457 100755
--- a/examples/embedding.ts
+++ b/examples/embedding.ts
@@ -1,6 +1,6 @@
#!/usr/bin/env -S npm run tsn -T
-import Together from 'together';
+import Together from 'together-ai';
const together = new Together();
diff --git a/examples/files.ts b/examples/files.ts
index ed1b843..23f3ef2 100755
--- a/examples/files.ts
+++ b/examples/files.ts
@@ -2,7 +2,7 @@
// Example of listing and retrieving files
-import Together from 'together';
+import Together from 'together-ai';
const together = new Together();
diff --git a/examples/fine-tune.ts b/examples/fine-tune.ts
index 5c48874..4763e4c 100755
--- a/examples/fine-tune.ts
+++ b/examples/fine-tune.ts
@@ -1,6 +1,6 @@
#!/usr/bin/env -S npm run tsn -T
-import Together from 'together';
+import Together from 'together-ai';
const together = new Together();
diff --git a/examples/image.ts b/examples/image.ts
index 0cef9dd..8ffa91a 100755
--- a/examples/image.ts
+++ b/examples/image.ts
@@ -2,7 +2,7 @@
//An example to generate an image and save to a file
-import Together from 'together';
+import Together from 'together-ai';
import fs from 'fs';
const together = new Together();
diff --git a/examples/models.ts b/examples/models.ts
index be2a54c..56914a4 100755
--- a/examples/models.ts
+++ b/examples/models.ts
@@ -2,7 +2,7 @@
//An example to request a list of models and print them.
-import Together from 'together';
+import Together from 'together-ai';
const together = new Together();
diff --git a/examples/streaming.ts b/examples/streaming.ts
index 2c02e1c..ab408d8 100755
--- a/examples/streaming.ts
+++ b/examples/streaming.ts
@@ -1,6 +1,6 @@
#!/usr/bin/env -S npm run tsn -T
-import Together from 'together';
+import Together from 'together-ai';
const together = new Together();
diff --git a/jest.config.ts b/jest.config.ts
old mode 100644
new mode 100755
diff --git a/package.json b/package.json
old mode 100644
new mode 100755
index f276dad..924e3b5
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "together-ai",
- "version": "0.6.0-alpha.4",
+ "version": "0.6.0-alpha.5",
"description": "The official TypeScript library for the Together API",
"author": "Together ",
"types": "dist/index.d.ts",
@@ -21,7 +21,7 @@
"prepare": "if ./scripts/utils/check-is-in-git-install.sh; then ./scripts/build; fi",
"tsn": "ts-node -r tsconfig-paths/register",
"lint": "./scripts/lint",
- "fix": "eslint --fix --ext ts,js ."
+ "fix": "./scripts/format"
},
"dependencies": {
"@types/node": "^18.11.18",
@@ -30,8 +30,7 @@
"agentkeepalive": "^4.2.1",
"form-data-encoder": "1.7.2",
"formdata-node": "^4.3.2",
- "node-fetch": "^2.6.7",
- "web-streams-polyfill": "^3.2.1"
+ "node-fetch": "^2.6.7"
},
"devDependencies": {
"@swc/core": "^1.3.102",
diff --git a/release-please-config.json b/release-please-config.json
old mode 100644
new mode 100755
diff --git a/scripts/format b/scripts/format
index d297e76..a6bb9d0 100755
--- a/scripts/format
+++ b/scripts/format
@@ -5,4 +5,4 @@ set -e
cd "$(dirname "$0")/.."
echo "==> Running eslint --fix"
-./node_modules/.bin/eslint --fix --ext ts,js .
+ESLINT_USE_FLAT_CONFIG="false" ./node_modules/.bin/eslint --fix --ext ts,js .
diff --git a/scripts/lint b/scripts/lint
index 6b0e5dc..4af1de0 100755
--- a/scripts/lint
+++ b/scripts/lint
@@ -5,4 +5,4 @@ set -e
cd "$(dirname "$0")/.."
echo "==> Running eslint"
-./node_modules/.bin/eslint --ext ts,js .
+ESLINT_USE_FLAT_CONFIG="false" ./node_modules/.bin/eslint --ext ts,js .
diff --git a/scripts/mock b/scripts/mock
index fe89a1d..d2814ae 100755
--- a/scripts/mock
+++ b/scripts/mock
@@ -21,7 +21,7 @@ echo "==> Starting mock server with URL ${URL}"
# Run prism mock on the given spec
if [ "$1" == "--daemon" ]; then
- npm exec --package=@stoplight/prism-cli@~5.8 -- prism mock "$URL" &> .prism.log &
+ npm exec --package=@stainless-api/prism-cli@5.8.5 -- prism mock "$URL" &> .prism.log &
# Wait for server to come online
echo -n "Waiting for server"
@@ -37,5 +37,5 @@ if [ "$1" == "--daemon" ]; then
echo
else
- npm exec --package=@stoplight/prism-cli@~5.8 -- prism mock "$URL"
+ npm exec --package=@stainless-api/prism-cli@5.8.5 -- prism mock "$URL"
fi
diff --git a/scripts/utils/check-version.cjs b/scripts/utils/check-version.cjs
old mode 100644
new mode 100755
diff --git a/scripts/utils/fix-index-exports.cjs b/scripts/utils/fix-index-exports.cjs
old mode 100644
new mode 100755
diff --git a/scripts/utils/make-dist-package-json.cjs b/scripts/utils/make-dist-package-json.cjs
old mode 100644
new mode 100755
diff --git a/scripts/utils/postprocess-files.cjs b/scripts/utils/postprocess-files.cjs
old mode 100644
new mode 100755
diff --git a/src/_shims/MultipartBody.ts b/src/_shims/MultipartBody.ts
old mode 100644
new mode 100755
diff --git a/src/_shims/README.md b/src/_shims/README.md
old mode 100644
new mode 100755
diff --git a/src/_shims/auto/runtime-bun.ts b/src/_shims/auto/runtime-bun.ts
old mode 100644
new mode 100755
diff --git a/src/_shims/auto/runtime-deno.ts b/src/_shims/auto/runtime-deno.ts
old mode 100644
new mode 100755
diff --git a/src/_shims/auto/runtime-node.ts b/src/_shims/auto/runtime-node.ts
old mode 100644
new mode 100755
diff --git a/src/_shims/auto/runtime.ts b/src/_shims/auto/runtime.ts
old mode 100644
new mode 100755
diff --git a/src/_shims/auto/types-deno.ts b/src/_shims/auto/types-deno.ts
old mode 100644
new mode 100755
diff --git a/src/_shims/auto/types-node.ts b/src/_shims/auto/types-node.ts
old mode 100644
new mode 100755
diff --git a/src/_shims/auto/types.d.ts b/src/_shims/auto/types.d.ts
old mode 100644
new mode 100755
diff --git a/src/_shims/auto/types.js b/src/_shims/auto/types.js
old mode 100644
new mode 100755
diff --git a/src/_shims/auto/types.mjs b/src/_shims/auto/types.mjs
old mode 100644
new mode 100755
diff --git a/src/_shims/bun-runtime.ts b/src/_shims/bun-runtime.ts
old mode 100644
new mode 100755
diff --git a/src/_shims/index-deno.ts b/src/_shims/index-deno.ts
old mode 100644
new mode 100755
diff --git a/src/_shims/index.d.ts b/src/_shims/index.d.ts
old mode 100644
new mode 100755
diff --git a/src/_shims/index.js b/src/_shims/index.js
old mode 100644
new mode 100755
diff --git a/src/_shims/index.mjs b/src/_shims/index.mjs
old mode 100644
new mode 100755
diff --git a/src/_shims/manual-types.d.ts b/src/_shims/manual-types.d.ts
old mode 100644
new mode 100755
diff --git a/src/_shims/manual-types.js b/src/_shims/manual-types.js
old mode 100644
new mode 100755
diff --git a/src/_shims/manual-types.mjs b/src/_shims/manual-types.mjs
old mode 100644
new mode 100755
diff --git a/src/_shims/node-runtime.ts b/src/_shims/node-runtime.ts
old mode 100644
new mode 100755
index a9c42eb..ab9f2ab
--- a/src/_shims/node-runtime.ts
+++ b/src/_shims/node-runtime.ts
@@ -13,9 +13,7 @@ import { Readable } from 'node:stream';
import { type RequestOptions } from '../core';
import { MultipartBody } from './MultipartBody';
import { type Shims } from './registry';
-
-// @ts-ignore (this package does not have proper export maps for this export)
-import { ReadableStream } from 'web-streams-polyfill/dist/ponyfill.es2018.js';
+import { ReadableStream } from 'node:stream/web';
type FileFromPathOptions = Omit;
diff --git a/src/_shims/node-types.d.ts b/src/_shims/node-types.d.ts
old mode 100644
new mode 100755
diff --git a/src/_shims/node-types.js b/src/_shims/node-types.js
old mode 100644
new mode 100755
diff --git a/src/_shims/node-types.mjs b/src/_shims/node-types.mjs
old mode 100644
new mode 100755
diff --git a/src/_shims/registry.ts b/src/_shims/registry.ts
old mode 100644
new mode 100755
diff --git a/src/_shims/web-runtime.ts b/src/_shims/web-runtime.ts
old mode 100644
new mode 100755
diff --git a/src/_shims/web-types.d.ts b/src/_shims/web-types.d.ts
old mode 100644
new mode 100755
diff --git a/src/_shims/web-types.js b/src/_shims/web-types.js
old mode 100644
new mode 100755
diff --git a/src/_shims/web-types.mjs b/src/_shims/web-types.mjs
old mode 100644
new mode 100755
diff --git a/src/core.ts b/src/core.ts
old mode 100644
new mode 100755
diff --git a/src/error.ts b/src/error.ts
old mode 100644
new mode 100755
diff --git a/src/index.ts b/src/index.ts
old mode 100644
new mode 100755
index 61e91bc..e8a1316
--- a/src/index.ts
+++ b/src/index.ts
@@ -143,6 +143,7 @@ export class Together extends Core.APIClient {
}
static Together = this;
+ static DEFAULT_TIMEOUT = 60000; // 1 minute
static TogetherError = Errors.TogetherError;
static APIError = Errors.APIError;
diff --git a/src/lib/.keep b/src/lib/.keep
old mode 100644
new mode 100755
diff --git a/src/lib/AbstractChatCompletionRunner.ts b/src/lib/AbstractChatCompletionRunner.ts
new file mode 100644
index 0000000..3eb96ad
--- /dev/null
+++ b/src/lib/AbstractChatCompletionRunner.ts
@@ -0,0 +1,688 @@
+import * as Core from 'together-ai/core';
+import {
+ type Completions,
+ type ChatCompletion,
+ type ChatCompletionMessage,
+ type ChatCompletionMessageParam,
+ type CompletionCreateParams,
+ type ChatCompletionTool,
+} from 'together-ai/resources/chat/completions';
+import { APIUserAbortError, TogetherError } from 'together-ai/error';
+import {
+ type RunnableFunction,
+ isRunnableFunctionWithParse,
+ type BaseFunctionsArgs,
+} from './RunnableFunction';
+import { ChatCompletionFunctionRunnerParams, ChatCompletionToolRunnerParams } from './ChatCompletionRunner';
+import {
+ ChatCompletionStreamingFunctionRunnerParams,
+ ChatCompletionStreamingToolRunnerParams,
+} from './ChatCompletionStreamingRunner';
+import { isAssistantMessage, isFunctionMessage, isToolMessage } from './chatCompletionUtils';
+import { Chat } from 'together-ai/resources';
+import ChatCompletionUsage = Chat.ChatCompletionUsage;
+
+const DEFAULT_MAX_CHAT_COMPLETIONS = 10;
+export interface RunnerOptions extends Core.RequestOptions {
+ /** How many requests to make before canceling. Default 10. */
+ maxChatCompletions?: number;
+}
+
+export abstract class AbstractChatCompletionRunner<
+ Events extends CustomEvents = AbstractChatCompletionRunnerEvents,
+> {
+ controller: AbortController = new AbortController();
+
+ #connectedPromise: Promise;
+ #resolveConnectedPromise: () => void = () => {};
+ #rejectConnectedPromise: (error: TogetherError) => void = () => {};
+
+ #endPromise: Promise;
+ #resolveEndPromise: () => void = () => {};
+ #rejectEndPromise: (error: TogetherError) => void = () => {};
+
+ #listeners: { [Event in keyof Events]?: ListenersForEvent } = {};
+
+ protected _chatCompletions: ChatCompletion[] = [];
+ messages: ChatCompletionMessageParam[] = [];
+
+ #ended = false;
+ #errored = false;
+ #aborted = false;
+ #catchingPromiseCreated = false;
+
+ constructor() {
+ this.#connectedPromise = new Promise((resolve, reject) => {
+ this.#resolveConnectedPromise = resolve;
+ this.#rejectConnectedPromise = reject;
+ });
+
+ this.#endPromise = new Promise((resolve, reject) => {
+ this.#resolveEndPromise = resolve;
+ this.#rejectEndPromise = reject;
+ });
+
+ // Don't let these promises cause unhandled rejection errors.
+ // we will manually cause an unhandled rejection error later
+ // if the user hasn't registered any error listener or called
+ // any promise-returning method.
+ this.#connectedPromise.catch(() => {});
+ this.#endPromise.catch(() => {});
+ }
+
+ protected _run(executor: () => Promise) {
+ // Unfortunately if we call `executor()` immediately we get runtime errors about
+ // references to `this` before the `super()` constructor call returns.
+ setTimeout(() => {
+ executor().then(() => {
+ this._emitFinal();
+ this._emit('end');
+ }, this.#handleError);
+ }, 0);
+ }
+
+ protected _addChatCompletion(chatCompletion: ChatCompletion): ChatCompletion {
+ this._chatCompletions.push(chatCompletion);
+ this._emit('chatCompletion', chatCompletion);
+ const message = chatCompletion.choices[0]?.message;
+ if (message) this._addMessage(message as ChatCompletionMessageParam);
+ return chatCompletion;
+ }
+
+ protected _addMessage(message: ChatCompletionMessageParam, emit = true) {
+ if (!('content' in message)) message.content = null;
+
+ this.messages.push(message);
+
+ if (emit) {
+ this._emit('message', message);
+ if ((isFunctionMessage(message) || isToolMessage(message)) && message.content) {
+        // Note: this assumes that {role: 'tool', content: …} is always the result of a tool call of type=function.
+ this._emit('functionCallResult', message.content as string);
+ } else if (isAssistantMessage(message) && message.function_call) {
+ this._emit('functionCall', message.function_call);
+ } else if (isAssistantMessage(message) && message.tool_calls) {
+ for (const tool_call of message.tool_calls) {
+ if (tool_call.type === 'function') {
+ this._emit('functionCall', tool_call.function);
+ }
+ }
+ }
+ }
+ }
+
+ protected _connected() {
+ if (this.ended) return;
+ this.#resolveConnectedPromise();
+ this._emit('connect');
+ }
+
+ get ended(): boolean {
+ return this.#ended;
+ }
+
+ get errored(): boolean {
+ return this.#errored;
+ }
+
+ get aborted(): boolean {
+ return this.#aborted;
+ }
+
+ abort() {
+ this.controller.abort();
+ }
+
+ /**
+ * Adds the listener function to the end of the listeners array for the event.
+ * No checks are made to see if the listener has already been added. Multiple calls passing
+ * the same combination of event and listener will result in the listener being added, and
+ * called, multiple times.
+ * @returns this ChatCompletionStream, so that calls can be chained
+ */
+ on(event: Event, listener: ListenerForEvent): this {
+ const listeners: ListenersForEvent =
+ this.#listeners[event] || (this.#listeners[event] = []);
+ listeners.push({ listener });
+ return this;
+ }
+
+ /**
+ * Removes the specified listener from the listener array for the event.
+ * off() will remove, at most, one instance of a listener from the listener array. If any single
+ * listener has been added multiple times to the listener array for the specified event, then
+ * off() must be called multiple times to remove each instance.
+ * @returns this ChatCompletionStream, so that calls can be chained
+ */
+ off(event: Event, listener: ListenerForEvent): this {
+ const listeners = this.#listeners[event];
+ if (!listeners) return this;
+ const index = listeners.findIndex((l) => l.listener === listener);
+ if (index >= 0) listeners.splice(index, 1);
+ return this;
+ }
+
+ /**
+ * Adds a one-time listener function for the event. The next time the event is triggered,
+ * this listener is removed and then invoked.
+ * @returns this ChatCompletionStream, so that calls can be chained
+ */
+ once(event: Event, listener: ListenerForEvent): this {
+ const listeners: ListenersForEvent =
+ this.#listeners[event] || (this.#listeners[event] = []);
+ listeners.push({ listener, once: true });
+ return this;
+ }
+
+ /**
+ * This is similar to `.once()`, but returns a Promise that resolves the next time
+ * the event is triggered, instead of calling a listener callback.
+ * @returns a Promise that resolves the next time given event is triggered,
+ * or rejects if an error is emitted. (If you request the 'error' event,
+ * returns a promise that resolves with the error).
+ *
+ * Example:
+ *
+ * const message = await stream.emitted('message') // rejects if the stream errors
+ */
+ emitted(
+ event: Event,
+ ): Promise<
+ EventParameters extends [infer Param] ? Param
+ : EventParameters extends [] ? void
+ : EventParameters
+ > {
+ return new Promise((resolve, reject) => {
+ this.#catchingPromiseCreated = true;
+ if (event !== 'error') this.once('error', reject);
+ this.once(event, resolve as any);
+ });
+ }
+
+ async done(): Promise {
+ this.#catchingPromiseCreated = true;
+ await this.#endPromise;
+ }
+
+ /**
+ * @returns a promise that resolves with the final ChatCompletion, or rejects
+ * if an error occurred or the stream ended prematurely without producing a ChatCompletion.
+ */
+ async finalChatCompletion(): Promise {
+ await this.done();
+ const completion = this._chatCompletions[this._chatCompletions.length - 1];
+ if (!completion) throw new TogetherError('stream ended without producing a ChatCompletion');
+ return completion;
+ }
+
+ #getFinalContent(): string | null {
+ return this.#getFinalMessage().content ?? null;
+ }
+
+ /**
+ * @returns a promise that resolves with the content of the final ChatCompletionMessage, or rejects
+ * if an error occurred or the stream ended prematurely without producing a ChatCompletionMessage.
+ */
+ async finalContent(): Promise {
+ await this.done();
+ return this.#getFinalContent();
+ }
+
+ #getFinalMessage(): ChatCompletionMessage {
+ let i = this.messages.length;
+ while (i-- > 0) {
+ const message = this.messages[i];
+ if (isAssistantMessage(message)) {
+ return { ...message, content: message.content ?? null };
+ }
+ }
+ throw new TogetherError('stream ended without producing a ChatCompletionMessage with role=assistant');
+ }
+
+ /**
+   * @returns a promise that resolves with the final assistant ChatCompletionMessage response,
+ * or rejects if an error occurred or the stream ended prematurely without producing a ChatCompletionMessage.
+ */
+ async finalMessage(): Promise {
+ await this.done();
+ return this.#getFinalMessage();
+ }
+
+ #getFinalFunctionCall(): ChatCompletionMessage.FunctionCall | undefined {
+ for (let i = this.messages.length - 1; i >= 0; i--) {
+ const message = this.messages[i];
+ if (isAssistantMessage(message) && message?.function_call) {
+ return message.function_call;
+ }
+ if (isAssistantMessage(message) && message?.tool_calls?.length) {
+ return message.tool_calls.at(-1)?.function;
+ }
+ }
+
+ return;
+ }
+
+ /**
+ * @returns a promise that resolves with the content of the final FunctionCall, or rejects
+ * if an error occurred or the stream ended prematurely without producing a ChatCompletionMessage.
+ */
+ async finalFunctionCall(): Promise {
+ await this.done();
+ return this.#getFinalFunctionCall();
+ }
+
+ #getFinalFunctionCallResult(): string | undefined {
+ for (let i = this.messages.length - 1; i >= 0; i--) {
+ const message = this.messages[i];
+ if (isFunctionMessage(message) && message.content != null) {
+ return message.content;
+ }
+ if (
+ isToolMessage(message) &&
+ message.content != null &&
+ this.messages.some(
+ (x) =>
+ x.role === 'assistant' &&
+ x.tool_calls?.some((y) => y.type === 'function' && y.id === message.tool_call_id),
+ )
+ ) {
+ return message.content;
+ }
+ }
+
+ return;
+ }
+
+ async finalFunctionCallResult(): Promise {
+ await this.done();
+ return this.#getFinalFunctionCallResult();
+ }
+
+ #calculateTotalUsage(): ChatCompletionUsage {
+ const total: ChatCompletionUsage = {
+ completion_tokens: 0,
+ prompt_tokens: 0,
+ total_tokens: 0,
+ };
+ for (const { usage } of this._chatCompletions) {
+ if (usage) {
+ total.completion_tokens += usage.completion_tokens;
+ total.prompt_tokens += usage.prompt_tokens;
+ total.total_tokens += usage.total_tokens;
+ }
+ }
+ return total;
+ }
+
+ async totalUsage(): Promise {
+ await this.done();
+ return this.#calculateTotalUsage();
+ }
+
+ allChatCompletions(): ChatCompletion[] {
+ return [...this._chatCompletions];
+ }
+
+ #handleError = (error: unknown) => {
+ this.#errored = true;
+ if (error instanceof Error && error.name === 'AbortError') {
+ error = new APIUserAbortError();
+ }
+ if (error instanceof APIUserAbortError) {
+ this.#aborted = true;
+ return this._emit('abort', error);
+ }
+ if (error instanceof TogetherError) {
+ return this._emit('error', error);
+ }
+ if (error instanceof Error) {
+ // @ts-ignore
+ const TogetherError: TogetherError = new TogetherError(error.message);
+ // @ts-ignore
+ TogetherError.cause = error;
+ return this._emit('error', TogetherError);
+ }
+ return this._emit('error', new TogetherError(String(error)));
+ };
+
+ protected _emit(event: Event, ...args: EventParameters) {
+ // make sure we don't emit any events after end
+ if (this.#ended) {
+ return;
+ }
+
+ if (event === 'end') {
+ this.#ended = true;
+ this.#resolveEndPromise();
+ }
+
+ const listeners: ListenersForEvent | undefined = this.#listeners[event];
+ if (listeners) {
+ this.#listeners[event] = listeners.filter((l) => !l.once) as any;
+ listeners.forEach(({ listener }: any) => listener(...args));
+ }
+
+ if (event === 'abort') {
+ const error = args[0] as APIUserAbortError;
+ if (!this.#catchingPromiseCreated && !listeners?.length) {
+ Promise.reject(error);
+ }
+ this.#rejectConnectedPromise(error);
+ this.#rejectEndPromise(error);
+ this._emit('end');
+ return;
+ }
+
+ if (event === 'error') {
+ // NOTE: _emit('error', error) should only be called from #handleError().
+
+ const error = args[0] as TogetherError;
+ if (!this.#catchingPromiseCreated && !listeners?.length) {
+ // Trigger an unhandled rejection if the user hasn't registered any error handlers.
+ // If you are seeing stack traces here, make sure to handle errors via either:
+ // - runner.on('error', () => ...)
+ // - await runner.done()
+ // - await runner.finalChatCompletion()
+ // - etc.
+ Promise.reject(error);
+ }
+ this.#rejectConnectedPromise(error);
+ this.#rejectEndPromise(error);
+ this._emit('end');
+ }
+ }
+
+ protected _emitFinal() {
+ const completion = this._chatCompletions[this._chatCompletions.length - 1];
+ if (completion) this._emit('finalChatCompletion', completion);
+ const finalMessage = this.#getFinalMessage();
+ if (finalMessage) this._emit('finalMessage', finalMessage);
+ const finalContent = this.#getFinalContent();
+ if (finalContent) this._emit('finalContent', finalContent);
+
+ const finalFunctionCall = this.#getFinalFunctionCall();
+ if (finalFunctionCall) this._emit('finalFunctionCall', finalFunctionCall);
+
+ const finalFunctionCallResult = this.#getFinalFunctionCallResult();
+ if (finalFunctionCallResult != null) this._emit('finalFunctionCallResult', finalFunctionCallResult);
+
+ if (this._chatCompletions.some((c) => c.usage)) {
+ this._emit('totalUsage', this.#calculateTotalUsage());
+ }
+ }
+
+ #validateParams(params: CompletionCreateParams): void {
+ if (params.n != null && params.n > 1) {
+ throw new TogetherError(
+ 'ChatCompletion convenience helpers only support n=1 at this time. To use n>1, please use chat.completions.create() directly.',
+ );
+ }
+ }
+
+ protected async _createChatCompletion(
+ completions: Completions,
+ params: CompletionCreateParams,
+ options?: Core.RequestOptions,
+ ): Promise {
+ const signal = options?.signal;
+ if (signal) {
+ if (signal.aborted) this.controller.abort();
+ signal.addEventListener('abort', () => this.controller.abort());
+ }
+ this.#validateParams(params);
+
+ const chatCompletion = await completions.create(
+ { ...params, stream: false },
+ { ...options, signal: this.controller.signal },
+ );
+ this._connected();
+ return this._addChatCompletion(chatCompletion);
+ }
+
+ protected async _runChatCompletion(
+ completions: Completions,
+ params: CompletionCreateParams,
+ options?: Core.RequestOptions,
+ ): Promise {
+ for (const message of params.messages) {
+ this._addMessage(message, false);
+ }
+ return await this._createChatCompletion(completions, params, options);
+ }
+
+ protected async _runFunctions(
+ completions: Completions,
+ params:
+ | ChatCompletionFunctionRunnerParams
+ | ChatCompletionStreamingFunctionRunnerParams,
+ options?: RunnerOptions,
+ ) {
+ const role = 'function' as const;
+ const { function_call = 'auto', stream, ...restParams } = params;
+ const singleFunctionToCall = typeof function_call !== 'string' && function_call?.name;
+ const { maxChatCompletions = DEFAULT_MAX_CHAT_COMPLETIONS } = options || {};
+
+ const functionsByName: Record> = {};
+ for (const f of params.functions) {
+ functionsByName[f.name || f.function.name] = f;
+ }
+
+ // @ts-ignore
+ const functions: CompletionCreateParams.Function[] = params.functions.map(
+ // @ts-ignore
+ (f): CompletionCreateParams.Function => ({
+ name: f.name || f.function.name,
+ parameters: f.parameters as Record,
+ description: f.description,
+ }),
+ );
+
+ for (const message of params.messages) {
+ this._addMessage(message, false);
+ }
+
+ for (let i = 0; i < maxChatCompletions; ++i) {
+ const chatCompletion: ChatCompletion = await this._createChatCompletion(
+ completions,
+ {
+ ...restParams,
+ function_call,
+ functions,
+ // @ts-ignore
+ messages: [...this.messages],
+ },
+ options,
+ );
+ const message = chatCompletion.choices[0]?.message;
+ if (!message) {
+ throw new TogetherError(`missing message in ChatCompletion response`);
+ }
+ if (!message.function_call) return;
+ const { name, arguments: args } = message.function_call;
+ const fn = functionsByName[name];
+ if (!fn) {
+ const content = `Invalid function_call: ${JSON.stringify(name)}. Available options are: ${functions
+ .map((f) => JSON.stringify(f.name))
+ .join(', ')}. Please try again`;
+
+ this._addMessage({ role, name, content });
+ continue;
+ } else if (singleFunctionToCall && singleFunctionToCall !== name) {
+ const content = `Invalid function_call: ${JSON.stringify(name)}. ${JSON.stringify(
+ singleFunctionToCall,
+ )} requested. Please try again`;
+
+ this._addMessage({ role, name, content });
+ continue;
+ }
+
+ let parsed;
+ try {
+ parsed = isRunnableFunctionWithParse(fn) ? await fn.parse(args) : args;
+ } catch (error) {
+ this._addMessage({
+ role,
+ name,
+ content: error instanceof Error ? error.message : String(error),
+ });
+ continue;
+ }
+
+ // @ts-expect-error it can't rule out `never` type.
+ const rawContent = await fn.function(parsed, this);
+ const content = this.#stringifyFunctionCallResult(rawContent);
+
+ this._addMessage({ role, name, content });
+
+ if (singleFunctionToCall) return;
+ }
+ }
+
+ protected async _runTools(
+ completions: Completions,
+ params:
+ | ChatCompletionToolRunnerParams
+ | ChatCompletionStreamingToolRunnerParams,
+ options?: RunnerOptions,
+ ) {
+ const role = 'tool' as const;
+ const { tool_choice = 'auto', stream, ...restParams } = params;
+ const singleFunctionToCall = typeof tool_choice !== 'string' && tool_choice?.function?.name;
+ const { maxChatCompletions = DEFAULT_MAX_CHAT_COMPLETIONS } = options || {};
+
+ const functionsByName: Record> = {};
+ for (const f of params.tools) {
+ if (f.type === 'function') {
+ functionsByName[f.function.name || f.function.function.name] = f.function;
+ }
+ }
+
+ const tools: ChatCompletionTool[] =
+ 'tools' in params ?
+ params.tools.map((t) =>
+ t.type === 'function' ?
+ {
+ type: 'function',
+ function: {
+ name: t.function.name || t.function.function.name,
+ parameters: t.function.parameters as Record,
+ description: t.function.description,
+ },
+ }
+ : (t as unknown as ChatCompletionTool),
+ )
+ : (undefined as any);
+
+ for (const message of params.messages) {
+ this._addMessage(message, false);
+ }
+
+ for (let i = 0; i < maxChatCompletions; ++i) {
+ const chatCompletion: ChatCompletion = await this._createChatCompletion(
+ completions,
+ {
+ ...restParams,
+ tool_choice,
+ tools,
+ // @ts-ignore
+ messages: [...this.messages],
+ },
+ options,
+ );
+ const message = chatCompletion.choices[0]?.message;
+ if (!message) {
+ throw new TogetherError(`missing message in ChatCompletion response`);
+ }
+ if (!message.tool_calls) {
+ return;
+ }
+
+ for (const tool_call of message.tool_calls) {
+ if (tool_call.type !== 'function') continue;
+ const tool_call_id = tool_call.id;
+ const { name, arguments: args } = tool_call.function;
+ const fn = functionsByName[name];
+
+ if (!fn) {
+ const content = `Invalid tool_call: ${JSON.stringify(name)}. Available options are: ${tools
+ .map((f) => JSON.stringify(f.function.name))
+ .join(', ')}. Please try again`;
+
+ this._addMessage({ role, tool_call_id, content });
+ continue;
+ } else if (singleFunctionToCall && singleFunctionToCall !== name) {
+ const content = `Invalid tool_call: ${JSON.stringify(name)}. ${JSON.stringify(
+ singleFunctionToCall,
+ )} requested. Please try again`;
+
+ this._addMessage({ role, tool_call_id, content });
+ continue;
+ }
+
+ let parsed;
+ try {
+ parsed = isRunnableFunctionWithParse(fn) ? await fn.parse(args) : args;
+ } catch (error) {
+ const content = error instanceof Error ? error.message : String(error);
+ this._addMessage({ role, tool_call_id, content });
+ continue;
+ }
+
+ // @ts-expect-error it can't rule out `never` type.
+ const rawContent = await fn.function(parsed, this);
+ const content = this.#stringifyFunctionCallResult(rawContent);
+ this._addMessage({ role, tool_call_id, content });
+
+ if (singleFunctionToCall) {
+ return;
+ }
+ }
+ }
+
+ return;
+ }
+
+ #stringifyFunctionCallResult(rawContent: unknown): string {
+ return (
+ typeof rawContent === 'string' ? rawContent
+ : rawContent === undefined ? 'undefined'
+ : JSON.stringify(rawContent)
+ );
+ }
+}
+
+type CustomEvents = {
+ [k in Event]: k extends keyof AbstractChatCompletionRunnerEvents ? AbstractChatCompletionRunnerEvents[k]
+ : (...args: any[]) => void;
+};
+
+type ListenerForEvent, Event extends keyof Events> = Event extends (
+ keyof AbstractChatCompletionRunnerEvents
+) ?
+ AbstractChatCompletionRunnerEvents[Event]
+: Events[Event];
+
+type ListenersForEvent, Event extends keyof Events> = Array<{
+ listener: ListenerForEvent;
+ once?: boolean;
+}>;
+type EventParameters, Event extends keyof Events> = Parameters<
+ ListenerForEvent
+>;
+
+export interface AbstractChatCompletionRunnerEvents {
+ connect: () => void;
+ functionCall: (functionCall: ChatCompletionMessage.FunctionCall) => void;
+ message: (message: ChatCompletionMessageParam) => void;
+ chatCompletion: (completion: ChatCompletion) => void;
+ finalContent: (contentSnapshot: string) => void;
+ finalMessage: (message: ChatCompletionMessageParam) => void;
+ finalChatCompletion: (completion: ChatCompletion) => void;
+ finalFunctionCall: (functionCall: ChatCompletionMessage.FunctionCall) => void;
+ functionCallResult: (content: string) => void;
+ finalFunctionCallResult: (content: string) => void;
+ error: (error: TogetherError) => void;
+ abort: (error: APIUserAbortError) => void;
+ end: () => void;
+ totalUsage: (usage: ChatCompletionUsage) => void;
+}
diff --git a/src/lib/ChatCompletionRunner.ts b/src/lib/ChatCompletionRunner.ts
new file mode 100644
index 0000000..516cac9
--- /dev/null
+++ b/src/lib/ChatCompletionRunner.ts
@@ -0,0 +1,68 @@
+import {
+ type Completions,
+ type ChatCompletionMessageParam,
+ type CompletionCreateParamsNonStreaming,
+} from 'together-ai/resources/chat/completions';
+import { type RunnableFunctions, type BaseFunctionsArgs, RunnableTools } from './RunnableFunction';
+import {
+ AbstractChatCompletionRunner,
+ AbstractChatCompletionRunnerEvents,
+ RunnerOptions,
+} from './AbstractChatCompletionRunner';
+import { isAssistantMessage } from './chatCompletionUtils';
+
+export interface ChatCompletionRunnerEvents extends AbstractChatCompletionRunnerEvents {
+ content: (content: string) => void;
+}
+
+export type ChatCompletionFunctionRunnerParams = Omit<
+ CompletionCreateParamsNonStreaming,
+ 'functions'
+> & {
+ functions: RunnableFunctions;
+};
+
+export type ChatCompletionToolRunnerParams = Omit<
+ CompletionCreateParamsNonStreaming,
+ 'tools'
+> & {
+ tools: RunnableTools;
+};
+
+export class ChatCompletionRunner extends AbstractChatCompletionRunner {
+ /** @deprecated - please use `runTools` instead. */
+ static runFunctions(
+ completions: Completions,
+ params: ChatCompletionFunctionRunnerParams,
+ options?: RunnerOptions,
+ ): ChatCompletionRunner {
+ const runner = new ChatCompletionRunner();
+ const opts = {
+ ...options,
+ headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'runFunctions' },
+ };
+ runner._run(() => runner._runFunctions(completions, params, opts));
+ return runner;
+ }
+
+ static runTools(
+ completions: Completions,
+ params: ChatCompletionToolRunnerParams,
+ options?: RunnerOptions,
+ ): ChatCompletionRunner {
+ const runner = new ChatCompletionRunner();
+ const opts = {
+ ...options,
+ headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'runTools' },
+ };
+ runner._run(() => runner._runTools(completions, params, opts));
+ return runner;
+ }
+
+ override _addMessage(message: ChatCompletionMessageParam) {
+ super._addMessage(message);
+ if (isAssistantMessage(message) && message.content) {
+ this._emit('content', message.content as string);
+ }
+ }
+}
diff --git a/src/lib/ChatCompletionStream.ts b/src/lib/ChatCompletionStream.ts
new file mode 100644
index 0000000..7f580c6
--- /dev/null
+++ b/src/lib/ChatCompletionStream.ts
@@ -0,0 +1,498 @@
+import * as Core from 'together-ai/core';
+import { TogetherError, APIUserAbortError } from 'together-ai/error';
+import {
+ Completions,
+ type ChatCompletion,
+ type ChatCompletionChunk,
+ type CompletionCreateParams,
+ type CompletionCreateParamsBase,
+} from 'together-ai/resources/chat/completions';
+import {
+ AbstractChatCompletionRunner,
+ type AbstractChatCompletionRunnerEvents,
+} from './AbstractChatCompletionRunner';
+import { type ReadableStream } from 'together-ai/_shims/index';
+import { Stream } from 'together-ai/streaming';
+import { LogProbs } from 'together-ai/resources';
+
+export interface ChatCompletionStreamEvents extends AbstractChatCompletionRunnerEvents {
+ content: (contentDelta: string, contentSnapshot: string) => void;
+ chunk: (chunk: ChatCompletionChunk, snapshot: ChatCompletionSnapshot) => void;
+}
+
+export type ChatCompletionStreamParams = Omit & {
+ stream?: true;
+};
+
+export class ChatCompletionStream
+ extends AbstractChatCompletionRunner
+ implements AsyncIterable
+{
+ #currentChatCompletionSnapshot: ChatCompletionSnapshot | undefined;
+
+ get currentChatCompletionSnapshot(): ChatCompletionSnapshot | undefined {
+ return this.#currentChatCompletionSnapshot;
+ }
+
+ /**
+ * Intended for use on the frontend, consuming a stream produced with
+ * `.toReadableStream()` on the backend.
+ *
+ * Note that messages sent to the model do not appear in `.on('message')`
+ * in this context.
+ */
+ static fromReadableStream(stream: ReadableStream): ChatCompletionStream {
+ const runner = new ChatCompletionStream();
+ runner._run(() => runner._fromReadableStream(stream));
+ return runner;
+ }
+
+ static createChatCompletion(
+ completions: Completions,
+ params: ChatCompletionStreamParams,
+ options?: Core.RequestOptions,
+ ): ChatCompletionStream {
+ const runner = new ChatCompletionStream();
+ runner._run(() =>
+ runner._runChatCompletion(
+ completions,
+ { ...params, stream: true },
+ { ...options, headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' } },
+ ),
+ );
+ return runner;
+ }
+
+ #beginRequest() {
+ if (this.ended) return;
+ this.#currentChatCompletionSnapshot = undefined;
+ }
+ #addChunk(chunk: ChatCompletionChunk) {
+ if (this.ended) return;
+ const completion = this.#accumulateChatCompletion(chunk);
+ this._emit('chunk', chunk, completion);
+ const delta = chunk.choices[0]?.delta?.content;
+ const snapshot = completion.choices[0]?.message;
+ if (delta != null && snapshot?.role === 'assistant' && snapshot?.content) {
+ this._emit('content', delta, snapshot.content);
+ }
+ }
+ #endRequest(): ChatCompletion {
+ if (this.ended) {
+ throw new TogetherError(`stream has ended, this shouldn't happen`);
+ }
+ const snapshot = this.#currentChatCompletionSnapshot;
+ if (!snapshot) {
+ throw new TogetherError(`request ended without sending any chunks`);
+ }
+ this.#currentChatCompletionSnapshot = undefined;
+ return finalizeChatCompletion(snapshot);
+ }
+
+ protected override async _createChatCompletion(
+ completions: Completions,
+ params: CompletionCreateParams,
+ options?: Core.RequestOptions,
+ ): Promise {
+ const signal = options?.signal;
+ if (signal) {
+ if (signal.aborted) this.controller.abort();
+ signal.addEventListener('abort', () => this.controller.abort());
+ }
+ this.#beginRequest();
+ const stream = await completions.create(
+ { ...params, stream: true },
+ { ...options, signal: this.controller.signal },
+ );
+ this._connected();
+ for await (const chunk of stream) {
+ this.#addChunk(chunk);
+ }
+ if (stream.controller.signal?.aborted) {
+ throw new APIUserAbortError();
+ }
+ return this._addChatCompletion(this.#endRequest());
+ }
+
+ protected async _fromReadableStream(
+ readableStream: ReadableStream,
+ options?: Core.RequestOptions,
+ ): Promise {
+ const signal = options?.signal;
+ if (signal) {
+ if (signal.aborted) this.controller.abort();
+ signal.addEventListener('abort', () => this.controller.abort());
+ }
+ this.#beginRequest();
+ this._connected();
+ const stream = Stream.fromReadableStream(readableStream, this.controller);
+ let chatId;
+ for await (const chunk of stream) {
+ if (chatId && chatId !== chunk.id) {
+ // A new request has been made.
+ this._addChatCompletion(this.#endRequest());
+ }
+
+ this.#addChunk(chunk);
+ chatId = chunk.id;
+ }
+ if (stream.controller.signal?.aborted) {
+ throw new APIUserAbortError();
+ }
+ return this._addChatCompletion(this.#endRequest());
+ }
+
+ #accumulateChatCompletion(chunk: ChatCompletionChunk): ChatCompletionSnapshot {
+ let snapshot = this.#currentChatCompletionSnapshot;
+ const { choices, ...rest } = chunk;
+ if (!snapshot) {
+ snapshot = this.#currentChatCompletionSnapshot = {
+ ...rest,
+ choices: [],
+ };
+ } else {
+ Object.assign(snapshot, rest);
+ }
+
+ for (const { delta, finish_reason, index, logprobs = null, ...other } of chunk.choices) {
+ let choice = snapshot.choices[index];
+ if (!choice) {
+ choice = snapshot.choices[index] = { finish_reason, index, message: {}, logprobs, ...other };
+ }
+
+ if (logprobs) {
+ if (!choice.logprobs) {
+ choice.logprobs = Object.assign({}, logprobs);
+ } else {
+ const { content, ...rest } = logprobs;
+ Object.assign(choice.logprobs, rest);
+ if (content) {
+ choice.logprobs.content ??= [];
+ choice.logprobs.content.push(...content);
+ }
+ }
+ }
+
+ if (finish_reason) choice.finish_reason = finish_reason;
+ Object.assign(choice, other);
+
+ if (!delta) continue; // Shouldn't happen; just in case.
+ const { content, function_call, role, tool_calls, ...rest } = delta;
+ Object.assign(choice.message, rest);
+
+ if (content) choice.message.content = (choice.message.content || '') + content;
+ if (role) choice.message.role = role;
+ if (function_call) {
+ if (!choice.message.function_call) {
+ choice.message.function_call = function_call;
+ } else {
+ if (function_call.name) choice.message.function_call.name = function_call.name;
+ if (function_call.arguments) {
+ choice.message.function_call.arguments ??= '';
+ choice.message.function_call.arguments += function_call.arguments;
+ }
+ }
+ }
+ if (tool_calls) {
+ if (!choice.message.tool_calls) choice.message.tool_calls = [];
+ for (const { index, id, type, function: fn, ...rest } of tool_calls) {
+ const tool_call = (choice.message.tool_calls[index] ??= {});
+ Object.assign(tool_call, rest);
+ if (id) tool_call.id = id;
+ if (type) tool_call.type = type;
+ if (fn) tool_call.function ??= { arguments: '' };
+ if (fn?.name) tool_call.function!.name = fn.name;
+ if (fn?.arguments) tool_call.function!.arguments += fn.arguments;
+ }
+ }
+ }
+ return snapshot;
+ }
+
+ [Symbol.asyncIterator](): AsyncIterator {
+ const pushQueue: ChatCompletionChunk[] = [];
+ const readQueue: {
+ resolve: (chunk: ChatCompletionChunk | undefined) => void;
+ reject: (err: unknown) => void;
+ }[] = [];
+ let done = false;
+
+ this.on('chunk', (chunk) => {
+ const reader = readQueue.shift();
+ if (reader) {
+ reader.resolve(chunk);
+ } else {
+ pushQueue.push(chunk);
+ }
+ });
+
+ this.on('end', () => {
+ done = true;
+ for (const reader of readQueue) {
+ reader.resolve(undefined);
+ }
+ readQueue.length = 0;
+ });
+
+ this.on('abort', (err) => {
+ done = true;
+ for (const reader of readQueue) {
+ reader.reject(err);
+ }
+ readQueue.length = 0;
+ });
+
+ this.on('error', (err) => {
+ done = true;
+ for (const reader of readQueue) {
+ reader.reject(err);
+ }
+ readQueue.length = 0;
+ });
+
+ return {
+ next: async (): Promise> => {
+ if (!pushQueue.length) {
+ if (done) {
+ return { value: undefined, done: true };
+ }
+ return new Promise((resolve, reject) =>
+ readQueue.push({ resolve, reject }),
+ ).then((chunk) => (chunk ? { value: chunk, done: false } : { value: undefined, done: true }));
+ }
+ const chunk = pushQueue.shift()!;
+ return { value: chunk, done: false };
+ },
+ return: async () => {
+ this.abort();
+ return { value: undefined, done: true };
+ },
+ };
+ }
+
+ toReadableStream(): ReadableStream {
+ const stream = new Stream(this[Symbol.asyncIterator].bind(this), this.controller);
+ return stream.toReadableStream();
+ }
+}
+
+function finalizeChatCompletion(snapshot: ChatCompletionSnapshot): ChatCompletion {
+ const { id, choices, created, model, system_fingerprint, ...rest } = snapshot;
+ return {
+ ...rest,
+ id,
+ choices: choices.map(
+ ({ message, finish_reason, index, logprobs, ...choiceRest }): ChatCompletion.Choice => {
+ if (!finish_reason) throw new TogetherError(`missing finish_reason for choice ${index}`);
+ const { content = null, function_call, tool_calls, ...messageRest } = message;
+ const role = message.role as 'assistant'; // this is what we expect; in theory it could be different which would make our types a slight lie but would be fine.
+ if (!role) throw new TogetherError(`missing role for choice ${index}`);
+ if (function_call) {
+ const { arguments: args, name } = function_call;
+ if (args == null) throw new TogetherError(`missing function_call.arguments for choice ${index}`);
+ if (!name) throw new TogetherError(`missing function_call.name for choice ${index}`);
+ return {
+ ...choiceRest,
+ message: { content, function_call: { arguments: args, name }, role },
+ finish_reason,
+ index,
+ logprobs,
+ };
+ }
+ if (tool_calls) {
+ return {
+ ...choiceRest,
+ index,
+ finish_reason,
+ logprobs,
+ message: {
+ ...messageRest,
+ role,
+ content,
+ // @ts-ignore
+ tool_calls: tool_calls.map((tool_call, i) => {
+ const { function: fn, type, id, ...toolRest } = tool_call;
+ const { arguments: args, name, ...fnRest } = fn || {};
+ if (id == null)
+ throw new TogetherError(`missing choices[${index}].tool_calls[${i}].id\n${str(snapshot)}`);
+ if (type == null)
+ throw new TogetherError(
+ `missing choices[${index}].tool_calls[${i}].type\n${str(snapshot)}`,
+ );
+ if (name == null)
+ throw new TogetherError(
+ `missing choices[${index}].tool_calls[${i}].function.name\n${str(snapshot)}`,
+ );
+ if (args == null)
+ throw new TogetherError(
+ `missing choices[${index}].tool_calls[${i}].function.arguments\n${str(snapshot)}`,
+ );
+
+ return { ...toolRest, id, type, function: { ...fnRest, name, arguments: args } };
+ }),
+ },
+ };
+ }
+ return {
+ ...choiceRest,
+ message: { ...messageRest, content, role },
+ finish_reason,
+ index,
+ logprobs,
+ };
+ },
+ ),
+ created,
+ model,
+ object: 'chat.completion',
+ ...(system_fingerprint ? { system_fingerprint } : {}),
+ };
+}
+
+function str(x: unknown) {
+ return JSON.stringify(x);
+}
+
+/**
+ * Represents a streamed chunk of a chat completion response returned by model,
+ * based on the provided input.
+ */
+export interface ChatCompletionSnapshot {
+ /**
+ * A unique identifier for the chat completion.
+ */
+ id: string;
+
+ /**
+ * A list of chat completion choices. Can be more than one if `n` is greater
+ * than 1.
+ */
+ choices: Array;
+
+ /**
+ * The Unix timestamp (in seconds) of when the chat completion was created.
+ */
+ created: number;
+
+ /**
+ * The model to generate the completion.
+ */
+ model: string;
+
+ // Note we do not include an "object" type on the snapshot,
+ // because the object is not a valid "chat.completion" until finalized.
+ // object: 'chat.completion';
+
+ /**
+ * This fingerprint represents the backend configuration that the model runs with.
+ *
+ * Can be used in conjunction with the `seed` request parameter to understand when
+ * backend changes have been made that might impact determinism.
+ */
+ system_fingerprint?: string;
+}
+
+export namespace ChatCompletionSnapshot {
+ export interface Choice {
+ /**
+ * The message accumulated so far from streamed model response deltas.
+ */
+ message: Choice.Message;
+
+ /**
+ * The reason the model stopped generating tokens. This will be `stop` if the model
+ * hit a natural stop point or a provided stop sequence, `length` if the maximum
+ * number of tokens specified in the request was reached, `content_filter` if
+ * content was omitted due to a flag from our content filters, or `function_call`
+ * if the model called a function.
+ */
+ finish_reason: ChatCompletion.Choice['finish_reason'] | null;
+
+ /**
+ * Log probability information for the choice.
+ */
+ logprobs: LogProbs | null;
+
+ /**
+ * The index of the choice in the list of choices.
+ */
+ index: number;
+ }
+
+ export namespace Choice {
+ /**
+ * A chat completion message accumulated from streamed model response deltas.
+ */
+ export interface Message {
+ /**
+ * The contents of the chunk message.
+ */
+ content?: string | null;
+
+ /**
+ * The name and arguments of a function that should be called, as generated by the
+ * model.
+ */
+ function_call?: Message.FunctionCall;
+
+ tool_calls?: Array;
+
+ /**
+ * The role of the author of this message.
+ */
+ role?: 'system' | 'user' | 'assistant' | 'function' | 'tool';
+ }
+
+ export namespace Message {
+ export interface ToolCall {
+ /**
+ * The ID of the tool call.
+ */
+ id?: string;
+
+ function?: ToolCall.Function;
+
+ /**
+ * The type of the tool.
+ */
+ type?: 'function';
+ }
+
+ export namespace ToolCall {
+ export interface Function {
+ /**
+ * The arguments to call the function with, as generated by the model in JSON
+ * format. Note that the model does not always generate valid JSON, and may
+ * hallucinate parameters not defined by your function schema. Validate the
+ * arguments in your code before calling your function.
+ */
+ arguments?: string;
+
+ /**
+ * The name of the function to call.
+ */
+ name?: string;
+ }
+ }
+
+ /**
+ * The name and arguments of a function that should be called, as generated by the
+ * model.
+ */
+ export interface FunctionCall {
+ /**
+ * The arguments to call the function with, as generated by the model in JSON
+ * format. Note that the model does not always generate valid JSON, and may
+ * hallucinate parameters not defined by your function schema. Validate the
+ * arguments in your code before calling your function.
+ */
+ arguments?: string;
+
+ /**
+ * The name of the function to call.
+ */
+ name?: string;
+ }
+ }
+ }
+}
diff --git a/src/lib/ChatCompletionStreamingRunner.ts b/src/lib/ChatCompletionStreamingRunner.ts
new file mode 100644
index 0000000..31491e7
--- /dev/null
+++ b/src/lib/ChatCompletionStreamingRunner.ts
@@ -0,0 +1,68 @@
+import {
+ Completions,
+ type ChatCompletionChunk,
+ type CompletionCreateParamsStreaming,
+} from 'together-ai/resources/chat/completions';
+import { RunnerOptions, type AbstractChatCompletionRunnerEvents } from './AbstractChatCompletionRunner';
+import { type ReadableStream } from 'together-ai/_shims/index';
+import { RunnableTools, type BaseFunctionsArgs, type RunnableFunctions } from './RunnableFunction';
+import { ChatCompletionSnapshot, ChatCompletionStream } from './ChatCompletionStream';
+
+export interface ChatCompletionStreamEvents extends AbstractChatCompletionRunnerEvents {
+ content: (contentDelta: string, contentSnapshot: string) => void;
+ chunk: (chunk: ChatCompletionChunk, snapshot: ChatCompletionSnapshot) => void;
+}
+
+export type ChatCompletionStreamingFunctionRunnerParams = Omit<
+ CompletionCreateParamsStreaming,
+ 'functions'
+> & {
+ functions: RunnableFunctions;
+};
+
+export type ChatCompletionStreamingToolRunnerParams = Omit<
+ CompletionCreateParamsStreaming,
+ 'tools'
+> & {
+ tools: RunnableTools;
+};
+
+export class ChatCompletionStreamingRunner
+ extends ChatCompletionStream
+ implements AsyncIterable
+{
+ static override fromReadableStream(stream: ReadableStream): ChatCompletionStreamingRunner {
+ const runner = new ChatCompletionStreamingRunner();
+ runner._run(() => runner._fromReadableStream(stream));
+ return runner;
+ }
+
+ /** @deprecated - please use `runTools` instead. */
+ static runFunctions(
+ completions: Completions,
+ params: ChatCompletionStreamingFunctionRunnerParams,
+ options?: RunnerOptions,
+ ): ChatCompletionStreamingRunner {
+ const runner = new ChatCompletionStreamingRunner();
+ const opts = {
+ ...options,
+ headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'runFunctions' },
+ };
+ runner._run(() => runner._runFunctions(completions, params, opts));
+ return runner;
+ }
+
+ static runTools(
+ completions: Completions,
+ params: ChatCompletionStreamingToolRunnerParams,
+ options?: RunnerOptions,
+ ): ChatCompletionStreamingRunner {
+ const runner = new ChatCompletionStreamingRunner();
+ const opts = {
+ ...options,
+ headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'runTools' },
+ };
+ runner._run(() => runner._runTools(completions, params, opts));
+ return runner;
+ }
+}
diff --git a/src/lib/RunnableFunction.ts b/src/lib/RunnableFunction.ts
new file mode 100644
index 0000000..8ebcbdc
--- /dev/null
+++ b/src/lib/RunnableFunction.ts
@@ -0,0 +1,134 @@
+import { type ChatCompletionRunner } from './ChatCompletionRunner';
+import { type ChatCompletionStreamingRunner } from './ChatCompletionStreamingRunner';
+import { JSONSchema } from './jsonschema';
+
+type PromiseOrValue = T | Promise;
+
+export type RunnableFunctionWithParse = {
+ /**
+ * @param args the return value from `parse`.
+ * @param runner the runner evaluating this callback.
+ * @returns a string to send back.
+ */
+ function: (
+ args: Args,
+ runner: ChatCompletionRunner | ChatCompletionStreamingRunner,
+ ) => PromiseOrValue;
+ /**
+ * @param input the raw args from the function call.
+ * @returns the parsed arguments to pass to `function`
+ */
+ parse: (input: string) => PromiseOrValue;
+ /**
+ * The parameters the function accepts, described as a JSON Schema object.
+ */
+ parameters: JSONSchema;
+ /**
+ * A description of what the function does, used by the model to choose when and how to call the function.
+ */
+ description: string;
+ /**
+ * The name of the function to be called. Will default to function.name if omitted.
+ */
+ name?: string | undefined;
+};
+
+export type RunnableFunctionWithoutParse = {
+ /**
+ * @param args the raw args from the function call.
+ * @returns a string to send back
+ */
+ function: (
+ args: string,
+ runner: ChatCompletionRunner | ChatCompletionStreamingRunner,
+ ) => PromiseOrValue;
+ /**
+ * The parameters the function accepts, described as a JSON Schema object.
+ */
+ parameters: JSONSchema;
+ /**
+ * A description of what the function does, used by the model to choose when and how to call the function.
+ */
+ description: string;
+ /**
+ * The name of the function to be called. Will default to function.name if omitted.
+ */
+ name?: string | undefined;
+};
+
+export type RunnableFunction =
+ Args extends string ? RunnableFunctionWithoutParse
+ : Args extends object ? RunnableFunctionWithParse
+ : never;
+
+export type RunnableToolFunction =
+ Args extends string ? RunnableToolFunctionWithoutParse
+ : Args extends object ? RunnableToolFunctionWithParse
+ : never;
+
+export type RunnableToolFunctionWithoutParse = {
+ type: 'function';
+ function: RunnableFunctionWithoutParse;
+};
+export type RunnableToolFunctionWithParse = {
+ type: 'function';
+ function: RunnableFunctionWithParse;
+};
+
+export function isRunnableFunctionWithParse(
+ fn: any,
+): fn is RunnableFunctionWithParse {
+ return typeof (fn as any).parse === 'function';
+}
+
+export type BaseFunctionsArgs = readonly (object | string)[];
+
+export type RunnableFunctions =
+ [any[]] extends [FunctionsArgs] ? readonly RunnableFunction[]
+ : {
+ [Index in keyof FunctionsArgs]: Index extends number ? RunnableFunction
+ : FunctionsArgs[Index];
+ };
+
+export type RunnableTools =
+ [any[]] extends [FunctionsArgs] ? readonly RunnableToolFunction[]
+ : {
+ [Index in keyof FunctionsArgs]: Index extends number ? RunnableToolFunction
+ : FunctionsArgs[Index];
+ };
+
+/**
+ * This is a helper class for passing a `function` and `parse` where the `function`
+ * argument type matches the `parse` return type.
+ *
+ * @deprecated - please use ParsingToolFunction instead.
+ */
+export class ParsingFunction {
+ function: RunnableFunctionWithParse['function'];
+ parse: RunnableFunctionWithParse['parse'];
+ parameters: RunnableFunctionWithParse['parameters'];
+ description: RunnableFunctionWithParse['description'];
+ name?: RunnableFunctionWithParse['name'];
+
+ constructor(input: RunnableFunctionWithParse) {
+ this.function = input.function;
+ this.parse = input.parse;
+ this.parameters = input.parameters;
+ this.description = input.description;
+ this.name = input.name;
+ }
+}
+
+/**
+ * This is a helper class for passing a `function` and `parse` where the `function`
+ * argument type matches the `parse` return type.
+ */
+export class ParsingToolFunction {
+ type: 'function';
+ function: RunnableFunctionWithParse;
+
+ constructor(input: RunnableFunctionWithParse) {
+ this.type = 'function';
+ this.function = input;
+ }
+}
diff --git a/src/lib/chatCompletionUtils.ts b/src/lib/chatCompletionUtils.ts
new file mode 100644
index 0000000..b0d4979
--- /dev/null
+++ b/src/lib/chatCompletionUtils.ts
@@ -0,0 +1,28 @@
+import {
+ type ChatCompletionAssistantMessageParam,
+ type ChatCompletionFunctionMessageParam,
+ type ChatCompletionMessageParam,
+ type ChatCompletionToolMessageParam,
+} from 'together-ai/resources/chat';
+
+export const isAssistantMessage = (
+ message: ChatCompletionMessageParam | null | undefined,
+): message is ChatCompletionAssistantMessageParam => {
+ return message?.role === 'assistant';
+};
+
+export const isFunctionMessage = (
+ message: ChatCompletionMessageParam | null | undefined,
+): message is ChatCompletionFunctionMessageParam => {
+ return message?.role === 'function';
+};
+
+export const isToolMessage = (
+ message: ChatCompletionMessageParam | null | undefined,
+): message is ChatCompletionToolMessageParam => {
+ return message?.role === 'tool';
+};
+
+export function isPresent<T>(obj: T | null | undefined): obj is T {
+ return obj != null;
+}
diff --git a/src/lib/jsonschema.ts b/src/lib/jsonschema.ts
new file mode 100644
index 0000000..6362777
--- /dev/null
+++ b/src/lib/jsonschema.ts
@@ -0,0 +1,148 @@
+// File mostly copied from @types/json-schema, but stripped down a bit for brevity
+// https://github.com/DefinitelyTyped/DefinitelyTyped/blob/817274f3280152ba2929a6067c93df8b34c4c9aa/types/json-schema/index.d.ts
+//
+// ==================================================================================================
+// JSON Schema Draft 07
+// ==================================================================================================
+// https://tools.ietf.org/html/draft-handrews-json-schema-validation-01
+// --------------------------------------------------------------------------------------------------
+
+/**
+ * Primitive type
+ * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.1.1
+ */
+export type JSONSchemaTypeName =
+ | ({} & string)
+ | 'string'
+ | 'number'
+ | 'integer'
+ | 'boolean'
+ | 'object'
+ | 'array'
+ | 'null';
+
+/**
+ * Primitive type
+ * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.1.1
+ */
+export type JSONSchemaType =
+ | string //
+ | number
+ | boolean
+ | JSONSchemaObject
+ | JSONSchemaArray
+ | null;
+
+// Workaround for infinite type recursion
+export interface JSONSchemaObject {
+ [key: string]: JSONSchemaType;
+}
+
+// Workaround for infinite type recursion
+// https://github.com/Microsoft/TypeScript/issues/3496#issuecomment-128553540
+export interface JSONSchemaArray extends Array<JSONSchemaType> {}
+
+/**
+ * Meta schema
+ *
+ * Recommended values:
+ * - 'http://json-schema.org/schema#'
+ * - 'http://json-schema.org/hyper-schema#'
+ * - 'http://json-schema.org/draft-07/schema#'
+ * - 'http://json-schema.org/draft-07/hyper-schema#'
+ *
+ * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-5
+ */
+export type JSONSchemaVersion = string;
+
+/**
+ * JSON Schema v7
+ * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01
+ */
+export type JSONSchemaDefinition = JSONSchema | boolean;
+export interface JSONSchema {
+ $id?: string | undefined;
+ $comment?: string | undefined;
+
+ /**
+ * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.1
+ */
+ type?: JSONSchemaTypeName | JSONSchemaTypeName[] | undefined;
+ enum?: JSONSchemaType[] | undefined;
+ const?: JSONSchemaType | undefined;
+
+ /**
+ * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.2
+ */
+ multipleOf?: number | undefined;
+ maximum?: number | undefined;
+ exclusiveMaximum?: number | undefined;
+ minimum?: number | undefined;
+ exclusiveMinimum?: number | undefined;
+
+ /**
+ * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.3
+ */
+ maxLength?: number | undefined;
+ minLength?: number | undefined;
+ pattern?: string | undefined;
+
+ /**
+ * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.4
+ */
+ items?: JSONSchemaDefinition | JSONSchemaDefinition[] | undefined;
+ additionalItems?: JSONSchemaDefinition | undefined;
+ maxItems?: number | undefined;
+ minItems?: number | undefined;
+ uniqueItems?: boolean | undefined;
+ contains?: JSONSchemaDefinition | undefined;
+
+ /**
+ * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.5
+ */
+ maxProperties?: number | undefined;
+ minProperties?: number | undefined;
+ required?: string[] | undefined;
+ properties?:
+ | {
+ [key: string]: JSONSchemaDefinition;
+ }
+ | undefined;
+ patternProperties?:
+ | {
+ [key: string]: JSONSchemaDefinition;
+ }
+ | undefined;
+ additionalProperties?: JSONSchemaDefinition | undefined;
+ propertyNames?: JSONSchemaDefinition | undefined;
+
+ /**
+ * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.6
+ */
+ if?: JSONSchemaDefinition | undefined;
+ then?: JSONSchemaDefinition | undefined;
+ else?: JSONSchemaDefinition | undefined;
+
+ /**
+ * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.7
+ */
+ allOf?: JSONSchemaDefinition[] | undefined;
+ anyOf?: JSONSchemaDefinition[] | undefined;
+ oneOf?: JSONSchemaDefinition[] | undefined;
+ not?: JSONSchemaDefinition | undefined;
+
+ /**
+ * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-7
+ */
+ format?: string | undefined;
+
+ /**
+ * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-10
+ */
+ title?: string | undefined;
+ description?: string | undefined;
+ default?: JSONSchemaType | undefined;
+ readOnly?: boolean | undefined;
+ writeOnly?: boolean | undefined;
+ examples?: JSONSchemaType | undefined;
+}
diff --git a/src/resource.ts b/src/resource.ts
old mode 100644
new mode 100755
diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts
old mode 100644
new mode 100755
index 401291f..fc656a3
--- a/src/resources/chat/chat.ts
+++ b/src/resources/chat/chat.ts
@@ -10,8 +10,16 @@ export class Chat extends APIResource {
export namespace Chat {
export import Completions = CompletionsAPI.Completions;
export import ChatCompletion = CompletionsAPI.ChatCompletion;
+ export import ChatCompletionAssistantMessageParam = CompletionsAPI.ChatCompletionAssistantMessageParam;
export import ChatCompletionChunk = CompletionsAPI.ChatCompletionChunk;
+ export import ChatCompletionFunctionMessageParam = CompletionsAPI.ChatCompletionFunctionMessageParam;
+ export import ChatCompletionMessage = CompletionsAPI.ChatCompletionMessage;
+ export import ChatCompletionMessageParam = CompletionsAPI.ChatCompletionMessageParam;
+ export import ChatCompletionSystemMessageParam = CompletionsAPI.ChatCompletionSystemMessageParam;
+ export import ChatCompletionTool = CompletionsAPI.ChatCompletionTool;
+ export import ChatCompletionToolMessageParam = CompletionsAPI.ChatCompletionToolMessageParam;
export import ChatCompletionUsage = CompletionsAPI.ChatCompletionUsage;
+ export import ChatCompletionUserMessageParam = CompletionsAPI.ChatCompletionUserMessageParam;
export import CompletionCreateParams = CompletionsAPI.CompletionCreateParams;
export import CompletionCreateParamsNonStreaming = CompletionsAPI.CompletionCreateParamsNonStreaming;
export import CompletionCreateParamsStreaming = CompletionsAPI.CompletionCreateParamsStreaming;
diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts
old mode 100644
new mode 100755
index bb35029..be09d27
--- a/src/resources/chat/completions.ts
+++ b/src/resources/chat/completions.ts
@@ -6,6 +6,7 @@ import * as Core from '../../core';
import * as ChatCompletionsAPI from './completions';
import * as CompletionsAPI from '../completions';
import { Stream } from '../../streaming';
+import { ChatCompletionStream, ChatCompletionStreamParams } from 'together-ai/lib/ChatCompletionStream';
export class Completions extends APIResource {
/**
@@ -28,6 +29,9 @@ export class Completions extends APIResource {
| APIPromise<ChatCompletion>
| APIPromise<Stream<ChatCompletionChunk>>;
}
+ stream(body: ChatCompletionStreamParams, options?: Core.RequestOptions): ChatCompletionStream {
+ return ChatCompletionStream.createChatCompletion(this._client.chat.completions, body, options);
+ }
}
export interface ChatCompletion {
@@ -52,37 +56,37 @@ export namespace ChatCompletion {
logprobs?: CompletionsAPI.LogProbs | null;
- message?: Choice.Message;
+ message?: ChatCompletionsAPI.ChatCompletionMessage;
seed?: number;
text?: string;
}
+}
- export namespace Choice {
- export interface Message {
- content: string | null;
+export interface ChatCompletionAssistantMessageParam {
+ role: 'assistant';
- role: 'assistant';
+ content?: string | null;
- /**
- * @deprecated
- */
- function_call?: Message.FunctionCall;
+ /**
+ * @deprecated
+ */
+ function_call?: ChatCompletionAssistantMessageParam.FunctionCall;
- tool_calls?: Array<CompletionsAPI.ToolChoice>;
- }
+ name?: string;
- export namespace Message {
- /**
- * @deprecated
- */
- export interface FunctionCall {
- arguments: string;
+ tool_calls?: Array<CompletionsAPI.ToolChoice>;
+}
- name: string;
- }
- }
+export namespace ChatCompletionAssistantMessageParam {
+ /**
+ * @deprecated
+ */
+ export interface FunctionCall {
+ arguments: string;
+
+ name: string;
}
}
@@ -106,15 +110,17 @@ export namespace ChatCompletionChunk {
export interface Choice {
delta: Choice.Delta;
- finish_reason: 'stop' | 'eos' | 'length' | 'tool_calls' | 'function_call';
+ finish_reason: 'stop' | 'eos' | 'length' | 'tool_calls' | 'function_call' | null;
index: number;
- logprobs?: CompletionsAPI.LogProbs;
+ logprobs?: number | null;
}
export namespace Choice {
export interface Delta {
+ role: 'system' | 'user' | 'assistant' | 'function' | 'tool';
+
content?: string | null;
/**
@@ -122,8 +128,6 @@ export namespace ChatCompletionChunk {
*/
function_call?: Delta.FunctionCall | null;
- role?: 'system' | 'user' | 'assistant' | 'function' | 'tool';
-
token_id?: number;
tool_calls?: Array<CompletionsAPI.ToolChoice>;
@@ -142,6 +146,82 @@ export namespace ChatCompletionChunk {
}
}
+/**
+ * @deprecated
+ */
+export interface ChatCompletionFunctionMessageParam {
+ content: string;
+
+ name: string;
+
+ role: 'function';
+}
+
+export interface ChatCompletionMessage {
+ content: string | null;
+
+ role: 'assistant';
+
+ /**
+ * @deprecated
+ */
+ function_call?: ChatCompletionMessage.FunctionCall;
+
+ tool_calls?: Array<CompletionsAPI.ToolChoice>;
+}
+
+export namespace ChatCompletionMessage {
+ /**
+ * @deprecated
+ */
+ export interface FunctionCall {
+ arguments: string;
+
+ name: string;
+ }
+}
+
+export type ChatCompletionMessageParam =
+ | ChatCompletionSystemMessageParam
+ | ChatCompletionUserMessageParam
+ | ChatCompletionAssistantMessageParam
+ | ChatCompletionToolMessageParam
+ | ChatCompletionFunctionMessageParam;
+
+export interface ChatCompletionSystemMessageParam {
+ content: string;
+
+ role: 'system';
+
+ name?: string;
+}
+
+export interface ChatCompletionTool {
+ function: ChatCompletionTool.Function;
+
+ type: 'function';
+}
+
+export namespace ChatCompletionTool {
+ export interface Function {
+ name: string;
+
+ description?: string;
+
+ parameters?: Record<string, unknown>;
+ }
+}
+
+export interface ChatCompletionToolMessageParam {
+ content?: string;
+
+ required?: unknown;
+
+ role?: 'tool';
+
+ tool_call_id?: string;
+}
+
export interface ChatCompletionUsage {
completion_tokens: number;
@@ -150,6 +230,14 @@ export interface ChatCompletionUsage {
total_tokens: number;
}
+export interface ChatCompletionUserMessageParam {
+ content: string;
+
+ role: 'user';
+
+ name?: string;
+}
+
export type CompletionCreateParams = CompletionCreateParamsNonStreaming | CompletionCreateParamsStreaming;
export interface CompletionCreateParamsBase {
@@ -194,7 +282,7 @@ export interface CompletionCreateParamsBase {
max_tokens?: number;
/**
- * A number between 0 and 1 that can be used as an alternative to temperature.
+ * A number between 0 and 1 that can be used as an alternative to top-p and top-k.
*/
min_p?: number;
@@ -291,7 +379,7 @@ export namespace CompletionCreateParams {
/**
* The role of the messages author. Choice between: system, user, or assistant.
*/
- role: 'system' | 'user' | 'assistant';
+ role: 'system' | 'user' | 'assistant' | 'tool';
}
export interface Name {
@@ -337,8 +425,16 @@ export interface CompletionCreateParamsStreaming extends CompletionCreateParamsB
export namespace Completions {
export import ChatCompletion = ChatCompletionsAPI.ChatCompletion;
+ export import ChatCompletionAssistantMessageParam = ChatCompletionsAPI.ChatCompletionAssistantMessageParam;
export import ChatCompletionChunk = ChatCompletionsAPI.ChatCompletionChunk;
+ export import ChatCompletionFunctionMessageParam = ChatCompletionsAPI.ChatCompletionFunctionMessageParam;
+ export import ChatCompletionMessage = ChatCompletionsAPI.ChatCompletionMessage;
+ export import ChatCompletionMessageParam = ChatCompletionsAPI.ChatCompletionMessageParam;
+ export import ChatCompletionSystemMessageParam = ChatCompletionsAPI.ChatCompletionSystemMessageParam;
+ export import ChatCompletionTool = ChatCompletionsAPI.ChatCompletionTool;
+ export import ChatCompletionToolMessageParam = ChatCompletionsAPI.ChatCompletionToolMessageParam;
export import ChatCompletionUsage = ChatCompletionsAPI.ChatCompletionUsage;
+ export import ChatCompletionUserMessageParam = ChatCompletionsAPI.ChatCompletionUserMessageParam;
export import CompletionCreateParams = ChatCompletionsAPI.CompletionCreateParams;
export import CompletionCreateParamsNonStreaming = ChatCompletionsAPI.CompletionCreateParamsNonStreaming;
export import CompletionCreateParamsStreaming = ChatCompletionsAPI.CompletionCreateParamsStreaming;
diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts
old mode 100644
new mode 100755
index 2f9038b..89c8a6d
--- a/src/resources/chat/index.ts
+++ b/src/resources/chat/index.ts
@@ -3,8 +3,16 @@
export { Chat } from './chat';
export {
ChatCompletion,
+ ChatCompletionAssistantMessageParam,
ChatCompletionChunk,
+ ChatCompletionFunctionMessageParam,
+ ChatCompletionMessage,
+ ChatCompletionMessageParam,
+ ChatCompletionSystemMessageParam,
+ ChatCompletionTool,
+ ChatCompletionToolMessageParam,
ChatCompletionUsage,
+ ChatCompletionUserMessageParam,
CompletionCreateParams,
CompletionCreateParamsNonStreaming,
CompletionCreateParamsStreaming,
diff --git a/src/resources/completions.ts b/src/resources/completions.ts
old mode 100644
new mode 100755
index c22df37..b6090e7
--- a/src/resources/completions.ts
+++ b/src/resources/completions.ts
@@ -63,7 +63,10 @@ export namespace Completion {
}
export interface LogProbs {
- content?: Array<LogProbs.Content>;
+ /**
+ * List of token IDs corresponding to the logprobs
+ */
+ token_ids?: Array<number>;
/**
* List of token log probabilities
@@ -76,14 +79,6 @@ export interface LogProbs {
tokens?: Array<string>;
}
-export namespace LogProbs {
- export interface Content {
- token: string;
-
- logprob: number;
- }
-}
-
export interface ToolChoice {
id: string;
@@ -163,7 +158,7 @@ export interface CompletionCreateParamsBase {
max_tokens?: number;
/**
- * A number between 0 and 1 that can be used as an alternative to temperature.
+ * A number between 0 and 1 that can be used as an alternative to top-p and top-k.
*/
min_p?: number;
diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts
old mode 100644
new mode 100755
diff --git a/src/resources/files.ts b/src/resources/files.ts
old mode 100644
new mode 100755
diff --git a/src/resources/fine-tune.ts b/src/resources/fine-tune.ts
old mode 100644
new mode 100755
diff --git a/src/resources/images.ts b/src/resources/images.ts
old mode 100644
new mode 100755
diff --git a/src/resources/index.ts b/src/resources/index.ts
old mode 100644
new mode 100755
diff --git a/src/resources/models.ts b/src/resources/models.ts
old mode 100644
new mode 100755
diff --git a/src/shims/node.ts b/src/shims/node.ts
old mode 100644
new mode 100755
diff --git a/src/shims/web.ts b/src/shims/web.ts
old mode 100644
new mode 100755
diff --git a/src/streaming.ts b/src/streaming.ts
old mode 100644
new mode 100755
diff --git a/src/uploads.ts b/src/uploads.ts
old mode 100644
new mode 100755
diff --git a/src/version.ts b/src/version.ts
old mode 100644
new mode 100755
index 56adb6a..55b70f1
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '0.6.0-alpha.4'; // x-release-please-version
+export const VERSION = '0.6.0-alpha.5'; // x-release-please-version
diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts
old mode 100644
new mode 100755
index bdf434f..6de16e5
--- a/tests/api-resources/chat/completions.test.ts
+++ b/tests/api-resources/chat/completions.test.ts
@@ -3,18 +3,18 @@
import Together from 'together-ai';
import { Response } from 'node-fetch';
-const together = new Together({
+const client = new Together({
apiKey: 'My API Key',
baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
});
describe('resource completions', () => {
test('create: only required params', async () => {
- const responsePromise = together.chat.completions.create({
+ const responsePromise = client.chat.completions.create({
messages: [
- { role: 'system', content: 'string' },
- { role: 'system', content: 'string' },
- { role: 'system', content: 'string' },
+ { content: 'content', role: 'system' },
+ { content: 'content', role: 'system' },
+ { content: 'content', role: 'system' },
],
model: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
});
@@ -28,11 +28,11 @@ describe('resource completions', () => {
});
test('create: required and optional params', async () => {
- const response = await together.chat.completions.create({
+ const response = await client.chat.completions.create({
messages: [
- { role: 'system', content: 'string' },
- { role: 'system', content: 'string' },
- { role: 'system', content: 'string' },
+ { content: 'content', role: 'system' },
+ { content: 'content', role: 'system' },
+ { content: 'content', role: 'system' },
],
model: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
echo: true,
@@ -45,7 +45,7 @@ describe('resource completions', () => {
n: 1,
presence_penalty: 0,
repetition_penalty: 0,
- response_format: { type: 'json', schema: { foo: 'string' } },
+ response_format: { schema: { foo: 'string' }, type: 'json' },
safety_model: 'safety_model_name',
stop: ['string', 'string', 'string'],
stream: false,
@@ -53,28 +53,28 @@ describe('resource completions', () => {
tool_choice: 'tool_name',
tools: [
{
- type: 'tool_type',
function: {
description: 'A description of the function.',
name: 'function_name',
parameters: { foo: 'bar' },
},
+ type: 'tool_type',
},
{
- type: 'tool_type',
function: {
description: 'A description of the function.',
name: 'function_name',
parameters: { foo: 'bar' },
},
+ type: 'tool_type',
},
{
- type: 'tool_type',
function: {
description: 'A description of the function.',
name: 'function_name',
parameters: { foo: 'bar' },
},
+ type: 'tool_type',
},
],
top_k: 0,
diff --git a/tests/api-resources/completions.test.ts b/tests/api-resources/completions.test.ts
old mode 100644
new mode 100755
index 53c81d8..e9958d5
--- a/tests/api-resources/completions.test.ts
+++ b/tests/api-resources/completions.test.ts
@@ -3,14 +3,14 @@
import Together from 'together-ai';
import { Response } from 'node-fetch';
-const together = new Together({
+const client = new Together({
apiKey: 'My API Key',
baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
});
describe('resource completions', () => {
test('create: only required params', async () => {
- const responsePromise = together.completions.create({
+ const responsePromise = client.completions.create({
model: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
prompt: '[INST] What is the capital of France? [/INST]',
});
@@ -24,7 +24,7 @@ describe('resource completions', () => {
});
test('create: required and optional params', async () => {
- const response = await together.completions.create({
+ const response = await client.completions.create({
model: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
prompt: '[INST] What is the capital of France? [/INST]',
echo: true,
diff --git a/tests/api-resources/embeddings.test.ts b/tests/api-resources/embeddings.test.ts
old mode 100644
new mode 100755
index a9fbfd4..da70d02
--- a/tests/api-resources/embeddings.test.ts
+++ b/tests/api-resources/embeddings.test.ts
@@ -3,14 +3,14 @@
import Together from 'together-ai';
import { Response } from 'node-fetch';
-const together = new Together({
+const client = new Together({
apiKey: 'My API Key',
baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
});
describe('resource embeddings', () => {
test('create: only required params', async () => {
- const responsePromise = together.embeddings.create({
+ const responsePromise = client.embeddings.create({
input: 'Our solar system orbits the Milky Way galaxy at about 515,000 mph',
model: 'togethercomputer/m2-bert-80M-8k-retrieval',
});
@@ -24,7 +24,7 @@ describe('resource embeddings', () => {
});
test('create: required and optional params', async () => {
- const response = await together.embeddings.create({
+ const response = await client.embeddings.create({
input: 'Our solar system orbits the Milky Way galaxy at about 515,000 mph',
model: 'togethercomputer/m2-bert-80M-8k-retrieval',
});
diff --git a/tests/api-resources/files.test.ts b/tests/api-resources/files.test.ts
old mode 100644
new mode 100755
index 6ccc78c..bfc9cf7
--- a/tests/api-resources/files.test.ts
+++ b/tests/api-resources/files.test.ts
@@ -3,14 +3,14 @@
import Together from 'together-ai';
import { Response } from 'node-fetch';
-const together = new Together({
+const client = new Together({
apiKey: 'My API Key',
baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
});
describe('resource files', () => {
test('retrieve', async () => {
- const responsePromise = together.files.retrieve('string');
+ const responsePromise = client.files.retrieve('id');
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -22,13 +22,13 @@ describe('resource files', () => {
test('retrieve: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(together.files.retrieve('string', { path: '/_stainless_unknown_path' })).rejects.toThrow(
+ await expect(client.files.retrieve('id', { path: '/_stainless_unknown_path' })).rejects.toThrow(
Together.NotFoundError,
);
});
test('list', async () => {
- const responsePromise = together.files.list();
+ const responsePromise = client.files.list();
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -40,13 +40,13 @@ describe('resource files', () => {
test('list: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(together.files.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
+ await expect(client.files.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
Together.NotFoundError,
);
});
test('delete', async () => {
- const responsePromise = together.files.delete('string');
+ const responsePromise = client.files.delete('id');
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -58,14 +58,14 @@ describe('resource files', () => {
test('delete: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(together.files.delete('string', { path: '/_stainless_unknown_path' })).rejects.toThrow(
+ await expect(client.files.delete('id', { path: '/_stainless_unknown_path' })).rejects.toThrow(
Together.NotFoundError,
);
});
test('content: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(together.files.content('string', { path: '/_stainless_unknown_path' })).rejects.toThrow(
+ await expect(client.files.content('id', { path: '/_stainless_unknown_path' })).rejects.toThrow(
Together.NotFoundError,
);
});
diff --git a/tests/api-resources/fine-tune.test.ts b/tests/api-resources/fine-tune.test.ts
old mode 100644
new mode 100755
index 7058b2a..643ef1f
--- a/tests/api-resources/fine-tune.test.ts
+++ b/tests/api-resources/fine-tune.test.ts
@@ -3,14 +3,14 @@
import Together from 'together-ai';
import { Response } from 'node-fetch';
-const together = new Together({
+const client = new Together({
apiKey: 'My API Key',
baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
});
describe('resource fineTune', () => {
test('create: only required params', async () => {
- const responsePromise = together.fineTune.create({ model: 'string', training_file: 'string' });
+ const responsePromise = client.fineTune.create({ model: 'model', training_file: 'training_file' });
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -21,20 +21,20 @@ describe('resource fineTune', () => {
});
test('create: required and optional params', async () => {
- const response = await together.fineTune.create({
- model: 'string',
- training_file: 'string',
+ const response = await client.fineTune.create({
+ model: 'model',
+ training_file: 'training_file',
batch_size: 0,
learning_rate: 0,
n_checkpoints: 0,
n_epochs: 0,
- suffix: 'string',
- wandb_api_key: 'string',
+ suffix: 'suffix',
+ wandb_api_key: 'wandb_api_key',
});
});
test('retrieve', async () => {
- const responsePromise = together.fineTune.retrieve('string');
+ const responsePromise = client.fineTune.retrieve('id');
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -46,13 +46,13 @@ describe('resource fineTune', () => {
test('retrieve: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(together.fineTune.retrieve('string', { path: '/_stainless_unknown_path' })).rejects.toThrow(
+ await expect(client.fineTune.retrieve('id', { path: '/_stainless_unknown_path' })).rejects.toThrow(
Together.NotFoundError,
);
});
test('list', async () => {
- const responsePromise = together.fineTune.list();
+ const responsePromise = client.fineTune.list();
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -64,13 +64,13 @@ describe('resource fineTune', () => {
test('list: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(together.fineTune.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
+ await expect(client.fineTune.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
Together.NotFoundError,
);
});
test('cancel', async () => {
- const responsePromise = together.fineTune.cancel('string');
+ const responsePromise = client.fineTune.cancel('id');
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -82,13 +82,13 @@ describe('resource fineTune', () => {
test('cancel: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(together.fineTune.cancel('string', { path: '/_stainless_unknown_path' })).rejects.toThrow(
+ await expect(client.fineTune.cancel('id', { path: '/_stainless_unknown_path' })).rejects.toThrow(
Together.NotFoundError,
);
});
test('download: only required params', async () => {
- const responsePromise = together.fineTune.download({ ft_id: 'string' });
+ const responsePromise = client.fineTune.download({ ft_id: 'ft_id' });
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -99,15 +99,11 @@ describe('resource fineTune', () => {
});
test('download: required and optional params', async () => {
- const response = await together.fineTune.download({
- ft_id: 'string',
- checkpoint_step: 0,
- output: 'string',
- });
+ const response = await client.fineTune.download({ ft_id: 'ft_id', checkpoint_step: 0, output: 'output' });
});
test('listEvents', async () => {
- const responsePromise = together.fineTune.listEvents('string');
+ const responsePromise = client.fineTune.listEvents('id');
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -119,8 +115,8 @@ describe('resource fineTune', () => {
test('listEvents: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- together.fineTune.listEvents('string', { path: '/_stainless_unknown_path' }),
- ).rejects.toThrow(Together.NotFoundError);
+ await expect(client.fineTune.listEvents('id', { path: '/_stainless_unknown_path' })).rejects.toThrow(
+ Together.NotFoundError,
+ );
});
});
diff --git a/tests/api-resources/images.test.ts b/tests/api-resources/images.test.ts
old mode 100644
new mode 100755
index 97862c9..94eea13
--- a/tests/api-resources/images.test.ts
+++ b/tests/api-resources/images.test.ts
@@ -3,14 +3,14 @@
import Together from 'together-ai';
import { Response } from 'node-fetch';
-const together = new Together({
+const client = new Together({
apiKey: 'My API Key',
baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
});
describe('resource images', () => {
test('create: only required params', async () => {
- const responsePromise = together.images.create({
+ const responsePromise = client.images.create({
model: 'stabilityai/stable-diffusion-xl-base-1.0',
prompt: 'cat floating in space, cinematic',
});
@@ -24,12 +24,12 @@ describe('resource images', () => {
});
test('create: required and optional params', async () => {
- const response = await together.images.create({
+ const response = await client.images.create({
model: 'stabilityai/stable-diffusion-xl-base-1.0',
prompt: 'cat floating in space, cinematic',
height: 0,
n: 0,
- negative_prompt: 'string',
+ negative_prompt: 'negative_prompt',
seed: 0,
steps: 0,
width: 0,
diff --git a/tests/api-resources/models.test.ts b/tests/api-resources/models.test.ts
old mode 100644
new mode 100755
index d949fa0..d150726
--- a/tests/api-resources/models.test.ts
+++ b/tests/api-resources/models.test.ts
@@ -3,14 +3,14 @@
import Together from 'together-ai';
import { Response } from 'node-fetch';
-const together = new Together({
+const client = new Together({
apiKey: 'My API Key',
baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
});
describe('resource models', () => {
test('list', async () => {
- const responsePromise = together.models.list();
+ const responsePromise = client.models.list();
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -22,7 +22,7 @@ describe('resource models', () => {
test('list: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(together.models.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
+ await expect(client.models.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
Together.NotFoundError,
);
});
diff --git a/tests/form.test.ts b/tests/form.test.ts
old mode 100644
new mode 100755
diff --git a/tests/index.test.ts b/tests/index.test.ts
old mode 100644
new mode 100755
diff --git a/tests/responses.test.ts b/tests/responses.test.ts
old mode 100644
new mode 100755
diff --git a/tests/streaming.test.ts b/tests/streaming.test.ts
old mode 100644
new mode 100755
diff --git a/tests/stringifyQuery.test.ts b/tests/stringifyQuery.test.ts
old mode 100644
new mode 100755
diff --git a/tests/uploads.test.ts b/tests/uploads.test.ts
old mode 100644
new mode 100755
diff --git a/tsc-multi.json b/tsc-multi.json
old mode 100644
new mode 100755
diff --git a/tsconfig.build.json b/tsconfig.build.json
old mode 100644
new mode 100755
diff --git a/tsconfig.deno.json b/tsconfig.deno.json
old mode 100644
new mode 100755
diff --git a/tsconfig.dist-src.json b/tsconfig.dist-src.json
old mode 100644
new mode 100755
diff --git a/tsconfig.json b/tsconfig.json
old mode 100644
new mode 100755
diff --git a/yarn.lock b/yarn.lock
old mode 100644
new mode 100755
index dda4d2e..358dbf2
--- a/yarn.lock
+++ b/yarn.lock
@@ -3412,11 +3412,6 @@ web-streams-polyfill@4.0.0-beta.1:
resolved "https://registry.yarnpkg.com/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.1.tgz#3b19b9817374b7cee06d374ba7eeb3aeb80e8c95"
integrity sha512-3ux37gEX670UUphBF9AMCq8XM6iQ8Ac6A+DSRRjDoRBm1ufCkaCDdNVbaqq60PsEkdNlLKrGtv/YBP4EJXqNtQ==
-web-streams-polyfill@^3.2.1:
- version "3.2.1"
- resolved "https://registry.yarnpkg.com/web-streams-polyfill/-/web-streams-polyfill-3.2.1.tgz#71c2718c52b45fd49dbeee88634b3a60ceab42a6"
- integrity sha512-e0MO3wdXWKrLbL0DgGnUV7WHVuw9OUvL4hjgnPkIeEvESk74gAITi5G606JtZPp39cd8HA9VQzCIvA49LpPN5Q==
-
webidl-conversions@^3.0.0:
version "3.0.1"
resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871"