diff --git a/.gitignore b/.gitignore
index 270d7eb047b4..4d2ffa92987d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -45,3 +45,4 @@ docs/build/
docs/api_refs/typedoc.json
.tool-versions
+credentials.json
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 76668924271e..60e2ec2a311b 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -11,6 +11,7 @@
},
"typescript.tsdk": "node_modules/typescript/lib",
"cSpell.words": [
+ "AILLM",
"Upstash"
],
"cSpell.enableFiletypes": [
diff --git a/docs/core_docs/docs/integrations/chat/google_generativeai.mdx b/docs/core_docs/docs/integrations/chat/google_generativeai.mdx
index 40f5aaf7d94d..b8e7f8e05646 100644
--- a/docs/core_docs/docs/integrations/chat/google_generativeai.mdx
+++ b/docs/core_docs/docs/integrations/chat/google_generativeai.mdx
@@ -1,5 +1,5 @@
---
-sidebar_label: Google AI
+sidebar_label: Google GenAI
keywords: [gemini, gemini-pro, ChatGoogleGenerativeAI]
---
@@ -11,6 +11,12 @@ You can access Google's `gemini` and `gemini-vision` models, as well as other
generative models in LangChain through `ChatGoogleGenerativeAI` class in the
`@langchain/google-genai` integration package.
+:::tip
+You can also access Google's `gemini` family of models via the LangChain VertexAI and VertexAI-web integrations.
+
+Click [here](/docs/integrations/chat/google_vertex_ai) to read the docs.
+:::
+
Get an API key here: https://ai.google.dev/tutorials/setup
You'll first need to install the `@langchain/google-genai` package:
diff --git a/docs/core_docs/docs/integrations/chat/google_palm.mdx b/docs/core_docs/docs/integrations/chat/google_palm.mdx
index ace5e1d48d09..c09aa5a9ea96 100644
--- a/docs/core_docs/docs/integrations/chat/google_palm.mdx
+++ b/docs/core_docs/docs/integrations/chat/google_palm.mdx
@@ -1,5 +1,6 @@
---
-sidebar_label: Google PaLM
+sidebar_label: (Legacy) Google PaLM/VertexAI
+sidebar_class_name: hidden
---
import CodeBlock from "@theme/CodeBlock";
@@ -7,7 +8,7 @@ import CodeBlock from "@theme/CodeBlock";
# ChatGooglePaLM
:::note
-This integration does not support `gemini-*` models. Check [Google AI](/docs/integrations/chat/google_generativeai).
+This integration does not support `gemini-*` models. Check Google [GenAI](/docs/integrations/chat/google_generativeai) or [VertexAI](/docs/integrations/chat/google_vertex_ai).
:::
The [Google PaLM API](https://developers.generativeai.google/products/palm) can be integrated by first
@@ -28,3 +29,97 @@ the model.
import GooglePaLMExample from "@examples/models/chat/integration_googlepalm.ts";
{GooglePaLMExample}
+
+# ChatGoogleVertexAI
+
+LangChain.js supports Google Vertex AI chat models as an integration.
+It supports two different methods of authentication based on whether you're running
+in a Node environment or a web environment.
+
+## Setup
+
+### Node
+
+To call Vertex AI models in Node, you'll need to install [Google's official auth client](https://www.npmjs.com/package/google-auth-library) as a peer dependency.
+
+You should make sure the Vertex AI API is
+enabled for the relevant project and that you've authenticated to
+Google Cloud using one of these methods:
+
+- You are logged into an account (using `gcloud auth application-default login`)
+ permitted to that project.
+- You are running on a machine using a service account that is permitted
+ to the project.
+- You have downloaded the credentials for a service account that is permitted
+ to the project and set the `GOOGLE_APPLICATION_CREDENTIALS` environment
+ variable to the path of this file.
+
+
+
+```bash npm2yarn
+npm install google-auth-library @langchain/community
+```
+
+### Web
+
+To call Vertex AI models in web environments (like Edge functions), you'll need to install
+the [`web-auth-library`](https://github.com/kriasoft/web-auth-library) package as a peer dependency:
+
+```bash npm2yarn
+npm install web-auth-library
+```
+
+Then, you'll need to add your service account credentials directly as a `GOOGLE_VERTEX_AI_WEB_CREDENTIALS` environment variable:
+
+```
+GOOGLE_VERTEX_AI_WEB_CREDENTIALS={"type":"service_account","project_id":"YOUR_PROJECT-12345",...}
+```
+
+You can also pass your credentials directly in code like this:
+
+```typescript
+import { ChatGoogleVertexAI } from "@langchain/community/chat_models/googlevertexai";
+
+const model = new ChatGoogleVertexAI({
+ authOptions: {
+ credentials: {"type":"service_account","project_id":"YOUR_PROJECT-12345",...},
+ },
+});
+```
+
+## Usage
+
+Several models are available and can be specified by the `model` attribute
+in the constructor. These include:
+
+- code-bison (default)
+- code-bison-32k
+
+The ChatGoogleVertexAI class works just like other chat-based LLMs,
+with a few exceptions:
+
+1. The first `SystemMessage` passed in is mapped to the "context" parameter that the PaLM model expects.
+ No other `SystemMessages` are allowed.
+2. After the first `SystemMessage`, there must be an odd number of messages, representing a conversation between a human and the model.
+3. Human messages must alternate with AI messages.
+
+import ChatGoogleVertexAI from "@examples/models/chat/integration_googlevertexai_legacy.ts";
+
+{ChatGoogleVertexAI}
+
+### Streaming
+
+ChatGoogleVertexAI also supports streaming in multiple chunks for faster responses:
+
+import ChatGoogleVertexAIStreaming from "@examples/models/chat/integration_googlevertexai-streaming_legacy.ts";
+
+{ChatGoogleVertexAIStreaming}
+
+### Examples
+
+There is also an optional `examples` constructor parameter that can help the model understand what an appropriate response
+looks like.
+
+import ChatGoogleVertexAIExamples from "@examples/models/chat/integration_googlevertexai-examples_legacy.ts";
+
+{ChatGoogleVertexAIExamples}
diff --git a/docs/core_docs/docs/integrations/chat/google_vertex_ai.mdx b/docs/core_docs/docs/integrations/chat/google_vertex_ai.mdx
index 4cf24fa26722..3e3cf6df2685 100644
--- a/docs/core_docs/docs/integrations/chat/google_vertex_ai.mdx
+++ b/docs/core_docs/docs/integrations/chat/google_vertex_ai.mdx
@@ -1,10 +1,11 @@
---
sidebar_label: Google Vertex AI
+keywords: [gemini, gemini-pro, ChatVertexAI, vertex]
---
import CodeBlock from "@theme/CodeBlock";
-# ChatGoogleVertexAI
+# ChatVertexAI
LangChain.js supports Google Vertex AI chat models as an integration.
It supports two different methods of authentication based on whether you're running
@@ -14,7 +15,15 @@ in a Node environment or a web environment.
### Node
-To call Vertex AI models in Node, you'll need to install [Google's official auth client](https://www.npmjs.com/package/google-auth-library) as a peer dependency.
+To call Vertex AI models in Node, you'll need to install the `@langchain/google-vertexai` package:
+
+import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx";
+
+
+
+```bash npm2yarn
+npm install @langchain/google-vertexai
+```
You should make sure the Vertex AI API is
enabled for the relevant project and that you've authenticated to
@@ -28,21 +37,19 @@ Google Cloud using one of these methods:
to the project and set the `GOOGLE_APPLICATION_CREDENTIALS` environment
variable to the path of this file.
-import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx";
-
```bash npm2yarn
-npm install google-auth-library @langchain/community
+npm install @langchain/google-vertexai
```
### Web
To call Vertex AI models in web environments (like Edge functions), you'll need to install
-the [`web-auth-library`](https://github.com/kriasoft/web-auth-library) pacakge as a peer dependency:
+the `@langchain/google-vertexai-web` package:
```bash npm2yarn
-npm install web-auth-library
+npm install @langchain/google-vertexai-web
```
Then, you'll need to add your service account credentials directly as a `GOOGLE_VERTEX_AI_WEB_CREDENTIALS` environment variable:
@@ -51,12 +58,12 @@ Then, you'll need to add your service account credentials directly as a `GOOGLE_
GOOGLE_VERTEX_AI_WEB_CREDENTIALS={"type":"service_account","project_id":"YOUR_PROJECT-12345",...}
```
-You can also pass your credentials directly in code like this:
+Lastly, you may also pass your credentials directly in code like this:
```typescript
-import { ChatGoogleVertexAI } from "@langchain/community/chat_models/googlevertexai";
+import { ChatVertexAI } from "@langchain/google-vertexai-web";
-const model = new ChatGoogleVertexAI({
+const model = new ChatVertexAI({
authOptions: {
credentials: {"type":"service_account","project_id":"YOUR_PROJECT-12345",...},
},
@@ -65,37 +72,50 @@ const model = new ChatGoogleVertexAI({
## Usage
-Several models are available and can be specified by the `model` attribute
-in the constructor. These include:
+The entire family of `gemini` models is available by specifying the `modelName` parameter.
-- code-bison (default)
-- code-bison-32k
+For example:
-The ChatGoogleVertexAI class works just like other chat-based LLMs,
-with a few exceptions:
+import ChatVertexAI from "@examples/models/chat/integration_googlevertexai.ts";
-1. The first `SystemMessage` passed in is mapped to the "context" parameter that the PaLM model expects.
- No other `SystemMessages` are allowed.
-2. After the first `SystemMessage`, there must be an odd number of messages, representing a conversation between a human and the model.
-3. Human messages must alternate with AI messages.
+{ChatVertexAI}
-import ChatGoogleVertexAI from "@examples/models/chat/integration_googlevertexai.ts";
-
-{ChatGoogleVertexAI}
+:::tip
+See the LangSmith trace for the example above [here](https://smith.langchain.com/public/9fb579d8-4987-4302-beca-29a684ae2f4c/r).
+:::
### Streaming
-ChatGoogleVertexAI also supports streaming in multiple chunks for faster responses:
+`ChatVertexAI` also supports streaming in multiple chunks for faster responses:
+
+import ChatVertexAIStreaming from "@examples/models/chat/integration_googlevertexai-streaming.ts";
+
+{ChatVertexAIStreaming}
+
+:::tip
+See the LangSmith trace for the example above [here](https://smith.langchain.com/public/ba4cb190-3f60-49aa-a6f8-7d31316d94cf/r).
+:::
+
+### Tool calling
+
+`ChatVertexAI` also supports calling the model with a tool:
+
+import ChatVertexAITool from "@examples/models/chat/integration_googlevertexai-tools.ts";
+
+{ChatVertexAITool}
-import ChatGoogleVertexAIStreaming from "@examples/models/chat/integration_googlevertexai-streaming.ts";
+:::tip
+See the LangSmith trace for the example above [here](https://smith.langchain.com/public/49e1c32c-395a-45e2-afba-913aa3389137/r).
+:::
-{ChatGoogleVertexAIStreaming}
+### `withStructuredOutput`
-### Examples
+Alternatively, you can also use the `withStructuredOutput` method:
-There is also an optional `examples` constructor parameter that can help the model understand what an appropriate response
-looks like.
+import ChatVertexAIWSA from "@examples/models/chat/integration_googlevertexai-wsa.ts";
-import ChatGoogleVertexAIExamples from "@examples/models/chat/integration_googlevertexai-examples.ts";
+{ChatVertexAIWSA}
-{ChatGoogleVertexAIExamples}
+:::tip
+See the LangSmith trace for the example above [here](https://smith.langchain.com/public/41bbbddb-f357-4bfa-a111-def8294a4514/r).
+:::
diff --git a/docs/core_docs/docs/integrations/chat/index.mdx b/docs/core_docs/docs/integrations/chat/index.mdx
index 45322478a1d5..ca852b07e8a7 100644
--- a/docs/core_docs/docs/integrations/chat/index.mdx
+++ b/docs/core_docs/docs/integrations/chat/index.mdx
@@ -36,6 +36,7 @@ The table shows, for each integration, which features have been implemented with
| ChatFireworks | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| ChatGoogleGenerativeAI | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ |
| ChatGoogleVertexAI | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ |
+| ChatVertexAI | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ |
| ChatGooglePaLM | ✅ | ❌ | ✅ | ❌ | ❌ | ❌ |
| ChatGroq | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ |
| ChatLlamaCpp | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ |
diff --git a/docs/core_docs/docs/integrations/llms/google_palm.mdx b/docs/core_docs/docs/integrations/llms/google_palm.mdx
index 43c9830de472..75f2202937d9 100644
--- a/docs/core_docs/docs/integrations/llms/google_palm.mdx
+++ b/docs/core_docs/docs/integrations/llms/google_palm.mdx
@@ -1,5 +1,6 @@
---
-sidebar_label: Google PaLM
+sidebar_label: (Legacy) Google PaLM/VertexAI
+sidebar_class_name: hidden
---
import CodeBlock from "@theme/CodeBlock";
@@ -7,7 +8,7 @@ import CodeBlock from "@theme/CodeBlock";
# Google PaLM
:::note
-This integration does not support `gemini-*` models. Check [Google AI](/docs/integrations/chat/google_generativeai).
+This integration does not support `gemini-*` models. Check [Google AI](/docs/integrations/chat/google_generativeai) or [VertexAI](/docs/integrations/llms/google_vertex_ai).
:::
The [Google PaLM API](https://developers.generativeai.google/products/palm) can be integrated by first
@@ -28,3 +29,100 @@ the model.
import GooglePaLMExample from "@examples/models/llm/googlepalm.ts";
{GooglePaLMExample}
+
+# GoogleVertexAI
+
+LangChain.js supports two different authentication methods based on whether
+you're running in a Node.js environment or a web environment.
+
+## Setup
+
+### Node.js
+
+To call Vertex AI models in Node, you'll need to install [Google's official auth client](https://www.npmjs.com/package/google-auth-library) as a peer dependency.
+
+You should make sure the Vertex AI API is
+enabled for the relevant project and that you've authenticated to
+Google Cloud using one of these methods:
+
+- You are logged into an account (using `gcloud auth application-default login`)
+ permitted to that project.
+- You are running on a machine using a service account that is permitted
+ to the project.
+- You have downloaded the credentials for a service account that is permitted
+ to the project and set the `GOOGLE_APPLICATION_CREDENTIALS` environment
+ variable to the path of this file.
+
+```bash npm2yarn
+npm install google-auth-library
+```
+
+### Web
+
+To call Vertex AI models in web environments (like Edge functions), you'll need to install
+the [`web-auth-library`](https://github.com/kriasoft/web-auth-library) package as a peer dependency:
+
+```bash npm2yarn
+npm install web-auth-library
+```
+
+Then, you'll need to add your service account credentials directly as a `GOOGLE_VERTEX_AI_WEB_CREDENTIALS` environment variable:
+
+```
+GOOGLE_VERTEX_AI_WEB_CREDENTIALS={"type":"service_account","project_id":"YOUR_PROJECT-12345",...}
+```
+
+You can also pass your credentials directly in code like this:
+
+
+
+```bash npm2yarn
+npm install @langchain/community
+```
+
+```typescript
+import { GoogleVertexAI } from "@langchain/community/llms/googlevertexai";
+
+const model = new GoogleVertexAI({
+ authOptions: {
+ credentials: {"type":"service_account","project_id":"YOUR_PROJECT-12345",...},
+ },
+});
+```
+
+## Usage
+
+Several models are available and can be specified by the `model` attribute
+in the constructor. These include:
+
+- text-bison (default)
+- text-bison-32k
+- code-gecko
+- code-bison
+
+import GoogleVertexAIExample from "@examples/llms/googlevertexai_legacy.ts";
+
+{GoogleVertexAIExample}
+
+Google also has separate models for their "Codey" code generation models.
+
+The "code-gecko" model is useful for code completion:
+
+import GoogleVertexAICodeGeckoExample from "@examples/llms/googlevertexai-code-gecko_legacy.ts";
+
+{GoogleVertexAICodeGeckoExample}
+
+While the "code-bison" model is better at larger code generation based on
+a text prompt:
+
+import GoogleVertexAICodeBisonExample from "@examples/llms/googlevertexai-code-bison_legacy.ts";
+
+{GoogleVertexAICodeBisonExample}
+
+### Streaming
+
+Streaming in multiple chunks is supported for faster responses:
+
+import GoogleVertexAIStreaming from "@examples/llms/googlevertexai-streaming_legacy.ts";
+
+{GoogleVertexAIStreaming}
diff --git a/docs/core_docs/docs/integrations/llms/google_vertex_ai.mdx b/docs/core_docs/docs/integrations/llms/google_vertex_ai.mdx
index d49c1547a2bf..c4aff0e067f9 100644
--- a/docs/core_docs/docs/integrations/llms/google_vertex_ai.mdx
+++ b/docs/core_docs/docs/integrations/llms/google_vertex_ai.mdx
@@ -7,7 +7,15 @@ you're running in a Node.js environment or a web environment.
### Node.js
-To call Vertex AI models in Node, you'll need to install [Google's official auth client](https://www.npmjs.com/package/google-auth-library) as a peer dependency.
+To call Vertex AI models in Node, you'll need to install the `@langchain/google-vertexai` package:
+
+import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx";
+
+
+
+```bash npm2yarn
+npm install @langchain/google-vertexai
+```
You should make sure the Vertex AI API is
enabled for the relevant project and that you've authenticated to
@@ -20,18 +28,16 @@ Google Cloud using one of these methods:
- You have downloaded the credentials for a service account that is permitted
to the project and set the `GOOGLE_APPLICATION_CREDENTIALS` environment
variable to the path of this file.
-
-```bash npm2yarn
-npm install google-auth-library
-```
+ **or**
+- You set the `GOOGLE_API_KEY` environment variable to the API key for the project.
### Web
To call Vertex AI models in web environments (like Edge functions), you'll need to install
-the [`web-auth-library`](https://github.com/kriasoft/web-auth-library) pacakge as a peer dependency:
+the `@langchain/google-vertexai-web` package:
```bash npm2yarn
-npm install web-auth-library
+npm install @langchain/google-vertexai-web
```
Then, you'll need to add your service account credentials directly as a `GOOGLE_VERTEX_AI_WEB_CREDENTIALS` environment variable:
@@ -42,18 +48,12 @@ GOOGLE_VERTEX_AI_WEB_CREDENTIALS={"type":"service_account","project_id":"YOUR_PR
You can also pass your credentials directly in code like this:
-import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx";
-
-
-
-```bash npm2yarn
-npm install @langchain/community
-```
-
```typescript
-import { GoogleVertexAI } from "@langchain/community/llms/googlevertexai";
+import { VertexAI } from "@langchain/google-vertexai";
+// Or uncomment this line if you're using the web version:
+// import { VertexAI } from "@langchain/google-vertexai-web";
-const model = new GoogleVertexAI({
+const model = new VertexAI({
authOptions: {
credentials: {"type":"service_account","project_id":"YOUR_PROJECT-12345",...},
},
@@ -62,38 +62,17 @@ const model = new GoogleVertexAI({
## Usage
-Several models are available and can be specified by the `model` attribute
-in the constructor. These include:
-
-- text-bison (default)
-- text-bison-32k
-- code-gecko
-- code-bison
+The entire family of `gemini` models is available by specifying the `modelName` parameter.
import CodeBlock from "@theme/CodeBlock";
-import GoogleVertexAIExample from "@examples/llms/googlevertexai.ts";
-
-{GoogleVertexAIExample}
-
-Google also has separate models for their "Codey" code generation models.
-
-The "code-gecko" model is useful for code completion:
-
-import GoogleVertexAICodeGeckoExample from "@examples/llms/googlevertexai-code-gecko.ts";
-
-{GoogleVertexAICodeGeckoExample}
-
-While the "code-bison" model is better at larger code generation based on
-a text prompt:
-
-import GoogleVertexAICodeBisonExample from "@examples/llms/googlevertexai-code-bison.ts";
+import VertexAILLMExample from "@examples/llms/googlevertexai.ts";
-{GoogleVertexAICodeBisonExample}
+{VertexAILLMExample}
### Streaming
Streaming in multiple chunks is supported for faster responses:
-import GoogleVertexAIStreaming from "@examples/llms/googlevertexai-streaming.ts";
+import VertexAILLMStreaming from "@examples/llms/googlevertexai-streaming.ts";
-{GoogleVertexAIStreaming}
+{VertexAILLMStreaming}
diff --git a/docs/core_docs/docs/integrations/platforms/google.mdx b/docs/core_docs/docs/integrations/platforms/google.mdx
index c6c577a83ca9..39bd077e2ead 100644
--- a/docs/core_docs/docs/integrations/platforms/google.mdx
+++ b/docs/core_docs/docs/integrations/platforms/google.mdx
@@ -8,12 +8,17 @@ Functionality related to [Google Cloud Platform](https://cloud.google.com/)
## Chat models
-### ChatGoogleGenerativeAI
+### Gemini Models
-Access Gemini models such as `gemini-pro` and `gemini-pro-vision` through the [`ChatGoogleGenerativeAI`](/docs/integrations/chat/google_generativeai) class.
+Access Gemini models such as `gemini-pro` and `gemini-pro-vision` through the [`ChatGoogleGenerativeAI`](/docs/integrations/chat/google_generativeai),
+or if using VertexAI, via the [`ChatVertexAI`](/docs/integrations/chat/google_vertex_ai) class.
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx";
+
+
```bash npm2yarn
@@ -69,34 +74,91 @@ const input2 = [
const res = await visionModel.invoke(input2);
```
-The value of image_url must be a base64 encoded image (e.g., data:image/png;base64,abcd124).
+:::tip
+Click [here](/docs/integrations/chat/google_generativeai) for the `@langchain/google-genai` specific integration docs
+:::
-### Vertex AI
+
-Access PaLM chat models like `chat-bison` and `codechat-bison` via Google Cloud.
+
+
-```typescript
-import { ChatGoogleVertexAI } from "langchain/chat_models/googlevertexai";
+```bash npm2yarn
+npm install @langchain/google-vertexai
```
-## LLMs
+Then, you'll need to add your service account credentials, either directly as a `GOOGLE_VERTEX_AI_WEB_CREDENTIALS` environment variable:
-### Vertex AI
+```
+GOOGLE_VERTEX_AI_WEB_CREDENTIALS={"type":"service_account","project_id":"YOUR_PROJECT-12345",...}
+```
-Access PaLM LLMs like `text-bison` and `code-bison` via Google Cloud.
+or as a file path:
-```typescript
-import { GoogleVertexAI } from "langchain/llms/googlevertexai";
+```
+GOOGLE_VERTEX_AI_WEB_CREDENTIALS_FILE=/path/to/your/credentials.json
```
-### Model Garden
+```typescript
+import { ChatVertexAI } from "@langchain/google-vertexai";
+// Or, if using the web entrypoint:
+// import { ChatVertexAI } from "@langchain/google-vertexai-web";
-Access PaLM and hundreds of OSS models via Vertex AI Model Garden.
+const model = new ChatVertexAI({
+ modelName: "gemini-1.0-pro",
+ maxOutputTokens: 2048,
+});
+
+// Batch and stream are also supported
+const res = await model.invoke([
+ [
+ "human",
+ "What would be a good company name for a company that makes colorful socks?",
+ ],
+]);
+```
+
+Gemini vision models support image inputs when providing a single human message. For example:
```typescript
-import { GoogleVertexAI } from "langchain/llms/googlevertexai";
+const visionModel = new ChatVertexAI({
+ modelName: "gemini-pro-vision",
+ maxOutputTokens: 2048,
+});
+const image = fs.readFileSync("./hotdog.png").toString("base64");
+const input2 = [
+ new HumanMessage({
+ content: [
+ {
+ type: "text",
+ text: "Describe the following image.",
+ },
+ {
+ type: "image_url",
+ image_url: `data:image/png;base64,${image}`,
+ },
+ ],
+ }),
+];
+
+const res = await visionModel.invoke(input2);
```
+:::tip
+Click [here](/docs/integrations/chat/google_vertex_ai) for the `@langchain/google-vertexai` specific integration docs
+:::
+
+
+
+
+The value of `image_url` must be a base64 encoded image (e.g., `data:image/png;base64,abcd124`).
+
+### Vertex AI (Legacy)
+
+:::tip
+See the legacy Google PaLM and VertexAI documentation [here](/docs/integrations/chat/google_palm) for chat, and [here](/docs/integrations/llms/google_palm) for LLMs.
+:::
+
## Vector Store
### Vertex AI Vector Search
diff --git a/examples/package.json b/examples/package.json
index def41784ab8b..5d41b3d00135 100644
--- a/examples/package.json
+++ b/examples/package.json
@@ -37,7 +37,10 @@
"@langchain/community": "workspace:*",
"@langchain/core": "workspace:*",
"@langchain/exa": "workspace:*",
+ "@langchain/google-common": "workspace:*",
"@langchain/google-genai": "workspace:*",
+ "@langchain/google-vertexai": "workspace:*",
+ "@langchain/google-vertexai-web": "workspace:*",
"@langchain/groq": "workspace:*",
"@langchain/mistralai": "workspace:*",
"@langchain/mongodb": "workspace:*",
@@ -49,7 +52,7 @@
"@langchain/weaviate": "workspace:*",
"@langchain/yandex": "workspace:*",
"@opensearch-project/opensearch": "^2.2.0",
- "@pinecone-database/pinecone": "^2.0.0",
+ "@pinecone-database/pinecone": "^2.2.0",
"@planetscale/database": "^1.8.0",
"@prisma/client": "^4.11.0",
"@raycast/api": "^1.55.2",
diff --git a/examples/src/llms/googlevertexai-code-bison.ts b/examples/src/llms/googlevertexai-code-bison_legacy.ts
similarity index 100%
rename from examples/src/llms/googlevertexai-code-bison.ts
rename to examples/src/llms/googlevertexai-code-bison_legacy.ts
diff --git a/examples/src/llms/googlevertexai-code-gecko.ts b/examples/src/llms/googlevertexai-code-gecko_legacy.ts
similarity index 100%
rename from examples/src/llms/googlevertexai-code-gecko.ts
rename to examples/src/llms/googlevertexai-code-gecko_legacy.ts
diff --git a/examples/src/llms/googlevertexai-streaming.ts b/examples/src/llms/googlevertexai-streaming.ts
index 9f8aae39e92e..7fa545438eaa 100644
--- a/examples/src/llms/googlevertexai-streaming.ts
+++ b/examples/src/llms/googlevertexai-streaming.ts
@@ -1,6 +1,8 @@
-import { GoogleVertexAI } from "@langchain/community/llms/googlevertexai";
+import { VertexAI } from "@langchain/google-vertexai";
+// Or, if using the web entrypoint:
+// import { VertexAI } from "@langchain/google-vertexai-web";
-const model = new GoogleVertexAI({
+const model = new VertexAI({
temperature: 0.7,
});
const stream = await model.stream(
@@ -12,33 +14,31 @@ for await (const chunk of stream) {
}
/*
- ---------
- Chunk:
- ---------
- 1. Toe-tally Awesome Socks
- 2. The Sock Drawer
- 3. Happy Feet
- 4.
+---------
+Chunk:
+---------
+ * Kaleidoscope Toes
+* Huephoria
+* Soleful Spectrum
+*
- ---------
- Chunk:
- ---------
- Sock It to Me
- 5. Crazy Color Socks
- 6. Wild and Wacky Socks
- 7. Fu
+---------
+Chunk:
+---------
+ Colorwave Hosiery
+* Chromatic Threads
+* Rainbow Rhapsody
+* Vibrant Soles
+* Toe-tally Colorful
+* Socktacular Hues
+*
- ---------
- Chunk:
- ---------
- nky Feet
- 8. Mismatched Socks
- 9. Rainbow Socks
- 10. Sole Mates
-
- ---------
- Chunk:
- ---------
-
+---------
+Chunk:
+---------
+ Threads of Joy
+---------
+Chunk:
+---------
*/
diff --git a/examples/src/llms/googlevertexai-streaming_legacy.ts b/examples/src/llms/googlevertexai-streaming_legacy.ts
new file mode 100644
index 000000000000..9f8aae39e92e
--- /dev/null
+++ b/examples/src/llms/googlevertexai-streaming_legacy.ts
@@ -0,0 +1,44 @@
+import { GoogleVertexAI } from "@langchain/community/llms/googlevertexai";
+
+const model = new GoogleVertexAI({
+ temperature: 0.7,
+});
+const stream = await model.stream(
+ "What would be a good company name for a company that makes colorful socks?"
+);
+
+for await (const chunk of stream) {
+ console.log("\n---------\nChunk:\n---------\n", chunk);
+}
+
+/*
+ ---------
+ Chunk:
+ ---------
+ 1. Toe-tally Awesome Socks
+ 2. The Sock Drawer
+ 3. Happy Feet
+ 4.
+
+ ---------
+ Chunk:
+ ---------
+ Sock It to Me
+ 5. Crazy Color Socks
+ 6. Wild and Wacky Socks
+ 7. Fu
+
+ ---------
+ Chunk:
+ ---------
+ nky Feet
+ 8. Mismatched Socks
+ 9. Rainbow Socks
+ 10. Sole Mates
+
+ ---------
+ Chunk:
+ ---------
+
+
+*/
diff --git a/examples/src/llms/googlevertexai.ts b/examples/src/llms/googlevertexai.ts
index 1e9fd256d71c..ea8b10758293 100644
--- a/examples/src/llms/googlevertexai.ts
+++ b/examples/src/llms/googlevertexai.ts
@@ -1,23 +1,25 @@
-import { GoogleVertexAI } from "@langchain/community/llms/googlevertexai";
+import { VertexAI } from "@langchain/google-vertexai";
// Or, if using the web entrypoint:
-// import { GoogleVertexAI } from "@langchain/community/llms/googlevertexai/web";
+// import { VertexAI } from "@langchain/google-vertexai-web";
-/*
- * Before running this, you should make sure you have created a
- * Google Cloud Project that is permitted to the Vertex AI API.
- *
- * You will also need permission to access this project / API.
- * Typically, this is done in one of three ways:
- * - You are logged into an account permitted to that project.
- * - You are running this on a machine using a service account permitted to
- * the project.
- * - The `GOOGLE_APPLICATION_CREDENTIALS` environment variable is set to the
- * path of a credentials file for a service account permitted to the project.
- */
-const model = new GoogleVertexAI({
+const model = new VertexAI({
temperature: 0.7,
});
const res = await model.invoke(
"What would be a good company name for a company that makes colorful socks?"
);
console.log({ res });
+/*
+{
+ res: '* Hue Hues\n' +
+ '* Sock Spectrum\n' +
+ '* Kaleidosocks\n' +
+ '* Threads of Joy\n' +
+ '* Vibrant Threads\n' +
+ '* Rainbow Soles\n' +
+ '* Colorful Canvases\n' +
+ '* Prismatic Pedals\n' +
+ '* Sock Canvas\n' +
+ '* Color Collective'
+}
+ */
diff --git a/examples/src/llms/googlevertexai_legacy.ts b/examples/src/llms/googlevertexai_legacy.ts
new file mode 100644
index 000000000000..1e9fd256d71c
--- /dev/null
+++ b/examples/src/llms/googlevertexai_legacy.ts
@@ -0,0 +1,23 @@
+import { GoogleVertexAI } from "@langchain/community/llms/googlevertexai";
+// Or, if using the web entrypoint:
+// import { GoogleVertexAI } from "@langchain/community/llms/googlevertexai/web";
+
+/*
+ * Before running this, you should make sure you have created a
+ * Google Cloud Project that is permitted to the Vertex AI API.
+ *
+ * You will also need permission to access this project / API.
+ * Typically, this is done in one of three ways:
+ * - You are logged into an account permitted to that project.
+ * - You are running this on a machine using a service account permitted to
+ * the project.
+ * - The `GOOGLE_APPLICATION_CREDENTIALS` environment variable is set to the
+ * path of a credentials file for a service account permitted to the project.
+ */
+const model = new GoogleVertexAI({
+ temperature: 0.7,
+});
+const res = await model.invoke(
+ "What would be a good company name for a company that makes colorful socks?"
+);
+console.log({ res });
diff --git a/examples/src/models/chat/integration_googlevertexai-examples.ts b/examples/src/models/chat/integration_googlevertexai-examples_legacy.ts
similarity index 100%
rename from examples/src/models/chat/integration_googlevertexai-examples.ts
rename to examples/src/models/chat/integration_googlevertexai-examples_legacy.ts
diff --git a/examples/src/models/chat/integration_googlevertexai-streaming.ts b/examples/src/models/chat/integration_googlevertexai-streaming.ts
index cf071968a735..1574d5236c4e 100644
--- a/examples/src/models/chat/integration_googlevertexai-streaming.ts
+++ b/examples/src/models/chat/integration_googlevertexai-streaming.ts
@@ -1,8 +1,8 @@
-import { ChatGoogleVertexAI } from "@langchain/community/chat_models/googlevertexai";
+import { ChatVertexAI } from "@langchain/google-vertexai";
// Or, if using the web entrypoint:
-// import { ChatGoogleVertexAI } from "@langchain/community/chat_models/googlevertexai/web";
+// import { ChatVertexAI } from "@langchain/google-vertexai-web";
-const model = new ChatGoogleVertexAI({
+const model = new ChatVertexAI({
temperature: 0.7,
});
const stream = await model.stream([
@@ -16,16 +16,18 @@ for await (const chunk of stream) {
/*
AIMessageChunk {
- content: ' Ahoy there, matey! My favorite food be fish, cooked any way ye ',
- additional_kwargs: {}
+ content: [{ type: 'text', text: 'Ahoy there, matey! Me favorite grub be fish and chips, with' }],
+ additional_kwargs: {},
+ response_metadata: { data: { candidates: [Array], promptFeedback: [Object] } }
}
AIMessageChunk {
- content: 'like!',
- additional_kwargs: {}
+ content: [{ type: 'text', text: " a hearty pint o' grog to wash it down. What be yer fancy, landlubber?" }],
+ additional_kwargs: {},
+ response_metadata: { data: { candidates: [Array] } }
}
AIMessageChunk {
content: '',
- name: undefined,
- additional_kwargs: {}
+ additional_kwargs: {},
+ response_metadata: { finishReason: 'stop' }
}
*/
diff --git a/examples/src/models/chat/integration_googlevertexai-streaming_legacy.ts b/examples/src/models/chat/integration_googlevertexai-streaming_legacy.ts
new file mode 100644
index 000000000000..cf071968a735
--- /dev/null
+++ b/examples/src/models/chat/integration_googlevertexai-streaming_legacy.ts
@@ -0,0 +1,31 @@
+import { ChatGoogleVertexAI } from "@langchain/community/chat_models/googlevertexai";
+// Or, if using the web entrypoint:
+// import { ChatGoogleVertexAI } from "@langchain/community/chat_models/googlevertexai/web";
+
+const model = new ChatGoogleVertexAI({
+ temperature: 0.7,
+});
+const stream = await model.stream([
+ ["system", "You are a funny assistant that answers in pirate language."],
+ ["human", "What is your favorite food?"],
+]);
+
+for await (const chunk of stream) {
+ console.log(chunk);
+}
+
+/*
+AIMessageChunk {
+ content: ' Ahoy there, matey! My favorite food be fish, cooked any way ye ',
+ additional_kwargs: {}
+}
+AIMessageChunk {
+ content: 'like!',
+ additional_kwargs: {}
+}
+AIMessageChunk {
+ content: '',
+ name: undefined,
+ additional_kwargs: {}
+}
+*/
diff --git a/examples/src/models/chat/integration_googlevertexai-tools.ts b/examples/src/models/chat/integration_googlevertexai-tools.ts
new file mode 100644
index 000000000000..e95e77c32c5c
--- /dev/null
+++ b/examples/src/models/chat/integration_googlevertexai-tools.ts
@@ -0,0 +1,48 @@
+import { ChatVertexAI } from "@langchain/google-vertexai";
+import { type GeminiTool } from "@langchain/google-vertexai/types";
+import { zodToGeminiParameters } from "@langchain/google-vertexai/utils";
+import { z } from "zod";
+// Or, if using the web entrypoint:
+// import { ChatVertexAI } from "@langchain/google-vertexai-web";
+
+const calculatorSchema = z.object({
+ operation: z
+ .enum(["add", "subtract", "multiply", "divide"])
+ .describe("The type of operation to execute"),
+ number1: z.number().describe("The first number to operate on."),
+ number2: z.number().describe("The second number to operate on."),
+});
+
+const geminiCalculatorTool: GeminiTool = {
+ functionDeclarations: [
+ {
+ name: "calculator",
+ description: "A simple calculator tool",
+ parameters: zodToGeminiParameters(calculatorSchema),
+ },
+ ],
+};
+
+const model = new ChatVertexAI({
+ temperature: 0.7,
+ modelName: "gemini-1.0-pro",
+}).bind({
+ tools: [geminiCalculatorTool],
+});
+
+const response = await model.invoke("What is 1628253239 times 81623836?");
+console.log(JSON.stringify(response.additional_kwargs, null, 2));
+/*
+{
+ "tool_calls": [
+ {
+ "id": "calculator",
+ "type": "function",
+ "function": {
+ "name": "calculator",
+ "arguments": "{\"number2\":81623836,\"number1\":1628253239,\"operation\":\"multiply\"}"
+ }
+ }
+ ],
+}
+ */
diff --git a/examples/src/models/chat/integration_googlevertexai-wsa.ts b/examples/src/models/chat/integration_googlevertexai-wsa.ts
new file mode 100644
index 000000000000..c7f566220375
--- /dev/null
+++ b/examples/src/models/chat/integration_googlevertexai-wsa.ts
@@ -0,0 +1,23 @@
+import { ChatVertexAI } from "@langchain/google-vertexai";
+import { z } from "zod";
+// Or, if using the web entrypoint:
+// import { ChatVertexAI } from "@langchain/google-vertexai-web";
+
+const calculatorSchema = z.object({
+ operation: z
+ .enum(["add", "subtract", "multiply", "divide"])
+ .describe("The type of operation to execute"),
+ number1: z.number().describe("The first number to operate on."),
+ number2: z.number().describe("The second number to operate on."),
+});
+
+const model = new ChatVertexAI({
+ temperature: 0.7,
+ modelName: "gemini-1.0-pro",
+}).withStructuredOutput(calculatorSchema);
+
+const response = await model.invoke("What is 1628253239 times 81623836?");
+console.log(response);
+/*
+{ operation: 'multiply', number1: 1628253239, number2: 81623836 }
+ */
diff --git a/examples/src/models/chat/integration_googlevertexai.ts b/examples/src/models/chat/integration_googlevertexai.ts
index c6dd65e4e214..37d174f46892 100644
--- a/examples/src/models/chat/integration_googlevertexai.ts
+++ b/examples/src/models/chat/integration_googlevertexai.ts
@@ -1,7 +1,18 @@
-import { ChatGoogleVertexAI } from "@langchain/community/chat_models/googlevertexai";
+import { ChatVertexAI } from "@langchain/google-vertexai";
// Or, if using the web entrypoint:
-// import { ChatGoogleVertexAI } from "@langchain/community/chat_models/googlevertexai/web";
+// import { ChatVertexAI } from "@langchain/google-vertexai-web";
-const model = new ChatGoogleVertexAI({
+const model = new ChatVertexAI({
temperature: 0.7,
+ modelName: "gemini-1.0-pro",
});
+
+const response = await model.invoke("Why is the ocean blue?");
+console.log(response);
+/*
+AIMessageChunk {
+ content: [{ type: 'text', text: 'The ocean appears blue due to a phenomenon called Rayleigh scattering. This occurs when sunlight' }],
+ additional_kwargs: {},
+ response_metadata: {}
+}
+ */
diff --git a/examples/src/models/chat/integration_googlevertexai_legacy.ts b/examples/src/models/chat/integration_googlevertexai_legacy.ts
new file mode 100644
index 000000000000..c6dd65e4e214
--- /dev/null
+++ b/examples/src/models/chat/integration_googlevertexai_legacy.ts
@@ -0,0 +1,7 @@
+import { ChatGoogleVertexAI } from "@langchain/community/chat_models/googlevertexai";
+// Or, if using the web entrypoint:
+// import { ChatGoogleVertexAI } from "@langchain/community/chat_models/googlevertexai/web";
+
+const model = new ChatGoogleVertexAI({
+ temperature: 0.7,
+});
diff --git a/langchain-core/package.json b/langchain-core/package.json
index aed777bdde61..57314432f4fd 100644
--- a/langchain-core/package.json
+++ b/langchain-core/package.json
@@ -1,6 +1,6 @@
{
"name": "@langchain/core",
- "version": "0.1.51",
+ "version": "0.1.52",
"description": "Core LangChain.js abstractions and schemas",
"type": "module",
"engines": {
diff --git a/langchain-core/src/prompts/structured.ts b/langchain-core/src/prompts/structured.ts
index 855e16a71feb..cfb8b30c01ea 100644
--- a/langchain-core/src/prompts/structured.ts
+++ b/langchain-core/src/prompts/structured.ts
@@ -1,5 +1,9 @@
import { ChatPromptValueInterface } from "../prompt_values.js";
-import { RunnableLike, Runnable } from "../runnables/base.js";
+import {
+ RunnableLike,
+ Runnable,
+ type RunnableBinding,
+} from "../runnables/base.js";
import { RunnableConfig } from "../runnables/config.js";
import { InputValues } from "../utils/types.js";
import {
@@ -8,6 +12,30 @@ import {
ChatPromptTemplateInput,
} from "./chat.js";
+function isWithStructuredOutput(
+ x: unknown
+ // eslint-disable-next-line @typescript-eslint/ban-types
+): x is {
+ withStructuredOutput: (...arg: unknown[]) => Runnable;
+} {
+ return (
+ typeof x === "object" &&
+ x != null &&
+ "withStructuredOutput" in x &&
+ typeof x.withStructuredOutput === "function"
+ );
+}
+
+function isRunnableBinding(x: unknown): x is RunnableBinding {
+ return (
+ typeof x === "object" &&
+ x != null &&
+ "lc_id" in x &&
+ Array.isArray(x.lc_id) &&
+ x.lc_id.join("/") === "langchain_core/runnables/RunnableBinding"
+ );
+}
+
/**
* Interface for the input of a ChatPromptTemplate.
*/
@@ -33,6 +61,8 @@ export class StructuredPrompt<
// eslint-disable-next-line @typescript-eslint/no-explicit-any
schema: Record;
+ lc_namespace = ["langchain_core", "prompts", "structured"];
+
get lc_aliases(): Record {
return {
...super.lc_aliases,
@@ -48,17 +78,25 @@ export class StructuredPrompt<
pipe(
coerceable: RunnableLike
): Runnable, RunnableConfig> {
+ if (isWithStructuredOutput(coerceable)) {
+ return super.pipe(coerceable.withStructuredOutput(this.schema));
+ }
+
if (
- typeof coerceable === "object" &&
- "withStructuredOutput" in coerceable &&
- typeof coerceable.withStructuredOutput === "function"
+ isRunnableBinding(coerceable) &&
+ isWithStructuredOutput(coerceable.bound)
) {
- return super.pipe(coerceable.withStructuredOutput(this.schema));
- } else {
- throw new Error(
- `Structured prompts need to be piped to a language model that supports the "withStructuredOutput()" method.`
+ return super.pipe(
+ coerceable.bound
+ .withStructuredOutput(this.schema)
+ .bind(coerceable.kwargs ?? {})
+ .withConfig(coerceable.config)
);
}
+
+ throw new Error(
+ `Structured prompts need to be piped to a language model that supports the "withStructuredOutput()" method.`
+ );
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
diff --git a/langchain-core/src/prompts/tests/structured.test.ts b/langchain-core/src/prompts/tests/structured.test.ts
index 227ae2c77d11..4b5bca4559f6 100644
--- a/langchain-core/src/prompts/tests/structured.test.ts
+++ b/langchain-core/src/prompts/tests/structured.test.ts
@@ -90,10 +90,16 @@ test("Test format", async () => {
const revived: StructuredPrompt = await load(JSON.stringify(prompt));
expect(JSON.stringify(prompt)).toEqual(
- '{"lc":1,"type":"constructor","id":["langchain_core","prompts","chat","StructuredPrompt"],"kwargs":{"schema_":{"name":"yo","description":"a structured output","parameters":{"name":{"type":"string"},"value":{"type":"integer"}}},"input_variables":[],"messages":[{"lc":1,"type":"constructor","id":["langchain_core","prompts","chat","HumanMessagePromptTemplate"],"kwargs":{"prompt":{"lc":1,"type":"constructor","id":["langchain_core","prompts","prompt","PromptTemplate"],"kwargs":{"input_variables":[],"template_format":"f-string","template":"I\'m very structured, how about you?"}}}}]}}'
+ '{"lc":1,"type":"constructor","id":["langchain_core","prompts","structured","StructuredPrompt"],"kwargs":{"schema_":{"name":"yo","description":"a structured output","parameters":{"name":{"type":"string"},"value":{"type":"integer"}}},"input_variables":[],"messages":[{"lc":1,"type":"constructor","id":["langchain_core","prompts","chat","HumanMessagePromptTemplate"],"kwargs":{"prompt":{"lc":1,"type":"constructor","id":["langchain_core","prompts","prompt","PromptTemplate"],"kwargs":{"input_variables":[],"template_format":"f-string","template":"I\'m very structured, how about you?"}}}}]}}'
);
const revivedChain = revived.pipe(model);
await expect(revivedChain.invoke({})).resolves.toEqual(schema);
+
+ const boundModel = model.bind({ runName: "boundModel" });
+
+ const chainWithBoundModel = prompt.pipe(boundModel);
+
+ await expect(chainWithBoundModel.invoke({})).resolves.toEqual(schema);
});
diff --git a/langchain-core/src/runnables/base.ts b/langchain-core/src/runnables/base.ts
index a692426eed1d..69ef35434abe 100644
--- a/langchain-core/src/runnables/base.ts
+++ b/langchain-core/src/runnables/base.ts
@@ -930,7 +930,7 @@ export class RunnableBinding<
config: RunnableConfig;
- protected kwargs?: Partial;
+ kwargs?: Partial;
configFactories?: Array<
(config: RunnableConfig) => RunnableConfig | Promise
diff --git a/libs/langchain-community/package.json b/libs/langchain-community/package.json
index c3c7a16f20ad..b91cd73219af 100644
--- a/libs/langchain-community/package.json
+++ b/libs/langchain-community/package.json
@@ -143,7 +143,7 @@
"google-auth-library": "^8.9.0",
"googleapis": "^126.0.1",
"graphql": "^16.6.0",
- "hnswlib-node": "^1.4.2",
+ "hnswlib-node": "^3.0.0",
"html-to-text": "^9.0.5",
"interface-datastore": "^8.2.11",
"ioredis": "^5.3.2",
diff --git a/libs/langchain-community/src/caches/tests/ioredis.int.test.ts b/libs/langchain-community/src/caches/tests/ioredis.int.test.ts
index 5e790aa668aa..cb4561e0a0c3 100644
--- a/libs/langchain-community/src/caches/tests/ioredis.int.test.ts
+++ b/libs/langchain-community/src/caches/tests/ioredis.int.test.ts
@@ -36,6 +36,6 @@ describe("Test RedisCache", () => {
const response1 = await model.invoke("What is something random?");
const response2 = await model.invoke("What is something random?");
expect(response1).not.toBeUndefined();
- expect(response1).toEqual(response2);
+ expect(JSON.stringify(response1)).toEqual(JSON.stringify(response2));
});
});
diff --git a/libs/langchain-community/src/embeddings/tests/premai.int.test.ts b/libs/langchain-community/src/embeddings/tests/premai.int.test.ts
index 1c8e2f34a127..cf241cdef29f 100644
--- a/libs/langchain-community/src/embeddings/tests/premai.int.test.ts
+++ b/libs/langchain-community/src/embeddings/tests/premai.int.test.ts
@@ -2,7 +2,7 @@ import { describe, test, expect } from "@jest/globals";
import { PremEmbeddings } from "../premai.js";
describe("EmbeddingsPrem", () => {
- test("Test embedQuery", async () => {
+ test.skip("Test embedQuery", async () => {
const client = new PremEmbeddings({ model: "@cf/baai/bge-small-en-v1.5" });
const res = await client.embedQuery("Hello world");
// console.log(res);
diff --git a/libs/langchain-community/src/storage/tests/cassandra.int.test.ts b/libs/langchain-community/src/storage/tests/cassandra.int.test.ts
index d3c4c439aeee..70384b0a0908 100644
--- a/libs/langchain-community/src/storage/tests/cassandra.int.test.ts
+++ b/libs/langchain-community/src/storage/tests/cassandra.int.test.ts
@@ -1,102 +1,106 @@
-/* eslint-disable no-process-env */
-import { test, expect, describe } from "@jest/globals";
-import { CassandraClientFactory } from "../../utils/cassandra.js";
-import { CassandraKVStore } from "../cassandra.js";
+// /* eslint-disable no-process-env */
+// Hangs when run with other tests, uncomment for development
-const cassandraConfig = {
- serviceProviderArgs: {
- astra: {
- token: process.env.ASTRA_TOKEN as string,
- endpoint: process.env.ASTRA_DB_ENDPOINT as string,
- },
- },
- keyspace: "test",
- table: "test_kv",
-};
+// import { test, expect, describe } from "@jest/globals";
+// import { CassandraClientFactory } from "../../utils/cassandra.js";
+// import { CassandraKVStore } from "../cassandra.js";
-let client;
+test("Empty test to prevent runner from complaining", async () => {});
-// For internal testing:
-// 1. switch "describe.skip(" to "describe("
-// 2. Export ASTRA_DB_ENDPOINT and ASTRA_TOKEN; "test" keyspace should exist
-// 3. cd langchainjs/libs/langchain-community
-// 4. yarn test:single src/storage/tests/cassandra.int.test.ts
-// Once manual testing is complete, re-instate the ".skip"
-describe.skip("CassandraKVStore", () => {
- let store: CassandraKVStore;
+// const cassandraConfig = {
+// serviceProviderArgs: {
+// astra: {
+// token: process.env.ASTRA_TOKEN as string,
+// endpoint: process.env.ASTRA_DB_ENDPOINT as string,
+// },
+// },
+// keyspace: "test",
+// table: "test_kv",
+// };
- beforeAll(async () => {
- client = await CassandraClientFactory.getClient(cassandraConfig);
- await client.execute("DROP TABLE IF EXISTS test.test_kv;");
- store = new CassandraKVStore(cassandraConfig);
- });
+// let client;
- test("CassandraKVStore can perform all operations", async () => {
- // Using TextEncoder to simulate encoding of string data to binary format
- const encoder = new TextEncoder();
- const decoder = new TextDecoder();
- const value1 = encoder.encode(new Date().toISOString());
- const value2 = encoder.encode(
- new Date().toISOString() + new Date().toISOString()
- );
+// // For internal testing:
+// // 1. switch "describe.skip(" to "describe("
+// // 2. Export ASTRA_DB_ENDPOINT and ASTRA_TOKEN; "test" keyspace should exist
+// // 3. cd langchainjs/libs/langchain-community
+// // 4. yarn test:single src/storage/tests/cassandra.int.test.ts
+// // Once manual testing is complete, re-instate the ".skip"
+// describe.skip("CassandraKVStore", () => {
+// let store: CassandraKVStore;
- // mset
- await store.mset([
- ["key1", value1],
- ["key2", value2],
- ]);
+// beforeAll(async () => {
+// client = await CassandraClientFactory.getClient(cassandraConfig);
+// await client.execute("DROP TABLE IF EXISTS test.test_kv;");
+// store = new CassandraKVStore(cassandraConfig);
+// });
- // mget
- const retrievedValues = await store.mget(["key1", "key2"]);
- expect(retrievedValues.map((v) => decoder.decode(v))).toEqual([
- decoder.decode(value1),
- decoder.decode(value2),
- ]);
+// test("CassandraKVStore can perform all operations", async () => {
+// // Using TextEncoder to simulate encoding of string data to binary format
+// const encoder = new TextEncoder();
+// const decoder = new TextDecoder();
+// const value1 = encoder.encode(new Date().toISOString());
+// const value2 = encoder.encode(
+// new Date().toISOString() + new Date().toISOString()
+// );
- // yieldKeys
- const keys = [];
- for await (const key of store.yieldKeys()) {
- keys.push(key);
- }
- expect(keys).toContain("key1");
- expect(keys).toContain("key2");
+// // mset
+// await store.mset([
+// ["key1", value1],
+// ["key2", value2],
+// ]);
- // mdelete
- await store.mdelete(["key1", "key2"]);
- const retrievedValuesAfterDelete = await store.mget(["key1", "key2"]);
- expect(retrievedValuesAfterDelete).toEqual([undefined, undefined]);
- });
+// // mget
+// const retrievedValues = await store.mget(["key1", "key2"]);
+// expect(retrievedValues.map((v) => decoder.decode(v))).toEqual([
+// decoder.decode(value1),
+// decoder.decode(value2),
+// ]);
- describe.skip("CassandraKVStore key prefix retrieval", () => {
- beforeAll(async () => {
- client = await CassandraClientFactory.getClient(cassandraConfig);
- await client.execute("DROP TABLE IF EXISTS test.test_kv;");
- store = new CassandraKVStore(cassandraConfig);
+// // yieldKeys
+// const keys = [];
+// for await (const key of store.yieldKeys()) {
+// keys.push(key);
+// }
+// expect(keys).toContain("key1");
+// expect(keys).toContain("key2");
- await store.mset([
- ["a/b/c", new TextEncoder().encode("value abc")],
- ["a/b/d", new TextEncoder().encode("value abd")],
- ["a/e/f", new TextEncoder().encode("value aef")],
- ]);
- });
+// // mdelete
+// await store.mdelete(["key1", "key2"]);
+// const retrievedValuesAfterDelete = await store.mget(["key1", "key2"]);
+// expect(retrievedValuesAfterDelete).toEqual([undefined, undefined]);
+// });
- test.each([
- ["a", ["a/b/c", "a/b/d", "a/e/f"]],
- ["a/", ["a/b/c", "a/b/d", "a/e/f"]],
- ["a/b", ["a/b/c", "a/b/d"]],
- ["a/b/", ["a/b/c", "a/b/d"]],
- ["a/e", ["a/e/f"]],
- ["a/e/", ["a/e/f"]],
- ["b", []],
- ])(
- "yieldKeys with prefix '%s' returns expected keys",
- async (prefix, expectedKeys) => {
- const retrievedKeys = [];
- for await (const key of store.yieldKeys(prefix)) {
- retrievedKeys.push(key);
- }
- expect(retrievedKeys.sort()).toEqual(expectedKeys.sort());
- }
- );
- });
-});
+// describe.skip("CassandraKVStore key prefix retrieval", () => {
+// beforeAll(async () => {
+// client = await CassandraClientFactory.getClient(cassandraConfig);
+// await client.execute("DROP TABLE IF EXISTS test.test_kv;");
+// store = new CassandraKVStore(cassandraConfig);
+
+// await store.mset([
+// ["a/b/c", new TextEncoder().encode("value abc")],
+// ["a/b/d", new TextEncoder().encode("value abd")],
+// ["a/e/f", new TextEncoder().encode("value aef")],
+// ]);
+// });
+
+// test.each([
+// ["a", ["a/b/c", "a/b/d", "a/e/f"]],
+// ["a/", ["a/b/c", "a/b/d", "a/e/f"]],
+// ["a/b", ["a/b/c", "a/b/d"]],
+// ["a/b/", ["a/b/c", "a/b/d"]],
+// ["a/e", ["a/e/f"]],
+// ["a/e/", ["a/e/f"]],
+// ["b", []],
+// ])(
+// "yieldKeys with prefix '%s' returns expected keys",
+// async (prefix, expectedKeys) => {
+// const retrievedKeys = [];
+// for await (const key of store.yieldKeys(prefix)) {
+// retrievedKeys.push(key);
+// }
+// expect(retrievedKeys.sort()).toEqual(expectedKeys.sort());
+// }
+// );
+// });
+// });
diff --git a/libs/langchain-community/src/stores/tests/cassandra.int.test.ts b/libs/langchain-community/src/stores/tests/cassandra.int.test.ts
index f74c8a06003c..18453d113a87 100644
--- a/libs/langchain-community/src/stores/tests/cassandra.int.test.ts
+++ b/libs/langchain-community/src/stores/tests/cassandra.int.test.ts
@@ -1,109 +1,113 @@
-/* eslint-disable no-process-env */
-import { test, expect, describe } from "@jest/globals";
-import { AIMessage, HumanMessage } from "@langchain/core/messages";
-import { CassandraClientFactory } from "../../utils/cassandra.js";
-import { CassandraChatMessageHistory } from "../message/cassandra.js";
+// /* eslint-disable no-process-env */
+// Hangs when run with other tests, uncomment for development
-const cassandraConfig = {
- serviceProviderArgs: {
- astra: {
- token: process.env.ASTRA_TOKEN as string,
- endpoint: process.env.ASTRA_DB_ENDPOINT as string,
- },
- },
- keyspace: "test",
- table: "test_message_history",
-};
+// import { test, expect, describe } from "@jest/globals";
+// import { AIMessage, HumanMessage } from "@langchain/core/messages";
+// import { CassandraClientFactory } from "../../utils/cassandra.js";
+// import { CassandraChatMessageHistory } from "../message/cassandra.js";
-let client;
+test("Empty test to prevent runner from complaining", async () => {});
-// For internal testing:
-// 1. switch "describe.skip(" to "describe("
-// 2. Export OPENAI_API_KEY, ASTRA_DB_ENDPOINT, and ASTRA_TOKEN
-// 3. cd langchainjs/libs/langchain-community
-// 4. yarn test:single src/stores/tests/cassandra.int.test.ts
-// Once manual testing is complete, re-instate the ".skip"
-describe.skip("CassandraChatMessageHistory", () => {
- beforeAll(async () => {
- client = await CassandraClientFactory.getClient(cassandraConfig);
- await client.execute("DROP TABLE IF EXISTS test.test_message_history;");
- });
+// const cassandraConfig = {
+// serviceProviderArgs: {
+// astra: {
+// token: process.env.ASTRA_TOKEN as string,
+// endpoint: process.env.ASTRA_DB_ENDPOINT as string,
+// },
+// },
+// keyspace: "test",
+// table: "test_message_history",
+// };
- test("CassandraChatMessageHistory: empty history", async () => {
- const messageHistory = new CassandraChatMessageHistory({
- ...cassandraConfig,
- sessionId: "test_session_A123",
- });
- expect(await messageHistory.getMessages()).toEqual([]);
- });
+// let client;
- test("CassandraChatMessageHistory: add and get messages", async () => {
- const messageHistory = new CassandraChatMessageHistory({
- ...cassandraConfig,
- sessionId: "test_session_B123",
- });
+// // For internal testing:
+// // 1. switch "describe.skip(" to "describe("
+// // 2. Export OPENAI_API_KEY, ASTRA_DB_ENDPOINT, and ASTRA_TOKEN
+// // 3. cd langchainjs/libs/langchain-community
+// // 4. yarn test:single src/stores/tests/cassandra.int.test.ts
+// // Once manual testing is complete, re-instate the ".skip"
+// describe.skip("CassandraChatMessageHistory", () => {
+// beforeAll(async () => {
+// client = await CassandraClientFactory.getClient(cassandraConfig);
+// await client.execute("DROP TABLE IF EXISTS test.test_message_history;");
+// });
- await messageHistory.addUserMessage("I am a nice human.");
- await messageHistory.addAIChatMessage(
- "Yes you seem to be. I am a nice AI."
- );
- await messageHistory.addUserMessage("We will see about that.");
+// test("CassandraChatMessageHistory: empty history", async () => {
+// const messageHistory = new CassandraChatMessageHistory({
+// ...cassandraConfig,
+// sessionId: "test_session_A123",
+// });
+// expect(await messageHistory.getMessages()).toEqual([]);
+// });
- const expectedMessages = [
- new HumanMessage("I am a nice human."),
- new AIMessage("Yes you seem to be. I am a nice AI."),
- new HumanMessage("We will see about that."),
- ];
+// test("CassandraChatMessageHistory: add and get messages", async () => {
+// const messageHistory = new CassandraChatMessageHistory({
+// ...cassandraConfig,
+// sessionId: "test_session_B123",
+// });
- expect(await messageHistory.getMessages()).toEqual(expectedMessages);
+// await messageHistory.addUserMessage("I am a nice human.");
+// await messageHistory.addAIChatMessage(
+// "Yes you seem to be. I am a nice AI."
+// );
+// await messageHistory.addUserMessage("We will see about that.");
- const messageHistoryDifferentSession = new CassandraChatMessageHistory({
- ...cassandraConfig,
- sessionId: "test_session_B456",
- });
- expect(await messageHistoryDifferentSession.getMessages()).toEqual([]);
+// const expectedMessages = [
+// new HumanMessage("I am a nice human."),
+// new AIMessage("Yes you seem to be. I am a nice AI."),
+// new HumanMessage("We will see about that."),
+// ];
- const messageHistorySameSession = new CassandraChatMessageHistory({
- ...cassandraConfig,
- sessionId: "test_session_B123",
- });
- expect(await messageHistorySameSession.getMessages()).toEqual(
- expectedMessages
- );
- });
+// expect(await messageHistory.getMessages()).toEqual(expectedMessages);
- test("CassandraChatMessageHistory: clear messages", async () => {
- const messageHistory = new CassandraChatMessageHistory({
- ...cassandraConfig,
- sessionId: "test_session_C123",
- });
- await messageHistory.addUserMessage("I am a nice human.");
- await messageHistory.addAIChatMessage(
- "Yes you seem to be. I am a nice AI."
- );
- await messageHistory.addUserMessage("We will see about that.");
- const expectedMessages = [
- new HumanMessage("I am a nice human."),
- new AIMessage("Yes you seem to be. I am a nice AI."),
- new HumanMessage("We will see about that."),
- ];
+// const messageHistoryDifferentSession = new CassandraChatMessageHistory({
+// ...cassandraConfig,
+// sessionId: "test_session_B456",
+// });
+// expect(await messageHistoryDifferentSession.getMessages()).toEqual([]);
- const messageHistoryToClear = new CassandraChatMessageHistory({
- ...cassandraConfig,
- sessionId: "test_session_C789",
- });
- await messageHistoryToClear.addUserMessage("Hello.");
- await messageHistoryToClear.addAIChatMessage("Hello. How may I help?");
- const expectedMessagesToClear = [
- new HumanMessage("Hello."),
- new AIMessage("Hello. How may I help?"),
- ];
- expect(await messageHistoryToClear.getMessages()).toEqual(
- expectedMessagesToClear
- );
+// const messageHistorySameSession = new CassandraChatMessageHistory({
+// ...cassandraConfig,
+// sessionId: "test_session_B123",
+// });
+// expect(await messageHistorySameSession.getMessages()).toEqual(
+// expectedMessages
+// );
+// });
- await messageHistoryToClear.clear();
- expect(await messageHistoryToClear.getMessages()).toEqual([]);
- expect(await messageHistory.getMessages()).toEqual(expectedMessages);
- });
-});
+// test("CassandraChatMessageHistory: clear messages", async () => {
+// const messageHistory = new CassandraChatMessageHistory({
+// ...cassandraConfig,
+// sessionId: "test_session_C123",
+// });
+// await messageHistory.addUserMessage("I am a nice human.");
+// await messageHistory.addAIChatMessage(
+// "Yes you seem to be. I am a nice AI."
+// );
+// await messageHistory.addUserMessage("We will see about that.");
+// const expectedMessages = [
+// new HumanMessage("I am a nice human."),
+// new AIMessage("Yes you seem to be. I am a nice AI."),
+// new HumanMessage("We will see about that."),
+// ];
+
+// const messageHistoryToClear = new CassandraChatMessageHistory({
+// ...cassandraConfig,
+// sessionId: "test_session_C789",
+// });
+// await messageHistoryToClear.addUserMessage("Hello.");
+// await messageHistoryToClear.addAIChatMessage("Hello. How may I help?");
+// const expectedMessagesToClear = [
+// new HumanMessage("Hello."),
+// new AIMessage("Hello. How may I help?"),
+// ];
+// expect(await messageHistoryToClear.getMessages()).toEqual(
+// expectedMessagesToClear
+// );
+
+// await messageHistoryToClear.clear();
+// expect(await messageHistoryToClear.getMessages()).toEqual([]);
+// expect(await messageHistory.getMessages()).toEqual(expectedMessages);
+// });
+// });
diff --git a/libs/langchain-community/src/vectorstores/tests/astradb.int.test.ts b/libs/langchain-community/src/vectorstores/tests/astradb.int.test.ts
index 3be87b56b813..32d99f4695e4 100644
--- a/libs/langchain-community/src/vectorstores/tests/astradb.int.test.ts
+++ b/libs/langchain-community/src/vectorstores/tests/astradb.int.test.ts
@@ -8,24 +8,28 @@ import { FakeEmbeddings } from "closevector-common/dist/fake.js";
import { AstraDBVectorStore, AstraLibArgs } from "../astradb.js";
describe.skip("AstraDBVectorStore", () => {
- const clientConfig = {
- token: process.env.ASTRA_DB_APPLICATION_TOKEN ?? "dummy",
- endpoint: process.env.ASTRA_DB_ENDPOINT ?? "dummy",
- namespace: process.env.ASTRA_DB_NAMESPACE ?? "default_keyspace",
- };
-
- const client = new AstraDB(clientConfig.token, clientConfig.endpoint);
-
- const astraConfig: AstraLibArgs = {
- ...clientConfig,
- collection: process.env.ASTRA_DB_COLLECTION ?? "langchain_test",
- collectionOptions: {
- vector: {
- dimension: 1536,
- metric: "cosine",
+ let client: AstraDB;
+ let astraConfig: AstraLibArgs;
+ beforeAll(() => {
+ const clientConfig = {
+ token: process.env.ASTRA_DB_APPLICATION_TOKEN ?? "dummy",
+ endpoint: process.env.ASTRA_DB_ENDPOINT ?? "dummy",
+ namespace: process.env.ASTRA_DB_NAMESPACE ?? "default_keyspace",
+ };
+
+ client = new AstraDB(clientConfig.token, clientConfig.endpoint);
+
+ astraConfig = {
+ ...clientConfig,
+ collection: process.env.ASTRA_DB_COLLECTION ?? "langchain_test",
+ collectionOptions: {
+ vector: {
+ dimension: 1536,
+ metric: "cosine",
+ },
},
- },
- };
+ };
+ });
beforeEach(async () => {
try {
diff --git a/libs/langchain-community/src/vectorstores/tests/pgvector/pgvector.int.test.ts b/libs/langchain-community/src/vectorstores/tests/pgvector/pgvector.int.test.ts
index 05429f3001ac..a11477cb5d2f 100644
--- a/libs/langchain-community/src/vectorstores/tests/pgvector/pgvector.int.test.ts
+++ b/libs/langchain-community/src/vectorstores/tests/pgvector/pgvector.int.test.ts
@@ -220,7 +220,7 @@ describe("PGVectorStore", () => {
}
});
- test("PGvector supports different vector types", async () => {
+ test.skip("PGvector supports different vector types", async () => {
// verify by asserting different pgvector operators based on vector type
pgvectorVectorStore.distanceStrategy = "cosine";
expect(pgvectorVectorStore.computedOperatorString).toEqual("<=>");
diff --git a/libs/langchain-community/src/vectorstores/tests/turbopuffer.int.test.ts b/libs/langchain-community/src/vectorstores/tests/turbopuffer.int.test.ts
index d74bbd018423..345aa5116d19 100644
--- a/libs/langchain-community/src/vectorstores/tests/turbopuffer.int.test.ts
+++ b/libs/langchain-community/src/vectorstores/tests/turbopuffer.int.test.ts
@@ -108,7 +108,7 @@ test("Should drop metadata keys from docs with non-string metadata", async () =>
},
{
pageContent: "goodbye",
- metadata: { created_at: { time: (createdAt + 1).toString() } },
+ metadata: { created_at: (createdAt + 1).toString() },
},
]);
diff --git a/libs/langchain-google-common/.gitignore b/libs/langchain-google-common/.gitignore
index c10034e2f1be..df014a2d426b 100644
--- a/libs/langchain-google-common/.gitignore
+++ b/libs/langchain-google-common/.gitignore
@@ -2,6 +2,14 @@ index.cjs
index.js
index.d.ts
index.d.cts
+utils.cjs
+utils.js
+utils.d.ts
+utils.d.cts
+types.cjs
+types.js
+types.d.ts
+types.d.cts
node_modules
dist
.yarn
diff --git a/libs/langchain-google-common/README.md b/libs/langchain-google-common/README.md
index 7e914f1cee68..f0babe16e70a 100644
--- a/libs/langchain-google-common/README.md
+++ b/libs/langchain-google-common/README.md
@@ -27,14 +27,14 @@ file storage.
## Google services supported
* Gemini model through LLM and Chat classes (both through Google AI Studio and
- Google Cloud Vertex AI)
+ Google Cloud Vertex AI). Including:
+ * Function/Tool support
## TODO
Tasks and services still to be implemented:
-* Functions for Gemini
* PaLM Vertex AI support and backwards compatibility
* PaLM MakerSuite support and backwards compatibility
* Semantic Retrieval / AQA model
@@ -43,5 +43,10 @@ Tasks and services still to be implemented:
* Multimodal embeddings
* Vertex AI Search
* Vertex AI Model Garden
+ * Online prediction endpoints
+ * Gemma
+ * Google managed models
+ * Claude
+* AI Studio Tuned Models
* MakerSuite / Google Drive Hub
* Google Cloud Vector Store
\ No newline at end of file
diff --git a/libs/langchain-google-common/langchain.config.js b/libs/langchain-google-common/langchain.config.js
index 416001cb4772..df02f88bd793 100644
--- a/libs/langchain-google-common/langchain.config.js
+++ b/libs/langchain-google-common/langchain.config.js
@@ -14,6 +14,8 @@ export const config = {
internals: [/node\:/, /@langchain\/core\//],
entrypoints: {
index: "index",
+ utils: "utils/index",
+ types: "types",
},
tsConfigPath: resolve("./tsconfig.json"),
cjsSource: "./dist-cjs",
diff --git a/libs/langchain-google-common/package.json b/libs/langchain-google-common/package.json
index 6dcb864d4402..7f67f7cb52be 100644
--- a/libs/langchain-google-common/package.json
+++ b/libs/langchain-google-common/package.json
@@ -1,6 +1,6 @@
{
"name": "@langchain/google-common",
- "version": "0.0.1",
+ "version": "0.0.2",
"description": "Core types and classes for Google services.",
"type": "module",
"engines": {
@@ -39,7 +39,8 @@
"author": "LangChain",
"license": "MIT",
"dependencies": {
- "@langchain/core": "~0.1.1"
+ "@langchain/core": "~0.1.1",
+ "zod-to-json-schema": "^3.22.4"
},
"devDependencies": {
"@jest/globals": "^29.5.0",
@@ -63,7 +64,8 @@
"release-it": "^15.10.1",
"rollup": "^4.5.2",
"ts-jest": "^29.1.0",
- "typescript": "<5.2.0"
+ "typescript": "<5.2.0",
+ "zod": "^3.22.4"
},
"publishConfig": {
"access": "public"
@@ -78,6 +80,24 @@
"import": "./index.js",
"require": "./index.cjs"
},
+ "./utils": {
+ "types": {
+ "import": "./utils.d.ts",
+ "require": "./utils.d.cts",
+ "default": "./utils.d.ts"
+ },
+ "import": "./utils.js",
+ "require": "./utils.cjs"
+ },
+ "./types": {
+ "types": {
+ "import": "./types.d.ts",
+ "require": "./types.d.cts",
+ "default": "./types.d.ts"
+ },
+ "import": "./types.js",
+ "require": "./types.cjs"
+ },
"./package.json": "./package.json"
},
"files": [
@@ -85,6 +105,14 @@
"index.cjs",
"index.js",
"index.d.ts",
- "index.d.cts"
+ "index.d.cts",
+ "utils.cjs",
+ "utils.js",
+ "utils.d.ts",
+ "utils.d.cts",
+ "types.cjs",
+ "types.js",
+ "types.d.ts",
+ "types.d.cts"
]
}
diff --git a/libs/langchain-google-common/src/chat_models.ts b/libs/langchain-google-common/src/chat_models.ts
index 109244aa9c88..e9f794929247 100644
--- a/libs/langchain-google-common/src/chat_models.ts
+++ b/libs/langchain-google-common/src/chat_models.ts
@@ -1,6 +1,5 @@
import { getEnvironmentVariable } from "@langchain/core/utils/env";
import { type BaseMessage } from "@langchain/core/messages";
-import { type BaseLanguageModelCallOptions } from "@langchain/core/language_models/base";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import {
@@ -9,6 +8,18 @@ import {
} from "@langchain/core/language_models/chat_models";
import { ChatGenerationChunk, ChatResult } from "@langchain/core/outputs";
import { AIMessageChunk } from "@langchain/core/messages";
+import {
+ BaseLanguageModelInput,
+ StructuredOutputMethodOptions,
+} from "@langchain/core/language_models/base";
+import type { z } from "zod";
+import {
+ Runnable,
+ RunnablePassthrough,
+ RunnableSequence,
+} from "@langchain/core/runnables";
+import { JsonOutputKeyToolsParser } from "@langchain/core/output_parsers/openai_tools";
+import { BaseLLMOutputParser } from "@langchain/core/output_parsers";
import {
GoogleAIBaseLLMInput,
GoogleAIModelParams,
@@ -16,6 +27,8 @@ import {
GoogleConnectionParams,
GooglePlatformType,
GeminiContent,
+ GeminiTool,
+ GoogleAIBaseLanguageModelCallOptions,
} from "./types.js";
import {
copyAIModelParams,
@@ -35,7 +48,10 @@ import type {
GoogleBaseLLMInput,
GoogleAISafetyHandler,
GoogleAISafetyParams,
+ GeminiFunctionDeclaration,
+ GeminiFunctionSchema,
} from "./types.js";
+import { zodToGeminiParameters } from "./utils/zod_to_gemini_parameters.js";
class ChatConnection extends AbstractGoogleLLMConnection<
BaseMessage[],
@@ -64,7 +80,7 @@ export interface ChatGoogleBaseInput
* Integration with a chat model.
*/
export abstract class ChatGoogleBase
- extends BaseChatModel
+ extends BaseChatModel
implements ChatGoogleBaseInput
{
// Used for tracing, replace with the same name as your class
@@ -74,8 +90,11 @@ export abstract class ChatGoogleBase
lc_serializable = true;
+ /** @deprecated Prefer `modelName` */
model = "gemini-pro";
+ modelName = "gemini-pro";
+
temperature = 0.7;
maxOutputTokens = 1024;
@@ -161,7 +180,7 @@ export abstract class ChatGoogleBase
options: this["ParsedCallOptions"],
_runManager: CallbackManagerForLLMRun | undefined
): Promise {
- const parameters = copyAIModelParams(this);
+ const parameters = copyAIModelParams(this, options);
const response = await this.connection.request(
messages,
parameters,
@@ -173,15 +192,15 @@ export abstract class ChatGoogleBase
async *_streamResponseChunks(
_messages: BaseMessage[],
- _options: this["ParsedCallOptions"],
+ options: this["ParsedCallOptions"],
_runManager?: CallbackManagerForLLMRun
): AsyncGenerator {
// Make the call as a streaming request
- const parameters = copyAIModelParams(this);
+ const parameters = copyAIModelParams(this, options);
const response = await this.streamedConnection.request(
_messages,
parameters,
- _options
+ options
);
// Get the streaming parser of the response
@@ -210,4 +229,142 @@ export abstract class ChatGoogleBase
_combineLLMOutput() {
return [];
}
+
+ withStructuredOutput<
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ RunOutput extends Record = Record
+ >(
+ outputSchema:
+ | z.ZodType
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ | Record,
+ config?: StructuredOutputMethodOptions
+ ): Runnable;
+
+ withStructuredOutput<
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ RunOutput extends Record = Record
+ >(
+ outputSchema:
+ | z.ZodType
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ | Record,
+ config?: StructuredOutputMethodOptions
+ ): Runnable;
+
+ withStructuredOutput<
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ RunOutput extends Record = Record
+ >(
+ outputSchema:
+ | z.ZodType
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ | Record,
+ config?: StructuredOutputMethodOptions
+ ):
+ | Runnable
+ | Runnable<
+ BaseLanguageModelInput,
+ { raw: BaseMessage; parsed: RunOutput }
+ > {
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ const schema: z.ZodType | Record = outputSchema;
+ const name = config?.name;
+ const method = config?.method;
+ const includeRaw = config?.includeRaw;
+ if (method === "jsonMode") {
+ throw new Error(`Google only supports "functionCalling" as a method.`);
+ }
+
+ let functionName = name ?? "extract";
+ let outputParser: BaseLLMOutputParser;
+ let tools: GeminiTool[];
+ if (isZodSchema(schema)) {
+ const jsonSchema = zodToGeminiParameters(schema);
+ tools = [
+ {
+ functionDeclarations: [
+ {
+ name: functionName,
+ description:
+ jsonSchema.description ?? "A function available to call.",
+ parameters: jsonSchema as GeminiFunctionSchema,
+ },
+ ],
+ },
+ ];
+ outputParser = new JsonOutputKeyToolsParser({
+ returnSingle: true,
+ keyName: functionName,
+ zodSchema: schema,
+ });
+ } else {
+ let geminiFunctionDefinition: GeminiFunctionDeclaration;
+ if (
+ typeof schema.name === "string" &&
+ typeof schema.parameters === "object" &&
+ schema.parameters != null
+ ) {
+ geminiFunctionDefinition = schema as GeminiFunctionDeclaration;
+ functionName = schema.name;
+ } else {
+ geminiFunctionDefinition = {
+ name: functionName,
+ description: schema.description ?? "",
+ parameters: schema as GeminiFunctionSchema,
+ };
+ }
+ tools = [
+ {
+ functionDeclarations: [geminiFunctionDefinition],
+ },
+ ];
+ outputParser = new JsonOutputKeyToolsParser({
+ returnSingle: true,
+ keyName: functionName,
+ });
+ }
+ const llm = this.bind({
+ tools,
+ });
+
+ if (!includeRaw) {
+ return llm.pipe(outputParser).withConfig({
+ runName: "ChatGoogleStructuredOutput",
+ }) as Runnable;
+ }
+
+ const parserAssign = RunnablePassthrough.assign({
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ parsed: (input: any, config) => outputParser.invoke(input.raw, config),
+ });
+ const parserNone = RunnablePassthrough.assign({
+ parsed: () => null,
+ });
+ const parsedWithFallback = parserAssign.withFallbacks({
+ fallbacks: [parserNone],
+ });
+ return RunnableSequence.from<
+ BaseLanguageModelInput,
+ { raw: BaseMessage; parsed: RunOutput }
+ >([
+ {
+ raw: llm,
+ },
+ parsedWithFallback,
+ ]).withConfig({
+ runName: "StructuredOutputRunnable",
+ });
+ }
+}
+
+function isZodSchema<
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ RunOutput extends Record = Record
+>(
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ input: z.ZodType | Record
+): input is z.ZodType {
+ // Check for a characteristic method of Zod schemas
+ return typeof (input as z.ZodType)?.parse === "function";
}
diff --git a/libs/langchain-google-common/src/connection.ts b/libs/langchain-google-common/src/connection.ts
index ca32dae5aba8..050dab3b204e 100644
--- a/libs/langchain-google-common/src/connection.ts
+++ b/libs/langchain-google-common/src/connection.ts
@@ -4,9 +4,9 @@ import {
AsyncCallerCallOptions,
} from "@langchain/core/utils/async_caller";
import { getRuntimeEnvironment } from "@langchain/core/utils/env";
+import { StructuredToolInterface } from "@langchain/core/tools";
import type {
GoogleAIBaseLLMInput,
- GoogleAIModelParams,
GoogleConnectionParams,
GoogleLLMModelFamily,
GooglePlatformType,
@@ -16,12 +16,16 @@ import type {
GeminiGenerationConfig,
GeminiRequest,
GeminiSafetySetting,
+ GeminiTool,
+ GeminiFunctionDeclaration,
+ GoogleAIModelRequestParams,
} from "./types.js";
import {
GoogleAbstractedClient,
GoogleAbstractedClientOps,
GoogleAbstractedClientOpsMethod,
} from "./auth.js";
+import { zodToGeminiParameters } from "./utils/zod_to_gemini_parameters.js";
export abstract class GoogleConnection<
CallOptions extends AsyncCallerCallOptions,
@@ -159,8 +163,11 @@ export abstract class GoogleAIConnection<
extends GoogleHostConnection
implements GoogleAIBaseLLMInput
{
+ /** @deprecated Prefer `modelName` */
model: string;
+ modelName: string;
+
client: GoogleAbstractedClient;
constructor(
@@ -171,11 +178,11 @@ export abstract class GoogleAIConnection<
) {
super(fields, caller, client, streaming);
this.client = client;
- this.model = fields?.model ?? this.model;
+ this.modelName = fields?.modelName ?? fields?.model ?? this.modelName;
}
get modelFamily(): GoogleLLMModelFamily {
- if (this.model.startsWith("gemini")) {
+ if (this.modelName.startsWith("gemini")) {
return "gemini";
} else {
return null;
@@ -194,14 +201,14 @@ export abstract class GoogleAIConnection<
async buildUrlGenerativeLanguage(): Promise {
const method = await this.buildUrlMethod();
- const url = `https://generativelanguage.googleapis.com/${this.apiVersion}/models/${this.model}:${method}`;
+ const url = `https://generativelanguage.googleapis.com/${this.apiVersion}/models/${this.modelName}:${method}`;
return url;
}
async buildUrlVertex(): Promise {
const projectId = await this.client.getProjectId();
const method = await this.buildUrlMethod();
- const url = `https://${this.endpoint}/${this.apiVersion}/projects/${projectId}/locations/${this.location}/publishers/google/models/${this.model}:${method}`;
+ const url = `https://${this.endpoint}/${this.apiVersion}/projects/${projectId}/locations/${this.location}/publishers/google/models/${this.modelName}:${method}`;
return url;
}
@@ -216,12 +223,12 @@ export abstract class GoogleAIConnection<
abstract formatData(
input: MessageType,
- parameters: GoogleAIModelParams
+ parameters: GoogleAIModelRequestParams
): unknown;
async request(
input: MessageType,
- parameters: GoogleAIModelParams,
+ parameters: GoogleAIModelRequestParams,
options: CallOptions
): Promise {
const data = this.formatData(input, parameters);
@@ -254,12 +261,12 @@ export abstract class AbstractGoogleLLMConnection<
abstract formatContents(
input: MessageType,
- parameters: GoogleAIModelParams
+ parameters: GoogleAIModelRequestParams
): GeminiContent[];
formatGenerationConfig(
_input: MessageType,
- parameters: GoogleAIModelParams
+ parameters: GoogleAIModelRequestParams
): GeminiGenerationConfig {
return {
temperature: parameters.temperature,
@@ -272,14 +279,61 @@ export abstract class AbstractGoogleLLMConnection<
formatSafetySettings(
_input: MessageType,
- parameters: GoogleAIModelParams
+ parameters: GoogleAIModelRequestParams
): GeminiSafetySetting[] {
return parameters.safetySettings ?? [];
}
+ // Borrowed from the OpenAI invocation params test
+ isStructuredToolArray(tools?: unknown[]): tools is StructuredToolInterface[] {
+ return (
+ tools !== undefined &&
+ tools.every((tool) =>
+ Array.isArray((tool as StructuredToolInterface).lc_namespace)
+ )
+ );
+ }
+
+ structuredToolToFunctionDeclaration(
+ tool: StructuredToolInterface
+ ): GeminiFunctionDeclaration {
+ const jsonSchema = zodToGeminiParameters(tool.schema);
+ return {
+ name: tool.name,
+ description: tool.description,
+ parameters: jsonSchema,
+ };
+ }
+
+ structuredToolsToGeminiTools(tools: StructuredToolInterface[]): GeminiTool[] {
+ return [
+ {
+ functionDeclarations: tools.map(
+ this.structuredToolToFunctionDeclaration
+ ),
+ },
+ ];
+ }
+
+ formatTools(
+ _input: MessageType,
+ parameters: GoogleAIModelRequestParams
+ ): GeminiTool[] {
+ const tools = parameters?.tools;
+ if (!tools || tools.length === 0) {
+ return [];
+ }
+
+ if (this.isStructuredToolArray(tools)) {
+ return this.structuredToolsToGeminiTools(tools);
+ } else {
+ return tools as GeminiTool[];
+ }
+ }
+
formatData(
input: MessageType,
- parameters: GoogleAIModelParams
+ parameters: GoogleAIModelRequestParams
): GeminiRequest {
/*
const parts = messageContentToParts(input);
@@ -292,12 +346,16 @@ export abstract class AbstractGoogleLLMConnection<
*/
const contents = this.formatContents(input, parameters);
const generationConfig = this.formatGenerationConfig(input, parameters);
+ const tools = this.formatTools(input, parameters);
const safetySettings = this.formatSafetySettings(input, parameters);
const ret: GeminiRequest = {
contents,
generationConfig,
};
+ if (tools && tools.length) {
+ ret.tools = tools;
+ }
if (safetySettings && safetySettings.length) {
ret.safetySettings = safetySettings;
}
diff --git a/libs/langchain-google-common/src/index.ts b/libs/langchain-google-common/src/index.ts
index a238b9d241eb..3e4311e2b040 100644
--- a/libs/langchain-google-common/src/index.ts
+++ b/libs/langchain-google-common/src/index.ts
@@ -6,3 +6,4 @@ export * from "./connection.js";
export * from "./types.js";
export * from "./utils/stream.js";
export * from "./utils/common.js";
+export * from "./utils/zod_to_gemini_parameters.js";
diff --git a/libs/langchain-google-common/src/llms.ts b/libs/langchain-google-common/src/llms.ts
index 7ef0ba3f21a7..a64da4efc9a3 100644
--- a/libs/langchain-google-common/src/llms.ts
+++ b/libs/langchain-google-common/src/llms.ts
@@ -85,7 +85,7 @@ export abstract class GoogleBaseLLM
lc_serializable = true;
- model = "gemini-pro";
+ modelName = "gemini-pro";
temperature = 0.7;
@@ -182,7 +182,7 @@ export abstract class GoogleBaseLLM
prompt: string,
options: this["ParsedCallOptions"]
): Promise {
- const parameters = copyAIModelParams(this);
+ const parameters = copyAIModelParams(this, options);
const result = await this.connection.request(prompt, parameters, options);
const ret = safeResponseToString(result, this.safetyHandler);
return ret;
diff --git a/libs/langchain-google-common/src/tests/chat_models.test.ts b/libs/langchain-google-common/src/tests/chat_models.test.ts
index f0ef0ff444cd..1b6d83dcd1a8 100644
--- a/libs/langchain-google-common/src/tests/chat_models.test.ts
+++ b/libs/langchain-google-common/src/tests/chat_models.test.ts
@@ -8,10 +8,16 @@ import {
MessageContentComplex,
MessageContentText,
SystemMessage,
+ ToolMessage,
} from "@langchain/core/messages";
+import { StructuredToolInterface } from "@langchain/core/tools";
+import { FakeTool } from "@langchain/core/utils/testing";
+// eslint-disable-next-line import/no-extraneous-dependencies
+import { z } from "zod";
+
import { ChatGoogleBase, ChatGoogleBaseInput } from "../chat_models.js";
import { authOptions, MockClient, MockClientAuthInfo, mockId } from "./mock.js";
-import { GoogleAIBaseLLMInput } from "../types.js";
+import { GeminiTool, GoogleAIBaseLLMInput } from "../types.js";
import { GoogleAbstractedClient } from "../auth.js";
import { GoogleAISafetyError } from "../utils/safety.js";
@@ -76,7 +82,7 @@ describe("Mock ChatGoogle", () => {
new AIMessage("H"),
new HumanMessage("Flip it again"),
];
- await model.call(messages);
+ await model.invoke(messages);
expect(record?.opts?.headers).toHaveProperty("User-Agent");
expect(record.opts.headers["User-Agent"]).toMatch(
@@ -132,7 +138,7 @@ describe("Mock ChatGoogle", () => {
new AIMessage("H"),
new HumanMessage("Flip it again"),
];
- const result = await model.call(messages);
+ const result = await model.invoke(messages);
console.log("record", JSON.stringify(record, null, 1));
console.log("result", JSON.stringify(result, null, 1));
@@ -167,7 +173,7 @@ describe("Mock ChatGoogle", () => {
new AIMessage("H"),
new HumanMessage("Flip it again"),
];
- const result = await model.call(messages);
+ const result = await model.invoke(messages);
console.log("record", JSON.stringify(record, null, 1));
console.log("result", JSON.stringify(result, null, 1));
@@ -202,7 +208,7 @@ describe("Mock ChatGoogle", () => {
new AIMessage("H"),
new HumanMessage("Flip it again"),
];
- const result = await model.call(messages);
+ const result = await model.invoke(messages);
expect(result._getType()).toEqual("ai");
const aiMessage = result as AIMessage;
@@ -233,7 +239,7 @@ describe("Mock ChatGoogle", () => {
new AIMessage("H"),
new HumanMessage("Flip it again"),
];
- const result = await model.call(messages);
+ const result = await model.invoke(messages);
expect(result._getType()).toEqual("ai");
const aiMessage = result as AIMessage;
@@ -269,7 +275,7 @@ describe("Mock ChatGoogle", () => {
new AIMessage("H"),
new HumanMessage("Flip it again"),
];
- const result = await model.call(messages);
+ const result = await model.invoke(messages);
console.log("record", JSON.stringify(record, null, 1));
console.log("result", JSON.stringify(result, null, 1));
@@ -309,7 +315,7 @@ describe("Mock ChatGoogle", () => {
];
let caught = false;
try {
- await model.call(messages);
+ await model.invoke(messages);
// eslint-disable-next-line @typescript-eslint/no-explicit-any
} catch (xx: any) {
@@ -348,7 +354,7 @@ describe("Mock ChatGoogle", () => {
};
const model = new ChatGoogle({
authOptions,
- model: "gemini-pro-vision",
+ modelName: "gemini-pro-vision",
});
const message: MessageContentComplex[] = [
@@ -366,7 +372,7 @@ describe("Mock ChatGoogle", () => {
new HumanMessageChunk({ content: message }),
];
- const result = await model.call(messages);
+ const result = await model.invoke(messages);
expect(record.opts).toHaveProperty("data");
expect(record.opts.data).toHaveProperty("contents");
@@ -385,4 +391,354 @@ describe("Mock ChatGoogle", () => {
"A blue square."
);
});
+
+ test("4. Functions Bind - Gemini format request", async () => {
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ const record: Record = {};
+ const projectId = mockId();
+ const authOptions: MockClientAuthInfo = {
+ record,
+ projectId,
+ resultFile: "chat-4-mock.json",
+ };
+
+ const tools: GeminiTool[] = [
+ {
+ functionDeclarations: [
+ {
+ name: "test",
+ description:
+ "Run a test with a specific name and get if it passed or failed",
+ parameters: {
+ type: "object",
+ properties: {
+ testName: {
+ type: "string",
+ description: "The name of the test that should be run.",
+ },
+ },
+ required: ["testName"],
+ },
+ },
+ ],
+ },
+ ];
+
+ const baseModel = new ChatGoogle({
+ authOptions,
+ });
+ const model = baseModel.bind({
+ tools,
+ });
+
+ const result = await model.invoke("What?");
+
+ console.log(JSON.stringify(record, null, 1));
+
+ expect(result).toBeDefined();
+
+ const toolsResult = record?.opts?.data?.tools;
+ expect(toolsResult).toBeDefined();
+ expect(Array.isArray(toolsResult)).toBeTruthy();
+ expect(toolsResult).toHaveLength(1);
+
+ const toolResult = toolsResult[0];
+ expect(toolResult).toBeDefined();
+ expect(toolResult).toHaveProperty("functionDeclarations");
+ expect(Array.isArray(toolResult.functionDeclarations)).toBeTruthy();
+ expect(toolResult.functionDeclarations).toHaveLength(1);
+
+ const functionDeclaration = toolResult.functionDeclarations[0];
+ expect(functionDeclaration.name).toBe("test");
+ expect(functionDeclaration.description).toBe(
+ "Run a test with a specific name and get if it passed or failed"
+ );
+ expect(functionDeclaration.parameters).toBeDefined();
+ expect(typeof functionDeclaration.parameters).toBe("object");
+
+ const parameters = functionDeclaration?.parameters;
+ expect(parameters.type).toBe("object");
+ expect(parameters).toHaveProperty("properties");
+ expect(typeof parameters.properties).toBe("object");
+
+ expect(parameters.properties.testName).toBeDefined();
+ expect(typeof parameters.properties.testName).toBe("object");
+ expect(parameters.properties.testName.type).toBe("string");
+ expect(parameters.properties.testName.description).toBe(
+ "The name of the test that should be run."
+ );
+
+ expect(parameters.required).toBeDefined();
+ expect(Array.isArray(parameters.required)).toBeTruthy();
+ expect(parameters.required).toHaveLength(1);
+ expect(parameters.required[0]).toBe("testName");
+ });
+
+ test("4. Functions withStructuredOutput - Gemini format request", async () => {
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ const record: Record = {};
+ const projectId = mockId();
+ const authOptions: MockClientAuthInfo = {
+ record,
+ projectId,
+ resultFile: "chat-4-mock.json",
+ };
+
+ const tool = {
+ name: "test",
+ description:
+ "Run a test with a specific name and get if it passed or failed",
+ parameters: {
+ type: "object",
+ properties: {
+ testName: {
+ type: "string",
+ description: "The name of the test that should be run.",
+ },
+ },
+ required: ["testName"],
+ },
+ };
+
+ const baseModel = new ChatGoogle({
+ authOptions,
+ });
+ const model = baseModel.withStructuredOutput(tool);
+
+ await model.invoke("What?");
+
+ console.log(JSON.stringify(record, null, 1));
+
+ const toolsResult = record?.opts?.data?.tools;
+ expect(toolsResult).toBeDefined();
+ expect(Array.isArray(toolsResult)).toBeTruthy();
+ expect(toolsResult).toHaveLength(1);
+
+ const toolResult = toolsResult[0];
+ expect(toolResult).toBeDefined();
+ expect(toolResult).toHaveProperty("functionDeclarations");
+ expect(Array.isArray(toolResult.functionDeclarations)).toBeTruthy();
+ expect(toolResult.functionDeclarations).toHaveLength(1);
+
+ const functionDeclaration = toolResult.functionDeclarations[0];
+ expect(functionDeclaration.name).toBe("test");
+ expect(functionDeclaration.description).toBe(
+ "Run a test with a specific name and get if it passed or failed"
+ );
+ expect(functionDeclaration.parameters).toBeDefined();
+ expect(typeof functionDeclaration.parameters).toBe("object");
+
+ const parameters = functionDeclaration?.parameters;
+ expect(parameters.type).toBe("object");
+ expect(parameters).toHaveProperty("properties");
+ expect(typeof parameters.properties).toBe("object");
+
+ expect(parameters.properties.testName).toBeDefined();
+ expect(typeof parameters.properties.testName).toBe("object");
+ expect(parameters.properties.testName.type).toBe("string");
+ expect(parameters.properties.testName.description).toBe(
+ "The name of the test that should be run."
+ );
+
+ expect(parameters.required).toBeDefined();
+ expect(Array.isArray(parameters.required)).toBeTruthy();
+ expect(parameters.required).toHaveLength(1);
+ expect(parameters.required[0]).toBe("testName");
+ });
+
+ test("4. Functions - zod format request", async () => {
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ const record: Record = {};
+ const projectId = mockId();
+ const authOptions: MockClientAuthInfo = {
+ record,
+ projectId,
+ resultFile: "chat-4-mock.json",
+ };
+
+ const zodSchema = z.object({
+ testName: z.string().describe("The name of the test that should be run."),
+ });
+ const tools: StructuredToolInterface[] = [
+ new FakeTool({
+ name: "test",
+ description:
+ "Run a test with a specific name and get if it passed or failed",
+ schema: zodSchema,
+ }),
+ ];
+
+ const model = new ChatGoogle({
+ authOptions,
+ }).bind({
+ tools,
+ });
+
+ const result = await model.invoke("What?");
+
+ const toolsResult = record?.opts?.data?.tools;
+ console.log("toolsResult", JSON.stringify(toolsResult, null, 1));
+ expect(toolsResult).toBeDefined();
+ expect(Array.isArray(toolsResult)).toBeTruthy();
+ expect(toolsResult).toHaveLength(1);
+
+ const toolResult = toolsResult[0];
+ expect(toolResult).toBeDefined();
+ expect(toolResult).toHaveProperty("functionDeclarations");
+ expect(Array.isArray(toolResult.functionDeclarations)).toBeTruthy();
+ expect(toolResult.functionDeclarations).toHaveLength(1);
+
+ const functionDeclaration = toolResult.functionDeclarations[0];
+ expect(functionDeclaration.name).toBe("test");
+ expect(functionDeclaration.description).toBe(
+ "Run a test with a specific name and get if it passed or failed"
+ );
+ expect(functionDeclaration.parameters).toBeDefined();
+ expect(typeof functionDeclaration.parameters).toBe("object");
+
+ const parameters = functionDeclaration?.parameters;
+ expect(parameters.type).toBe("object");
+ expect(parameters).toHaveProperty("properties");
+ expect(parameters).not.toHaveProperty("additionalProperties");
+ expect(parameters).not.toHaveProperty("$schema");
+ expect(typeof parameters.properties).toBe("object");
+
+ expect(parameters.properties.testName).toBeDefined();
+ expect(typeof parameters.properties.testName).toBe("object");
+ expect(parameters.properties.testName.type).toBe("string");
+ expect(parameters.properties.testName.description).toBe(
+ "The name of the test that should be run."
+ );
+
+ expect(parameters.required).toBeDefined();
+ expect(Array.isArray(parameters.required)).toBeTruthy();
+ expect(parameters.required).toHaveLength(1);
+ expect(parameters.required[0]).toBe("testName");
+
+ console.log(result);
+ });
+
+ test("4. Functions - results", async () => {
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ const record: Record = {};
+ const projectId = mockId();
+ const authOptions: MockClientAuthInfo = {
+ record,
+ projectId,
+ resultFile: "chat-4-mock.json",
+ };
+
+ const tools: GeminiTool[] = [
+ {
+ functionDeclarations: [
+ {
+ name: "test",
+ description:
+ "Run a test with a specific name and get if it passed or failed",
+ parameters: {
+ type: "object",
+ properties: {
+ testName: {
+ type: "string",
+ description: "The name of the test that should be run.",
+ },
+ },
+ required: ["testName"],
+ },
+ },
+ ],
+ },
+ ];
+
+ const model = new ChatGoogle({
+ authOptions,
+ }).bind({
+ tools,
+ });
+
+ const result = await model.invoke("What?");
+
+ console.log(JSON.stringify(result, null, 1));
+ expect(result).toHaveProperty("content");
+ expect(Array.isArray(result.content)).toBeTruthy();
+ expect(result.content).toHaveLength(0);
+ const args = result?.lc_kwargs?.additional_kwargs;
+ expect(args).toBeDefined();
+ expect(args).toHaveProperty("tool_calls");
+ expect(Array.isArray(args.tool_calls)).toBeTruthy();
+ expect(args.tool_calls).toHaveLength(1);
+ const call = args.tool_calls[0];
+ expect(call).toHaveProperty("type");
+ expect(call.type).toBe("function");
+ expect(call).toHaveProperty("function");
+ const func = call.function;
+ expect(func).toBeDefined();
+ expect(func).toHaveProperty("name");
+ expect(func.name).toBe("test");
+ expect(func).toHaveProperty("arguments");
+ expect(typeof func.arguments).toBe("string");
+ expect(func.arguments.replaceAll("\n", "")).toBe('{"testName":"cobalt"}');
+ });
+
+ test("5. Functions - function reply", async () => {
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ const record: Record = {};
+ const projectId = mockId();
+ const authOptions: MockClientAuthInfo = {
+ record,
+ projectId,
+ resultFile: "chat-5-mock.json",
+ };
+
+ const tools: GeminiTool[] = [
+ {
+ functionDeclarations: [
+ {
+ name: "test",
+ description:
+ "Run a test with a specific name and get if it passed or failed",
+ parameters: {
+ type: "object",
+ properties: {
+ testName: {
+ type: "string",
+ description: "The name of the test that should be run.",
+ },
+ },
+ required: ["testName"],
+ },
+ },
+ ],
+ },
+ ];
+
+ const model = new ChatGoogle({
+ authOptions,
+ }).bind({
+ tools,
+ });
+ const toolResult = {
+ testPassed: true,
+ };
+ const messages: BaseMessageLike[] = [
+ new HumanMessage("Run a test on the cobalt project."),
+ new AIMessage("", {
+ tool_calls: [
+ {
+ id: "test",
+ type: "function",
+ function: {
+ name: "test",
+ arguments: '{"testName":"cobalt"}',
+ },
+ },
+ ],
+ }),
+ new ToolMessage(JSON.stringify(toolResult), "test"),
+ ];
+ const result = await model.invoke(messages);
+ expect(result).toBeDefined();
+
+ console.log(JSON.stringify(record?.opts?.data, null, 1));
+ });
});
diff --git a/libs/langchain-google-common/src/tests/data/chat-4-mock.json b/libs/langchain-google-common/src/tests/data/chat-4-mock.json
new file mode 100644
index 000000000000..e91458593517
--- /dev/null
+++ b/libs/langchain-google-common/src/tests/data/chat-4-mock.json
@@ -0,0 +1,59 @@
+{
+ "candidates": [
+ {
+ "content": {
+ "parts": [
+ {
+ "functionCall": {
+ "name": "test",
+ "args": {
+ "testName": "cobalt"
+ }
+ }
+ }
+ ],
+ "role": "model"
+ },
+ "finishReason": "STOP",
+ "index": 0,
+ "safetyRatings": [
+ {
+ "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+ "probability": "NEGLIGIBLE"
+ },
+ {
+ "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+ "probability": "NEGLIGIBLE"
+ },
+ {
+ "category": "HARM_CATEGORY_HATE_SPEECH",
+ "probability": "NEGLIGIBLE"
+ },
+ {
+ "category": "HARM_CATEGORY_HARASSMENT",
+ "probability": "NEGLIGIBLE"
+ }
+ ]
+ }
+ ],
+ "promptFeedback": {
+ "safetyRatings": [
+ {
+ "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+ "probability": "NEGLIGIBLE"
+ },
+ {
+ "category": "HARM_CATEGORY_HATE_SPEECH",
+ "probability": "NEGLIGIBLE"
+ },
+ {
+ "category": "HARM_CATEGORY_HARASSMENT",
+ "probability": "NEGLIGIBLE"
+ },
+ {
+ "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+ "probability": "NEGLIGIBLE"
+ }
+ ]
+ }
+}
diff --git a/libs/langchain-google-common/src/tests/data/chat-5-mock.json b/libs/langchain-google-common/src/tests/data/chat-5-mock.json
new file mode 100644
index 000000000000..7c01e747e766
--- /dev/null
+++ b/libs/langchain-google-common/src/tests/data/chat-5-mock.json
@@ -0,0 +1,54 @@
+{
+ "candidates": [
+ {
+ "content": {
+ "parts": [
+ {
+ "text": "The cobalt model passed."
+ }
+ ],
+ "role": "model"
+ },
+ "finishReason": "STOP",
+ "index": 0,
+ "safetyRatings": [
+ {
+ "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+ "probability": "NEGLIGIBLE"
+ },
+ {
+ "category": "HARM_CATEGORY_HATE_SPEECH",
+ "probability": "LOW"
+ },
+ {
+ "category": "HARM_CATEGORY_HARASSMENT",
+ "probability": "MEDIUM"
+ },
+ {
+ "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+ "probability": "NEGLIGIBLE"
+ }
+ ]
+ }
+ ],
+ "promptFeedback": {
+ "safetyRatings": [
+ {
+ "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+ "probability": "NEGLIGIBLE"
+ },
+ {
+ "category": "HARM_CATEGORY_HATE_SPEECH",
+ "probability": "NEGLIGIBLE"
+ },
+ {
+ "category": "HARM_CATEGORY_HARASSMENT",
+ "probability": "NEGLIGIBLE"
+ },
+ {
+ "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+ "probability": "NEGLIGIBLE"
+ }
+ ]
+ }
+}
diff --git a/libs/langchain-google-common/src/tests/llms.test.ts b/libs/langchain-google-common/src/tests/llms.test.ts
index 6b72dcd1a207..092380eb8654 100644
--- a/libs/langchain-google-common/src/tests/llms.test.ts
+++ b/libs/langchain-google-common/src/tests/llms.test.ts
@@ -387,7 +387,7 @@ describe("Mock Google LLM", () => {
const model = new GoogleLLM({
authOptions,
- model: "gemini-pro-vision",
+ modelName: "gemini-pro-vision",
});
const message: MessageContentComplex[] = [
@@ -439,7 +439,7 @@ describe("Mock Google LLM", () => {
const model = new GoogleLLM({
authOptions,
- model: "gemini-pro-vision",
+ modelName: "gemini-pro-vision",
});
const message: MessageContentComplex[] = [
@@ -491,7 +491,7 @@ describe("Mock Google LLM", () => {
};
const model = new GoogleLLM({
authOptions,
- model: "gemini-pro-image",
+ modelName: "gemini-pro-image",
});
const message: MessageContentComplex[] = [
diff --git a/libs/langchain-google-common/src/tests/utils.test.ts b/libs/langchain-google-common/src/tests/utils.test.ts
new file mode 100644
index 000000000000..1085ba70ef27
--- /dev/null
+++ b/libs/langchain-google-common/src/tests/utils.test.ts
@@ -0,0 +1,36 @@
+import { expect, test } from "@jest/globals";
+import { z } from "zod";
+import { zodToGeminiParameters } from "../utils/zod_to_gemini_parameters.js";
+
+test("zodToGeminiParameters can convert zod schema to gemini schema", () => {
+ const zodSchema = z
+ .object({
+ operation: z
+ .enum(["add", "subtract", "multiply", "divide"])
+ .describe("The type of operation to execute"),
+ number1: z.number().describe("The first number to operate on."),
+ number2: z.number().describe("The second number to operate on."),
+ })
+ .describe("A simple calculator tool");
+
+ const convertedSchema = zodToGeminiParameters(zodSchema);
+
+ expect(convertedSchema.type).toBe("object");
+ expect(convertedSchema.description).toBe("A simple calculator tool");
+ expect(convertedSchema.properties).toEqual({
+ operation: {
+ type: "string",
+ enum: ["add", "subtract", "multiply", "divide"],
+ description: "The type of operation to execute",
+ },
+ number1: {
+ type: "number",
+ description: "The first number to operate on.",
+ },
+ number2: {
+ type: "number",
+ description: "The second number to operate on.",
+ },
+ });
+ expect(convertedSchema.required).toEqual(["operation", "number1", "number2"]);
+});
diff --git a/libs/langchain-google-common/src/types.ts b/libs/langchain-google-common/src/types.ts
index 2359fb89a0ac..f004bb153725 100644
--- a/libs/langchain-google-common/src/types.ts
+++ b/libs/langchain-google-common/src/types.ts
@@ -1,4 +1,6 @@
import type { BaseLLMParams } from "@langchain/core/language_models/llms";
+import { BaseLanguageModelCallOptions } from "@langchain/core/language_models/base";
+import { StructuredToolInterface } from "@langchain/core/tools";
import type { JsonStream } from "./utils/stream.js";
/**
@@ -46,8 +48,10 @@ export interface GoogleAISafetySetting {
}
export interface GoogleAIModelParams {
- /** Model to use */
+ /** @deprecated Prefer `modelName` */
model?: string;
+ /** Model to use */
+ modelName?: string;
/** Sampling temperature to use */
temperature?: number;
@@ -84,12 +88,24 @@ export interface GoogleAIModelParams {
safetySettings?: GoogleAISafetySetting[];
}
+/**
+ * The params which can be passed to the API at request time.
+ */
+export interface GoogleAIModelRequestParams extends GoogleAIModelParams {
+ tools?: StructuredToolInterface[] | GeminiTool[];
+}
+
export interface GoogleAIBaseLLMInput
extends BaseLLMParams,
GoogleConnectionParams,
GoogleAIModelParams,
GoogleAISafetyParams {}
+export interface GoogleAIBaseLanguageModelCallOptions
+ extends BaseLanguageModelCallOptions,
+ GoogleAIModelRequestParams,
+ GoogleAISafetyParams {}
+
/**
* Input to LLM class.
*/
@@ -153,7 +169,7 @@ export interface GeminiSafetyRating {
probability: string;
}
-export type GeminiRole = "user" | "model";
+export type GeminiRole = "user" | "model" | "function";
// Vertex AI requires the role
@@ -163,9 +179,34 @@ export interface GeminiContent {
}
export interface GeminiTool {
- // TODO: Implement
+ functionDeclarations?: GeminiFunctionDeclaration[];
+}
+
+export interface GeminiFunctionDeclaration {
+ name: string;
+ description: string;
+ parameters?: GeminiFunctionSchema;
}
+export interface GeminiFunctionSchema {
+ type: GeminiFunctionSchemaType;
+ format?: string;
+ description?: string;
+ nullable?: boolean;
+ enum?: string[];
+ properties?: Record<string, GeminiFunctionSchema>;
+ required?: string[];
+ items?: GeminiFunctionSchema;
+}
+
+export type GeminiFunctionSchemaType =
+ | "string"
+ | "number"
+ | "integer"
+ | "boolean"
+ | "array"
+ | "object";
+
export interface GeminiGenerationConfig {
stopSequences?: string[];
candidateCount?: number;
diff --git a/libs/langchain-google-common/src/utils/common.ts b/libs/langchain-google-common/src/utils/common.ts
index 09e9e21f4895..634cf5383ac9 100644
--- a/libs/langchain-google-common/src/utils/common.ts
+++ b/libs/langchain-google-common/src/utils/common.ts
@@ -1,26 +1,40 @@
-import type { GoogleAIModelParams, GoogleLLMModelFamily } from "../types.js";
+import type {
+ GoogleAIBaseLanguageModelCallOptions,
+ GoogleAIModelParams,
+ GoogleAIModelRequestParams,
+ GoogleLLMModelFamily,
+} from "../types.js";
import { isModelGemini, validateGeminiParams } from "./gemini.js";
export function copyAIModelParams(
- params: GoogleAIModelParams | undefined
-): GoogleAIModelParams {
- return copyAIModelParamsInto(params, {});
+ params: GoogleAIModelParams | undefined,
+ options: GoogleAIBaseLanguageModelCallOptions | undefined
+): GoogleAIModelRequestParams {
+ return copyAIModelParamsInto(params, options, {});
}
export function copyAIModelParamsInto(
params: GoogleAIModelParams | undefined,
+ options: GoogleAIBaseLanguageModelCallOptions | undefined,
target: GoogleAIModelParams
-): GoogleAIModelParams {
- const ret: GoogleAIModelParams = target || {};
+): GoogleAIModelRequestParams {
+ const ret: GoogleAIModelRequestParams = target || {};
- ret.model = params?.model ?? target.model;
+ ret.modelName = options?.modelName ?? params?.modelName ?? target.modelName;
+ ret.temperature =
+ options?.temperature ?? params?.temperature ?? target.temperature;
+ ret.maxOutputTokens =
+ options?.maxOutputTokens ??
+ params?.maxOutputTokens ??
+ target.maxOutputTokens;
+ ret.topP = options?.topP ?? params?.topP ?? target.topP;
+ ret.topK = options?.topK ?? params?.topK ?? target.topK;
+ ret.stopSequences =
+ options?.stopSequences ?? params?.stopSequences ?? target.stopSequences;
+ ret.safetySettings =
+ options?.safetySettings ?? params?.safetySettings ?? target.safetySettings;
- ret.temperature = params?.temperature ?? target.temperature;
- ret.maxOutputTokens = params?.maxOutputTokens ?? target.maxOutputTokens;
- ret.topP = params?.topP ?? target.topP;
- ret.topK = params?.topK ?? target.topK;
- ret.stopSequences = params?.stopSequences ?? target.stopSequences;
- ret.safetySettings = params?.safetySettings ?? target.safetySettings;
+ ret.tools = options?.tools;
return ret;
}
@@ -41,7 +55,7 @@ export function validateModelParams(
params: GoogleAIModelParams | undefined
): void {
const testParams: GoogleAIModelParams = params ?? {};
- switch (modelToFamily(testParams.model)) {
+ switch (modelToFamily(testParams.modelName)) {
case "gemini":
return validateGeminiParams(testParams);
default:
@@ -55,7 +69,7 @@ export function copyAndValidateModelParamsInto(
params: GoogleAIModelParams | undefined,
target: GoogleAIModelParams
): GoogleAIModelParams {
- copyAIModelParamsInto(params, target);
+ copyAIModelParamsInto(params, undefined, target);
validateModelParams(target);
return target;
}
diff --git a/libs/langchain-google-common/src/utils/gemini.ts b/libs/langchain-google-common/src/utils/gemini.ts
index 75594d7818ca..cfce2eed2923 100644
--- a/libs/langchain-google-common/src/utils/gemini.ts
+++ b/libs/langchain-google-common/src/utils/gemini.ts
@@ -3,11 +3,13 @@ import {
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
+ BaseMessageFields,
MessageContent,
MessageContentComplex,
MessageContentImageUrl,
MessageContentText,
SystemMessage,
+ ToolMessage,
} from "@langchain/core/messages";
import {
ChatGeneration,
@@ -26,13 +28,20 @@ import type {
GeminiContent,
GenerateContentResponseData,
GoogleAISafetyHandler,
+ GeminiPartFunctionCall,
} from "../types.js";
import { GoogleAISafetyError } from "./safety.js";
-function messageContentText(content: MessageContentText): GeminiPartText {
- return {
- text: content.text,
- };
+function messageContentText(
+ content: MessageContentText
+): GeminiPartText | null {
+ if (content?.text && content?.text.length > 0) {
+ return {
+ text: content.text,
+ };
+ } else {
+ return null;
+ }
}
function messageContentImageUrl(
@@ -78,27 +87,73 @@ export function messageContentToParts(content: MessageContent): GeminiPart[] {
: content;
// eslint-disable-next-line array-callback-return
- const parts: GeminiPart[] = messageContent.map((content) => {
- // eslint-disable-next-line default-case
- switch (content.type) {
- case "text":
- return messageContentText(content);
- case "image_url":
- return messageContentImageUrl(content);
+ const parts: GeminiPart[] = messageContent
+ .map((content) => {
+ switch (content.type) {
+ case "text":
+ return messageContentText(content);
+ case "image_url":
+ return messageContentImageUrl(content);
+ default:
+ throw new Error(
+ `Unsupported type received while converting message to message parts`
+ );
+ }
+ })
+ .reduce((acc: GeminiPart[], val: GeminiPart | null | undefined) => {
+ if (val) {
+ return [...acc, val];
+ } else {
+ return acc;
+ }
+ }, []);
+
+ return parts;
+}
+
+function messageToolCallsToParts(toolCalls: ToolCall[]): GeminiPart[] {
+ if (!toolCalls || toolCalls.length === 0) {
+ return [];
+ }
+
+ return toolCalls.map((tool: ToolCall) => {
+ let args = {};
+ if (tool?.function?.arguments) {
+ const argStr = tool.function.arguments;
+ args = JSON.parse(argStr);
}
+ return {
+ functionCall: {
+ name: tool.function.name,
+ args,
+ },
+ };
});
+}
- return parts;
+function messageKwargsToParts(kwargs: Record<string, unknown>): GeminiPart[] {
+ const ret: GeminiPart[] = [];
+
+ if (kwargs?.tool_calls) {
+ ret.push(...messageToolCallsToParts(kwargs.tool_calls as ToolCall[]));
+ }
+
+ return ret;
}
function roleMessageToContent(
role: GeminiRole,
message: BaseMessage
): GeminiContent[] {
+ const contentParts: GeminiPart[] = messageContentToParts(message.content);
+ const toolParts: GeminiPart[] = messageKwargsToParts(
+ message.additional_kwargs
+ );
+ const parts: GeminiPart[] = [...contentParts, ...toolParts];
return [
{
role,
- parts: messageContentToParts(message.content),
+ parts,
},
];
}
@@ -110,6 +165,36 @@ function systemMessageToContent(message: SystemMessage): GeminiContent[] {
];
}
+function toolMessageToContent(message: ToolMessage): GeminiContent[] {
+ const contentStr =
+ typeof message.content === "string"
+ ? message.content
+ : message.content.reduce(
+ (acc: string, content: MessageContentComplex) => {
+ if (content.type === "text") {
+ return acc + content.text;
+ } else {
+ return acc;
+ }
+ },
+ ""
+ );
+ const content = JSON.parse(contentStr);
+ return [
+ {
+ role: "function",
+ parts: [
+ {
+ functionResponse: {
+ name: message.tool_call_id,
+ response: content,
+ },
+ },
+ ],
+ },
+ ];
+}
+
export function baseMessageToContent(message: BaseMessage): GeminiContent[] {
const type = message._getType();
switch (type) {
@@ -119,6 +204,8 @@ export function baseMessageToContent(message: BaseMessage): GeminiContent[] {
return roleMessageToContent("user", message);
case "ai":
return roleMessageToContent("model", message);
+ case "tool":
+ return toolMessageToContent(message as ToolMessage);
default:
console.log(`Unsupported message type: ${type}`);
return [];
@@ -173,6 +260,73 @@ export function partsToMessageContent(parts: GeminiPart[]): MessageContent {
}, [] as MessageContentComplex[]);
}
+interface FunctionCall {
+ name: string;
+ arguments: string;
+}
+
+interface ToolCall {
+ id: string;
+ type: "function";
+ function: FunctionCall;
+}
+
+interface FunctionCallRaw {
+ name: string;
+ arguments: object;
+}
+
+interface ToolCallRaw {
+ id: string;
+ type: "function";
+ function: FunctionCallRaw;
+}
+
+function toolRawToTool(raw: ToolCallRaw): ToolCall {
+ return {
+ id: raw.id,
+ type: raw.type,
+ function: {
+ name: raw.function.name,
+ arguments: JSON.stringify(raw.function.arguments),
+ },
+ };
+}
+
+function functionCallPartToToolRaw(part: GeminiPartFunctionCall): ToolCallRaw {
+ return {
+ id: part?.functionCall?.name ?? "",
+ type: "function",
+ function: {
+ name: part.functionCall.name,
+ arguments: part.functionCall.args ?? {},
+ },
+ };
+}
+
+export function partsToToolsRaw(parts: GeminiPart[]): ToolCallRaw[] {
+ return parts
+ .map((part: GeminiPart) => {
+ if (part === undefined || part === null) {
+ return null;
+ } else if ("functionCall" in part) {
+ return functionCallPartToToolRaw(part);
+ } else {
+ return null;
+ }
+ })
+ .reduce((acc, content) => {
+ if (content) {
+ acc.push(content);
+ }
+ return acc;
+ }, [] as ToolCallRaw[]);
+}
+
+export function toolsRawToTools(raws: ToolCallRaw[]): ToolCall[] {
+ return raws.map((raw) => toolRawToTool(raw));
+}
+
export function responseToGenerateContentResponseData(
response: GoogleLLMResponse
): GenerateContentResponseData {
@@ -290,8 +444,8 @@ export function chunkToString(chunk: BaseMessageChunk): string {
}
export function partToMessage(part: GeminiPart): BaseMessageChunk {
- const content = partsToMessageContent([part]);
- return new AIMessageChunk({ content });
+ const fields = partsToBaseMessageFields([part]);
+ return new AIMessageChunk(fields);
}
export function partToChatGeneration(part: GeminiPart): ChatGeneration {
@@ -311,19 +465,35 @@ export function responseToChatGenerations(
return ret;
}
-export function responseToMessageContent(
+export function responseToBaseMessageFields(
response: GoogleLLMResponse
-): MessageContent {
+): BaseMessageFields {
const parts = responseToParts(response);
- return partsToMessageContent(parts);
+ return partsToBaseMessageFields(parts);
+}
+
+export function partsToBaseMessageFields(
+ parts: GeminiPart[]
+): BaseMessageFields {
+ const fields: BaseMessageFields = {
+ content: partsToMessageContent(parts),
+ };
+
+ const rawTools = partsToToolsRaw(parts);
+ if (rawTools.length > 0) {
+ const tools = toolsRawToTools(rawTools);
+ fields.additional_kwargs = {
+ tool_calls: tools,
+ };
+ }
+ return fields;
}
export function responseToBaseMessage(
response: GoogleLLMResponse
): BaseMessage {
- return new AIMessage({
- content: responseToMessageContent(response),
- });
+ const fields = responseToBaseMessageFields(response);
+ return new AIMessage(fields);
}
export function safeResponseToBaseMessage(
diff --git a/libs/langchain-google-common/src/utils/index.ts b/libs/langchain-google-common/src/utils/index.ts
new file mode 100644
index 000000000000..3aa0e7dbbf21
--- /dev/null
+++ b/libs/langchain-google-common/src/utils/index.ts
@@ -0,0 +1,7 @@
+export * from "./common.js";
+export * from "./failed_handler.js";
+export * from "./gemini.js";
+export * from "./zod_to_gemini_parameters.js";
+export * from "./palm.js";
+export * from "./safety.js";
+export * from "./stream.js";
diff --git a/libs/langchain-google-common/src/utils/zod_to_gemini_parameters.ts b/libs/langchain-google-common/src/utils/zod_to_gemini_parameters.ts
new file mode 100644
index 000000000000..666f359719fa
--- /dev/null
+++ b/libs/langchain-google-common/src/utils/zod_to_gemini_parameters.ts
@@ -0,0 +1,16 @@
+import type { z } from "zod";
+import { zodToJsonSchema } from "zod-to-json-schema";
+import { GeminiFunctionSchema } from "../types.js";
+
+export function zodToGeminiParameters(
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ zodObj: z.ZodType
+): GeminiFunctionSchema {
+ // Gemini doesn't accept either the $schema or additionalProperties
+ // attributes, so we need to explicitly remove them.
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ const jsonSchema = zodToJsonSchema(zodObj) as any;
+ const { $schema, additionalProperties, ...rest } = jsonSchema;
+
+ return rest;
+}
diff --git a/libs/langchain-google-gauth/.gitignore b/libs/langchain-google-gauth/.gitignore
index c10034e2f1be..df014a2d426b 100644
--- a/libs/langchain-google-gauth/.gitignore
+++ b/libs/langchain-google-gauth/.gitignore
@@ -2,6 +2,14 @@ index.cjs
index.js
index.d.ts
index.d.cts
+utils.cjs
+utils.js
+utils.d.ts
+utils.d.cts
+types.cjs
+types.js
+types.d.ts
+types.d.cts
node_modules
dist
.yarn
diff --git a/libs/langchain-google-gauth/langchain.config.js b/libs/langchain-google-gauth/langchain.config.js
index 416001cb4772..d277fdea7707 100644
--- a/libs/langchain-google-gauth/langchain.config.js
+++ b/libs/langchain-google-gauth/langchain.config.js
@@ -14,6 +14,8 @@ export const config = {
internals: [/node\:/, /@langchain\/core\//],
entrypoints: {
index: "index",
+ utils: "utils",
+ types: "types",
},
tsConfigPath: resolve("./tsconfig.json"),
cjsSource: "./dist-cjs",
diff --git a/libs/langchain-google-gauth/package.json b/libs/langchain-google-gauth/package.json
index ce9541d80fba..d8721e990a26 100644
--- a/libs/langchain-google-gauth/package.json
+++ b/libs/langchain-google-gauth/package.json
@@ -1,6 +1,6 @@
{
"name": "@langchain/google-gauth",
- "version": "0.0.0",
+ "version": "0.0.1",
"description": "Google auth based authentication support for Google services",
"type": "module",
"engines": {
@@ -40,7 +40,7 @@
"license": "MIT",
"dependencies": {
"@langchain/core": "~0.1.1",
- "@langchain/google-common": "~0.0.0",
+ "@langchain/google-common": "~0.0.2",
"google-auth-library": "^8.9.0"
},
"devDependencies": {
@@ -80,6 +80,24 @@
"import": "./index.js",
"require": "./index.cjs"
},
+ "./utils": {
+ "types": {
+ "import": "./utils.d.ts",
+ "require": "./utils.d.cts",
+ "default": "./utils.d.ts"
+ },
+ "import": "./utils.js",
+ "require": "./utils.cjs"
+ },
+ "./types": {
+ "types": {
+ "import": "./types.d.ts",
+ "require": "./types.d.cts",
+ "default": "./types.d.ts"
+ },
+ "import": "./types.js",
+ "require": "./types.cjs"
+ },
"./package.json": "./package.json"
},
"files": [
@@ -87,6 +105,14 @@
"index.cjs",
"index.js",
"index.d.ts",
- "index.d.cts"
+ "index.d.cts",
+ "utils.cjs",
+ "utils.js",
+ "utils.d.ts",
+ "utils.d.cts",
+ "types.cjs",
+ "types.js",
+ "types.d.ts",
+ "types.d.cts"
]
}
diff --git a/libs/langchain-google-gauth/src/tests/chat_models.int.test.ts b/libs/langchain-google-gauth/src/tests/chat_models.int.test.ts
index e55b4ec600ea..bb02b5f98286 100644
--- a/libs/langchain-google-gauth/src/tests/chat_models.int.test.ts
+++ b/libs/langchain-google-gauth/src/tests/chat_models.int.test.ts
@@ -1,4 +1,4 @@
-import { test } from "@jest/globals";
+import { expect, test } from "@jest/globals";
import { BaseLanguageModelInput } from "@langchain/core/language_models/base";
import { ChatPromptValue } from "@langchain/core/prompt_values";
import {
@@ -6,15 +6,18 @@ import {
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
+ BaseMessageLike,
HumanMessage,
MessageContentComplex,
MessageContentText,
SystemMessage,
+ ToolMessage,
} from "@langchain/core/messages";
+import { GeminiTool } from "@langchain/google-common";
import { ChatGoogle } from "../chat_models.js";
import { GoogleLLM } from "../llms.js";
-describe("GAuth Chat", () => {
+describe.skip("GAuth Chat", () => {
test("platform", async () => {
const model = new GoogleLLM();
expect(model.platform).toEqual("gcp");
@@ -109,4 +112,120 @@ describe("GAuth Chat", () => {
throw e;
}
});
+
+ test("function", async () => {
+ const tools: GeminiTool[] = [
+ {
+ functionDeclarations: [
+ {
+ name: "test",
+ description:
+ "Run a test with a specific name and get if it passed or failed",
+ parameters: {
+ type: "object",
+ properties: {
+ testName: {
+ type: "string",
+ description: "The name of the test that should be run.",
+ },
+ },
+ required: ["testName"],
+ },
+ },
+ ],
+ },
+ ];
+ const model = new ChatGoogle().bind({ tools });
+ const result = await model.invoke("Run a test on the cobalt project");
+ expect(result).toHaveProperty("content");
+ expect(Array.isArray(result.content)).toBeTruthy();
+ expect(result.content).toHaveLength(0);
+ const args = result?.lc_kwargs?.additional_kwargs;
+ expect(args).toBeDefined();
+ expect(args).toHaveProperty("tool_calls");
+ expect(Array.isArray(args.tool_calls)).toBeTruthy();
+ expect(args.tool_calls).toHaveLength(1);
+ const call = args.tool_calls[0];
+ expect(call).toHaveProperty("type");
+ expect(call.type).toBe("function");
+ expect(call).toHaveProperty("function");
+ const func = call.function;
+ expect(func).toBeDefined();
+ expect(func).toHaveProperty("name");
+ expect(func.name).toBe("test");
+ expect(func).toHaveProperty("arguments");
+ expect(typeof func.arguments).toBe("string");
+ expect(func.arguments.replaceAll("\n", "")).toBe('{"testName":"cobalt"}');
+ });
+
+ test("function reply", async () => {
+ const tools: GeminiTool[] = [
+ {
+ functionDeclarations: [
+ {
+ name: "test",
+ description:
+ "Run a test with a specific name and get if it passed or failed",
+ parameters: {
+ type: "object",
+ properties: {
+ testName: {
+ type: "string",
+ description: "The name of the test that should be run.",
+ },
+ },
+ required: ["testName"],
+ },
+ },
+ ],
+ },
+ ];
+ const model = new ChatGoogle().bind({ tools });
+ const toolResult = {
+ testPassed: true,
+ };
+ const messages: BaseMessageLike[] = [
+ new HumanMessage("Run a test on the cobalt project."),
+ new AIMessage("", {
+ tool_calls: [
+ {
+ id: "test",
+ type: "function",
+ function: {
+ name: "test",
+ arguments: '{"testName":"cobalt"}',
+ },
+ },
+ ],
+ }),
+ new ToolMessage(JSON.stringify(toolResult), "test"),
+ ];
+ const res = await model.stream(messages);
+ const resArray: BaseMessageChunk[] = [];
+ for await (const chunk of res) {
+ resArray.push(chunk);
+ }
+ console.log(JSON.stringify(resArray, null, 2));
+ });
+
+ test("withStructuredOutput", async () => {
+ const tool = {
+ name: "test",
+ description:
+ "Run a test with a specific name and get if it passed or failed",
+ parameters: {
+ type: "object",
+ properties: {
+ testName: {
+ type: "string",
+ description: "The name of the test that should be run.",
+ },
+ },
+ required: ["testName"],
+ },
+ };
+ const model = new ChatGoogle().withStructuredOutput(tool);
+ const result = await model.invoke("Run a test on the cobalt project");
+ expect(result).toHaveProperty("testName");
+ });
});
diff --git a/libs/langchain-google-gauth/src/tests/llms.int.test.ts b/libs/langchain-google-gauth/src/tests/llms.int.test.ts
index 9642fd03f178..0933d936e00c 100644
--- a/libs/langchain-google-gauth/src/tests/llms.int.test.ts
+++ b/libs/langchain-google-gauth/src/tests/llms.int.test.ts
@@ -13,7 +13,7 @@ const imgData = {
"iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAIAAAACUFjqAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH6AIbFwQSRaexCAAAAB1pVFh0Q29tbWVudAAAAAAAQ3JlYXRlZCB3aXRoIEdJTVBkLmUHAAAAJklEQVQY02P8//8/A27AxIAXsEAor31f0CS2OfEQ1j2Q0owU+RsAGNUJD2/04PgAAAAASUVORK5CYII=",
};
-describe("GAuth LLM", () => {
+describe.skip("GAuth LLM", () => {
test("platform", async () => {
const model = new GoogleLLM();
expect(model.platform).toEqual("gcp");
@@ -59,7 +59,7 @@ describe("GAuth LLM", () => {
test("predictMessage image", async () => {
const model = new GoogleLLM({
- model: "gemini-pro-vision",
+ modelName: "gemini-pro-vision",
});
const message: MessageContentComplex[] = [
{
@@ -84,7 +84,7 @@ describe("GAuth LLM", () => {
test("invoke image", async () => {
const model = new GoogleLLM({
- model: "gemini-pro-vision",
+ modelName: "gemini-pro-vision",
});
const message: MessageContentComplex[] = [
{
@@ -108,7 +108,7 @@ describe("GAuth LLM", () => {
});
});
-describe("GAuth LLM gai", () => {
+describe.skip("GAuth LLM gai", () => {
test("platform", async () => {
const model = new GoogleLLM({
platformType: "gai",
@@ -185,7 +185,7 @@ describe("GAuth LLM gai", () => {
test("predictMessage image", async () => {
const model = new GoogleLLM({
platformType: "gai",
- model: "gemini-pro-vision",
+ modelName: "gemini-pro-vision",
});
const message: MessageContentComplex[] = [
{
@@ -211,7 +211,7 @@ describe("GAuth LLM gai", () => {
test("invoke image", async () => {
const model = new GoogleLLM({
platformType: "gai",
- model: "gemini-pro-vision",
+ modelName: "gemini-pro-vision",
});
const message: MessageContentComplex[] = [
{
diff --git a/libs/langchain-google-gauth/src/types.ts b/libs/langchain-google-gauth/src/types.ts
new file mode 100644
index 000000000000..01116e7f338e
--- /dev/null
+++ b/libs/langchain-google-gauth/src/types.ts
@@ -0,0 +1 @@
+export * from "@langchain/google-common/types";
diff --git a/libs/langchain-google-gauth/src/utils.ts b/libs/langchain-google-gauth/src/utils.ts
new file mode 100644
index 000000000000..f21efb45914c
--- /dev/null
+++ b/libs/langchain-google-gauth/src/utils.ts
@@ -0,0 +1 @@
+export * from "@langchain/google-common/utils";
diff --git a/libs/langchain-google-vertexai-web/.eslintrc.cjs b/libs/langchain-google-vertexai-web/.eslintrc.cjs
new file mode 100644
index 000000000000..344f8a9d6cd9
--- /dev/null
+++ b/libs/langchain-google-vertexai-web/.eslintrc.cjs
@@ -0,0 +1,66 @@
+module.exports = {
+ extends: [
+ "airbnb-base",
+ "eslint:recommended",
+ "prettier",
+ "plugin:@typescript-eslint/recommended",
+ ],
+ parserOptions: {
+ ecmaVersion: 12,
+ parser: "@typescript-eslint/parser",
+ project: "./tsconfig.json",
+ sourceType: "module",
+ },
+ plugins: ["@typescript-eslint", "no-instanceof"],
+ ignorePatterns: [
+ ".eslintrc.cjs",
+ "scripts",
+ "node_modules",
+ "dist",
+ "dist-cjs",
+ "*.js",
+ "*.cjs",
+ "*.d.ts",
+ ],
+ rules: {
+ "no-process-env": 2,
+ "no-instanceof/no-instanceof": 2,
+ "@typescript-eslint/explicit-module-boundary-types": 0,
+ "@typescript-eslint/no-empty-function": 0,
+ "@typescript-eslint/no-shadow": 0,
+ "@typescript-eslint/no-empty-interface": 0,
+ "@typescript-eslint/no-use-before-define": ["error", "nofunc"],
+ "@typescript-eslint/no-unused-vars": ["warn", { args: "none" }],
+ "@typescript-eslint/no-floating-promises": "error",
+ "@typescript-eslint/no-misused-promises": "error",
+ camelcase: 0,
+ "class-methods-use-this": 0,
+ "import/extensions": [2, "ignorePackages"],
+ "import/no-extraneous-dependencies": [
+ "error",
+ { devDependencies: ["**/*.test.ts"] },
+ ],
+ "import/no-unresolved": 0,
+ "import/prefer-default-export": 0,
+ "keyword-spacing": "error",
+ "max-classes-per-file": 0,
+ "max-len": 0,
+ "no-await-in-loop": 0,
+ "no-bitwise": 0,
+ "no-console": 0,
+ "no-restricted-syntax": 0,
+ "no-shadow": 0,
+ "no-continue": 0,
+ "no-void": 0,
+ "no-underscore-dangle": 0,
+ "no-use-before-define": 0,
+ "no-useless-constructor": 0,
+ "no-return-await": 0,
+ "consistent-return": 0,
+ "no-else-return": 0,
+ "func-names": 0,
+ "no-lonely-if": 0,
+ "prefer-rest-params": 0,
+ "new-cap": ["error", { properties: false, capIsNew: false }],
+ },
+};
diff --git a/libs/langchain-google-vertexai-web/.gitignore b/libs/langchain-google-vertexai-web/.gitignore
new file mode 100644
index 000000000000..df014a2d426b
--- /dev/null
+++ b/libs/langchain-google-vertexai-web/.gitignore
@@ -0,0 +1,15 @@
+index.cjs
+index.js
+index.d.ts
+index.d.cts
+utils.cjs
+utils.js
+utils.d.ts
+utils.d.cts
+types.cjs
+types.js
+types.d.ts
+types.d.cts
+node_modules
+dist
+.yarn
diff --git a/libs/langchain-google-vertexai-web/.prettierrc b/libs/langchain-google-vertexai-web/.prettierrc
new file mode 100644
index 000000000000..ba08ff04f677
--- /dev/null
+++ b/libs/langchain-google-vertexai-web/.prettierrc
@@ -0,0 +1,19 @@
+{
+ "$schema": "https://json.schemastore.org/prettierrc",
+ "printWidth": 80,
+ "tabWidth": 2,
+ "useTabs": false,
+ "semi": true,
+ "singleQuote": false,
+ "quoteProps": "as-needed",
+ "jsxSingleQuote": false,
+ "trailingComma": "es5",
+ "bracketSpacing": true,
+ "arrowParens": "always",
+ "requirePragma": false,
+ "insertPragma": false,
+ "proseWrap": "preserve",
+ "htmlWhitespaceSensitivity": "css",
+ "vueIndentScriptAndStyle": false,
+ "endOfLine": "lf"
+}
diff --git a/libs/langchain-google-vertexai-web/.release-it.json b/libs/langchain-google-vertexai-web/.release-it.json
new file mode 100644
index 000000000000..06850ca85be1
--- /dev/null
+++ b/libs/langchain-google-vertexai-web/.release-it.json
@@ -0,0 +1,12 @@
+{
+ "github": {
+ "release": true,
+ "autoGenerate": true,
+ "tokenRef": "GITHUB_TOKEN_RELEASE"
+ },
+ "npm": {
+ "versionArgs": [
+ "--workspaces-update=false"
+ ]
+ }
+}
diff --git a/libs/langchain-google-vertexai-web/LICENSE b/libs/langchain-google-vertexai-web/LICENSE
new file mode 100644
index 000000000000..8cd8f501eb49
--- /dev/null
+++ b/libs/langchain-google-vertexai-web/LICENSE
@@ -0,0 +1,21 @@
+The MIT License
+
+Copyright (c) 2023 LangChain
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
\ No newline at end of file
diff --git a/libs/langchain-google-vertexai-web/README.md b/libs/langchain-google-vertexai-web/README.md
new file mode 100644
index 000000000000..c455675b4a55
--- /dev/null
+++ b/libs/langchain-google-vertexai-web/README.md
@@ -0,0 +1,38 @@
+# LangChain google-vertexai-web
+
+This package contains resources to access Google AI/ML models
+and other Google services via Vertex AI. Authorization to these
+services uses either an API Key or service account credentials
+that are included in an environment variable.
+
+If you are running this on the Google Cloud Platform, or in a way
+where service account credentials can be stored on a file system,
+consider using the @langchain/google-vertexai
+package *instead*. You do not need to use both packages. See the
+section on **Authorization** below.
+
+
+## Installation
+
+```bash
+$ yarn add @langchain/google-vertexai-web
+```
+
+
+## Authorization
+
+Authorization is done through an API Key or a Google Cloud Service Account.
+
+To handle service accounts, this package uses the `google-auth-library`
+package, and you may wish to consult the documentation for that library
+about how it does so. But in short, classes in this package will use
+credentials from the first of the following that apply:
+
+1. An API Key that is passed to the constructor using the `apiKey` attribute
+2. Credentials that are passed to the constructor using the `authInfo` attribute
+3. An API Key that is set in the environment variable `API_KEY`
+4. The Service Account credentials that are saved directly into the
+ `GOOGLE_WEB_CREDENTIALS` environment variable
+5. The Service Account credentials that are saved directly into the
+ `GOOGLE_VERTEX_AI_WEB_CREDENTIALS` environment variable (deprecated)
+
diff --git a/libs/langchain-google-vertexai-web/jest.config.cjs b/libs/langchain-google-vertexai-web/jest.config.cjs
new file mode 100644
index 000000000000..a06cb3338861
--- /dev/null
+++ b/libs/langchain-google-vertexai-web/jest.config.cjs
@@ -0,0 +1,20 @@
+/** @type {import('ts-jest').JestConfigWithTsJest} */
+module.exports = {
+ preset: "ts-jest/presets/default-esm",
+ testEnvironment: "./jest.env.cjs",
+ modulePathIgnorePatterns: ["dist/", "docs/"],
+ moduleNameMapper: {
+ "^(\\.{1,2}/.*)\\.js$": "$1",
+ },
+ transform: {
+ "^.+\\.tsx?$": ["@swc/jest"],
+ },
+ transformIgnorePatterns: [
+ "/node_modules/",
+ "\\.pnp\\.[^\\/]+$",
+ "./scripts/jest-setup-after-env.js",
+ ],
+ setupFiles: ["dotenv/config"],
+ testTimeout: 20_000,
+ passWithNoTests: true,
+};
diff --git a/libs/langchain-google-vertexai-web/jest.env.cjs b/libs/langchain-google-vertexai-web/jest.env.cjs
new file mode 100644
index 000000000000..2ccedccb8672
--- /dev/null
+++ b/libs/langchain-google-vertexai-web/jest.env.cjs
@@ -0,0 +1,12 @@
+const { TestEnvironment } = require("jest-environment-node");
+
+class AdjustedTestEnvironmentToSupportFloat32Array extends TestEnvironment {
+ constructor(config, context) {
+ // Make `instanceof Float32Array` return true in tests
+ // to avoid https://github.com/xenova/transformers.js/issues/57 and https://github.com/jestjs/jest/issues/2549
+ super(config, context);
+ this.global.Float32Array = Float32Array;
+ }
+}
+
+module.exports = AdjustedTestEnvironmentToSupportFloat32Array;
diff --git a/libs/langchain-google-vertexai-web/langchain.config.js b/libs/langchain-google-vertexai-web/langchain.config.js
new file mode 100644
index 000000000000..d277fdea7707
--- /dev/null
+++ b/libs/langchain-google-vertexai-web/langchain.config.js
@@ -0,0 +1,24 @@
+import { resolve, dirname } from "node:path";
+import { fileURLToPath } from "node:url";
+
+/**
+ * @param {string} relativePath
+ * @returns {string}
+ */
+function abs(relativePath) {
+ return resolve(dirname(fileURLToPath(import.meta.url)), relativePath);
+}
+
+
+export const config = {
+ internals: [/node\:/, /@langchain\/core\//],
+ entrypoints: {
+ index: "index",
+ utils: "utils",
+ types: "types",
+ },
+ tsConfigPath: resolve("./tsconfig.json"),
+ cjsSource: "./dist-cjs",
+ cjsDestination: "./dist",
+ abs,
+}
\ No newline at end of file
diff --git a/libs/langchain-google-vertexai-web/package.json b/libs/langchain-google-vertexai-web/package.json
new file mode 100644
index 000000000000..ea5c68448e90
--- /dev/null
+++ b/libs/langchain-google-vertexai-web/package.json
@@ -0,0 +1,117 @@
+{
+ "name": "@langchain/google-vertexai-web",
+ "version": "0.0.1",
+ "description": "LangChain.js support for Google Vertex AI Web",
+ "type": "module",
+ "engines": {
+ "node": ">=18"
+ },
+ "main": "./index.js",
+ "types": "./index.d.ts",
+ "repository": {
+ "type": "git",
+ "url": "git@github.com:langchain-ai/langchainjs.git"
+ },
+ "homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-google-vertexai-web/",
+ "scripts": {
+ "build": "yarn run build:deps && yarn clean && yarn build:esm && yarn build:cjs && yarn build:scripts",
+ "build:deps": "yarn run turbo:command build --filter=@langchain/google-gauth",
+ "build:esm": "NODE_OPTIONS=--max-old-space-size=4096 tsc --outDir dist/ && rm -rf dist/tests dist/**/tests",
+ "build:cjs": "NODE_OPTIONS=--max-old-space-size=4096 tsc --outDir dist-cjs/ -p tsconfig.cjs.json && yarn move-cjs-to-dist && rm -rf dist-cjs",
+ "build:watch": "yarn create-entrypoints && tsc --outDir dist/ --watch",
+ "build:scripts": "yarn create-entrypoints && yarn check-tree-shaking",
+ "lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/",
+ "lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts",
+ "lint": "yarn lint:eslint && yarn lint:dpdm",
+ "lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm",
+ "clean": "rm -rf dist/ && NODE_OPTIONS=--max-old-space-size=4096 yarn lc-build --config ./langchain.config.js --create-entrypoints --pre",
+ "prepack": "yarn build",
+ "test": "yarn run build:deps && NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%",
+ "test:watch": "yarn run build:deps && NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts",
+ "test:single": "yarn run build:deps && NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000",
+ "test:integration": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%",
+ "format": "prettier --config .prettierrc --write \"src\"",
+ "format:check": "prettier --config .prettierrc --check \"src\"",
+ "move-cjs-to-dist": "yarn lc-build --config ./langchain.config.js --move-cjs-dist",
+ "create-entrypoints": "yarn lc-build --config ./langchain.config.js --create-entrypoints",
+ "check-tree-shaking": "yarn lc-build --config ./langchain.config.js --tree-shaking"
+ },
+ "author": "LangChain",
+ "license": "MIT",
+ "dependencies": {
+ "@langchain/core": "~0.1.1",
+ "@langchain/google-webauth": "~0.0.1"
+ },
+ "devDependencies": {
+ "@jest/globals": "^29.5.0",
+ "@langchain/scripts": "~0.0",
+ "@swc/core": "^1.3.90",
+ "@swc/jest": "^0.2.29",
+ "@tsconfig/recommended": "^1.0.3",
+ "@typescript-eslint/eslint-plugin": "^6.12.0",
+ "@typescript-eslint/parser": "^6.12.0",
+ "dotenv": "^16.3.1",
+ "dpdm": "^3.12.0",
+ "eslint": "^8.33.0",
+ "eslint-config-airbnb-base": "^15.0.0",
+ "eslint-config-prettier": "^8.6.0",
+ "eslint-plugin-import": "^2.27.5",
+ "eslint-plugin-no-instanceof": "^1.0.1",
+ "eslint-plugin-prettier": "^4.2.1",
+ "jest": "^29.5.0",
+ "jest-environment-node": "^29.6.4",
+ "prettier": "^2.8.3",
+ "release-it": "^15.10.1",
+ "rollup": "^4.5.2",
+ "ts-jest": "^29.1.0",
+ "typescript": "<5.2.0"
+ },
+ "publishConfig": {
+ "access": "public"
+ },
+ "exports": {
+ ".": {
+ "types": {
+ "import": "./index.d.ts",
+ "require": "./index.d.cts",
+ "default": "./index.d.ts"
+ },
+ "import": "./index.js",
+ "require": "./index.cjs"
+ },
+ "./utils": {
+ "types": {
+ "import": "./utils.d.ts",
+ "require": "./utils.d.cts",
+ "default": "./utils.d.ts"
+ },
+ "import": "./utils.js",
+ "require": "./utils.cjs"
+ },
+ "./types": {
+ "types": {
+ "import": "./types.d.ts",
+ "require": "./types.d.cts",
+ "default": "./types.d.ts"
+ },
+ "import": "./types.js",
+ "require": "./types.cjs"
+ },
+ "./package.json": "./package.json"
+ },
+ "files": [
+ "dist/",
+ "index.cjs",
+ "index.js",
+ "index.d.ts",
+ "index.d.cts",
+ "utils.cjs",
+ "utils.js",
+ "utils.d.ts",
+ "utils.d.cts",
+ "types.cjs",
+ "types.js",
+ "types.d.ts",
+ "types.d.cts"
+ ]
+}
diff --git a/libs/langchain-google-vertexai-web/src/chat_models.ts b/libs/langchain-google-vertexai-web/src/chat_models.ts
new file mode 100644
index 000000000000..9a4f20dc3912
--- /dev/null
+++ b/libs/langchain-google-vertexai-web/src/chat_models.ts
@@ -0,0 +1,22 @@
+import { type ChatGoogleInput, ChatGoogle } from "@langchain/google-webauth";
+
+/**
+ * Input to chat model class.
+ */
+export interface ChatVertexAIInput extends ChatGoogleInput {}
+
+/**
+ * Integration with a chat model.
+ */
+export class ChatVertexAI extends ChatGoogle {
+ static lc_name() {
+ return "ChatVertexAI";
+ }
+
+ constructor(fields?: ChatVertexAIInput) {
+ super({
+ ...fields,
+ platformType: "gcp",
+ });
+ }
+}
diff --git a/libs/langchain-google-vertexai-web/src/index.ts b/libs/langchain-google-vertexai-web/src/index.ts
new file mode 100644
index 000000000000..2c8aa4ecb468
--- /dev/null
+++ b/libs/langchain-google-vertexai-web/src/index.ts
@@ -0,0 +1,2 @@
+export * from "./chat_models.js";
+export * from "./llms.js";
diff --git a/libs/langchain-google-vertexai-web/src/llms.ts b/libs/langchain-google-vertexai-web/src/llms.ts
new file mode 100644
index 000000000000..5ab89b081541
--- /dev/null
+++ b/libs/langchain-google-vertexai-web/src/llms.ts
@@ -0,0 +1,22 @@
+import { type GoogleLLMInput, GoogleLLM } from "@langchain/google-webauth";
+
+/**
+ * Input to LLM model class.
+ */
+export interface VertexAIInput extends GoogleLLMInput {}
+
+/**
+ * Integration with a LLM model.
+ */
+export class VertexAI extends GoogleLLM {
+ static lc_name() {
+ return "VertexAI";
+ }
+
+ constructor(fields?: VertexAIInput) {
+ super({
+ ...fields,
+ platformType: "gcp",
+ });
+ }
+}
diff --git a/libs/langchain-google-vertexai-web/src/tests/chat_models.int.test.ts b/libs/langchain-google-vertexai-web/src/tests/chat_models.int.test.ts
new file mode 100644
index 000000000000..e0a434a097f9
--- /dev/null
+++ b/libs/langchain-google-vertexai-web/src/tests/chat_models.int.test.ts
@@ -0,0 +1,198 @@
+import { test } from "@jest/globals";
+import {
+ AIMessage,
+ AIMessageChunk,
+ BaseMessage,
+ BaseMessageChunk,
+ HumanMessage,
+ MessageContentComplex,
+ MessageContentText,
+ SystemMessage,
+} from "@langchain/core/messages";
+import { BaseLanguageModelInput } from "@langchain/core/language_models/base";
+import { ChatPromptValue } from "@langchain/core/prompt_values";
+import { ChatVertexAI } from "../chat_models.js";
+
+describe("Google APIKey Chat", () => {
+ test("invoke", async () => {
+ const model = new ChatVertexAI();
+ try {
+ const res = await model.invoke("What is 1 + 1?");
+ expect(res).toBeDefined();
+ expect(res._getType()).toEqual("ai");
+
+ const aiMessage = res as AIMessageChunk;
+ expect(aiMessage.content).toBeDefined();
+ expect(aiMessage.content.length).toBeGreaterThan(0);
+ expect(aiMessage.content[0]).toBeDefined();
+
+ const content = aiMessage.content[0] as MessageContentComplex;
+ expect(content).toHaveProperty("type");
+ expect(content.type).toEqual("text");
+
+ const textContent = content as MessageContentText;
+ expect(textContent.text).toBeDefined();
+ expect(textContent.text).toEqual("2");
+ } catch (e) {
+ console.error(e);
+ throw e;
+ }
+ });
+
+ test("generate", async () => {
+ const model = new ChatVertexAI();
+ try {
+ const messages: BaseMessage[] = [
+ new SystemMessage(
+ "You will reply to all requests to flip a coin with either H, indicating heads, or T, indicating tails."
+ ),
+ new HumanMessage("Flip it"),
+ new AIMessage("T"),
+ new HumanMessage("Flip the coin again"),
+ ];
+ const res = await model.predictMessages(messages);
+ expect(res).toBeDefined();
+ expect(res._getType()).toEqual("ai");
+
+ const aiMessage = res as AIMessageChunk;
+ expect(aiMessage.content).toBeDefined();
+ expect(aiMessage.content.length).toBeGreaterThan(0);
+ expect(aiMessage.content[0]).toBeDefined();
+
+ const content = aiMessage.content[0] as MessageContentComplex;
+ expect(content).toHaveProperty("type");
+ expect(content.type).toEqual("text");
+
+ const textContent = content as MessageContentText;
+ expect(textContent.text).toBeDefined();
+ expect(["H", "T"]).toContainEqual(textContent.text);
+ } catch (e) {
+ console.error(e);
+ throw e;
+ }
+ });
+
+ test("stream", async () => {
+ const model = new ChatVertexAI();
+ try {
+ const input: BaseLanguageModelInput = new ChatPromptValue([
+ new SystemMessage(
+ "You will reply to all requests to flip a coin with either H, indicating heads, or T, indicating tails."
+ ),
+ new HumanMessage("Flip it"),
+ new AIMessage("T"),
+ new HumanMessage("Flip the coin again"),
+ ]);
+ const res = await model.stream(input);
+ const resArray: BaseMessageChunk[] = [];
+ for await (const chunk of res) {
+ resArray.push(chunk);
+ }
+ expect(resArray).toBeDefined();
+ expect(resArray.length).toBeGreaterThanOrEqual(1);
+
+ const lastChunk = resArray[resArray.length - 1];
+ expect(lastChunk).toBeDefined();
+ expect(lastChunk._getType()).toEqual("ai");
+ const aiChunk = lastChunk as AIMessageChunk;
+ console.log(aiChunk);
+
+ console.log(JSON.stringify(resArray, null, 2));
+ } catch (e) {
+ console.error(e);
+ throw e;
+ }
+ });
+});
+
+describe("Google Webauth Chat", () => {
+ test("invoke", async () => {
+ const model = new ChatVertexAI();
+ try {
+ const res = await model.invoke("What is 1 + 1?");
+ expect(res).toBeDefined();
+ expect(res._getType()).toEqual("ai");
+
+ const aiMessage = res as AIMessageChunk;
+ expect(aiMessage.content).toBeDefined();
+ expect(aiMessage.content.length).toBeGreaterThan(0);
+ expect(aiMessage.content[0]).toBeDefined();
+
+ const content = aiMessage.content[0] as MessageContentComplex;
+ expect(content).toHaveProperty("type");
+ expect(content.type).toEqual("text");
+
+ const textContent = content as MessageContentText;
+ expect(textContent.text).toBeDefined();
+ expect(textContent.text).toEqual("2");
+ } catch (e) {
+ console.error(e);
+ throw e;
+ }
+ });
+
+ test("generate", async () => {
+ const model = new ChatVertexAI();
+ try {
+ const messages: BaseMessage[] = [
+ new SystemMessage(
+ "You will reply to all requests to flip a coin with either H, indicating heads, or T, indicating tails."
+ ),
+ new HumanMessage("Flip it"),
+ new AIMessage("T"),
+ new HumanMessage("Flip the coin again"),
+ ];
+ const res = await model.predictMessages(messages);
+ expect(res).toBeDefined();
+ expect(res._getType()).toEqual("ai");
+
+ const aiMessage = res as AIMessageChunk;
+ expect(aiMessage.content).toBeDefined();
+ expect(aiMessage.content.length).toBeGreaterThan(0);
+ expect(aiMessage.content[0]).toBeDefined();
+
+ const content = aiMessage.content[0] as MessageContentComplex;
+ expect(content).toHaveProperty("type");
+ expect(content.type).toEqual("text");
+
+ const textContent = content as MessageContentText;
+ expect(textContent.text).toBeDefined();
+ expect(["H", "T"]).toContainEqual(textContent.text);
+ } catch (e) {
+ console.error(e);
+ throw e;
+ }
+ });
+
+ test("stream", async () => {
+ const model = new ChatVertexAI();
+ try {
+ const input: BaseLanguageModelInput = new ChatPromptValue([
+ new SystemMessage(
+ "You will reply to all requests to flip a coin with either H, indicating heads, or T, indicating tails."
+ ),
+ new HumanMessage("Flip it"),
+ new AIMessage("T"),
+ new HumanMessage("Flip the coin again"),
+ ]);
+ const res = await model.stream(input);
+ const resArray: BaseMessageChunk[] = [];
+ for await (const chunk of res) {
+ resArray.push(chunk);
+ }
+ expect(resArray).toBeDefined();
+ expect(resArray.length).toBeGreaterThanOrEqual(1);
+
+ const lastChunk = resArray[resArray.length - 1];
+ expect(lastChunk).toBeDefined();
+ expect(lastChunk._getType()).toEqual("ai");
+ const aiChunk = lastChunk as AIMessageChunk;
+ console.log(aiChunk);
+
+ console.log(JSON.stringify(resArray, null, 2));
+ } catch (e) {
+ console.error(e);
+ throw e;
+ }
+ });
+});
diff --git a/libs/langchain-google-vertexai-web/src/tests/llms.int.test.ts b/libs/langchain-google-vertexai-web/src/tests/llms.int.test.ts
new file mode 100644
index 000000000000..2b8155710edf
--- /dev/null
+++ b/libs/langchain-google-vertexai-web/src/tests/llms.int.test.ts
@@ -0,0 +1,294 @@
+import { test } from "@jest/globals";
+import {
+ AIMessage,
+ BaseMessage,
+ HumanMessageChunk,
+ MessageContentComplex,
+} from "@langchain/core/messages";
+import { ChatPromptValue } from "@langchain/core/prompt_values";
+import { VertexAI } from "../llms.js";
+
+const imgData = {
+ blueSquare:
+ "iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAIAAAACUFjqAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH6AIbFwQSRaexCAAAAB1pVFh0Q29tbWVudAAAAAAAQ3JlYXRlZCB3aXRoIEdJTVBkLmUHAAAAJklEQVQY02P8//8/A27AxIAXsEAor31f0CS2OfEQ1j2Q0owU+RsAGNUJD2/04PgAAAAASUVORK5CYII=",
+};
+
+describe("Google APIKey LLM", () => {
+ test("platform", async () => {
+ const model = new VertexAI();
+    expect(model.platform).toEqual("gcp");
+ });
+
+ /*
+ * This test currently fails in AI Studio due to zealous safety systems
+ */
+ test("call", async () => {
+ const model = new VertexAI();
+ const res = await model.invoke("1 + 1 = ");
+ if (res.length === 1) {
+ expect(res).toBe("2");
+ } else {
+ expect(res.length).toBeGreaterThan(0);
+ console.log("call result:", res);
+ }
+ });
+
+ test("call", async () => {
+ const model = new VertexAI();
+ try {
+ const res = await model.invoke("If the time is 1:00, what time is it?");
+ expect(res.length).toBeGreaterThan(0);
+ expect(res.substring(0, 4)).toEqual("1:00");
+ } catch (xx) {
+ console.error(xx);
+ throw xx;
+ }
+ });
+
+ test("stream", async () => {
+ const model = new VertexAI();
+ const stream = await model.stream(
+ "What is the answer to live, the universe, and everything? Be verbose."
+ );
+ const chunks = [];
+ for await (const chunk of stream) {
+ chunks.push(chunk);
+ }
+ expect(chunks.length).toBeGreaterThan(1);
+ });
+
+ test("predictMessage image", async () => {
+ const model = new VertexAI({
+ modelName: "gemini-pro-vision",
+ });
+ const message: MessageContentComplex[] = [
+ {
+ type: "text",
+ text: "What is in this image?",
+ },
+ {
+ type: "image_url",
+ image_url: `data:image/png;base64,${imgData.blueSquare}`,
+ },
+ ];
+
+ const messages: BaseMessage[] = [
+ new HumanMessageChunk({ content: message }),
+ ];
+ const res = await model.predictMessages(messages);
+ expect(res).toBeInstanceOf(AIMessage);
+ expect(Array.isArray(res.content)).toEqual(true);
+ expect(res.content[0]).toHaveProperty("text");
+ console.log("res", res);
+ });
+
+ test("invoke image", async () => {
+ const model = new VertexAI({
+ modelName: "gemini-pro-vision",
+ });
+ const message: MessageContentComplex[] = [
+ {
+ type: "text",
+ text: "What is in this image?",
+ },
+ {
+ type: "image_url",
+ image_url: `data:image/png;base64,${imgData.blueSquare}`,
+ },
+ ];
+
+ const messages: BaseMessage[] = [
+ new HumanMessageChunk({ content: message }),
+ ];
+ const input = new ChatPromptValue(messages);
+ const res = await model.invoke(input);
+ expect(res).toBeDefined();
+ expect(res.length).toBeGreaterThan(0);
+ console.log("res", res);
+ });
+});
+
+describe("Google WebAuth LLM", () => {
+ test("platform", async () => {
+ const model = new VertexAI();
+ expect(model.platform).toEqual("gcp");
+ });
+
+ test("call", async () => {
+ const model = new VertexAI();
+ const res = await model.invoke("1 + 1 = ");
+ if (res.length === 1) {
+ expect(res).toBe("2");
+ } else {
+ expect(res.length).toBeGreaterThan(0);
+ console.log("call result:", res);
+ }
+ });
+
+ test("stream", async () => {
+ const model = new VertexAI();
+ const stream = await model.stream(
+ "What is the answer to live, the universe, and everything? Be verbose."
+ );
+ const chunks = [];
+ for await (const chunk of stream) {
+ chunks.push(chunk);
+ }
+ expect(chunks.length).toBeGreaterThan(1);
+ });
+
+ test("predictMessage image", async () => {
+ const model = new VertexAI({
+ modelName: "gemini-pro-vision",
+ });
+ const message: MessageContentComplex[] = [
+ {
+ type: "text",
+ text: "What is in this image?",
+ },
+ {
+ type: "image_url",
+ image_url: `data:image/png;base64,${imgData.blueSquare}`,
+ },
+ ];
+
+ const messages: BaseMessage[] = [
+ new HumanMessageChunk({ content: message }),
+ ];
+ const res = await model.predictMessages(messages);
+ expect(res).toBeInstanceOf(AIMessage);
+ expect(Array.isArray(res.content)).toEqual(true);
+ expect(res.content[0]).toHaveProperty("text");
+ console.log("res", res);
+ });
+
+ test("invoke image", async () => {
+ const model = new VertexAI({
+ modelName: "gemini-pro-vision",
+ });
+ const message: MessageContentComplex[] = [
+ {
+ type: "text",
+ text: "What is in this image?",
+ },
+ {
+ type: "image_url",
+ image_url: `data:image/png;base64,${imgData.blueSquare}`,
+ },
+ ];
+
+ const messages: BaseMessage[] = [
+ new HumanMessageChunk({ content: message }),
+ ];
+ const input = new ChatPromptValue(messages);
+ const res = await model.invoke(input);
+ expect(res).toBeDefined();
+ expect(res.length).toBeGreaterThan(0);
+ console.log("res", res);
+ });
+});
+
+describe("Google WebAuth gai LLM", () => {
+ test("platform", async () => {
+ const model = new VertexAI({
+ platformType: "gai",
+ });
+ expect(model.platform).toEqual("gai");
+ });
+
+ /*
+ * This test currently fails in AI Studio due to zealous safety systems
+ */
+ test("call", async () => {
+ const model = new VertexAI({
+ platformType: "gai",
+ });
+ const res = await model.invoke("1 + 1 = ");
+ if (res.length === 1) {
+ expect(res).toBe("2");
+ } else {
+ expect(res.length).toBeGreaterThan(0);
+ console.log("call result:", res);
+ }
+ });
+
+ test("call", async () => {
+ const model = new VertexAI({
+ platformType: "gai",
+ });
+ try {
+ const res = await model.invoke("If the time is 1:00, what time is it?");
+ expect(res.length).toBeGreaterThan(0);
+ expect(res.substring(0, 4)).toEqual("1:00");
+ } catch (xx) {
+ console.error(xx);
+ throw xx;
+ }
+ });
+
+ test("stream", async () => {
+ const model = new VertexAI({
+ platformType: "gai",
+ });
+ const stream = await model.stream(
+ "What is the answer to live, the universe, and everything? Be verbose."
+ );
+ const chunks = [];
+ for await (const chunk of stream) {
+ chunks.push(chunk);
+ }
+ expect(chunks.length).toBeGreaterThan(1);
+ });
+
+ test("predictMessage image", async () => {
+ const model = new VertexAI({
+ platformType: "gai",
+ modelName: "gemini-pro-vision",
+ });
+ const message: MessageContentComplex[] = [
+ {
+ type: "text",
+ text: "What is in this image?",
+ },
+ {
+ type: "image_url",
+ image_url: `data:image/png;base64,${imgData.blueSquare}`,
+ },
+ ];
+
+ const messages: BaseMessage[] = [
+ new HumanMessageChunk({ content: message }),
+ ];
+ const res = await model.predictMessages(messages);
+ expect(res).toBeInstanceOf(AIMessage);
+ expect(Array.isArray(res.content)).toEqual(true);
+ expect(res.content[0]).toHaveProperty("text");
+ console.log("res", res);
+ });
+
+ test("invoke image", async () => {
+ const model = new VertexAI({
+ platformType: "gai",
+ modelName: "gemini-pro-vision",
+ });
+ const message: MessageContentComplex[] = [
+ {
+ type: "text",
+ text: "What is in this image?",
+ },
+ {
+ type: "image_url",
+ image_url: `data:image/png;base64,${imgData.blueSquare}`,
+ },
+ ];
+
+ const messages: BaseMessage[] = [
+ new HumanMessageChunk({ content: message }),
+ ];
+ const input = new ChatPromptValue(messages);
+ const res = await model.invoke(input);
+ expect(res).toBeDefined();
+ expect(res.length).toBeGreaterThan(0);
+ console.log("res", res);
+ });
+});
diff --git a/libs/langchain-google-vertexai-web/src/types.ts b/libs/langchain-google-vertexai-web/src/types.ts
new file mode 100644
index 000000000000..1473b77e1e1e
--- /dev/null
+++ b/libs/langchain-google-vertexai-web/src/types.ts
@@ -0,0 +1 @@
+export * from "@langchain/google-webauth/types";
diff --git a/libs/langchain-google-vertexai-web/src/utils.ts b/libs/langchain-google-vertexai-web/src/utils.ts
new file mode 100644
index 000000000000..82eb1024d1bc
--- /dev/null
+++ b/libs/langchain-google-vertexai-web/src/utils.ts
@@ -0,0 +1 @@
+export * from "@langchain/google-webauth/utils";
diff --git a/libs/langchain-google-vertexai-web/tsconfig.cjs.json b/libs/langchain-google-vertexai-web/tsconfig.cjs.json
new file mode 100644
index 000000000000..3b7026ea406c
--- /dev/null
+++ b/libs/langchain-google-vertexai-web/tsconfig.cjs.json
@@ -0,0 +1,8 @@
+{
+ "extends": "./tsconfig.json",
+ "compilerOptions": {
+ "module": "commonjs",
+ "declaration": false
+ },
+ "exclude": ["node_modules", "dist", "docs", "**/tests"]
+}
diff --git a/libs/langchain-google-vertexai-web/tsconfig.json b/libs/langchain-google-vertexai-web/tsconfig.json
new file mode 100644
index 000000000000..bc85d83b6229
--- /dev/null
+++ b/libs/langchain-google-vertexai-web/tsconfig.json
@@ -0,0 +1,23 @@
+{
+ "extends": "@tsconfig/recommended",
+ "compilerOptions": {
+ "outDir": "../dist",
+ "rootDir": "./src",
+ "target": "ES2021",
+ "lib": ["ES2021", "ES2022.Object", "DOM"],
+ "module": "ES2020",
+ "moduleResolution": "nodenext",
+ "esModuleInterop": true,
+ "declaration": true,
+ "noImplicitReturns": true,
+ "noFallthroughCasesInSwitch": true,
+ "noUnusedLocals": true,
+ "noUnusedParameters": true,
+ "useDefineForClassFields": true,
+ "strictPropertyInitialization": false,
+ "allowJs": true,
+ "strict": true
+ },
+ "include": ["src/**/*"],
+ "exclude": ["node_modules", "dist", "docs"]
+}
diff --git a/libs/langchain-google-vertexai/.eslintrc.cjs b/libs/langchain-google-vertexai/.eslintrc.cjs
new file mode 100644
index 000000000000..344f8a9d6cd9
--- /dev/null
+++ b/libs/langchain-google-vertexai/.eslintrc.cjs
@@ -0,0 +1,66 @@
+module.exports = {
+ extends: [
+ "airbnb-base",
+ "eslint:recommended",
+ "prettier",
+ "plugin:@typescript-eslint/recommended",
+ ],
+ parserOptions: {
+ ecmaVersion: 12,
+ parser: "@typescript-eslint/parser",
+ project: "./tsconfig.json",
+ sourceType: "module",
+ },
+ plugins: ["@typescript-eslint", "no-instanceof"],
+ ignorePatterns: [
+ ".eslintrc.cjs",
+ "scripts",
+ "node_modules",
+ "dist",
+ "dist-cjs",
+ "*.js",
+ "*.cjs",
+ "*.d.ts",
+ ],
+ rules: {
+ "no-process-env": 2,
+ "no-instanceof/no-instanceof": 2,
+ "@typescript-eslint/explicit-module-boundary-types": 0,
+ "@typescript-eslint/no-empty-function": 0,
+ "@typescript-eslint/no-shadow": 0,
+ "@typescript-eslint/no-empty-interface": 0,
+ "@typescript-eslint/no-use-before-define": ["error", "nofunc"],
+ "@typescript-eslint/no-unused-vars": ["warn", { args: "none" }],
+ "@typescript-eslint/no-floating-promises": "error",
+ "@typescript-eslint/no-misused-promises": "error",
+ camelcase: 0,
+ "class-methods-use-this": 0,
+ "import/extensions": [2, "ignorePackages"],
+ "import/no-extraneous-dependencies": [
+ "error",
+ { devDependencies: ["**/*.test.ts"] },
+ ],
+ "import/no-unresolved": 0,
+ "import/prefer-default-export": 0,
+ "keyword-spacing": "error",
+ "max-classes-per-file": 0,
+ "max-len": 0,
+ "no-await-in-loop": 0,
+ "no-bitwise": 0,
+ "no-console": 0,
+ "no-restricted-syntax": 0,
+ "no-shadow": 0,
+ "no-continue": 0,
+ "no-void": 0,
+ "no-underscore-dangle": 0,
+ "no-use-before-define": 0,
+ "no-useless-constructor": 0,
+ "no-return-await": 0,
+ "consistent-return": 0,
+ "no-else-return": 0,
+ "func-names": 0,
+ "no-lonely-if": 0,
+ "prefer-rest-params": 0,
+ "new-cap": ["error", { properties: false, capIsNew: false }],
+ },
+};
diff --git a/libs/langchain-google-vertexai/.gitignore b/libs/langchain-google-vertexai/.gitignore
new file mode 100644
index 000000000000..df014a2d426b
--- /dev/null
+++ b/libs/langchain-google-vertexai/.gitignore
@@ -0,0 +1,15 @@
+index.cjs
+index.js
+index.d.ts
+index.d.cts
+utils.cjs
+utils.js
+utils.d.ts
+utils.d.cts
+types.cjs
+types.js
+types.d.ts
+types.d.cts
+node_modules
+dist
+.yarn
diff --git a/libs/langchain-google-vertexai/.prettierrc b/libs/langchain-google-vertexai/.prettierrc
new file mode 100644
index 000000000000..ba08ff04f677
--- /dev/null
+++ b/libs/langchain-google-vertexai/.prettierrc
@@ -0,0 +1,19 @@
+{
+ "$schema": "https://json.schemastore.org/prettierrc",
+ "printWidth": 80,
+ "tabWidth": 2,
+ "useTabs": false,
+ "semi": true,
+ "singleQuote": false,
+ "quoteProps": "as-needed",
+ "jsxSingleQuote": false,
+ "trailingComma": "es5",
+ "bracketSpacing": true,
+ "arrowParens": "always",
+ "requirePragma": false,
+ "insertPragma": false,
+ "proseWrap": "preserve",
+ "htmlWhitespaceSensitivity": "css",
+ "vueIndentScriptAndStyle": false,
+ "endOfLine": "lf"
+}
diff --git a/libs/langchain-google-vertexai/.release-it.json b/libs/langchain-google-vertexai/.release-it.json
new file mode 100644
index 000000000000..06850ca85be1
--- /dev/null
+++ b/libs/langchain-google-vertexai/.release-it.json
@@ -0,0 +1,12 @@
+{
+ "github": {
+ "release": true,
+ "autoGenerate": true,
+ "tokenRef": "GITHUB_TOKEN_RELEASE"
+ },
+ "npm": {
+ "versionArgs": [
+ "--workspaces-update=false"
+ ]
+ }
+}
diff --git a/libs/langchain-google-vertexai/LICENSE b/libs/langchain-google-vertexai/LICENSE
new file mode 100644
index 000000000000..8cd8f501eb49
--- /dev/null
+++ b/libs/langchain-google-vertexai/LICENSE
@@ -0,0 +1,21 @@
+The MIT License
+
+Copyright (c) 2023 LangChain
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
\ No newline at end of file
diff --git a/libs/langchain-google-vertexai/README.md b/libs/langchain-google-vertexai/README.md
new file mode 100644
index 000000000000..31c4177ac8ce
--- /dev/null
+++ b/libs/langchain-google-vertexai/README.md
@@ -0,0 +1,40 @@
+# LangChain google-vertexai
+
+This package contains resources to access Google AI/ML models
+and other Google services via Vertex AI. Authorization to these
+services uses service account credentials stored on the local
+file system or provided through the Google Cloud Platform
+environment it is running on.
+
+If you are running this on a platform where the credentials cannot
+be provided this way, consider using the @langchain/google-vertexai-web
+package *instead*. You do not need to use both packages. See the
+section on **Authorization** below.
+
+
+## Installation
+
+```bash
+$ yarn add @langchain/google-vertexai
+```
+
+
+## Authorization
+
+Authorization is done through a Google Cloud Service Account.
+
+To handle service accounts, this package uses the `google-auth-library`
+package, and you may wish to consult the documentation for that library
+about how it does so. But in short, classes in this package will use
+credentials from the first of the following that apply:
+
+1. An API Key that is passed to the constructor using the `apiKey` attribute
+2. Credentials that are passed to the constructor using the `authInfo` attribute
+3. An API Key that is set in the environment variable `API_KEY`
+4. The Service Account credentials that are saved in a file. The path to
+ this file is set in the `GOOGLE_APPLICATION_CREDENTIALS` environment
+ variable.
+5. If you are running on a Google Cloud Platform resource, or if you have
+ logged in using `gcloud auth application-default login`, then the
+ default credentials.
+
diff --git a/libs/langchain-google-vertexai/jest.config.cjs b/libs/langchain-google-vertexai/jest.config.cjs
new file mode 100644
index 000000000000..a06cb3338861
--- /dev/null
+++ b/libs/langchain-google-vertexai/jest.config.cjs
@@ -0,0 +1,20 @@
+/** @type {import('ts-jest').JestConfigWithTsJest} */
+module.exports = {
+ preset: "ts-jest/presets/default-esm",
+ testEnvironment: "./jest.env.cjs",
+ modulePathIgnorePatterns: ["dist/", "docs/"],
+ moduleNameMapper: {
+ "^(\\.{1,2}/.*)\\.js$": "$1",
+ },
+ transform: {
+ "^.+\\.tsx?$": ["@swc/jest"],
+ },
+ transformIgnorePatterns: [
+ "/node_modules/",
+ "\\.pnp\\.[^\\/]+$",
+ "./scripts/jest-setup-after-env.js",
+ ],
+ setupFiles: ["dotenv/config"],
+ testTimeout: 20_000,
+ passWithNoTests: true,
+};
diff --git a/libs/langchain-google-vertexai/jest.env.cjs b/libs/langchain-google-vertexai/jest.env.cjs
new file mode 100644
index 000000000000..2ccedccb8672
--- /dev/null
+++ b/libs/langchain-google-vertexai/jest.env.cjs
@@ -0,0 +1,12 @@
+const { TestEnvironment } = require("jest-environment-node");
+
+class AdjustedTestEnvironmentToSupportFloat32Array extends TestEnvironment {
+ constructor(config, context) {
+ // Make `instanceof Float32Array` return true in tests
+ // to avoid https://github.com/xenova/transformers.js/issues/57 and https://github.com/jestjs/jest/issues/2549
+ super(config, context);
+ this.global.Float32Array = Float32Array;
+ }
+}
+
+module.exports = AdjustedTestEnvironmentToSupportFloat32Array;
diff --git a/libs/langchain-google-vertexai/langchain.config.js b/libs/langchain-google-vertexai/langchain.config.js
new file mode 100644
index 000000000000..d277fdea7707
--- /dev/null
+++ b/libs/langchain-google-vertexai/langchain.config.js
@@ -0,0 +1,24 @@
+import { resolve, dirname } from "node:path";
+import { fileURLToPath } from "node:url";
+
+/**
+ * @param {string} relativePath
+ * @returns {string}
+ */
+function abs(relativePath) {
+ return resolve(dirname(fileURLToPath(import.meta.url)), relativePath);
+}
+
+
+export const config = {
+ internals: [/node\:/, /@langchain\/core\//],
+ entrypoints: {
+ index: "index",
+ utils: "utils",
+ types: "types",
+ },
+ tsConfigPath: resolve("./tsconfig.json"),
+ cjsSource: "./dist-cjs",
+ cjsDestination: "./dist",
+ abs,
+}
\ No newline at end of file
diff --git a/libs/langchain-google-vertexai/package.json b/libs/langchain-google-vertexai/package.json
new file mode 100644
index 000000000000..00506a2c5b80
--- /dev/null
+++ b/libs/langchain-google-vertexai/package.json
@@ -0,0 +1,117 @@
+{
+ "name": "@langchain/google-vertexai",
+ "version": "0.0.1",
+ "description": "LangChain.js support for Google Vertex AI",
+ "type": "module",
+ "engines": {
+ "node": ">=18"
+ },
+ "main": "./index.js",
+ "types": "./index.d.ts",
+ "repository": {
+ "type": "git",
+ "url": "git@github.com:langchain-ai/langchainjs.git"
+ },
+ "homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-google-vertexai/",
+ "scripts": {
+ "build": "yarn run build:deps && yarn clean && yarn build:esm && yarn build:cjs && yarn build:scripts",
+ "build:deps": "yarn run turbo:command build --filter=@langchain/google-gauth",
+ "build:esm": "NODE_OPTIONS=--max-old-space-size=4096 tsc --outDir dist/ && rm -rf dist/tests dist/**/tests",
+ "build:cjs": "NODE_OPTIONS=--max-old-space-size=4096 tsc --outDir dist-cjs/ -p tsconfig.cjs.json && yarn move-cjs-to-dist && rm -rf dist-cjs",
+ "build:watch": "yarn create-entrypoints && tsc --outDir dist/ --watch",
+ "build:scripts": "yarn create-entrypoints && yarn check-tree-shaking",
+ "lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/",
+ "lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts",
+ "lint": "yarn lint:eslint && yarn lint:dpdm",
+ "lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm",
+ "clean": "rm -rf dist/ && NODE_OPTIONS=--max-old-space-size=4096 yarn lc-build --config ./langchain.config.js --create-entrypoints --pre",
+ "prepack": "yarn build",
+ "test": "yarn run build:deps && NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%",
+ "test:watch": "yarn run build:deps && NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts",
+ "test:single": "yarn run build:deps && NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000",
+ "test:integration": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%",
+ "format": "prettier --config .prettierrc --write \"src\"",
+ "format:check": "prettier --config .prettierrc --check \"src\"",
+ "move-cjs-to-dist": "yarn lc-build --config ./langchain.config.js --move-cjs-dist",
+ "create-entrypoints": "yarn lc-build --config ./langchain.config.js --create-entrypoints",
+ "check-tree-shaking": "yarn lc-build --config ./langchain.config.js --tree-shaking"
+ },
+ "author": "LangChain",
+ "license": "MIT",
+ "dependencies": {
+ "@langchain/core": "~0.1.1",
+ "@langchain/google-gauth": "~0.0.1"
+ },
+ "devDependencies": {
+ "@jest/globals": "^29.5.0",
+ "@langchain/scripts": "~0.0",
+ "@swc/core": "^1.3.90",
+ "@swc/jest": "^0.2.29",
+ "@tsconfig/recommended": "^1.0.3",
+ "@typescript-eslint/eslint-plugin": "^6.12.0",
+ "@typescript-eslint/parser": "^6.12.0",
+ "dotenv": "^16.3.1",
+ "dpdm": "^3.12.0",
+ "eslint": "^8.33.0",
+ "eslint-config-airbnb-base": "^15.0.0",
+ "eslint-config-prettier": "^8.6.0",
+ "eslint-plugin-import": "^2.27.5",
+ "eslint-plugin-no-instanceof": "^1.0.1",
+ "eslint-plugin-prettier": "^4.2.1",
+ "jest": "^29.5.0",
+ "jest-environment-node": "^29.6.4",
+ "prettier": "^2.8.3",
+ "release-it": "^15.10.1",
+ "rollup": "^4.5.2",
+ "ts-jest": "^29.1.0",
+ "typescript": "<5.2.0"
+ },
+ "publishConfig": {
+ "access": "public"
+ },
+ "exports": {
+ ".": {
+ "types": {
+ "import": "./index.d.ts",
+ "require": "./index.d.cts",
+ "default": "./index.d.ts"
+ },
+ "import": "./index.js",
+ "require": "./index.cjs"
+ },
+ "./utils": {
+ "types": {
+ "import": "./utils.d.ts",
+ "require": "./utils.d.cts",
+ "default": "./utils.d.ts"
+ },
+ "import": "./utils.js",
+ "require": "./utils.cjs"
+ },
+ "./types": {
+ "types": {
+ "import": "./types.d.ts",
+ "require": "./types.d.cts",
+ "default": "./types.d.ts"
+ },
+ "import": "./types.js",
+ "require": "./types.cjs"
+ },
+ "./package.json": "./package.json"
+ },
+ "files": [
+ "dist/",
+ "index.cjs",
+ "index.js",
+ "index.d.ts",
+ "index.d.cts",
+ "utils.cjs",
+ "utils.js",
+ "utils.d.ts",
+ "utils.d.cts",
+ "types.cjs",
+ "types.js",
+ "types.d.ts",
+ "types.d.cts"
+ ]
+}
diff --git a/libs/langchain-google-vertexai/src/chat_models.ts b/libs/langchain-google-vertexai/src/chat_models.ts
new file mode 100644
index 000000000000..86216c890c1a
--- /dev/null
+++ b/libs/langchain-google-vertexai/src/chat_models.ts
@@ -0,0 +1,22 @@
+import { type ChatGoogleInput, ChatGoogle } from "@langchain/google-gauth";
+
+/**
+ * Input to chat model class.
+ */
+export interface ChatVertexAIInput extends ChatGoogleInput {}
+
+/**
+ * Integration with a chat model.
+ */
+export class ChatVertexAI extends ChatGoogle {
+ static lc_name() {
+ return "ChatVertexAI";
+ }
+
+ constructor(fields?: ChatVertexAIInput) {
+ super({
+ ...fields,
+ platformType: "gcp",
+ });
+ }
+}
diff --git a/libs/langchain-google-vertexai/src/index.ts b/libs/langchain-google-vertexai/src/index.ts
new file mode 100644
index 000000000000..2c8aa4ecb468
--- /dev/null
+++ b/libs/langchain-google-vertexai/src/index.ts
@@ -0,0 +1,2 @@
+export * from "./chat_models.js";
+export * from "./llms.js";
diff --git a/libs/langchain-google-vertexai/src/llms.ts b/libs/langchain-google-vertexai/src/llms.ts
new file mode 100644
index 000000000000..919d54e19a2d
--- /dev/null
+++ b/libs/langchain-google-vertexai/src/llms.ts
@@ -0,0 +1,22 @@
+import { type GoogleLLMInput, GoogleLLM } from "@langchain/google-gauth";
+
+/**
+ * Input to LLM model class.
+ */
+export interface VertexAIInput extends GoogleLLMInput {}
+
+/**
+ * Integration with a LLM model.
+ */
+export class VertexAI extends GoogleLLM {
+ static lc_name() {
+ return "VertexAI";
+ }
+
+ constructor(fields?: VertexAIInput) {
+ super({
+ ...fields,
+ platformType: "gcp",
+ });
+ }
+}
diff --git a/libs/langchain-google-vertexai/src/tests/chat_models.int.test.ts b/libs/langchain-google-vertexai/src/tests/chat_models.int.test.ts
new file mode 100644
index 000000000000..1218565bb046
--- /dev/null
+++ b/libs/langchain-google-vertexai/src/tests/chat_models.int.test.ts
@@ -0,0 +1,112 @@
+import { test } from "@jest/globals";
+import { BaseLanguageModelInput } from "@langchain/core/language_models/base";
+import { ChatPromptValue } from "@langchain/core/prompt_values";
+import {
+ AIMessage,
+ AIMessageChunk,
+ BaseMessage,
+ BaseMessageChunk,
+ HumanMessage,
+ MessageContentComplex,
+ MessageContentText,
+ SystemMessage,
+} from "@langchain/core/messages";
+import { ChatVertexAI } from "../chat_models.js";
+import { VertexAI } from "../llms.js";
+
+describe("GAuth Chat", () => {
+ test("platform", async () => {
+ const model = new VertexAI();
+ expect(model.platform).toEqual("gcp");
+ });
+
+ test("invoke", async () => {
+ const model = new ChatVertexAI();
+ try {
+ const res = await model.invoke("What is 1 + 1?");
+ expect(res).toBeDefined();
+ expect(res._getType()).toEqual("ai");
+
+ const aiMessage = res as AIMessageChunk;
+ expect(aiMessage.content).toBeDefined();
+ expect(aiMessage.content.length).toBeGreaterThan(0);
+ expect(aiMessage.content[0]).toBeDefined();
+
+ const content = aiMessage.content[0] as MessageContentComplex;
+ expect(content).toHaveProperty("type");
+ expect(content.type).toEqual("text");
+
+ const textContent = content as MessageContentText;
+ expect(textContent.text).toBeDefined();
+ expect(textContent.text).toEqual("2");
+ } catch (e) {
+ console.error(e);
+ throw e;
+ }
+ });
+
+ test("generate", async () => {
+ const model = new ChatVertexAI();
+ try {
+ const messages: BaseMessage[] = [
+ new SystemMessage(
+ "You will reply to all requests to flip a coin with either H, indicating heads, or T, indicating tails."
+ ),
+ new HumanMessage("Flip it"),
+ new AIMessage("T"),
+ new HumanMessage("Flip the coin again"),
+ ];
+ const res = await model.predictMessages(messages);
+ expect(res).toBeDefined();
+ expect(res._getType()).toEqual("ai");
+
+ const aiMessage = res as AIMessageChunk;
+ expect(aiMessage.content).toBeDefined();
+ expect(aiMessage.content.length).toBeGreaterThan(0);
+ expect(aiMessage.content[0]).toBeDefined();
+
+ const content = aiMessage.content[0] as MessageContentComplex;
+ expect(content).toHaveProperty("type");
+ expect(content.type).toEqual("text");
+
+ const textContent = content as MessageContentText;
+ expect(textContent.text).toBeDefined();
+ expect(["H", "T"]).toContainEqual(textContent.text);
+ } catch (e) {
+ console.error(e);
+ throw e;
+ }
+ });
+
+ test("stream", async () => {
+ const model = new ChatVertexAI();
+ try {
+ const input: BaseLanguageModelInput = new ChatPromptValue([
+ new SystemMessage(
+ "You will reply to all requests to flip a coin with either H, indicating heads, or T, indicating tails."
+ ),
+ new HumanMessage("Flip it"),
+ new AIMessage("T"),
+ new HumanMessage("Flip the coin again"),
+ ]);
+ const res = await model.stream(input);
+ const resArray: BaseMessageChunk[] = [];
+ for await (const chunk of res) {
+ resArray.push(chunk);
+ }
+ expect(resArray).toBeDefined();
+ expect(resArray.length).toBeGreaterThanOrEqual(1);
+
+ const lastChunk = resArray[resArray.length - 1];
+ expect(lastChunk).toBeDefined();
+ expect(lastChunk._getType()).toEqual("ai");
+ const aiChunk = lastChunk as AIMessageChunk;
+ console.log(aiChunk);
+
+ console.log(JSON.stringify(resArray, null, 2));
+ } catch (e) {
+ console.error(e);
+ throw e;
+ }
+ });
+});
diff --git a/libs/langchain-google-vertexai/src/tests/llms.int.test.ts b/libs/langchain-google-vertexai/src/tests/llms.int.test.ts
new file mode 100644
index 000000000000..1393539424ab
--- /dev/null
+++ b/libs/langchain-google-vertexai/src/tests/llms.int.test.ts
@@ -0,0 +1,236 @@
+import { test } from "@jest/globals";
+import {
+ AIMessage,
+ BaseMessage,
+ HumanMessageChunk,
+ MessageContentComplex,
+} from "@langchain/core/messages";
+import { ChatPromptValue } from "@langchain/core/prompt_values";
+import { VertexAI } from "../llms.js";
+
+const imgData = {
+ blueSquare:
+ "iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAIAAAACUFjqAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH6AIbFwQSRaexCAAAAB1pVFh0Q29tbWVudAAAAAAAQ3JlYXRlZCB3aXRoIEdJTVBkLmUHAAAAJklEQVQY02P8//8/A27AxIAXsEAor31f0CS2OfEQ1j2Q0owU+RsAGNUJD2/04PgAAAAASUVORK5CYII=",
+};
+
+describe("GAuth LLM", () => {
+ test("platform", async () => {
+ const model = new VertexAI();
+ expect(model.platform).toEqual("gcp");
+ });
+
+ test("call", async () => {
+ const model = new VertexAI();
+ try {
+ const res = await model.invoke("1 + 1 = ");
+ if (res.length === 1) {
+ expect(res).toBe("2");
+ } else {
+ expect(res.length).toBeGreaterThan(0);
+ console.log("call result:", res);
+ }
+ } catch (xx) {
+ console.error(xx);
+ throw xx;
+ }
+ });
+
+ test("generate", async () => {
+ const model = new VertexAI();
+ const res = await model.generate(["Print hello world."]);
+ expect(res).toHaveProperty("generations");
+ expect(res.generations.length).toBeGreaterThan(0);
+ expect(res.generations[0].length).toBeGreaterThan(0);
+ expect(res.generations[0][0]).toHaveProperty("text");
+ console.log("generate result:", JSON.stringify(res, null, 2));
+ });
+
+ test("stream", async () => {
+ const model = new VertexAI();
+ const stream = await model.stream(
+ "What is the answer to live, the universe, and everything? Be verbose."
+ );
+ const chunks = [];
+ for await (const chunk of stream) {
+ chunks.push(chunk);
+ }
+ expect(chunks.length).toBeGreaterThan(1);
+ });
+
+ test("predictMessage image", async () => {
+ const model = new VertexAI({
+ modelName: "gemini-pro-vision",
+ });
+ const message: MessageContentComplex[] = [
+ {
+ type: "text",
+ text: "What is in this image?",
+ },
+ {
+ type: "image_url",
+ image_url: `data:image/png;base64,${imgData.blueSquare}`,
+ },
+ ];
+
+ const messages: BaseMessage[] = [
+ new HumanMessageChunk({ content: message }),
+ ];
+ const res = await model.predictMessages(messages);
+ expect(res).toBeInstanceOf(AIMessage);
+ expect(Array.isArray(res.content)).toEqual(true);
+ expect(res.content[0]).toHaveProperty("text");
+ console.log("res", res);
+ });
+
+ test("invoke image", async () => {
+ const model = new VertexAI({
+ modelName: "gemini-pro-vision",
+ });
+ const message: MessageContentComplex[] = [
+ {
+ type: "text",
+ text: "What is in this image?",
+ },
+ {
+ type: "image_url",
+ image_url: `data:image/png;base64,${imgData.blueSquare}`,
+ },
+ ];
+
+ const messages: BaseMessage[] = [
+ new HumanMessageChunk({ content: message }),
+ ];
+ const input = new ChatPromptValue(messages);
+ const res = await model.invoke(input);
+ expect(res).toBeDefined();
+ expect(res.length).toBeGreaterThan(0);
+ console.log("res", res);
+ });
+});
+
+describe("GAuth LLM gai", () => {
+ test("platform", async () => {
+ const model = new VertexAI({
+ platformType: "gai",
+ });
+ expect(model.platform).toEqual("gai");
+ });
+
+ /*
+ * This test currently fails in AI Studio due to zealous safety systems
+ */
+ test.skip("call", async () => {
+ const model = new VertexAI({
+ platformType: "gai",
+ });
+ try {
+ const res = await model.invoke("1 + 1 = ");
+ if (res.length === 1) {
+ expect(res).toBe("2");
+ } else {
+ console.log("call result:", res);
+ expect(res.length).toBeGreaterThan(0);
+ }
+ } catch (xx) {
+ console.error(xx);
+ throw xx;
+ }
+ });
+
+ test("call", async () => {
+ const model = new VertexAI({
+ platformType: "gai",
+ });
+ try {
+ const res = await model.invoke("If the time is 1:00, what time is it?");
+ expect(res.length).toBeGreaterThan(0);
+ expect(res.substring(0, 4)).toEqual("1:00");
+ } catch (xx) {
+ console.error(xx);
+ throw xx;
+ }
+ });
+
+ test("generate", async () => {
+ const model = new VertexAI({
+ platformType: "gai",
+ });
+ const res = await model.generate(["Print hello world."]);
+ expect(res).toHaveProperty("generations");
+ expect(res.generations.length).toBeGreaterThan(0);
+ expect(res.generations[0].length).toBeGreaterThan(0);
+ expect(res.generations[0][0]).toHaveProperty("text");
+ console.log("generate result:", JSON.stringify(res, null, 2));
+ });
+
+ test("stream", async () => {
+ const model = new VertexAI({
+ platformType: "gai",
+ });
+ const stream = await model.stream(
+ "What is the answer to live, the universe, and everything? Be verbose."
+ );
+ const chunks = [];
+ try {
+ for await (const chunk of stream) {
+ chunks.push(chunk);
+ }
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ } catch (xx: any) {
+ expect(xx?.message).toEqual("Finish reason: RECITATION");
+ }
+ expect(chunks.length).toBeGreaterThan(1);
+ });
+
+ test("predictMessage image", async () => {
+ const model = new VertexAI({
+ platformType: "gai",
+ modelName: "gemini-pro-vision",
+ });
+ const message: MessageContentComplex[] = [
+ {
+ type: "text",
+ text: "What is in this image?",
+ },
+ {
+ type: "image_url",
+ image_url: `data:image/png;base64,${imgData.blueSquare}`,
+ },
+ ];
+
+ const messages: BaseMessage[] = [
+ new HumanMessageChunk({ content: message }),
+ ];
+ const res = await model.predictMessages(messages);
+ expect(res).toBeInstanceOf(AIMessage);
+ expect(Array.isArray(res.content)).toEqual(true);
+ expect(res.content[0]).toHaveProperty("text");
+ console.log("res", res);
+ });
+
+ test("invoke image", async () => {
+ const model = new VertexAI({
+ platformType: "gai",
+ modelName: "gemini-pro-vision",
+ });
+ const message: MessageContentComplex[] = [
+ {
+ type: "text",
+ text: "What is in this image?",
+ },
+ {
+ type: "image_url",
+ image_url: `data:image/png;base64,${imgData.blueSquare}`,
+ },
+ ];
+
+ const messages: BaseMessage[] = [
+ new HumanMessageChunk({ content: message }),
+ ];
+ const input = new ChatPromptValue(messages);
+ const res = await model.invoke(input);
+ expect(res).toBeDefined();
+ expect(res.length).toBeGreaterThan(0);
+ console.log("res", res);
+ });
+});
diff --git a/libs/langchain-google-vertexai/src/types.ts b/libs/langchain-google-vertexai/src/types.ts
new file mode 100644
index 000000000000..0eb8d62922f6
--- /dev/null
+++ b/libs/langchain-google-vertexai/src/types.ts
@@ -0,0 +1 @@
+export * from "@langchain/google-gauth/types";
diff --git a/libs/langchain-google-vertexai/src/utils.ts b/libs/langchain-google-vertexai/src/utils.ts
new file mode 100644
index 000000000000..3c87fe2467f6
--- /dev/null
+++ b/libs/langchain-google-vertexai/src/utils.ts
@@ -0,0 +1 @@
+export * from "@langchain/google-gauth/utils";
diff --git a/libs/langchain-google-vertexai/tsconfig.cjs.json b/libs/langchain-google-vertexai/tsconfig.cjs.json
new file mode 100644
index 000000000000..3b7026ea406c
--- /dev/null
+++ b/libs/langchain-google-vertexai/tsconfig.cjs.json
@@ -0,0 +1,8 @@
+{
+ "extends": "./tsconfig.json",
+ "compilerOptions": {
+ "module": "commonjs",
+ "declaration": false
+ },
+ "exclude": ["node_modules", "dist", "docs", "**/tests"]
+}
diff --git a/libs/langchain-google-vertexai/tsconfig.json b/libs/langchain-google-vertexai/tsconfig.json
new file mode 100644
index 000000000000..bc85d83b6229
--- /dev/null
+++ b/libs/langchain-google-vertexai/tsconfig.json
@@ -0,0 +1,23 @@
+{
+ "extends": "@tsconfig/recommended",
+ "compilerOptions": {
+ "outDir": "../dist",
+ "rootDir": "./src",
+ "target": "ES2021",
+ "lib": ["ES2021", "ES2022.Object", "DOM"],
+ "module": "ES2020",
+ "moduleResolution": "nodenext",
+ "esModuleInterop": true,
+ "declaration": true,
+ "noImplicitReturns": true,
+ "noFallthroughCasesInSwitch": true,
+ "noUnusedLocals": true,
+ "noUnusedParameters": true,
+ "useDefineForClassFields": true,
+ "strictPropertyInitialization": false,
+ "allowJs": true,
+ "strict": true
+ },
+ "include": ["src/**/*"],
+ "exclude": ["node_modules", "dist", "docs"]
+}
diff --git a/libs/langchain-google-webauth/.gitignore b/libs/langchain-google-webauth/.gitignore
index c10034e2f1be..df014a2d426b 100644
--- a/libs/langchain-google-webauth/.gitignore
+++ b/libs/langchain-google-webauth/.gitignore
@@ -2,6 +2,14 @@ index.cjs
index.js
index.d.ts
index.d.cts
+utils.cjs
+utils.js
+utils.d.ts
+utils.d.cts
+types.cjs
+types.js
+types.d.ts
+types.d.cts
node_modules
dist
.yarn
diff --git a/libs/langchain-google-webauth/langchain.config.js b/libs/langchain-google-webauth/langchain.config.js
index 5618893053cb..3ef8237db60f 100644
--- a/libs/langchain-google-webauth/langchain.config.js
+++ b/libs/langchain-google-webauth/langchain.config.js
@@ -14,6 +14,8 @@ export const config = {
internals: [/node\:/, /@langchain\/core\//, /web-auth-library\/google/],
entrypoints: {
index: "index",
+ utils: "utils",
+ types: "types",
},
tsConfigPath: resolve("./tsconfig.json"),
cjsSource: "./dist-cjs",
diff --git a/libs/langchain-google-webauth/package.json b/libs/langchain-google-webauth/package.json
index 8113a1b32f2a..f126de91c561 100644
--- a/libs/langchain-google-webauth/package.json
+++ b/libs/langchain-google-webauth/package.json
@@ -1,6 +1,6 @@
{
"name": "@langchain/google-webauth",
- "version": "0.0.0",
+ "version": "0.0.1",
"description": "Web-based authentication support for Google services",
"type": "module",
"engines": {
@@ -40,7 +40,7 @@
"license": "MIT",
"dependencies": {
"@langchain/core": "~0.1.1",
- "@langchain/google-common": "~0.0.0",
+ "@langchain/google-common": "~0.0.2",
"web-auth-library": "^1.0.3"
},
"devDependencies": {
@@ -80,6 +80,24 @@
"import": "./index.js",
"require": "./index.cjs"
},
+ "./utils": {
+ "types": {
+ "import": "./utils.d.ts",
+ "require": "./utils.d.cts",
+ "default": "./utils.d.ts"
+ },
+ "import": "./utils.js",
+ "require": "./utils.cjs"
+ },
+ "./types": {
+ "types": {
+ "import": "./types.d.ts",
+ "require": "./types.d.cts",
+ "default": "./types.d.ts"
+ },
+ "import": "./types.js",
+ "require": "./types.cjs"
+ },
"./package.json": "./package.json"
},
"files": [
@@ -87,6 +105,14 @@
"index.cjs",
"index.js",
"index.d.ts",
- "index.d.cts"
+ "index.d.cts",
+ "utils.cjs",
+ "utils.js",
+ "utils.d.ts",
+ "utils.d.cts",
+ "types.cjs",
+ "types.js",
+ "types.d.ts",
+ "types.d.cts"
]
}
diff --git a/libs/langchain-google-webauth/src/tests/chat_models.int.test.ts b/libs/langchain-google-webauth/src/tests/chat_models.int.test.ts
index 91525c4ec2a3..2ee2a870f009 100644
--- a/libs/langchain-google-webauth/src/tests/chat_models.int.test.ts
+++ b/libs/langchain-google-webauth/src/tests/chat_models.int.test.ts
@@ -1,23 +1,28 @@
-import { test } from "@jest/globals";
+import { expect, test } from "@jest/globals";
import {
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
+ BaseMessageLike,
HumanMessage,
MessageContentComplex,
MessageContentText,
SystemMessage,
+ ToolMessage,
} from "@langchain/core/messages";
import { BaseLanguageModelInput } from "@langchain/core/language_models/base";
import { ChatPromptValue } from "@langchain/core/prompt_values";
+import { GeminiTool, GoogleAISafetySetting } from "@langchain/google-common";
import { ChatGoogle } from "../chat_models.js";
-describe("Google APIKey Chat", () => {
+describe.skip("Google APIKey Chat", () => {
test("invoke", async () => {
const model = new ChatGoogle();
try {
- const res = await model.invoke("What is 1 + 1?");
+ const res = await model.invoke(
+ "What is the answer to life the universe and everything? Answer briefly."
+ );
expect(res).toBeDefined();
expect(res._getType()).toEqual("ai");
@@ -32,7 +37,7 @@ describe("Google APIKey Chat", () => {
const textContent = content as MessageContentText;
expect(textContent.text).toBeDefined();
- expect(textContent.text).toEqual("2");
+ expect(textContent.text).toEqual("42");
} catch (e) {
console.error(e);
throw e;
@@ -40,15 +45,33 @@ describe("Google APIKey Chat", () => {
});
test("generate", async () => {
- const model = new ChatGoogle();
+ const safetySettings: GoogleAISafetySetting[] = [
+ {
+ category: "HARM_CATEGORY_HARASSMENT",
+ threshold: "BLOCK_ONLY_HIGH",
+ },
+ {
+ category: "HARM_CATEGORY_HATE_SPEECH",
+ threshold: "BLOCK_ONLY_HIGH",
+ },
+ {
+ category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+ threshold: "BLOCK_ONLY_HIGH",
+ },
+ {
+ category: "HARM_CATEGORY_DANGEROUS_CONTENT",
+ threshold: "BLOCK_ONLY_HIGH",
+ },
+ ];
+ const model = new ChatGoogle({ safetySettings });
try {
const messages: BaseMessage[] = [
new SystemMessage(
- "You will reply to all requests to flip a coin with either H, indicating heads, or T, indicating tails."
+ "You will reply to all requests to toss a coin with either H, indicating heads, or T, indicating tails."
),
- new HumanMessage("Flip it"),
+ new HumanMessage("Toss the coin"),
new AIMessage("T"),
- new HumanMessage("Flip the coin again"),
+ new HumanMessage("Toss the coin again"),
];
const res = await model.predictMessages(messages);
expect(res).toBeDefined();
@@ -67,19 +90,37 @@ describe("Google APIKey Chat", () => {
expect(textContent.text).toBeDefined();
expect(["H", "T"]).toContainEqual(textContent.text);
} catch (e) {
- console.error(e);
+ console.error(JSON.stringify(e, null, 1));
throw e;
}
});
test("stream", async () => {
- const model = new ChatGoogle();
+ const safetySettings: GoogleAISafetySetting[] = [
+ {
+ category: "HARM_CATEGORY_HARASSMENT",
+ threshold: "BLOCK_ONLY_HIGH",
+ },
+ {
+ category: "HARM_CATEGORY_HATE_SPEECH",
+ threshold: "BLOCK_ONLY_HIGH",
+ },
+ {
+ category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+ threshold: "BLOCK_ONLY_HIGH",
+ },
+ {
+ category: "HARM_CATEGORY_DANGEROUS_CONTENT",
+ threshold: "BLOCK_ONLY_HIGH",
+ },
+ ];
+ const model = new ChatGoogle({ safetySettings });
try {
const input: BaseLanguageModelInput = new ChatPromptValue([
new SystemMessage(
"You will reply to all requests to flip a coin with either H, indicating heads, or T, indicating tails."
),
- new HumanMessage("Flip it"),
+ new HumanMessage("Flip the coin"),
new AIMessage("T"),
new HumanMessage("Flip the coin again"),
]);
@@ -99,13 +140,116 @@ describe("Google APIKey Chat", () => {
console.log(JSON.stringify(resArray, null, 2));
} catch (e) {
- console.error(e);
+ console.error(JSON.stringify(e, null, 1));
throw e;
}
});
+
+ test("function", async () => {
+ const tools: GeminiTool[] = [
+ {
+ functionDeclarations: [
+ {
+ name: "test",
+ description:
+ "Run a test with a specific name and get if it passed or failed",
+ parameters: {
+ type: "object",
+ properties: {
+ testName: {
+ type: "string",
+ description: "The name of the test that should be run.",
+ },
+ },
+ required: ["testName"],
+ },
+ },
+ ],
+ },
+ ];
+ const model = new ChatGoogle({
+ apiVersion: "v1beta",
+ }).bind({
+ tools,
+ });
+ const result = await model.invoke("Run a test on the cobalt project");
+ expect(result).toHaveProperty("content");
+ expect(Array.isArray(result.content)).toBeTruthy();
+ expect(result.content).toHaveLength(0);
+ const args = result?.lc_kwargs?.additional_kwargs;
+ expect(args).toBeDefined();
+ expect(args).toHaveProperty("tool_calls");
+ expect(Array.isArray(args.tool_calls)).toBeTruthy();
+ expect(args.tool_calls).toHaveLength(1);
+ const call = args.tool_calls[0];
+ expect(call).toHaveProperty("type");
+ expect(call.type).toBe("function");
+ expect(call).toHaveProperty("function");
+ const func = call.function;
+ expect(func).toBeDefined();
+ expect(func).toHaveProperty("name");
+ expect(func.name).toBe("test");
+ expect(func).toHaveProperty("arguments");
+ expect(typeof func.arguments).toBe("string");
+ expect(func.arguments.replaceAll("\n", "")).toBe('{"testName":"cobalt"}');
+ });
+
+ test("function reply", async () => {
+ const tools: GeminiTool[] = [
+ {
+ functionDeclarations: [
+ {
+ name: "test",
+ description:
+ "Run a test with a specific name and get if it passed or failed",
+ parameters: {
+ type: "object",
+ properties: {
+ testName: {
+ type: "string",
+ description: "The name of the test that should be run.",
+ },
+ },
+ required: ["testName"],
+ },
+ },
+ ],
+ },
+ ];
+ const model = new ChatGoogle({
+ apiVersion: "v1beta",
+ }).bind({
+ tools,
+ });
+ const toolResult = {
+ testPassed: true,
+ };
+ const messages: BaseMessageLike[] = [
+ new HumanMessage("Run a test on the cobalt project."),
+ new AIMessage("", {
+ tool_calls: [
+ {
+ id: "test",
+ type: "function",
+ function: {
+ name: "test",
+ arguments: '{"testName":"cobalt"}',
+ },
+ },
+ ],
+ }),
+ new ToolMessage(JSON.stringify(toolResult), "test"),
+ ];
+ const res = await model.stream(messages);
+ const resArray: BaseMessageChunk[] = [];
+ for await (const chunk of res) {
+ resArray.push(chunk);
+ }
+ console.log(JSON.stringify(resArray, null, 2));
+ });
});
-describe("Google Webauth Chat", () => {
+describe.skip("Google Webauth Chat", () => {
test("invoke", async () => {
const model = new ChatGoogle();
try {
@@ -195,4 +339,103 @@ describe("Google Webauth Chat", () => {
throw e;
}
});
+
+ test("function", async () => {
+ const tools: GeminiTool[] = [
+ {
+ functionDeclarations: [
+ {
+ name: "test",
+ description:
+ "Run a test with a specific name and get if it passed or failed",
+ parameters: {
+ type: "object",
+ properties: {
+ testName: {
+ type: "string",
+ description: "The name of the test that should be run.",
+ },
+ },
+ required: ["testName"],
+ },
+ },
+ ],
+ },
+ ];
+ const model = new ChatGoogle().bind({
+ tools,
+ });
+ const result = await model.invoke("Run a test on the cobalt project");
+ expect(result).toHaveProperty("content");
+ expect(Array.isArray(result.content)).toBeTruthy();
+ expect(result.content).toHaveLength(0);
+ const args = result?.lc_kwargs?.additional_kwargs;
+ expect(args).toBeDefined();
+ expect(args).toHaveProperty("tool_calls");
+ expect(Array.isArray(args.tool_calls)).toBeTruthy();
+ expect(args.tool_calls).toHaveLength(1);
+ const call = args.tool_calls[0];
+ expect(call).toHaveProperty("type");
+ expect(call.type).toBe("function");
+ expect(call).toHaveProperty("function");
+ const func = call.function;
+ expect(func).toBeDefined();
+ expect(func).toHaveProperty("name");
+ expect(func.name).toBe("test");
+ expect(func).toHaveProperty("arguments");
+ expect(typeof func.arguments).toBe("string");
+ expect(func.arguments.replaceAll("\n", "")).toBe('{"testName":"cobalt"}');
+ });
+
+ test("function reply", async () => {
+ const tools: GeminiTool[] = [
+ {
+ functionDeclarations: [
+ {
+ name: "test",
+ description:
+ "Run a test with a specific name and get if it passed or failed",
+ parameters: {
+ type: "object",
+ properties: {
+ testName: {
+ type: "string",
+ description: "The name of the test that should be run.",
+ },
+ },
+ required: ["testName"],
+ },
+ },
+ ],
+ },
+ ];
+ const model = new ChatGoogle().bind({
+ tools,
+ });
+ const toolResult = {
+ testPassed: true,
+ };
+ const messages: BaseMessageLike[] = [
+ new HumanMessage("Run a test on the cobalt project."),
+ new AIMessage("", {
+ tool_calls: [
+ {
+ id: "test",
+ type: "function",
+ function: {
+ name: "test",
+ arguments: '{"testName":"cobalt"}',
+ },
+ },
+ ],
+ }),
+ new ToolMessage(JSON.stringify(toolResult), "test"),
+ ];
+ const res = await model.stream(messages);
+ const resArray: BaseMessageChunk[] = [];
+ for await (const chunk of res) {
+ resArray.push(chunk);
+ }
+ console.log(JSON.stringify(resArray, null, 2));
+ });
});
diff --git a/libs/langchain-google-webauth/src/tests/llms.int.test.ts b/libs/langchain-google-webauth/src/tests/llms.int.test.ts
index 4c3611ff80fa..47cdbd8ee9dc 100644
--- a/libs/langchain-google-webauth/src/tests/llms.int.test.ts
+++ b/libs/langchain-google-webauth/src/tests/llms.int.test.ts
@@ -13,7 +13,7 @@ const imgData = {
"iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAIAAAACUFjqAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH6AIbFwQSRaexCAAAAB1pVFh0Q29tbWVudAAAAAAAQ3JlYXRlZCB3aXRoIEdJTVBkLmUHAAAAJklEQVQY02P8//8/A27AxIAXsEAor31f0CS2OfEQ1j2Q0owU+RsAGNUJD2/04PgAAAAASUVORK5CYII=",
};
-describe("Google APIKey LLM", () => {
+describe.skip("Google APIKey LLM", () => {
test("platform", async () => {
const model = new GoogleLLM();
expect(model.platform).toEqual("gai");
@@ -59,7 +59,7 @@ describe("Google APIKey LLM", () => {
test("predictMessage image", async () => {
const model = new GoogleLLM({
- model: "gemini-pro-vision",
+ modelName: "gemini-pro-vision",
});
const message: MessageContentComplex[] = [
{
@@ -84,7 +84,7 @@ describe("Google APIKey LLM", () => {
test("invoke image", async () => {
const model = new GoogleLLM({
- model: "gemini-pro-vision",
+ modelName: "gemini-pro-vision",
});
const message: MessageContentComplex[] = [
{
@@ -108,7 +108,7 @@ describe("Google APIKey LLM", () => {
});
});
-describe("Google WebAuth LLM", () => {
+describe.skip("Google WebAuth LLM", () => {
test("platform", async () => {
const model = new GoogleLLM();
expect(model.platform).toEqual("gcp");
@@ -139,7 +139,7 @@ describe("Google WebAuth LLM", () => {
test("predictMessage image", async () => {
const model = new GoogleLLM({
- model: "gemini-pro-vision",
+ modelName: "gemini-pro-vision",
});
const message: MessageContentComplex[] = [
{
@@ -164,7 +164,7 @@ describe("Google WebAuth LLM", () => {
test("invoke image", async () => {
const model = new GoogleLLM({
- model: "gemini-pro-vision",
+ modelName: "gemini-pro-vision",
});
const message: MessageContentComplex[] = [
{
@@ -188,7 +188,7 @@ describe("Google WebAuth LLM", () => {
});
});
-describe("Google WebAuth gai LLM", () => {
+describe.skip("Google WebAuth gai LLM", () => {
test("platform", async () => {
const model = new GoogleLLM({
platformType: "gai",
@@ -243,7 +243,7 @@ describe("Google WebAuth gai LLM", () => {
test("predictMessage image", async () => {
const model = new GoogleLLM({
platformType: "gai",
- model: "gemini-pro-vision",
+ modelName: "gemini-pro-vision",
});
const message: MessageContentComplex[] = [
{
@@ -269,7 +269,7 @@ describe("Google WebAuth gai LLM", () => {
test("invoke image", async () => {
const model = new GoogleLLM({
platformType: "gai",
- model: "gemini-pro-vision",
+ modelName: "gemini-pro-vision",
});
const message: MessageContentComplex[] = [
{
diff --git a/libs/langchain-google-webauth/src/types.ts b/libs/langchain-google-webauth/src/types.ts
new file mode 100644
index 000000000000..01116e7f338e
--- /dev/null
+++ b/libs/langchain-google-webauth/src/types.ts
@@ -0,0 +1 @@
+export * from "@langchain/google-common/types";
diff --git a/libs/langchain-google-webauth/src/utils.ts b/libs/langchain-google-webauth/src/utils.ts
new file mode 100644
index 000000000000..f21efb45914c
--- /dev/null
+++ b/libs/langchain-google-webauth/src/utils.ts
@@ -0,0 +1 @@
+export * from "@langchain/google-common/utils";
diff --git a/libs/langchain-groq/package.json b/libs/langchain-groq/package.json
index 33a642c14cdf..1175197931ab 100644
--- a/libs/langchain-groq/package.json
+++ b/libs/langchain-groq/package.json
@@ -1,6 +1,6 @@
{
"name": "@langchain/groq",
- "version": "0.0.4",
+ "version": "0.0.5",
"description": "Groq integration for LangChain.js",
"type": "module",
"engines": {
diff --git a/libs/langchain-groq/src/chat_models.ts b/libs/langchain-groq/src/chat_models.ts
index e4522a0ab9ab..5c65f088498e 100644
--- a/libs/langchain-groq/src/chat_models.ts
+++ b/libs/langchain-groq/src/chat_models.ts
@@ -30,7 +30,9 @@ import {
ChatCompletionCreateParamsStreaming,
} from "groq-sdk/resources/chat/completions";
-export interface ChatGroqCallOptions extends BaseChatModelCallOptions {}
+export interface ChatGroqCallOptions extends BaseChatModelCallOptions {
+ headers?: Record;
+}
export interface ChatGroqInput extends BaseChatModelParams {
/**
@@ -246,7 +248,10 @@ export class ChatGroq extends BaseChatModel {
messages: messagesMapped,
stream: true,
},
- params
+ {
+ signal: options?.signal,
+ headers: options?.headers,
+ }
);
for await (const data of response) {
const choice = data?.choices[0];
@@ -303,6 +308,7 @@ export class ChatGroq extends BaseChatModel {
},
{
signal: options?.signal,
+ headers: options?.headers,
}
);
diff --git a/libs/langchain-groq/src/tests/chat_models.int.test.ts b/libs/langchain-groq/src/tests/chat_models.int.test.ts
index ed289b50b719..d5deb8dfaa29 100644
--- a/libs/langchain-groq/src/tests/chat_models.int.test.ts
+++ b/libs/langchain-groq/src/tests/chat_models.int.test.ts
@@ -24,6 +24,30 @@ describe("ChatGroq", () => {
expect((res.content as string).toLowerCase()).not.toContain("six");
});
+ test("invoke should respect passed headers", async () => {
+ const chat = new ChatGroq({
+ maxRetries: 0,
+ });
+ const message = new HumanMessage("Count to ten.");
+ await expect(async () => {
+ await chat.invoke([message], {
+ headers: { Authorization: "badbadbad" },
+ });
+ }).rejects.toThrowError();
+ });
+
+ test("stream should respect passed headers", async () => {
+ const chat = new ChatGroq({
+ maxRetries: 0,
+ });
+ const message = new HumanMessage("Count to ten.");
+ await expect(async () => {
+ await chat.stream([message], {
+ headers: { Authorization: "badbadbad" },
+ });
+ }).rejects.toThrowError();
+ });
+
test("generate", async () => {
const chat = new ChatGroq();
const message = new HumanMessage("Hello!");
diff --git a/libs/langchain-openai/package.json b/libs/langchain-openai/package.json
index 8300d5983960..268e93678c47 100644
--- a/libs/langchain-openai/package.json
+++ b/libs/langchain-openai/package.json
@@ -1,6 +1,6 @@
{
"name": "@langchain/openai",
- "version": "0.0.23",
+ "version": "0.0.25",
"description": "OpenAI integrations for LangChain.js",
"type": "module",
"engines": {
diff --git a/libs/langchain-openai/src/chat_models.ts b/libs/langchain-openai/src/chat_models.ts
index 6a433490001b..f44f33166065 100644
--- a/libs/langchain-openai/src/chat_models.ts
+++ b/libs/langchain-openai/src/chat_models.ts
@@ -1001,6 +1001,7 @@ export class ChatOpenAI<
openAIFunctionDefinition = schema as FunctionDefinition;
functionName = schema.name;
} else {
+ functionName = schema.title ?? functionName;
openAIFunctionDefinition = {
name: functionName,
description: schema.description ?? "",
diff --git a/libs/langchain-openai/src/tests/chat_models_structured_output.int.test.ts b/libs/langchain-openai/src/tests/chat_models_structured_output.int.test.ts
index 79d41b67a08b..7b2c4961bf59 100644
--- a/libs/langchain-openai/src/tests/chat_models_structured_output.int.test.ts
+++ b/libs/langchain-openai/src/tests/chat_models_structured_output.int.test.ts
@@ -172,6 +172,46 @@ Respond with a JSON object containing three keys:
expect("number2" in result).toBe(true);
});
+test("withStructuredOutput JSON schema", async () => {
+ const model = new ChatOpenAI({
+ temperature: 0,
+ modelName: "gpt-4-turbo-preview",
+ });
+
+ const jsonSchema = {
+ title: "calculator",
+ description: "A simple calculator",
+ type: "object",
+ properties: {
+ operation: {
+ type: "string",
+ enum: ["add", "subtract", "multiply", "divide"],
+ },
+ number1: { type: "number" },
+ number2: { type: "number" },
+ },
+ };
+ const modelWithStructuredOutput = model.withStructuredOutput(jsonSchema);
+
+ const prompt = ChatPromptTemplate.fromMessages([
+ "system",
+ `You are VERY bad at math and must always use a calculator.
+Respond with a JSON object containing three keys:
+'operation': the type of operation to execute, either 'add', 'subtract', 'multiply' or 'divide',
+'number1': the first number to operate on,
+'number2': the second number to operate on.
+`,
+ "human",
+ "Please help me!! What is 2 + 2?",
+ ]);
+ const chain = prompt.pipe(modelWithStructuredOutput);
+ const result = await chain.invoke({});
+ console.log(result);
+ expect("operation" in result).toBe(true);
+ expect("number1" in result).toBe(true);
+ expect("number2" in result).toBe(true);
+});
+
test("withStructuredOutput includeRaw true", async () => {
const model = new ChatOpenAI({
temperature: 0,
diff --git a/libs/langchain-pinecone/package.json b/libs/langchain-pinecone/package.json
index 82ad521c2464..063b30cf54d0 100644
--- a/libs/langchain-pinecone/package.json
+++ b/libs/langchain-pinecone/package.json
@@ -1,6 +1,6 @@
{
"name": "@langchain/pinecone",
- "version": "0.0.3",
+ "version": "0.0.4",
"description": "LangChain integration for Pinecone's vector database",
"type": "module",
"engines": {
@@ -39,7 +39,7 @@
"license": "MIT",
"dependencies": {
"@langchain/core": "~0.1",
- "@pinecone-database/pinecone": "^2.0.0",
+ "@pinecone-database/pinecone": "^2.2.0",
"flat": "^5.0.2",
"uuid": "^9.0.0"
},
diff --git a/libs/langchain-pinecone/src/tests/vectorstores.int.test.ts b/libs/langchain-pinecone/src/tests/vectorstores.int.test.ts
index 53b3d16fd475..e9b5a5a9758d 100644
--- a/libs/langchain-pinecone/src/tests/vectorstores.int.test.ts
+++ b/libs/langchain-pinecone/src/tests/vectorstores.int.test.ts
@@ -210,4 +210,40 @@ describe.skip("PineconeStore", () => {
expect(results.length).toEqual(1);
expect(results[0].metadata.foo).toBe(id1);
});
+
+ test("auto instantiated pinecone index class", async () => {
+ const documentId = uuid.v4();
+ const pageContent = faker.lorem.sentence(5);
+ const embeddings = new SyntheticEmbeddings({
+ vectorSize: 1536,
+ });
+
+ const store = new PineconeStore(embeddings, {
+ pineconeConfig: {
+ indexName: testIndexName,
+ config: {
+ apiKey: process.env.PINECONE_API_KEY!,
+ },
+ },
+ });
+
+ await store.addDocuments([{ pageContent, metadata: {} }], [documentId]);
+ await sleep(35000);
+
+ const results = await store.similaritySearch(pageContent, 1);
+
+ expect(results).toEqual([new Document({ metadata: {}, pageContent })]);
+
+ await store.addDocuments(
+ [{ pageContent: `${pageContent} upserted`, metadata: {} }],
+ [documentId]
+ );
+ await sleep(35000);
+
+ const results2 = await store.similaritySearch(pageContent, 1);
+
+ expect(results2).toEqual([
+ new Document({ metadata: {}, pageContent: `${pageContent} upserted` }),
+ ]);
+ });
});
diff --git a/libs/langchain-pinecone/src/tests/vectorstores.test.ts b/libs/langchain-pinecone/src/tests/vectorstores.test.ts
index d47be4e5b093..c83d0efe3af5 100644
--- a/libs/langchain-pinecone/src/tests/vectorstores.test.ts
+++ b/libs/langchain-pinecone/src/tests/vectorstores.test.ts
@@ -118,3 +118,50 @@ test("PineconeStore with string arrays", async () => {
},
]);
});
+
+test("PineconeStore can instantiate without passing in client", async () => {
+ const embeddings = new FakeEmbeddings();
+
+ const store = new PineconeStore(embeddings, {
+ pineconeConfig: {
+ indexName: "indexName",
+ config: {
+ apiKey: "apiKey",
+ },
+ },
+ });
+
+ expect(store.pineconeIndex).toBeDefined();
+});
+
+test("PineconeStore throws when no config or index is passed", async () => {
+ const embeddings = new FakeEmbeddings();
+
+ expect(() => new PineconeStore(embeddings, {})).toThrow();
+});
+
+test("PineconeStore throws when config and index is passed", async () => {
+ const upsert = jest.fn();
+ const client = {
+ namespace: jest.fn().mockReturnValue({
+ upsert,
+ query: jest.fn().mockResolvedValue({
+ matches: [],
+ }),
+ }),
+ };
+ const embeddings = new FakeEmbeddings();
+
+ expect(
+ () =>
+ new PineconeStore(embeddings, {
+ pineconeIndex: client as any,
+ pineconeConfig: {
+ indexName: "indexName",
+ config: {
+ apiKey: "apiKey",
+ },
+ },
+ })
+ ).toThrow();
+});
diff --git a/libs/langchain-pinecone/src/vectorstores.ts b/libs/langchain-pinecone/src/vectorstores.ts
index 9648a9a75276..b9e3be201326 100644
--- a/libs/langchain-pinecone/src/vectorstores.ts
+++ b/libs/langchain-pinecone/src/vectorstores.ts
@@ -23,14 +23,33 @@ import { maximalMarginalRelevance } from "@langchain/core/utils/math";
// eslint-disable-next-line @typescript-eslint/ban-types, @typescript-eslint/no-explicit-any
type PineconeMetadata = Record;
+type HTTPHeaders = {
+ [key: string]: string;
+};
+
/**
* Database config for your vectorstore.
*/
export interface PineconeStoreParams extends AsyncCallerParams {
- pineconeIndex: PineconeIndex;
+ /**
+ * The Pinecone index to use.
+ * Either this or pineconeConfig must be provided.
+ */
+ pineconeIndex?: PineconeIndex;
textKey?: string;
namespace?: string;
filter?: PineconeMetadata;
+ /**
+ * Configuration for the Pinecone index.
+ * Either this or pineconeIndex must be provided.
+ */
+ pineconeConfig?: {
+ indexName: ConstructorParameters[0];
+ config: ConstructorParameters[1];
+ namespace?: string;
+ indexHostUrl?: string;
+ additionalHeaders?: HTTPHeaders;
+ };
}
/**
@@ -69,10 +88,39 @@ export class PineconeStore extends VectorStore {
super(embeddings, params);
this.embeddings = embeddings;
- const { namespace, pineconeIndex, textKey, filter, ...asyncCallerArgs } =
- params;
+ const {
+ namespace,
+ pineconeIndex,
+ textKey,
+ filter,
+ pineconeConfig,
+ ...asyncCallerArgs
+ } = params;
this.namespace = namespace;
- this.pineconeIndex = pineconeIndex;
+ if (!pineconeIndex && !pineconeConfig) {
+ throw new Error("pineconeConfig or pineconeIndex must be provided.");
+ }
+ if (pineconeIndex && pineconeConfig) {
+ throw new Error(
+ "Only one of pineconeConfig or pineconeIndex can be provided."
+ );
+ }
+
+ if (pineconeIndex) {
+ this.pineconeIndex = pineconeIndex;
+ } else if (pineconeConfig) {
+ this.pineconeIndex = new PineconeIndex(
+ pineconeConfig.indexName,
+ {
+ ...pineconeConfig.config,
+ sourceTag: "langchainjs",
+ },
+ pineconeConfig.namespace,
+ pineconeConfig.indexHostUrl,
+ pineconeConfig.additionalHeaders
+ );
+ }
+
this.textKey = textKey ?? "text";
this.filter = filter;
this.caller = new AsyncCaller(asyncCallerArgs);
diff --git a/yarn.lock b/yarn.lock
index f402d6d6237d..3b510373fe80 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -9037,7 +9037,7 @@ __metadata:
google-auth-library: ^8.9.0
googleapis: ^126.0.1
graphql: ^16.6.0
- hnswlib-node: ^1.4.2
+ hnswlib-node: ^3.0.0
html-to-text: ^9.0.5
interface-datastore: ^8.2.11
ioredis: ^5.3.2
@@ -9420,7 +9420,7 @@ __metadata:
languageName: unknown
linkType: soft
-"@langchain/google-common@workspace:libs/langchain-google-common, @langchain/google-common@~0.0.0":
+"@langchain/google-common@workspace:*, @langchain/google-common@workspace:libs/langchain-google-common, @langchain/google-common@~0.0.2":
version: 0.0.0-use.local
resolution: "@langchain/google-common@workspace:libs/langchain-google-common"
dependencies:
@@ -9447,16 +9447,18 @@ __metadata:
rollup: ^4.5.2
ts-jest: ^29.1.0
typescript: <5.2.0
+ zod: ^3.22.4
+ zod-to-json-schema: ^3.22.4
languageName: unknown
linkType: soft
-"@langchain/google-gauth@workspace:libs/langchain-google-gauth":
+"@langchain/google-gauth@workspace:libs/langchain-google-gauth, @langchain/google-gauth@~0.0.1":
version: 0.0.0-use.local
resolution: "@langchain/google-gauth@workspace:libs/langchain-google-gauth"
dependencies:
"@jest/globals": ^29.5.0
"@langchain/core": ~0.1.1
- "@langchain/google-common": ~0.0.0
+ "@langchain/google-common": ~0.0.2
"@langchain/scripts": ~0.0
"@swc/core": ^1.3.90
"@swc/jest": ^0.2.29
@@ -9514,13 +9516,75 @@ __metadata:
languageName: unknown
linkType: soft
-"@langchain/google-webauth@workspace:libs/langchain-google-webauth":
+"@langchain/google-vertexai-web@workspace:*, @langchain/google-vertexai-web@workspace:libs/langchain-google-vertexai-web":
+ version: 0.0.0-use.local
+ resolution: "@langchain/google-vertexai-web@workspace:libs/langchain-google-vertexai-web"
+ dependencies:
+ "@jest/globals": ^29.5.0
+ "@langchain/core": ~0.1.1
+ "@langchain/google-webauth": ~0.0.1
+ "@langchain/scripts": ~0.0
+ "@swc/core": ^1.3.90
+ "@swc/jest": ^0.2.29
+ "@tsconfig/recommended": ^1.0.3
+ "@typescript-eslint/eslint-plugin": ^6.12.0
+ "@typescript-eslint/parser": ^6.12.0
+ dotenv: ^16.3.1
+ dpdm: ^3.12.0
+ eslint: ^8.33.0
+ eslint-config-airbnb-base: ^15.0.0
+ eslint-config-prettier: ^8.6.0
+ eslint-plugin-import: ^2.27.5
+ eslint-plugin-no-instanceof: ^1.0.1
+ eslint-plugin-prettier: ^4.2.1
+ jest: ^29.5.0
+ jest-environment-node: ^29.6.4
+ prettier: ^2.8.3
+ release-it: ^15.10.1
+ rollup: ^4.5.2
+ ts-jest: ^29.1.0
+ typescript: <5.2.0
+ languageName: unknown
+ linkType: soft
+
+"@langchain/google-vertexai@workspace:*, @langchain/google-vertexai@workspace:libs/langchain-google-vertexai":
+ version: 0.0.0-use.local
+ resolution: "@langchain/google-vertexai@workspace:libs/langchain-google-vertexai"
+ dependencies:
+ "@jest/globals": ^29.5.0
+ "@langchain/core": ~0.1.1
+ "@langchain/google-gauth": ~0.0.1
+ "@langchain/scripts": ~0.0
+ "@swc/core": ^1.3.90
+ "@swc/jest": ^0.2.29
+ "@tsconfig/recommended": ^1.0.3
+ "@typescript-eslint/eslint-plugin": ^6.12.0
+ "@typescript-eslint/parser": ^6.12.0
+ dotenv: ^16.3.1
+ dpdm: ^3.12.0
+ eslint: ^8.33.0
+ eslint-config-airbnb-base: ^15.0.0
+ eslint-config-prettier: ^8.6.0
+ eslint-plugin-import: ^2.27.5
+ eslint-plugin-no-instanceof: ^1.0.1
+ eslint-plugin-prettier: ^4.2.1
+ jest: ^29.5.0
+ jest-environment-node: ^29.6.4
+ prettier: ^2.8.3
+ release-it: ^15.10.1
+ rollup: ^4.5.2
+ ts-jest: ^29.1.0
+ typescript: <5.2.0
+ languageName: unknown
+ linkType: soft
+
+"@langchain/google-webauth@workspace:libs/langchain-google-webauth, @langchain/google-webauth@~0.0.1":
version: 0.0.0-use.local
resolution: "@langchain/google-webauth@workspace:libs/langchain-google-webauth"
dependencies:
"@jest/globals": ^29.5.0
"@langchain/core": ~0.1.1
- "@langchain/google-common": ~0.0.0
+ "@langchain/google-common": ~0.0.2
"@langchain/scripts": ~0.0
"@swc/core": ^1.3.90
"@swc/jest": ^0.2.29
@@ -9718,7 +9782,7 @@ __metadata:
"@jest/globals": ^29.5.0
"@langchain/core": ~0.1
"@langchain/scripts": ~0.0
- "@pinecone-database/pinecone": ^2.0.0
+ "@pinecone-database/pinecone": ^2.2.0
"@swc/core": ^1.3.90
"@swc/jest": ^0.2.29
"@tsconfig/recommended": ^1.0.3
@@ -10683,15 +10747,15 @@ __metadata:
languageName: node
linkType: hard
-"@pinecone-database/pinecone@npm:^2.0.0":
- version: 2.0.1
- resolution: "@pinecone-database/pinecone@npm:2.0.1"
+"@pinecone-database/pinecone@npm:^2.2.0":
+ version: 2.2.0
+ resolution: "@pinecone-database/pinecone@npm:2.2.0"
dependencies:
"@sinclair/typebox": ^0.29.0
ajv: ^8.12.0
cross-fetch: ^3.1.5
encoding: ^0.1.13
- checksum: 43ece04cd66a597281a92d189561bd8eed133a541c32c9d2f8a7c10b2b6c899eeaaf3c7a14387ba62e05f0fa5c441bd3588a9a6c36dddd265699587a9eea8cd1
+ checksum: c1844eaa716746a3895871499cbf3fa6ecf20deb0b236999d28285cfb9b438fb053fb41a4c9c17f96b28f0dab9c09be59308476a1534d4a8f35518b761a1a148
languageName: node
linkType: hard
@@ -21567,7 +21631,10 @@ __metadata:
"@langchain/community": "workspace:*"
"@langchain/core": "workspace:*"
"@langchain/exa": "workspace:*"
+ "@langchain/google-common": "workspace:*"
"@langchain/google-genai": "workspace:*"
+ "@langchain/google-vertexai": "workspace:*"
+ "@langchain/google-vertexai-web": "workspace:*"
"@langchain/groq": "workspace:*"
"@langchain/mistralai": "workspace:*"
"@langchain/mongodb": "workspace:*"
@@ -21579,7 +21646,7 @@ __metadata:
"@langchain/weaviate": "workspace:*"
"@langchain/yandex": "workspace:*"
"@opensearch-project/opensearch": ^2.2.0
- "@pinecone-database/pinecone": ^2.0.0
+ "@pinecone-database/pinecone": ^2.2.0
"@planetscale/database": ^1.8.0
"@prisma/client": ^4.11.0
"@raycast/api": ^1.55.2
@@ -23719,6 +23786,17 @@ __metadata:
languageName: node
linkType: hard
+"hnswlib-node@npm:^3.0.0":
+ version: 3.0.0
+ resolution: "hnswlib-node@npm:3.0.0"
+ dependencies:
+ bindings: ^1.5.0
+ node-addon-api: ^8.0.0
+ node-gyp: latest
+ checksum: 539a12581cae4efc99d0bfb36a6413a17021b68601c2ea47628ac40fd431b6fc01a259392e6fd997f2ca47b18194fd6985017a8714c99c390880e53e5f8dc622
+ languageName: node
+ linkType: hard
+
"hoist-non-react-statics@npm:^3.1.0":
version: 3.3.2
resolution: "hoist-non-react-statics@npm:3.3.2"
@@ -28475,6 +28553,15 @@ __metadata:
languageName: node
linkType: hard
+"node-addon-api@npm:^8.0.0":
+ version: 8.0.0
+ resolution: "node-addon-api@npm:8.0.0"
+ dependencies:
+ node-gyp: latest
+ checksum: 4996f919b40125b435beff2744a43d846e649421f97321c58a7e205c125514b2bb0f5b299291876fdbcecb47ecf06e507e9f59d2848b6e495abf99fe585e8a47
+ languageName: node
+ linkType: hard
+
"node-api-headers@npm:^0.0.2":
version: 0.0.2
resolution: "node-api-headers@npm:0.0.2"