Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Google AI Gemini Spring Boot Starter #92

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
67 changes: 67 additions & 0 deletions langchain4j-google-ai-gemini-spring-boot-starter/pom.xml
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Maven module: Spring Boot auto-configuration starter for Google AI Gemini. -->
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <!-- Inherits plugin/dependency management from the langchain4j-spring aggregator. -->
    <parent>
        <groupId>dev.langchain4j</groupId>
        <artifactId>langchain4j-spring</artifactId>
        <version>0.37.0-SNAPSHOT</version>
        <relativePath>../pom.xml</relativePath>
    </parent>

    <artifactId>langchain4j-google-ai-gemini-spring-boot-starter</artifactId>
    <name>LangChain4j Spring Boot starter for Google AI Gemini</name>

    <dependencies>

        <!-- The underlying LangChain4j Gemini client this starter auto-configures. -->
        <dependency>
            <groupId>dev.langchain4j</groupId>
            <artifactId>langchain4j-google-ai-gemini</artifactId>
            <version>${project.version}</version>
        </dependency>

        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter</artifactId>
        </dependency>

        <!-- Generates spring-autoconfigure-metadata.properties for faster startup;
             compile-time only, hence optional. -->
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-autoconfigure-processor</artifactId>
            <optional>true</optional>
        </dependency>

        <!-- should be listed before spring-boot-configuration-processor -->
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <scope>provided</scope>
        </dependency>

        <!-- needed to generate automatic metadata about available config properties -->
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-configuration-processor</artifactId>
            <optional>true</optional>
        </dependency>

        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
        </dependency>

    </dependencies>

    <licenses>
        <license>
            <name>Apache-2.0</name>
            <url>https://www.apache.org/licenses/LICENSE-2.0.txt</url>
            <distribution>repo</distribution>
            <comments>A business-friendly OSS license</comments>
        </license>
    </licenses>

</project>
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
package dev.langchain4j.googleaigemini.spring;

import dev.langchain4j.model.googleai.GoogleAiGeminiChatModel;
import dev.langchain4j.model.googleai.GoogleAiGeminiStreamingChatModel;
import dev.langchain4j.model.googleai.GoogleAiGeminiTokenizer;
import org.springframework.boot.autoconfigure.AutoConfiguration;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;

import static dev.langchain4j.googleaigemini.spring.Properties.PREFIX;

@AutoConfiguration
@EnableConfigurationProperties(Properties.class)
public class AutoConfig {

/**
* Creates a bean for the {@link GoogleAiGeminiChatModel}.
* <p>
* This method configures and initializes a chat model using the provided properties.
* The bean is only created if the property {@code langchain4j.google-ai-gemini.chat-model.api-key} is defined.
* </p>
*
* @param properties the configuration properties containing the chat model settings
* @return a configured instance of {@link GoogleAiGeminiChatModel}
*/
@Bean
@ConditionalOnProperty(name = PREFIX + ".chat-model.api-key")
GoogleAiGeminiChatModel googleAiGeminiChatModel(Properties properties) {
ChatModelProperties chatModelProperties = properties.getChatModel();
return GoogleAiGeminiChatModel.builder()
.apiKey(chatModelProperties.apiKey())
.modelName(chatModelProperties.modelName())
.temperature(chatModelProperties.temperature())
.maxOutputTokens(chatModelProperties.maxOutputTokens())
.topK(chatModelProperties.topK())
.topP(chatModelProperties.topP())
.maxRetries(chatModelProperties.maxRetries())
.logRequestsAndResponses(chatModelProperties.logRequestsAndResponses())
.allowCodeExecution(chatModelProperties.allowCodeExecution())
.includeCodeExecutionOutput(chatModelProperties.includeCodeExecutionOutput())
.timeout(chatModelProperties.timeout())
.build();
}

/**
* Creates a bean for the {@link GoogleAiGeminiStreamingChatModel}.
* <p>
* This method configures and initializes a streaming chat model using the provided properties.
* The bean is only created if the property {@code langchain4j.google-ai-gemini.streaming-chat-model.api-key} is defined.
* </p>
*
* @param properties the configuration properties containing the streaming chat model settings
* @return a configured instance of {@link GoogleAiGeminiStreamingChatModel}
*/
@Bean
@ConditionalOnProperty(name = PREFIX + ".streaming-chat-model.api-key")
GoogleAiGeminiStreamingChatModel googleAiGeminiStreamingChatModel(Properties properties) {
ChatModelProperties streamingChatModelProperties = properties.getStreamingChatModel();
return GoogleAiGeminiStreamingChatModel.builder()
.apiKey(streamingChatModelProperties.apiKey())
.modelName(streamingChatModelProperties.modelName())
.temperature(streamingChatModelProperties.temperature())
.maxOutputTokens(streamingChatModelProperties.maxOutputTokens())
.topK(streamingChatModelProperties.topK())
.topP(streamingChatModelProperties.topP())
.maxRetries(streamingChatModelProperties.maxRetries())
.logRequestsAndResponses(streamingChatModelProperties.logRequestsAndResponses())
.allowCodeExecution(streamingChatModelProperties.allowCodeExecution())
.includeCodeExecutionOutput(streamingChatModelProperties.includeCodeExecutionOutput())
.timeout(streamingChatModelProperties.timeout())
.build();
}

/**
* Creates a bean for the {@link GoogleAiGeminiTokenizer}.
* <p>
* This method configures and initializes a tokenizer using the provided properties.
* The bean is only created if the property {@code langchain4j.google-ai-gemini.tokenizer.api-key} is defined.
* </p>
*
* @param properties the configuration properties containing the tokenizer settings
* @return a configured instance of {@link GoogleAiGeminiTokenizer}
*/
@Bean
@ConditionalOnProperty(name = PREFIX + ".tokenizer.api-key")
GoogleAiGeminiTokenizer googleAiGeminiTokenizer(Properties properties) {
TokenizerProperties tokenizerProperties = properties.getTokenizer();
return GoogleAiGeminiTokenizer.builder()
.apiKey(tokenizerProperties.apiKey())
.modelName(tokenizerProperties.modelName())
.maxRetries(tokenizerProperties.maxRetries())
.logRequestsAndResponses(tokenizerProperties.logRequestsAndResponses())
.timeout(tokenizerProperties.timeout())
.build();
}

}
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
package dev.langchain4j.googleaigemini.spring;

import java.time.Duration;

/**
 * Configuration properties for the Google AI Gemini Chat Model.
 * <p>
 * This class defines the necessary properties for configuring
 * and using the chat model.
 * </p>
 * <p>
 * The same record is reused for both the blocking and the streaming chat model
 * property groups. Boolean components are primitives, so an unset property binds
 * to {@code false}; object components bind to {@code null} when unset.
 * </p>
 *
 * @param apiKey The API key for authenticating requests to the Google AI Gemini service.
 * @param modelName The name of the model to use.
 * @param temperature The temperature setting to control response randomness.
 * @param maxOutputTokens The maximum number of tokens to include in the model's output.
 * @param topK The top-K sampling parameter to refine the response.
 * @param topP The top-P (nucleus sampling) parameter for controlling diversity.
 * @param maxRetries The maximum number of retries for failed requests.
 * @param timeout The timeout duration for chat model requests.
 * @param logRequestsAndResponses Flag to enable or disable logging of requests and responses.
 * @param allowCodeExecution Flag to allow or disallow the execution of code.
 * @param includeCodeExecutionOutput Flag to include or exclude code execution output in the response.
 */
record ChatModelProperties(
        String apiKey,
        String modelName,
        Double temperature,
        Integer maxOutputTokens,
        Integer topK,
        Double topP,
        Integer maxRetries,
        Duration timeout,
        boolean logRequestsAndResponses,
        boolean allowCodeExecution,
        boolean includeCodeExecutionOutput
) {
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
package dev.langchain4j.googleaigemini.spring;

import lombok.Getter;
import lombok.Setter;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.context.properties.NestedConfigurationProperty;

/**
 * Root configuration properties for the Google AI Gemini starter, bound from
 * the {@code langchain4j.google-ai-gemini} prefix.
 * <p>
 * Accessors are generated by Lombok; fields are {@code private} so the bound
 * state is only reachable through them (minimize accessibility — they were
 * previously package-private for no reason).
 */
@Getter
@Setter
@ConfigurationProperties(prefix = Properties.PREFIX)
public class Properties {

    static final String PREFIX = "langchain4j.google-ai-gemini";

    // Settings for the blocking chat model ({@code *.chat-model.*}).
    @NestedConfigurationProperty
    private ChatModelProperties chatModel;

    // Settings for the streaming chat model ({@code *.streaming-chat-model.*});
    // reuses the same record shape as the blocking model.
    @NestedConfigurationProperty
    private ChatModelProperties streamingChatModel;

    // Settings for the tokenizer ({@code *.tokenizer.*}).
    @NestedConfigurationProperty
    private TokenizerProperties tokenizer;

}
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
package dev.langchain4j.googleaigemini.spring;

import java.time.Duration;

/**
 * Configuration properties for the Google AI Gemini Tokenizer.
 * <p>
 * This class defines the necessary properties for configuring
 * and using the tokenizer.
 * </p>
 * <p>
 * {@code logRequestsAndResponses} is a primitive, so an unset property binds to
 * {@code false}; object components bind to {@code null} when unset.
 * </p>
 *
 * @param apiKey The API key for authenticating requests to the Google AI Gemini service.
 * @param modelName The name of the model to use.
 * @param maxRetries The maximum number of retries for failed requests.
 * @param logRequestsAndResponses Flag to enable or disable logging of requests and responses.
 * @param timeout The timeout duration for tokenizer requests.
 */
record TokenizerProperties(
        String apiKey,
        String modelName,
        Integer maxRetries,
        boolean logRequestsAndResponses,
        Duration timeout
) {
}
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
dev.langchain4j.googleaigemini.spring.AutoConfig
Original file line number Diff line number Diff line change
@@ -0,0 +1,108 @@
package dev.langchain4j.googleaigemini.spring;

import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.model.StreamingResponseHandler;
import dev.langchain4j.model.Tokenizer;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.chat.StreamingChatLanguageModel;
import dev.langchain4j.model.googleai.GoogleAiGeminiChatModel;
import dev.langchain4j.model.googleai.GoogleAiGeminiStreamingChatModel;
import dev.langchain4j.model.googleai.GoogleAiGeminiTokenizer;
import dev.langchain4j.model.output.Response;
import org.junit.jupiter.api.Test;
import org.springframework.boot.autoconfigure.AutoConfigurations;
import org.springframework.boot.test.context.runner.ApplicationContextRunner;

import java.util.concurrent.CompletableFuture;

import static java.util.concurrent.TimeUnit.SECONDS;
import static org.assertj.core.api.Assertions.assertThat;

class AutoConfigIT {

private static final String API_KEY = System.getenv("GOOGLE_API_KEY");
private static final String MODEL = "gemini-1.5-flash";

private final ApplicationContextRunner contextRunner = new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(AutoConfig.class));

@Test
void should_provide_chat_model() {
contextRunner
.withPropertyValues(
"langchain4j.google-ai-gemini.chat-model.api-key="+API_KEY,
"langchain4j.google-ai-gemini.chat-model.model-name="+ MODEL,
"langchain4j.google-ai-gemini.chat-model.max-output-tokens=20"
)
.run(context -> {
ChatLanguageModel chatLanguageModel = context.getBean(ChatLanguageModel.class);
assertThat(chatLanguageModel).isInstanceOf(GoogleAiGeminiChatModel.class);
assertThat(chatLanguageModel.generate("What is the capital of Germany?")).contains("Berlin");
assertThat(context.getBean(GoogleAiGeminiChatModel.class)).isSameAs(chatLanguageModel);
});
}

@Test
void should_provide_streaming_chat_model() {
contextRunner
.withPropertyValues(
"langchain4j.google-ai-gemini.streaming-chat-model.api-key=" + API_KEY,
"langchain4j.google-ai-gemini.streaming-chat-model.model-name="+ MODEL,
"langchain4j.google-ai-gemini.streaming-chat-model.max-tokens=20"
)
.run(context -> {

StreamingChatLanguageModel streamingChatLanguageModel = context.getBean(StreamingChatLanguageModel.class);
assertThat(streamingChatLanguageModel).isInstanceOf(GoogleAiGeminiStreamingChatModel.class);
CompletableFuture<Response<AiMessage>> future = new CompletableFuture<>();
streamingChatLanguageModel.generate("What is the capital of Germany?", new StreamingResponseHandler<>() {

@Override
public void onNext(String token) {
}

@Override
public void onComplete(Response<AiMessage> response) {
future.complete(response);
}

@Override
public void onError(Throwable error) {
}
});
Response<AiMessage> response = future.get(60, SECONDS);
assertThat(response.content().text()).contains("Berlin");

assertThat(context.getBean(GoogleAiGeminiStreamingChatModel.class)).isSameAs(streamingChatLanguageModel);
});
}

@Test
void should_provide_tokenizer() {
contextRunner
.withPropertyValues(
"langchain4j.google-ai-gemini.tokenizer.api-key=" + API_KEY,
"langchain4j.google-ai-gemini.tokenizer.model-name=" + MODEL,
"langchain4j.google-ai-gemini.language-model.max-retries=3"
)
.run(context -> {

Tokenizer tokenizer = context.getBean(Tokenizer.class);
assertThat(tokenizer).isInstanceOf(GoogleAiGeminiTokenizer.class);
assertThat(tokenizer.estimateTokenCountInText("What is the capital of Germany?")).isPositive();

assertThat(context.getBean(GoogleAiGeminiTokenizer.class)).isSameAs(tokenizer);
});
}

@Test
void should_not_create_bean_when_no_api_key() {
contextRunner
.run(context -> {
assertThat(context).doesNotHaveBean(GoogleAiGeminiChatModel.class);
assertThat(context).doesNotHaveBean(GoogleAiGeminiStreamingChatModel.class);
assertThat(context).doesNotHaveBean(GoogleAiGeminiTokenizer.class);
});
}

}
1 change: 1 addition & 0 deletions pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
<module>langchain4j-azure-open-ai-spring-boot-starter</module>
<module>langchain4j-voyage-ai-spring-boot-starter</module>
<module>langchain4j-github-models-spring-boot-starter</module>
<module>langchain4j-google-ai-gemini-spring-boot-starter</module>

<module>langchain4j-vertex-ai-gemini-spring-boot-starter</module>
<module>langchain4j-elasticsearch-spring-boot-starter</module>
Expand Down