diff --git a/CHANGELOG.md b/CHANGELOG.md
index a96e9d1a..6cf01663 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,20 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+## [6.1.2] - 2024-10-07
+
+### Fixed
+
+- Cleared context state credential and updated the page history after logout
+
+### Changed
+
+- Added [Anthropic Claude 3.5 Sonnet](https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html) as an additional option to the list of LLM models provided through cloudformation parameters `LLMBedrockModelId` and `BedrockKnowledgeBaseModel`
+
+### Deprecated
+
+- SageMaker support has been deprecated and will be removed in the next release
+
## [6.1.1] - 2024-09-26
### Fixed
diff --git a/README.md b/README.md
index 3cba9168..d67f2d09 100644
--- a/README.md
+++ b/README.md
@@ -336,6 +336,7 @@ As QnABot evolves over the years, it makes use of various services and functiona
_Note: **Deployable solution versions** refers to the ability to deploy the version of QnABot in their AWS accounts. **Actively supported versions** for QnABot is only available for the latest version of QnABot._
### Deployable Versions
+- [v6.1.2](https://github.com/aws-solutions/qnabot-on-aws/releases/tag/v6.1.2) - [Public](https://solutions-reference.s3.amazonaws.com/qnabot-on-aws/v6.1.2/qnabot-on-aws-main.template)/[VPC](https://solutions-reference.s3.amazonaws.com/qnabot-on-aws/v6.1.2/qnabot-on-aws-vpc.template)
- [v6.1.1](https://github.com/aws-solutions/qnabot-on-aws/releases/tag/v6.1.1) - [Public](https://solutions-reference.s3.amazonaws.com/qnabot-on-aws/v6.1.1/qnabot-on-aws-main.template)/[VPC](https://solutions-reference.s3.amazonaws.com/qnabot-on-aws/v6.1.1/qnabot-on-aws-vpc.template)
- [v6.1.0](https://github.com/aws-solutions/qnabot-on-aws/releases/tag/v6.1.0) - [Public](https://solutions-reference.s3.amazonaws.com/qnabot-on-aws/v6.1.0/qnabot-on-aws-main.template)/[VPC](https://solutions-reference.s3.amazonaws.com/qnabot-on-aws/v6.1.0/qnabot-on-aws-vpc.template)
- [v6.0.3](https://github.com/aws-solutions/qnabot-on-aws/releases/tag/v6.0.3) - [Public](https://solutions-reference.s3.amazonaws.com/qnabot-on-aws/v6.0.3/qnabot-on-aws-main.template)/[VPC](https://solutions-reference.s3.amazonaws.com/qnabot-on-aws/v6.0.3/qnabot-on-aws-vpc.template)
diff --git a/source/cli/aws_solutions/qnabot/cli/qnabot_cli.py b/source/cli/aws_solutions/qnabot/cli/qnabot_cli.py
index 4489e367..b0bf5d2d 100644
--- a/source/cli/aws_solutions/qnabot/cli/qnabot_cli.py
+++ b/source/cli/aws_solutions/qnabot/cli/qnabot_cli.py
@@ -23,7 +23,7 @@
@click.pass_context
def cli(ctx) -> None:
os.environ["SOLUTION_ID"] = "SO0189"
- os.environ["SOLUTION_VERSION"] = "v6.1.1"
+ os.environ["SOLUTION_VERSION"] = "v6.1.2"
@cli.command("import")
diff --git a/source/docs/LLM_Retrieval_and_generative_question_answering/README.md b/source/docs/LLM_Retrieval_and_generative_question_answering/README.md
index ad10987f..deb8bf00 100644
--- a/source/docs/LLM_Retrieval_and_generative_question_answering/README.md
+++ b/source/docs/LLM_Retrieval_and_generative_question_answering/README.md
@@ -71,6 +71,7 @@ Utilizes one of the Amazon Bedrock foundation models to generate text. Currently
- [Anthropic Claude Instant 1.2](https://us-east-1.console.aws.amazon.com/bedrock/home?region=us-east-1#/providers?model=anthropic.claude-instant-v1)
- [Anthropic Claude 2.1](https://us-east-1.console.aws.amazon.com/bedrock/home?region=us-east-1#/providers?model=anthropic.claude-v2:1)
- [Anthropic Claude 3 Sonnet](https://us-east-1.console.aws.amazon.com/bedrock/home?region=us-east-1#/providers?model=anthropic.claude-3-sonnet-20240229-v1:0)
+- [Anthropic Claude 3.5 Sonnet](https://us-east-1.console.aws.amazon.com/bedrock/home?region=us-east-1#/providers?model=anthropic.claude-3-5-sonnet-20240620-v1:0)
- [Anthropic Claude 3 Haiku](https://us-east-1.console.aws.amazon.com/bedrock/home?region=us-east-1#/providers?model=anthropic.claude-3-haiku-20240307-v1:0)
- [AI21 Jurassic-2 Ultra](https://us-east-1.console.aws.amazon.com/bedrock/home?region=us-east-1#/providers?model=ai21.j2-ultra-v1)
- [AI21 Jurassic-2 Mid](https://us-east-1.console.aws.amazon.com/bedrock/home?region=us-east-1#/providers?model=ai21.j2-mid-v1)
diff --git a/source/docs/bedrock_knowledgebase_rag/README.md b/source/docs/bedrock_knowledgebase_rag/README.md
index b052e3af..e2081b83 100644
--- a/source/docs/bedrock_knowledgebase_rag/README.md
+++ b/source/docs/bedrock_knowledgebase_rag/README.md
@@ -17,8 +17,9 @@ With this integration, QnABot on AWS can answer a question and its follow-up fro
- [Titan Text G1 - Premier](https://us-east-1.console.aws.amazon.com/bedrock/home?region=us-east-1#/providers?model=amazon.titan-text-premier-v1:0)
- [Anthropic Claude Instant 1.2](https://us-east-1.console.aws.amazon.com/bedrock/home?region=us-east-1#/providers?model=anthropic.claude-instant-v1)
- [Anthropic Claude 2.1](https://us-east-1.console.aws.amazon.com/bedrock/home?region=us-east-1#/providers?model=anthropic.claude-v2:1)
-- [Anthropic Claude 3 Sonnet](https://us-east-1.console.aws.amazon.com/bedrock/home?region=us-east-1#/providers?model=anthropic.claude-3-sonnet-20240229-v1:0)
- [Anthropic Claude 3 Haiku](https://us-east-1.console.aws.amazon.com/bedrock/home?region=us-east-1#/providers?model=anthropic.claude-3-haiku-20240307-v1:0)
+- [Anthropic Claude 3 Sonnet](https://us-east-1.console.aws.amazon.com/bedrock/home?region=us-east-1#/providers?model=anthropic.claude-3-sonnet-20240229-v1:0)
+- [Anthropic Claude 3.5 Sonnet](https://us-east-1.console.aws.amazon.com/bedrock/home?region=us-east-1#/providers?model=anthropic.claude-3-5-sonnet-20240620-v1:0)
## Amazon Bedrock Knowledge Base Quick Setup:
diff --git a/source/lambda/aws-sdk-layer/package-lock.json b/source/lambda/aws-sdk-layer/package-lock.json
index e1b47c62..5aaf94cb 100644
--- a/source/lambda/aws-sdk-layer/package-lock.json
+++ b/source/lambda/aws-sdk-layer/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "aws-layer",
- "version": "6.1.1",
+ "version": "6.1.2",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "aws-layer",
- "version": "6.1.1",
+ "version": "6.1.2",
"license": "Apache-2.0",
"dependencies": {
"@aws-sdk/client-comprehend": "^3.621.0",
diff --git a/source/lambda/aws-sdk-layer/package.json b/source/lambda/aws-sdk-layer/package.json
index e36be9e6..f3abbc00 100644
--- a/source/lambda/aws-sdk-layer/package.json
+++ b/source/lambda/aws-sdk-layer/package.json
@@ -1,6 +1,6 @@
{
"name": "aws-layer",
- "version": "6.1.1",
+ "version": "6.1.2",
"description": "QnABot Lambda aws-sdk-layer",
"main": "index.js",
"scripts": {
diff --git a/source/lambda/cfn-lambda-layer/package-lock.json b/source/lambda/cfn-lambda-layer/package-lock.json
index 1f757c8c..2a90dabb 100644
--- a/source/lambda/cfn-lambda-layer/package-lock.json
+++ b/source/lambda/cfn-lambda-layer/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "cfn-lambda-layer",
- "version": "6.1.1",
+ "version": "6.1.2",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "cfn-lambda-layer",
- "version": "6.1.1",
+ "version": "6.1.2",
"license": "Apache-2.0",
"dependencies": {
"cfn-lambda": "^5.1.0"
diff --git a/source/lambda/cfn-lambda-layer/package.json b/source/lambda/cfn-lambda-layer/package.json
index 5918bb77..6db02ebe 100644
--- a/source/lambda/cfn-lambda-layer/package.json
+++ b/source/lambda/cfn-lambda-layer/package.json
@@ -1,6 +1,6 @@
{
"name": "cfn-lambda-layer",
- "version": "6.1.1",
+ "version": "6.1.2",
"description": "QnABot Cfn Lambda Layer",
"main": "index.js",
"scripts": {
diff --git a/source/lambda/cfn/package-lock.json b/source/lambda/cfn/package-lock.json
index 9bb877f2..b96bbbbb 100644
--- a/source/lambda/cfn/package-lock.json
+++ b/source/lambda/cfn/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "cfn",
- "version": "6.1.1",
+ "version": "6.1.2",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "cfn",
- "version": "6.1.1",
+ "version": "6.1.2",
"license": "Apache-2.0",
"dependencies": {
"@aws-sdk/client-api-gateway": "^3.621.0",
diff --git a/source/lambda/cfn/package.json b/source/lambda/cfn/package.json
index 7cbec9a6..a4b71ba1 100644
--- a/source/lambda/cfn/package.json
+++ b/source/lambda/cfn/package.json
@@ -1,6 +1,6 @@
{
"name": "cfn",
- "version": "6.1.1",
+ "version": "6.1.2",
"description": "QnABot Cfn Lambda",
"main": "index.js",
"scripts": {
diff --git a/source/lambda/common-modules-layer/package-lock.json b/source/lambda/common-modules-layer/package-lock.json
index 62a5c1f7..91aa6cfe 100644
--- a/source/lambda/common-modules-layer/package-lock.json
+++ b/source/lambda/common-modules-layer/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "common-modules-layer",
- "version": "6.1.1",
+ "version": "6.1.2",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "common-modules-layer",
- "version": "6.1.1",
+ "version": "6.1.2",
"license": "Apache-2.0",
"dependencies": {
"@aws-sdk/credential-providers": "^3.511.0",
diff --git a/source/lambda/common-modules-layer/package.json b/source/lambda/common-modules-layer/package.json
index bc07b7a8..391cde73 100644
--- a/source/lambda/common-modules-layer/package.json
+++ b/source/lambda/common-modules-layer/package.json
@@ -1,6 +1,6 @@
{
"name": "common-modules-layer",
- "version": "6.1.1",
+ "version": "6.1.2",
"description": "QnABot Common-modules-layer lambda",
"main": "index.js",
"scripts": {
diff --git a/source/lambda/connect/package-lock.json b/source/lambda/connect/package-lock.json
index 001f9736..4ea2132b 100644
--- a/source/lambda/connect/package-lock.json
+++ b/source/lambda/connect/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "connect",
- "version": "6.1.1",
+ "version": "6.1.2",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "connect",
- "version": "6.1.1",
+ "version": "6.1.2",
"license": "Apache-2.0",
"devDependencies": {
"jest": "^29.7.0"
diff --git a/source/lambda/connect/package.json b/source/lambda/connect/package.json
index 80e19d33..31a16e45 100644
--- a/source/lambda/connect/package.json
+++ b/source/lambda/connect/package.json
@@ -1,6 +1,6 @@
{
"name": "connect",
- "version": "6.1.1",
+ "version": "6.1.2",
"description": "Lambda function used to support the Connect setup wizard",
"repository": {
"type": "git",
diff --git a/source/lambda/es-proxy-layer/lib/bedrock/bedrockClient.js b/source/lambda/es-proxy-layer/lib/bedrock/bedrockClient.js
index 233d7b22..31cf6793 100644
--- a/source/lambda/es-proxy-layer/lib/bedrock/bedrockClient.js
+++ b/source/lambda/es-proxy-layer/lib/bedrock/bedrockClient.js
@@ -33,6 +33,7 @@ const capabilityMapping = {
'meta.llama3-8b-instruct-v1': 'C041',
'amazon.titan-text-premier-v1': 'C042',
'amazon.titan-embed-text-v2': 'C043',
+ 'anthropic.claude-3.5-sonnet-v1': 'C044'
};
function isEmbedding(modelId) {
diff --git a/source/lambda/es-proxy-layer/package-lock.json b/source/lambda/es-proxy-layer/package-lock.json
index dcf278bb..14a9e9a1 100644
--- a/source/lambda/es-proxy-layer/package-lock.json
+++ b/source/lambda/es-proxy-layer/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "proxy-es",
- "version": "6.1.1",
+ "version": "6.1.2",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "proxy-es",
- "version": "6.1.1",
+ "version": "6.1.2",
"license": "Apache-2.0",
"dependencies": {
"@aws-sdk/client-bedrock-agent-runtime": "^3.616.0",
diff --git a/source/lambda/es-proxy-layer/package.json b/source/lambda/es-proxy-layer/package.json
index 17410a23..d992d22b 100644
--- a/source/lambda/es-proxy-layer/package.json
+++ b/source/lambda/es-proxy-layer/package.json
@@ -1,6 +1,6 @@
{
"name": "proxy-es",
- "version": "6.1.1",
+ "version": "6.1.2",
"description": "QnABot Lambda managing querying of data store",
"main": "index.js",
"scripts": {
diff --git a/source/lambda/es-proxy-layer/test/bedrockModels.test.js b/source/lambda/es-proxy-layer/test/bedrockModels.test.js
index f7f66390..128c2ea6 100644
--- a/source/lambda/es-proxy-layer/test/bedrockModels.test.js
+++ b/source/lambda/es-proxy-layer/test/bedrockModels.test.js
@@ -126,6 +126,26 @@ const llmModelBodies = {
}
],
},
+ 'anthropic.claude-3-5-sonnet-20240620-v1': {
+ max_tokens: 256,
+ temperature: 0,
+ top_k: 250,
+ top_p: 1,
+ stop_sequences: ['\n\nHuman:'],
+ anthropic_version: 'bedrock-2023-05-31',
+ system : 'You are a helpful AI assistant.',
+ messages : [
+ {
+ role: 'user',
+ content: [
+ {
+ type: 'text',
+ text: 'test prompt'
+ }
+ ]
+ }
+ ],
+ },
'anthropic.claude-3-haiku-20240307-v1': {
max_tokens: 256,
temperature: 0,
@@ -284,6 +304,17 @@ const llmModelResponses = {
})
)
},
+ 'anthropic.claude-3-5-sonnet-20240620-v1': {
+ body: Buffer.from(
+ JSON.stringify({
+ content: [
+ {
+ text: 'test response'
+ }
+ ]
+ })
+ )
+ },
'cohere.command-text-v14': {
body: Buffer.from(
JSON.stringify({
diff --git a/source/lambda/export/package-lock.json b/source/lambda/export/package-lock.json
index 3ede74cd..4a1fa1cc 100644
--- a/source/lambda/export/package-lock.json
+++ b/source/lambda/export/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "export",
- "version": "6.1.1",
+ "version": "6.1.2",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "export",
- "version": "6.1.1",
+ "version": "6.1.2",
"license": "Apache-2.0",
"dependencies": {
"lodash": "^4.17.21"
diff --git a/source/lambda/export/package.json b/source/lambda/export/package.json
index 7fa21621..a93c1cea 100644
--- a/source/lambda/export/package.json
+++ b/source/lambda/export/package.json
@@ -1,6 +1,6 @@
{
"name": "export",
- "version": "6.1.1",
+ "version": "6.1.2",
"description": "QnABot Lambda handling export of QIDs",
"main": "index.js",
"scripts": {
diff --git a/source/lambda/fulfillment/package-lock.json b/source/lambda/fulfillment/package-lock.json
index 18fcc481..e5ade67d 100644
--- a/source/lambda/fulfillment/package-lock.json
+++ b/source/lambda/fulfillment/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "fulfillment",
- "version": "6.1.1",
+ "version": "6.1.2",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "fulfillment",
- "version": "6.1.1",
+ "version": "6.1.2",
"license": "Apache-2.0",
"dependencies": {
"@aws-sdk/client-dynamodb": "^3.511.0",
diff --git a/source/lambda/fulfillment/package.json b/source/lambda/fulfillment/package.json
index 87670f62..23213bff 100644
--- a/source/lambda/fulfillment/package.json
+++ b/source/lambda/fulfillment/package.json
@@ -1,6 +1,6 @@
{
"name": "fulfillment",
- "version": "6.1.1",
+ "version": "6.1.2",
"description": "QnABot Lambda handling fulfillment of user requests",
"main": "handler.js",
"scripts": {
diff --git a/source/lambda/genesys/package-lock.json b/source/lambda/genesys/package-lock.json
index 405f2fec..be92d05d 100644
--- a/source/lambda/genesys/package-lock.json
+++ b/source/lambda/genesys/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "genesys",
- "version": "6.1.1",
+ "version": "6.1.2",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "genesys",
- "version": "6.1.1",
+ "version": "6.1.2",
"license": "Apache-2.0",
"devDependencies": {
"jest": "^29.7.0"
diff --git a/source/lambda/genesys/package.json b/source/lambda/genesys/package.json
index 080533af..0cb83e33 100644
--- a/source/lambda/genesys/package.json
+++ b/source/lambda/genesys/package.json
@@ -1,6 +1,6 @@
{
"name": "genesys",
- "version": "6.1.1",
+ "version": "6.1.2",
"description": "Lambda function used to support the Genesys setup wizard",
"repository": {
"type": "git",
diff --git a/source/lambda/import/package-lock.json b/source/lambda/import/package-lock.json
index 4363e2a2..3cefdb5e 100644
--- a/source/lambda/import/package-lock.json
+++ b/source/lambda/import/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "import",
- "version": "6.1.1",
+ "version": "6.1.2",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "import",
- "version": "6.1.1",
+ "version": "6.1.2",
"license": "Apache-2.0",
"dependencies": {
"read-excel-file": "^5.8.5"
diff --git a/source/lambda/import/package.json b/source/lambda/import/package.json
index 5f0aee50..f9c8cd94 100644
--- a/source/lambda/import/package.json
+++ b/source/lambda/import/package.json
@@ -1,6 +1,6 @@
{
"name": "import",
- "version": "6.1.1",
+ "version": "6.1.2",
"description": "QnABot Lambda handling import of QIDs",
"main": "index.js",
"scripts": {
diff --git a/source/lambda/js_lambda_hook_sdk/package-lock.json b/source/lambda/js_lambda_hook_sdk/package-lock.json
index 2ec71332..1d22143a 100644
--- a/source/lambda/js_lambda_hook_sdk/package-lock.json
+++ b/source/lambda/js_lambda_hook_sdk/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "js_lambda_hook_sdk",
- "version": "6.1.1",
+ "version": "6.1.2",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "js_lambda_hook_sdk",
- "version": "6.1.1",
+ "version": "6.1.2",
"license": "Apache-2.0",
"dependencies": {
"lodash": "^4.17.21"
diff --git a/source/lambda/js_lambda_hook_sdk/package.json b/source/lambda/js_lambda_hook_sdk/package.json
index 5133f03e..651b0878 100644
--- a/source/lambda/js_lambda_hook_sdk/package.json
+++ b/source/lambda/js_lambda_hook_sdk/package.json
@@ -1,6 +1,6 @@
{
"name": "js_lambda_hook_sdk",
- "version": "6.1.1",
+ "version": "6.1.2",
"description": "QnABot convenience layer, allowing users to create custom lambda hooks",
"directories": {
"lambda_hook_sdk": "lambda_hook_sdk",
diff --git a/source/lambda/lex-build/package-lock.json b/source/lambda/lex-build/package-lock.json
index 805242b8..dd7c8f12 100644
--- a/source/lambda/lex-build/package-lock.json
+++ b/source/lambda/lex-build/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "lex-build",
- "version": "6.1.1",
+ "version": "6.1.2",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "lex-build",
- "version": "6.1.1",
+ "version": "6.1.2",
"license": "Apache-2.0",
"dependencies": {
"@aws-sdk/client-lex-model-building-service": "^3.511.0"
diff --git a/source/lambda/lex-build/package.json b/source/lambda/lex-build/package.json
index 6888cca5..da1173a2 100644
--- a/source/lambda/lex-build/package.json
+++ b/source/lambda/lex-build/package.json
@@ -1,6 +1,6 @@
{
"name": "lex-build",
- "version": "6.1.1",
+ "version": "6.1.2",
"description": "QnABot lambda for rebuilding Amazon Lex bots",
"main": "handler.js",
"scripts": {
diff --git a/source/lambda/proxy-es/package-lock.json b/source/lambda/proxy-es/package-lock.json
index 870283be..5f61dbde 100644
--- a/source/lambda/proxy-es/package-lock.json
+++ b/source/lambda/proxy-es/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "proxy-es",
- "version": "6.1.1",
+ "version": "6.1.2",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "proxy-es",
- "version": "6.1.1",
+ "version": "6.1.2",
"license": "Apache-2.0"
}
}
diff --git a/source/lambda/proxy-es/package.json b/source/lambda/proxy-es/package.json
index 40638bb5..33735ac7 100644
--- a/source/lambda/proxy-es/package.json
+++ b/source/lambda/proxy-es/package.json
@@ -1,6 +1,6 @@
{
"name": "proxy-es",
- "version": "6.1.1",
+ "version": "6.1.2",
"description": "QnABot Lambda function is used to proxy request from ApiGateway to OpenSearch",
"main": "index.js",
"author": {
diff --git a/source/lambda/qnabot-common-layer/package-lock.json b/source/lambda/qnabot-common-layer/package-lock.json
index dee6053d..8e3040df 100644
--- a/source/lambda/qnabot-common-layer/package-lock.json
+++ b/source/lambda/qnabot-common-layer/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "qnabot-common-layer",
- "version": "6.1.1",
+ "version": "6.1.2",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "qnabot-common-layer",
- "version": "6.1.1",
+ "version": "6.1.2",
"license": "Apache-2.0",
"dependencies": {
"@aws-sdk/client-ssm": "^3.511.0",
diff --git a/source/lambda/qnabot-common-layer/package.json b/source/lambda/qnabot-common-layer/package.json
index adecb04d..16679af1 100644
--- a/source/lambda/qnabot-common-layer/package.json
+++ b/source/lambda/qnabot-common-layer/package.json
@@ -1,6 +1,6 @@
{
"name": "qnabot-common-layer",
- "version": "6.1.1",
+ "version": "6.1.2",
"description": "Lambda layers used to provide common logging and utility functions",
"repository": {
"type": "git",
diff --git a/source/lambda/schema/package-lock.json b/source/lambda/schema/package-lock.json
index bfea564b..377ba2ca 100644
--- a/source/lambda/schema/package-lock.json
+++ b/source/lambda/schema/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "schema",
- "version": "6.1.1",
+ "version": "6.1.2",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "schema",
- "version": "6.1.1",
+ "version": "6.1.2",
"license": "Apache-2.0",
"devDependencies": {
"jest": "^29.7.0"
diff --git a/source/lambda/schema/package.json b/source/lambda/schema/package.json
index 55392a16..95975217 100644
--- a/source/lambda/schema/package.json
+++ b/source/lambda/schema/package.json
@@ -1,6 +1,6 @@
{
"name": "schema",
- "version": "6.1.1",
+ "version": "6.1.2",
"description": "Lambda function used to provide the schemas for the various qid types",
"repository": {
"type": "git",
diff --git a/source/lambda/testall/package-lock.json b/source/lambda/testall/package-lock.json
index 3cfd8048..b7e85b86 100644
--- a/source/lambda/testall/package-lock.json
+++ b/source/lambda/testall/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "testall",
- "version": "6.1.1",
+ "version": "6.1.2",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "testall",
- "version": "6.1.1",
+ "version": "6.1.2",
"license": "Apache-2.0",
"dependencies": {
"@aws-sdk/client-lex-runtime-v2": "^3.511.0"
diff --git a/source/lambda/testall/package.json b/source/lambda/testall/package.json
index a7be883c..39071828 100644
--- a/source/lambda/testall/package.json
+++ b/source/lambda/testall/package.json
@@ -1,6 +1,6 @@
{
"name": "testall",
- "version": "6.1.1",
+ "version": "6.1.2",
"description": "Lambda function that reads QnAs from opensearch and performs test validation against each question defined in qna against current Lex bot",
"main": "index.js",
"scripts": {
diff --git a/source/lambda/translate/package-lock.json b/source/lambda/translate/package-lock.json
index 77dc45cd..7eded835 100644
--- a/source/lambda/translate/package-lock.json
+++ b/source/lambda/translate/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "translate",
- "version": "6.1.1",
+ "version": "6.1.2",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "translate",
- "version": "6.1.1",
+ "version": "6.1.2",
"license": "Apache-2.0",
"devDependencies": {
"aws-sdk-client-mock": "^3.0.1",
diff --git a/source/lambda/translate/package.json b/source/lambda/translate/package.json
index ef8e2c03..48f4622c 100644
--- a/source/lambda/translate/package.json
+++ b/source/lambda/translate/package.json
@@ -1,6 +1,6 @@
{
"name": "translate",
- "version": "6.1.1",
+ "version": "6.1.2",
"description": "Lambda function used to import custom terminologies into AWS Translate",
"repository": {
"type": "git",
diff --git a/source/package-lock.json b/source/package-lock.json
index 005cf056..4ef64db4 100644
--- a/source/package-lock.json
+++ b/source/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "qnabot-on-aws",
- "version": "6.1.1",
+ "version": "6.1.2",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "qnabot-on-aws",
- "version": "6.1.1",
+ "version": "6.1.2",
"license": "Apache-2.0",
"os": [
"darwin",
diff --git a/source/package.json b/source/package.json
index a7514a90..c0b2b949 100644
--- a/source/package.json
+++ b/source/package.json
@@ -1,6 +1,6 @@
{
"name": "qnabot-on-aws",
- "version": "6.1.1",
+ "version": "6.1.2",
"engines": {
"node": ">=18.0.0",
"npm": ">=10.0.0"
diff --git a/source/templates/examples/examples/package-lock.json b/source/templates/examples/examples/package-lock.json
index 7d11d83d..eee1dd32 100644
--- a/source/templates/examples/examples/package-lock.json
+++ b/source/templates/examples/examples/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "examples",
- "version": "6.1.1",
+ "version": "6.1.2",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "examples",
- "version": "6.1.1",
+ "version": "6.1.2",
"license": "Apache-2.0",
"dependencies": {
"cfn-response": "^1.0.1",
diff --git a/source/templates/examples/examples/package.json b/source/templates/examples/examples/package.json
index c4d9029f..083a1ea0 100644
--- a/source/templates/examples/examples/package.json
+++ b/source/templates/examples/examples/package.json
@@ -1,6 +1,6 @@
{
"name": "examples",
- "version": "6.1.1",
+ "version": "6.1.2",
"description": "Lambda contains a collection of lambda hooks for QnABot and a custom resource to create the example documents",
"main": "index.js",
"scripts": {
diff --git a/source/templates/examples/extensions/js_lambda_hooks/CreateRecentTopicsResponse/package-lock.json b/source/templates/examples/extensions/js_lambda_hooks/CreateRecentTopicsResponse/package-lock.json
index 46332c0b..825b891d 100644
--- a/source/templates/examples/extensions/js_lambda_hooks/CreateRecentTopicsResponse/package-lock.json
+++ b/source/templates/examples/extensions/js_lambda_hooks/CreateRecentTopicsResponse/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "createrecenttopicsresponse",
- "version": "6.1.1",
+ "version": "6.1.2",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "createrecenttopicsresponse",
- "version": "6.1.1",
+ "version": "6.1.2",
"license": "Apache-2.0",
"dependencies": {
"lodash": "^4.17.21"
diff --git a/source/templates/examples/extensions/js_lambda_hooks/CreateRecentTopicsResponse/package.json b/source/templates/examples/extensions/js_lambda_hooks/CreateRecentTopicsResponse/package.json
index 9e0d1251..d274d376 100644
--- a/source/templates/examples/extensions/js_lambda_hooks/CreateRecentTopicsResponse/package.json
+++ b/source/templates/examples/extensions/js_lambda_hooks/CreateRecentTopicsResponse/package.json
@@ -1,6 +1,6 @@
{
"name": "createrecenttopicsresponse",
- "version": "6.1.1",
+ "version": "6.1.2",
"description": "Lambda hook that creates recent topic response",
"main": "CreateRecentTopicResponse.js",
"scripts": {
diff --git a/source/templates/examples/extensions/js_lambda_hooks/CustomJSHook/package-lock.json b/source/templates/examples/extensions/js_lambda_hooks/CustomJSHook/package-lock.json
index 2572d384..9e646b78 100644
--- a/source/templates/examples/extensions/js_lambda_hooks/CustomJSHook/package-lock.json
+++ b/source/templates/examples/extensions/js_lambda_hooks/CustomJSHook/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "examples",
- "version": "6.1.1",
+ "version": "6.1.2",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "examples",
- "version": "6.1.1",
+ "version": "6.1.2",
"license": "Apache-2.0",
"dependencies": {
"cfn-response": "^1.0.1",
diff --git a/source/templates/examples/extensions/js_lambda_hooks/CustomJSHook/package.json b/source/templates/examples/extensions/js_lambda_hooks/CustomJSHook/package.json
index 609c812e..4b7f48ed 100644
--- a/source/templates/examples/extensions/js_lambda_hooks/CustomJSHook/package.json
+++ b/source/templates/examples/extensions/js_lambda_hooks/CustomJSHook/package.json
@@ -1,6 +1,6 @@
{
"name": "examples",
- "version": "6.1.1",
+ "version": "6.1.2",
"description": "Creates custom JS Lambda Hooks",
"main": "index.js",
"scripts": {
diff --git a/source/templates/examples/extensions/ui_imports/package-lock.json b/source/templates/examples/extensions/ui_imports/package-lock.json
index f7760fb0..22489551 100644
--- a/source/templates/examples/extensions/ui_imports/package-lock.json
+++ b/source/templates/examples/extensions/ui_imports/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "ui_import",
- "version": "6.1.1",
+ "version": "6.1.2",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "ui_import",
- "version": "6.1.1",
+ "version": "6.1.2",
"license": "Apache-2.0",
"dependencies": {
"cfn-response": "^1.0.1",
diff --git a/source/templates/examples/extensions/ui_imports/package.json b/source/templates/examples/extensions/ui_imports/package.json
index 70242070..97402511 100644
--- a/source/templates/examples/extensions/ui_imports/package.json
+++ b/source/templates/examples/extensions/ui_imports/package.json
@@ -1,6 +1,6 @@
{
"name": "ui_import",
- "version": "6.1.1",
+ "version": "6.1.2",
"description": "Add new content packages for Content Designer Import Examples/Extensions listing",
"main": "ui_import.js",
"scripts": {
diff --git a/source/templates/master/__snapshots__/index.test.js.snap b/source/templates/master/__snapshots__/index.test.js.snap
index 3f6a14f7..3a613d82 100644
--- a/source/templates/master/__snapshots__/index.test.js.snap
+++ b/source/templates/master/__snapshots__/index.test.js.snap
@@ -315,14 +315,14 @@ exports[`Verify master template is correct renders master template correctly 1`]
"ai21.j2-mid-v1": {
"MaxTokens": 8191,
"ModelID": "ai21.j2-mid-v1",
- "NoHitsRegex": "(Sorry, I don't know|unable to assist you|i don't have enough context|i don't have enough information|do not contain any information|i could not find an exact answer|no information in the search results|don't see any information in the provided search results|search results do not contain|no information in the provided search results|not find any information|search results did not contain|unable to respond|There is no mention of|documents do not mention anything|There is no information provided|reference passages do not mention|could not find an answer to this question|the model cannot answer this question)",
+ "NoHitsRegex": "(Sorry, I don't know|unable to assist you|i don't have enough context|i don't have enough information|do not contain any information|i could not find an exact answer|no information in the search results|don't see any information in the provided search results|search results do not contain|search results do not mention|search results do not provide|no information in the provided search results|not find any information|search results did not contain|unable to respond|There is no mention of|documents do not mention anything|There is no information provided|reference passages do not mention|could not find an answer to this question|the model cannot answer this question)",
"QAPromptTemplate": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know. Documents: {context} Instruction: Based on the above documents, provide a detailed answer for {query} Answer \\"Sorry, I don't know\\" if not present in the document. Solution:",
"QueryPromptTemplate": "
Human: Here is a chat history in tags:
{history}
Human: And here is a follow up question or statement from the human in tags:
{input}
Human: Rephrase the follow up question or statement as a standalone question or statement that makes sense without reading the chat history.
Assistant: Here is the rephrased follow up question or statement:",
},
"ai21.j2-ultra-v1": {
"MaxTokens": 8191,
"ModelID": "ai21.j2-ultra-v1",
- "NoHitsRegex": "(Sorry, I don't know|unable to assist you|i don't have enough context|i don't have enough information|do not contain any information|i could not find an exact answer|no information in the search results|don't see any information in the provided search results|search results do not contain|no information in the provided search results|not find any information|search results did not contain|unable to respond|There is no mention of|documents do not mention anything|There is no information provided|reference passages do not mention|could not find an answer to this question|the model cannot answer this question)",
+ "NoHitsRegex": "(Sorry, I don't know|unable to assist you|i don't have enough context|i don't have enough information|do not contain any information|i could not find an exact answer|no information in the search results|don't see any information in the provided search results|search results do not contain|search results do not mention|search results do not provide|no information in the provided search results|not find any information|search results did not contain|unable to respond|There is no mention of|documents do not mention anything|There is no information provided|reference passages do not mention|could not find an answer to this question|the model cannot answer this question)",
"QAPromptTemplate": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know. Documents: {context} Instruction: Based on the above documents, provide a detailed answer for {query} Answer \\"Sorry, I don't know\\" if not present in the document. Solution:",
"QueryPromptTemplate": "
Human: Here is a chat history in tags:
{history}
Human: And here is a follow up question or statement from the human in tags:
{input}
Human: Rephrase the follow up question or statement as a standalone question or statement that makes sense without reading the chat history.
Assistant: Here is the rephrased follow up question or statement:",
},
@@ -339,14 +339,14 @@ exports[`Verify master template is correct renders master template correctly 1`]
"amazon.titan-text-express-v1": {
"MaxTokens": 8000,
"ModelID": "amazon.titan-text-express-v1",
- "NoHitsRegex": "(Sorry, I don't know|unable to assist you|i don't have enough context|i don't have enough information|do not contain any information|i could not find an exact answer|no information in the search results|don't see any information in the provided search results|search results do not contain|no information in the provided search results|not find any information|search results did not contain|unable to respond|There is no mention of|documents do not mention anything|There is no information provided|reference passages do not mention|could not find an answer to this question|the model cannot answer this question)",
+ "NoHitsRegex": "(Sorry, I don't know|unable to assist you|i don't have enough context|i don't have enough information|do not contain any information|i could not find an exact answer|no information in the search results|don't see any information in the provided search results|search results do not contain|search results do not mention|search results do not provide|no information in the provided search results|not find any information|search results did not contain|unable to respond|There is no mention of|documents do not mention anything|There is no information provided|reference passages do not mention|could not find an answer to this question|the model cannot answer this question)",
"QAPromptTemplate": "
Human: You are a friendly AI assistant. Answer the question in tags only based on the provided reference passages. Here are reference passages in tags:
{context}
If the references contain the information needed to respond, then write a confident response in under 50 words, quoting the relevant references.
Otherwise, if you can make an informed guess based on the reference passages, then write a less confident response in under 50 words, stating your assumptions.
Finally, if the references do not have any relevant information, then respond saying \\"Sorry, I don't know\\".
{query}
Assistant: According to the reference passages, in under 50 words:",
"QueryPromptTemplate": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.
Chat History:
{history}
Follow up question: {input}
Standalone question:",
},
"amazon.titan-text-lite-v1": {
"MaxTokens": 4000,
"ModelID": "amazon.titan-text-lite-v1",
- "NoHitsRegex": "(Sorry, I don't know|unable to assist you|i don't have enough context|i don't have enough information|do not contain any information|i could not find an exact answer|no information in the search results|don't see any information in the provided search results|search results do not contain|no information in the provided search results|not find any information|search results did not contain|unable to respond|There is no mention of|documents do not mention anything|There is no information provided|reference passages do not mention|could not find an answer to this question|the model cannot answer this question)",
+ "NoHitsRegex": "(Sorry, I don't know|unable to assist you|i don't have enough context|i don't have enough information|do not contain any information|i could not find an exact answer|no information in the search results|don't see any information in the provided search results|search results do not contain|search results do not mention|search results do not provide|no information in the provided search results|not find any information|search results did not contain|unable to respond|There is no mention of|documents do not mention anything|There is no information provided|reference passages do not mention|could not find an answer to this question|the model cannot answer this question)",
"QAPromptTemplate": "
Human: You are a friendly AI assistant. Answer the question in tags only based on the provided reference passages. Here are reference passages in tags:
{context}
If the references contain the information needed to respond, then write a confident response in under 50 words, quoting the relevant references.
Otherwise, if you can make an informed guess based on the reference passages, then write a less confident response in under 50 words, stating your assumptions.
Finally, if the references do not have any relevant information, then respond saying \\"Sorry, I don't know\\".
{query}
Assistant: According to the reference passages, in under 50 words:",
"QueryPromptTemplate": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.
Chat History:
{history}
Follow up question: {input}
Standalone question:",
},
@@ -354,7 +354,7 @@ exports[`Verify master template is correct renders master template correctly 1`]
"KnowledgeBasePromptTemplate": "You are a question answering bot who gives helpful, detailed, and polite answers to the user's questions. In this session, the model has access to search results and a users question, your job is to answer the user's question using only information from the search results. Model Instructions: - You should provide concise answer to simple questions when the answer is directly contained in search results, but when comes to yes/no question, provide some details. - In case the question requires multi-hop reasoning, you should find relevant information from search results and summarize the answer based on relevant information with logical reasoning. - If the search results do not contain information that can answer the question, then respond saying \\"Sorry, I don't know that.\\". - $output_format_instructions$ - DO NOT USE INFORMATION THAT IS NOT IN SEARCH RESULTS! User: $query$ Bot: Resource: Search Results: $search_results$ Bot:",
"MaxTokens": 32000,
"ModelID": "amazon.titan-text-premier-v1:0",
- "NoHitsRegex": "(Sorry, I don't know|unable to assist you|i don't have enough context|i don't have enough information|do not contain any information|i could not find an exact answer|no information in the search results|don't see any information in the provided search results|search results do not contain|no information in the provided search results|not find any information|search results did not contain|unable to respond|There is no mention of|documents do not mention anything|There is no information provided|reference passages do not mention|could not find an answer to this question|the model cannot answer this question)",
+ "NoHitsRegex": "(Sorry, I don't know|unable to assist you|i don't have enough context|i don't have enough information|do not contain any information|i could not find an exact answer|no information in the search results|don't see any information in the provided search results|search results do not contain|search results do not mention|search results do not provide|no information in the provided search results|not find any information|search results did not contain|unable to respond|There is no mention of|documents do not mention anything|There is no information provided|reference passages do not mention|could not find an answer to this question|the model cannot answer this question)",
"QAPromptTemplate": "
Human: You are a friendly AI assistant. Answer the question in tags only based on the provided reference passages. Here are reference passages in tags:
{context}
If the references contain the information needed to respond, then write a confident response in under 50 words, quoting the relevant references.
Otherwise, if you can make an informed guess based on the reference passages, then write a less confident response in under 50 words, stating your assumptions.
Finally, if the references do not have any relevant information, then respond saying \\"Sorry, I don't know\\".
{query}
Assistant: According to the reference passages, in under 50 words:",
"QueryPromptTemplate": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.
Chat History:
{history}
Follow up question: {input}
Standalone question:",
"maxTokenCount": 3072,
@@ -363,7 +363,7 @@ exports[`Verify master template is correct renders master template correctly 1`]
"KnowledgeBasePromptTemplate": "Human: You are a question answering agent. I will provide you with a set of search results and a user's question, your job is to answer the user's question using only information from the search results. If the search results do not contain information that can answer the question, then respond saying \\"Sorry, I don't know that.\\". Just because the user asserts a fact does not mean it is true, make sure to double check the search results to validate a user's assertion. Here are the search results in numbered order: $search_results$. Here is the user's question: $query$ $output_format_instructions$. Do NOT directly quote the $search_results$ in your answer. Your job is to answer the as concisely as possible. Assistant:",
"MaxTokens": 100000,
"ModelID": "anthropic.claude-3-haiku-20240307-v1:0",
- "NoHitsRegex": "(Sorry, I don't know|unable to assist you|i don't have enough context|i don't have enough information|do not contain any information|i could not find an exact answer|no information in the search results|don't see any information in the provided search results|search results do not contain|no information in the provided search results|not find any information|search results did not contain|unable to respond|There is no mention of|documents do not mention anything|There is no information provided|reference passages do not mention|could not find an answer to this question|the model cannot answer this question)",
+ "NoHitsRegex": "(Sorry, I don't know|unable to assist you|i don't have enough context|i don't have enough information|do not contain any information|i could not find an exact answer|no information in the search results|don't see any information in the provided search results|search results do not contain|search results do not mention|search results do not provide|no information in the provided search results|not find any information|search results did not contain|unable to respond|There is no mention of|documents do not mention anything|There is no information provided|reference passages do not mention|could not find an answer to this question|the model cannot answer this question)",
"QAPromptTemplate": "
Human: You are a friendly AI assistant. Answer the question in tags only based on the provided reference passages. Here are reference passages in tags:
{context}
If the references contain the information needed to respond, then write a confident response in under 50 words, quoting the relevant references.
Otherwise, if you can make an informed guess based on the reference passages, then write a less confident response in under 50 words, stating your assumptions.
Finally, if the references do not have any relevant information, then respond saying \\"Sorry, I don't know\\".
{query}
Assistant: According to the reference passages, in under 50 words:",
"QueryPromptTemplate": "
Human: Here is a chat history in tags:
{history}
Human: And here is a follow up question or statement from the human in tags:
{input}
Human: Rephrase the follow up question or statement as a standalone question or statement that makes sense without reading the chat history.
Assistant: Here is the rephrased follow up question or statement:",
},
@@ -371,7 +371,15 @@ exports[`Verify master template is correct renders master template correctly 1`]
"KnowledgeBasePromptTemplate": "Human: You are a question answering agent. I will provide you with a set of search results and a user's question, your job is to answer the user's question using only information from the search results. If the search results do not contain information that can answer the question, then respond saying \\"Sorry, I don't know that.\\". Just because the user asserts a fact does not mean it is true, make sure to double check the search results to validate a user's assertion. Here are the search results in numbered order: $search_results$. Here is the user's question: $query$ $output_format_instructions$. Do NOT directly quote the $search_results$ in your answer. Your job is to answer the as concisely as possible. Assistant:",
"MaxTokens": 100000,
"ModelID": "anthropic.claude-3-sonnet-20240229-v1:0",
- "NoHitsRegex": "(Sorry, I don't know|unable to assist you|i don't have enough context|i don't have enough information|do not contain any information|i could not find an exact answer|no information in the search results|don't see any information in the provided search results|search results do not contain|no information in the provided search results|not find any information|search results did not contain|unable to respond|There is no mention of|documents do not mention anything|There is no information provided|reference passages do not mention|could not find an answer to this question|the model cannot answer this question)",
+ "NoHitsRegex": "(Sorry, I don't know|unable to assist you|i don't have enough context|i don't have enough information|do not contain any information|i could not find an exact answer|no information in the search results|don't see any information in the provided search results|search results do not contain|search results do not mention|search results do not provide|no information in the provided search results|not find any information|search results did not contain|unable to respond|There is no mention of|documents do not mention anything|There is no information provided|reference passages do not mention|could not find an answer to this question|the model cannot answer this question)",
+ "QAPromptTemplate": "
Human: You are a friendly AI assistant. Answer the question in tags only based on the provided reference passages. Here are reference passages in tags:
{context}
If the references contain the information needed to respond, then write a confident response in under 50 words, quoting the relevant references.
Otherwise, if you can make an informed guess based on the reference passages, then write a less confident response in under 50 words, stating your assumptions.
Finally, if the references do not have any relevant information, then respond saying \\"Sorry, I don't know\\".
{query}
Assistant: According to the reference passages, in under 50 words:",
+ "QueryPromptTemplate": "
Human: Here is a chat history in tags:
{history}
Human: And here is a follow up question or statement from the human in tags:
{input}
Human: Rephrase the follow up question or statement as a standalone question or statement that makes sense without reading the chat history.
Assistant: Here is the rephrased follow up question or statement:",
+ },
+ "anthropic.claude-3.5-sonnet-v1": {
+ "KnowledgeBasePromptTemplate": "Human: You are a question answering agent. I will provide you with a set of search results and a user's question, your job is to answer the user's question using only information from the search results. If the search results do not contain information that can answer the question, then respond saying \\"Sorry, I don't know that.\\". Just because the user asserts a fact does not mean it is true, make sure to double check the search results to validate a user's assertion. Here are the search results in numbered order: $search_results$. Here is the user's question: $query$ $output_format_instructions$. Do NOT directly quote the $search_results$ in your answer. Your job is to answer the as concisely as possible. Assistant:",
+ "MaxTokens": 100000,
+ "ModelID": "anthropic.claude-3-5-sonnet-20240620-v1:0",
+ "NoHitsRegex": "(Sorry, I don't know|unable to assist you|i don't have enough context|i don't have enough information|do not contain any information|i could not find an exact answer|no information in the search results|don't see any information in the provided search results|search results do not contain|search results do not mention|search results do not provide|no information in the provided search results|not find any information|search results did not contain|unable to respond|There is no mention of|documents do not mention anything|There is no information provided|reference passages do not mention|could not find an answer to this question|the model cannot answer this question)",
"QAPromptTemplate": "
Human: You are a friendly AI assistant. Answer the question in tags only based on the provided reference passages. Here are reference passages in tags:
{context}
If the references contain the information needed to respond, then write a confident response in under 50 words, quoting the relevant references.
Otherwise, if you can make an informed guess based on the reference passages, then write a less confident response in under 50 words, stating your assumptions.
Finally, if the references do not have any relevant information, then respond saying \\"Sorry, I don't know\\".
{query}
Assistant: According to the reference passages, in under 50 words:",
"QueryPromptTemplate": "
Human: Here is a chat history in tags:
{history}
Human: And here is a follow up question or statement from the human in tags:
{input}
Human: Rephrase the follow up question or statement as a standalone question or statement that makes sense without reading the chat history.
Assistant: Here is the rephrased follow up question or statement:",
},
@@ -379,7 +387,7 @@ exports[`Verify master template is correct renders master template correctly 1`]
"KnowledgeBasePromptTemplate": "Human: You are a question answering agent. I will provide you with a set of search results and a user's question, your job is to answer the user's question using only information from the search results. If the search results do not contain information that can answer the question, then respond saying \\"Sorry, I don't know that.\\". Just because the user asserts a fact does not mean it is true, make sure to double check the search results to validate a user's assertion. Here are the search results in numbered order: $search_results$. Here is the user's question: $query$ $output_format_instructions$. Do NOT directly quote the $search_results$ in your answer. Your job is to answer the as concisely as possible. Assistant:",
"MaxTokens": 100000,
"ModelID": "anthropic.claude-instant-v1",
- "NoHitsRegex": "(Sorry, I don't know|unable to assist you|i don't have enough context|i don't have enough information|do not contain any information|i could not find an exact answer|no information in the search results|don't see any information in the provided search results|search results do not contain|no information in the provided search results|not find any information|search results did not contain|unable to respond|There is no mention of|documents do not mention anything|There is no information provided|reference passages do not mention|could not find an answer to this question|the model cannot answer this question)",
+ "NoHitsRegex": "(Sorry, I don't know|unable to assist you|i don't have enough context|i don't have enough information|do not contain any information|i could not find an exact answer|no information in the search results|don't see any information in the provided search results|search results do not contain|search results do not mention|search results do not provide|no information in the provided search results|not find any information|search results did not contain|unable to respond|There is no mention of|documents do not mention anything|There is no information provided|reference passages do not mention|could not find an answer to this question|the model cannot answer this question)",
"QAPromptTemplate": "
Human: You are a friendly AI assistant. Answer the question in tags only based on the provided reference passages. Here are reference passages in tags:
{context}
If the references contain the information needed to respond, then write a confident response in under 50 words, quoting the relevant references.
Otherwise, if you can make an informed guess based on the reference passages, then write a less confident response in under 50 words, stating your assumptions.
Finally, if the references do not have any relevant information, then respond saying \\"Sorry, I don't know\\".
{query}
Assistant: According to the reference passages, in under 50 words:",
"QueryPromptTemplate": "
Human: Here is a chat history in tags:
{history}
Human: And here is a follow up question or statement from the human in tags:
{input}
Human: Rephrase the follow up question or statement as a standalone question or statement that makes sense without reading the chat history.
Assistant: Here is the rephrased follow up question or statement:",
},
@@ -387,14 +395,14 @@ exports[`Verify master template is correct renders master template correctly 1`]
"KnowledgeBasePromptTemplate": "Human: You are a question answering agent. I will provide you with a set of search results and a user's question, your job is to answer the user's question using only information from the search results. If the search results do not contain information that can answer the question, then respond saying \\"Sorry, I don't know that.\\". Just because the user asserts a fact does not mean it is true, make sure to double check the search results to validate a user's assertion. Here are the search results in numbered order: $search_results$. Here is the user's question: $query$ $output_format_instructions$. Do NOT directly quote the $search_results$ in your answer. Your job is to answer the as concisely as possible. Assistant:",
"MaxTokens": 100000,
"ModelID": "anthropic.claude-v2:1",
- "NoHitsRegex": "(Sorry, I don't know|unable to assist you|i don't have enough context|i don't have enough information|do not contain any information|i could not find an exact answer|no information in the search results|don't see any information in the provided search results|search results do not contain|no information in the provided search results|not find any information|search results did not contain|unable to respond|There is no mention of|documents do not mention anything|There is no information provided|reference passages do not mention|could not find an answer to this question|the model cannot answer this question)",
+ "NoHitsRegex": "(Sorry, I don't know|unable to assist you|i don't have enough context|i don't have enough information|do not contain any information|i could not find an exact answer|no information in the search results|don't see any information in the provided search results|search results do not contain|search results do not mention|search results do not provide|no information in the provided search results|not find any information|search results did not contain|unable to respond|There is no mention of|documents do not mention anything|There is no information provided|reference passages do not mention|could not find an answer to this question|the model cannot answer this question)",
"QAPromptTemplate": "
Human: You are a friendly AI assistant. Answer the question in tags only based on the provided reference passages. Here are reference passages in tags:
{context}
If the references contain the information needed to respond, then write a confident response in under 50 words, quoting the relevant references.
Otherwise, if you can make an informed guess based on the reference passages, then write a less confident response in under 50 words, stating your assumptions.
Finally, if the references do not have any relevant information, then respond saying \\"Sorry, I don't know\\".
{query}
Assistant: According to the reference passages, in under 50 words:",
"QueryPromptTemplate": "
Human: Here is a chat history in tags:
{history}
Human: And here is a follow up question or statement from the human in tags:
{input}
Human: Rephrase the follow up question or statement as a standalone question or statement that makes sense without reading the chat history.
Assistant: Here is the rephrased follow up question or statement:",
},
"cohere.command-text-v14": {
"MaxTokens": 4000,
"ModelID": "cohere.command-text-v14",
- "NoHitsRegex": "(Sorry, I don't know|unable to assist you|i don't have enough context|i don't have enough information|do not contain any information|i could not find an exact answer|no information in the search results|don't see any information in the provided search results|search results do not contain|no information in the provided search results|not find any information|search results did not contain|unable to respond|There is no mention of|documents do not mention anything|There is no information provided|reference passages do not mention|could not find an answer to this question|the model cannot answer this question)",
+ "NoHitsRegex": "(Sorry, I don't know|unable to assist you|i don't have enough context|i don't have enough information|do not contain any information|i could not find an exact answer|no information in the search results|don't see any information in the provided search results|search results do not contain|search results do not mention|search results do not provide|no information in the provided search results|not find any information|search results did not contain|unable to respond|There is no mention of|documents do not mention anything|There is no information provided|reference passages do not mention|could not find an answer to this question|the model cannot answer this question)",
"QAPromptTemplate": "
Human: You are a friendly AI assistant. Answer the question in tags only based on the provided reference passages. Here are reference passages in tags:
{context}
If the references contain the information needed to respond, then write a confident response in under 50 words, quoting the relevant references.
Otherwise, if you can make an informed guess based on the reference passages, then write a less confident response in under 50 words, stating your assumptions.
Finally, if the references do not have any relevant information, then respond saying \\"Sorry, I don't know\\".
{query}
Assistant: According to the reference passages, in under 50 words:",
"QueryPromptTemplate": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.
Chat History:
{history}
Follow up question: {input}
Standalone question:",
},
@@ -411,7 +419,7 @@ exports[`Verify master template is correct renders master template correctly 1`]
"meta.llama3-8b-instruct-v1": {
"MaxTokens": 8000,
"ModelID": "meta.llama3-8b-instruct-v1:0",
- "NoHitsRegex": "(Sorry, I don't know|unable to assist you|i don't have enough context|i don't have enough information|do not contain any information|i could not find an exact answer|no information in the search results|don't see any information in the provided search results|search results do not contain|no information in the provided search results|not find any information|search results did not contain|unable to respond|There is no mention of|documents do not mention anything|There is no information provided|reference passages do not mention|could not find an answer to this question|the model cannot answer this question)",
+ "NoHitsRegex": "(Sorry, I don't know|unable to assist you|i don't have enough context|i don't have enough information|do not contain any information|i could not find an exact answer|no information in the search results|don't see any information in the provided search results|search results do not contain|search results do not mention|search results do not provide|no information in the provided search results|not find any information|search results did not contain|unable to respond|There is no mention of|documents do not mention anything|There is no information provided|reference passages do not mention|could not find an answer to this question|the model cannot answer this question)",
"QAPromptTemplate": "
Human: You are a friendly AI assistant. Answer the question in tags only based on the provided reference passages. Here are reference passages in tags:
{context}
If the references contain the information needed to respond, then write a confident response in under 50 words, quoting the relevant references.
Otherwise, if you can make an informed guess based on the reference passages, then write a less confident response in under 50 words, stating your assumptions.
Finally, if the references do not have any relevant information, then respond saying \\"Sorry, I don't know\\".
{query}
Assistant: According to the reference passages, in under 50 words:",
"QueryPromptTemplate": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.
Chat History:
{history}
Follow up question: {input}
Standalone question:",
},
@@ -854,6 +862,7 @@ exports[`Verify master template is correct renders master template correctly 1`]
"anthropic.claude-instant-v1",
"anthropic.claude-v2.1",
"anthropic.claude-3-sonnet-v1",
+ "anthropic.claude-3.5-sonnet-v1",
"anthropic.claude-3-haiku-v1",
],
"Default": "anthropic.claude-instant-v1",
@@ -972,6 +981,7 @@ exports[`Verify master template is correct renders master template correctly 1`]
"anthropic.claude-instant-v1",
"anthropic.claude-v2.1",
"anthropic.claude-3-sonnet-v1",
+ "anthropic.claude-3.5-sonnet-v1",
"anthropic.claude-3-haiku-v1",
"cohere.command-text-v14",
"meta.llama3-8b-instruct-v1",
diff --git a/source/templates/master/index.js b/source/templates/master/index.js
index d5c6109b..c37f3c72 100644
--- a/source/templates/master/index.js
+++ b/source/templates/master/index.js
@@ -441,6 +441,7 @@ module.exports = {
'anthropic.claude-instant-v1',
'anthropic.claude-v2.1',
'anthropic.claude-3-sonnet-v1',
+ 'anthropic.claude-3.5-sonnet-v1',
'anthropic.claude-3-haiku-v1',
'cohere.command-text-v14',
'meta.llama3-8b-instruct-v1'
@@ -464,6 +465,7 @@ module.exports = {
'anthropic.claude-instant-v1',
'anthropic.claude-v2.1',
'anthropic.claude-3-sonnet-v1',
+ 'anthropic.claude-3.5-sonnet-v1',
'anthropic.claude-3-haiku-v1',
],
Default: 'anthropic.claude-instant-v1',
diff --git a/source/templates/master/mappings/bedrock-defaults.js b/source/templates/master/mappings/bedrock-defaults.js
index 9d0bb526..0e9f612d 100644
--- a/source/templates/master/mappings/bedrock-defaults.js
+++ b/source/templates/master/mappings/bedrock-defaults.js
@@ -15,7 +15,7 @@ const amazonQueryPromptTemplate = 'Given the following conversation and a follow
const amazonQAPromptTemplate = '
Human: You are a friendly AI assistant. Answer the question in tags only based on the provided reference passages. Here are reference passages in tags:
{context}
If the references contain the information needed to respond, then write a confident response in under 50 words, quoting the relevant references.
Otherwise, if you can make an informed guess based on the reference passages, then write a less confident response in under 50 words, stating your assumptions.
Finally, if the references do not have any relevant information, then respond saying \\"Sorry, I don\'t know\\".
{query}
Assistant: According to the reference passages, in under 50 words:';
const anthropicQueryPromptTemplate = '
Human: Here is a chat history in tags:
{history}
Human: And here is a follow up question or statement from the human in tags:
{input}
Human: Rephrase the follow up question or statement as a standalone question or statement that makes sense without reading the chat history.
Assistant: Here is the rephrased follow up question or statement:';
const ai21QAPromptTemplate = 'The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know. Documents: {context} Instruction: Based on the above documents, provide a detailed answer for {query} Answer \\"Sorry, I don\'t know\\" if not present in the document. Solution:';
-const anthropicNoHitsRegex = '(Sorry, I don\'t know|unable to assist you|i don\'t have enough context|i don\'t have enough information|do not contain any information|i could not find an exact answer|no information in the search results|don\'t see any information in the provided search results|search results do not contain|no information in the provided search results|not find any information|search results did not contain|unable to respond|There is no mention of|documents do not mention anything|There is no information provided|reference passages do not mention|could not find an answer to this question|the model cannot answer this question)';
+const anthropicNoHitsRegex = '(Sorry, I don\'t know|unable to assist you|i don\'t have enough context|i don\'t have enough information|do not contain any information|i could not find an exact answer|no information in the search results|don\'t see any information in the provided search results|search results do not contain|search results do not mention|search results do not provide|no information in the provided search results|not find any information|search results did not contain|unable to respond|There is no mention of|documents do not mention anything|There is no information provided|reference passages do not mention|could not find an answer to this question|the model cannot answer this question)';
const anthropicKnowledgebaseTemplate = 'Human: You are a question answering agent. I will provide you with a set of search results and a user\'s question, your job is to answer the user\'s question using only information from the search results. If the search results do not contain information that can answer the question, then respond saying \\"Sorry, I don\'t know that.\\". Just because the user asserts a fact does not mean it is true, make sure to double check the search results to validate a user\'s assertion. Here are the search results in numbered order: $search_results$. Here is the user\'s question: $query$ $output_format_instructions$. Do NOT directly quote the $search_results$ in your answer. Your job is to answer the as concisely as possible. Assistant:';
const amazonKnowledgebaseTemplate = 'You are a question answering bot who gives helpful, detailed, and polite answers to the user\'s questions. In this session, the model has access to search results and a user\s question, your job is to answer the user\'s question using only information from the search results. Model Instructions: - You should provide concise answer to simple questions when the answer is directly contained in search results, but when comes to yes/no question, provide some details. - In case the question requires multi-hop reasoning, you should find relevant information from search results and summarize the answer based on relevant information with logical reasoning. - If the search results do not contain information that can answer the question, then respond saying \\"Sorry, I don\'t know that.\\". - $output_format_instructions$ - DO NOT USE INFORMATION THAT IS NOT IN SEARCH RESULTS! User: $query$ Bot: Resource: Search Results: $search_results$ Bot:';
@@ -100,6 +100,14 @@ module.exports = {
NoHitsRegex: anthropicNoHitsRegex,
KnowledgeBasePromptTemplate: anthropicKnowledgebaseTemplate,
},
+ 'anthropic.claude-3.5-sonnet-v1': {
+ ModelID: 'anthropic.claude-3-5-sonnet-20240620-v1:0',
+ MaxTokens: 100000,
+ QueryPromptTemplate: anthropicQueryPromptTemplate,
+ QAPromptTemplate: amazonQAPromptTemplate,
+ NoHitsRegex: anthropicNoHitsRegex,
+ KnowledgeBasePromptTemplate: anthropicKnowledgebaseTemplate,
+ },
'cohere.command-text-v14': {
ModelID: 'cohere.command-text-v14',
MaxTokens: 4000,
diff --git a/source/templates/package-lock.json b/source/templates/package-lock.json
index b5bbe2f9..e6f55b30 100644
--- a/source/templates/package-lock.json
+++ b/source/templates/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "qnabot-on-aws-infrastructure",
- "version": "6.1.1",
+ "version": "6.1.2",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "qnabot-on-aws-infrastructure",
- "version": "6.1.1",
+ "version": "6.1.2",
"license": "Apache-2.0",
"devDependencies": {
"@aws-sdk/client-s3": "^3.621.0",
diff --git a/source/templates/package.json b/source/templates/package.json
index 33fc466f..64c77d04 100644
--- a/source/templates/package.json
+++ b/source/templates/package.json
@@ -1,6 +1,6 @@
{
"name": "qnabot-on-aws-infrastructure",
- "version": "6.1.1",
+ "version": "6.1.2",
"description": "QnABot infrastructure",
"scripts": {
"clean": "rm -rf node_modules",
diff --git a/source/website/__tests__/lib/store/user/actions.test.js b/source/website/__tests__/lib/store/user/actions.test.js
index d25cc42e..f3a419ce 100644
--- a/source/website/__tests__/lib/store/user/actions.test.js
+++ b/source/website/__tests__/lib/store/user/actions.test.js
@@ -53,6 +53,7 @@ describe('user actions test', () => {
search: '?code=200',
origin: 'test.origin',
pathname: '/test/path',
+ replace: jest.fn(),
},
localStorage: {
clear: jest.fn(),
@@ -430,6 +431,9 @@ describe('user actions test', () => {
},
user: {
name: 'some-user',
+ credentials: {
+ expiration: new Date(Date.now() - 1000),
+ },
}
},
state: {
@@ -447,12 +451,16 @@ describe('user actions test', () => {
})
const expectedLogoutUrl = `${mockedContext.rootState.info._links.CognitoEndpoint.href}/logout?response_type=code&client_id=${mockedContext.rootState.info.ClientIdDesigner}&redirect_uri=test.origin/test/path`
-
await actionsModule.logout(mockedContext);
expect(cognitoIdentityProviderClientMock).toHaveReceivedCommandTimes(AdminUserGlobalSignOutCommand, 1);
expect(window.sessionStorage.clear).toHaveBeenCalledTimes(1);
expect(window.localStorage.clear).toHaveBeenCalledTimes(1);
- expect(window.location.href).toEqual(expectedLogoutUrl);
+ expect(mockedContext.rootState.user.name).toEqual('some-user')
+ expect(mockedContext.state.credentials).toEqual(undefined)
+ expect(mockedContext.rootState.user.credentials).toEqual(undefined)
+ expect(window.location.replace).toHaveBeenCalledWith(
+ expect.stringContaining(expectedLogoutUrl)
+ );
});
test('can logout when error occurs in credentials provider', async () => {
@@ -473,6 +481,9 @@ describe('user actions test', () => {
},
user: {
name: 'some-user',
+ credentials: {
+ expiration: new Date(Date.now() - 1000),
+ },
}
},
};
@@ -487,7 +498,12 @@ describe('user actions test', () => {
expect(cognitoIdentityProviderClientMock).toHaveReceivedCommandTimes(AdminUserGlobalSignOutCommand, 0);
expect(window.sessionStorage.clear).toHaveBeenCalledTimes(1);
expect(window.localStorage.clear).toHaveBeenCalledTimes(1);
- expect(window.location.href).toEqual(expectedLogoutUrl);
+ expect(mockedContext.rootState.user.name).toEqual('some-user')
+ expect(mockedContext.state).toEqual(undefined)
+ expect(mockedContext.rootState.user.credentials).toEqual(undefined)
+ expect(window.location.replace).toHaveBeenCalledWith(
+ expect.stringContaining(expectedLogoutUrl)
+ );
});
test('can logout when error occurs during global signout', async () => {
@@ -515,6 +531,9 @@ describe('user actions test', () => {
},
user: {
name: 'some-user',
+ credentials: {
+ expiration: new Date(Date.now() - 1000),
+ },
}
},
state: {
@@ -532,7 +551,12 @@ describe('user actions test', () => {
expect(cognitoIdentityProviderClientMock).toHaveReceivedCommandTimes(AdminUserGlobalSignOutCommand, 1);
expect(window.sessionStorage.clear).toHaveBeenCalledTimes(1);
expect(window.localStorage.clear).toHaveBeenCalledTimes(1);
- expect(window.location.href).toEqual(expectedLogoutUrl);
+ expect(mockedContext.rootState.user.name).toEqual('some-user')
+ expect(mockedContext.state.credentials).toEqual(undefined)
+ expect(mockedContext.rootState.user.credentials).toEqual(undefined)
+ expect(window.location.replace).toHaveBeenCalledWith(
+ expect.stringContaining(expectedLogoutUrl)
+ );
});
test('login -- id_token exists', async () => {
diff --git a/source/website/js/lib/store/user/actions.js b/source/website/js/lib/store/user/actions.js
index 46c000d1..7fa28401 100644
--- a/source/website/js/lib/store/user/actions.js
+++ b/source/website/js/lib/store/user/actions.js
@@ -126,10 +126,22 @@ const logout = async (context) => {
console.log(`Error fetching credentials ${e.message.substring(0, 500)}`);
}
- const logoutUrl = `${cognitoEndpoint}/logout?response_type=code&client_id=${clientId}&redirect_uri=${redirectUrl}`;
- window.location.href = logoutUrl;
+ // clear context state credential
+ if (context?.state?.credentials) {
+ delete context.state.credentials;
+ }
+
+ if (context?.rootState?.user?.credentials) {
+ delete context.rootState.user.credentials;
+ }
+
+ // clear session and local storage
window.sessionStorage.clear();
window.localStorage.clear();
+
+ // redirect to logout url
+ const logoutUrl = `${cognitoEndpoint}/logout?response_type=code&client_id=${clientId}&redirect_uri=${redirectUrl}`;
+ window.location.replace(logoutUrl);
};
const getCredentials = async (context) => {